diff --git "a/6488.jsonl" "b/6488.jsonl" new file mode 100644--- /dev/null +++ "b/6488.jsonl" @@ -0,0 +1,647 @@ +{"seq_id":"504220740","text":"import numpy as np\nimport pandas as pd\n\ndef ratings_mat(titles, reviews_df):\n '''\n Takes a series of titles and sparse dataframe of user reviews\n and transfer data into a user-item dataframe\n INPUT - \n titles: SERIES\n reviews_df: DATAFRAME \n OUTPUT -\n ratings_matrix: DATAFRAME \n '''\n columns = ['user', 'special', 'rating']\n ratings_matrix = pd.DataFrame(columns=columns)\n \n for row in range(reviews_df.shape[0]):\n user_reviews = reviews_df.iloc[row][reviews_df.iloc[row] > 0]\n scores = user_reviews.values\n ids = user_reviews.index\n to_add = np.array([np.array([row]*len(scores)),ids, scores]).T\n ratings_matrix = pd.DataFrame(np.append(ratings_matrix.values, to_add, axis=0), \n columns=columns)\n return ratings_matrix","sub_path":"src/make_ratings_matrix.py","file_name":"make_ratings_matrix.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"616949349","text":"import sys\nfrom functools import reduce\n\ndef run_test(case_number, generator):\n r, c, w = [int(x) for x in next(generator).split()]\n\n hits_per_row = int(c / w)\n rest = c % w\n hits = hits_per_row * (r - 1)\n\n last_row = hits_per_row - 1\n\n open_fields = c - (last_row * w)\n # overlap = open_fields - (open_fields - w) * 2\n\n additional = min(w + 1, open_fields)\n\n result = hits + last_row + additional\n\n print('Case #%d: %d' % (case_number, result))\n\ndef main():\n generator = get_file()\n number_of_tests = int(next(generator))\n for test in range(1, number_of_tests + 1):\n run_test(test, generator)\n\ndef get_file():\n for line in sys.stdin:\n yield line\n \nif __name__ == '__main__':\n main()","sub_path":"solutions_5640146288377856_0/Python/Fettn/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"430859934","text":"# -*- coding: utf-8 -*-\n# created by Venom for Fenomscrapers (updated 6-19-2022)\n\"\"\"\n\tFenomscrapers Project\n\"\"\"\n\nimport re\nfrom urllib.parse import quote_plus, unquote_plus\nfrom cocoscrapers.modules import cleantitle\nfrom cocoscrapers.modules import client\nfrom cocoscrapers.modules import source_utils\n\n\nclass source:\n\tpriority = 5\n\tpack_capable = False\n\thasMovies = True\n\thasEpisodes = True\n\tdef __init__(self):\n\t\tself.language = ['en']\n\t\tself.base_link = \"https://nyaa.si\"\n\t\tself.search_link = '/?f=0&c=0_0&q=%s'\n\t\tself.min_seeders = 1\n\n\tdef sources(self, data, hostDict):\n\t\tsources = []\n\t\tif not data: return sources\n\t\tappend = sources.append\n\t\ttry:\n\t\t\ttitle = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']\n\t\t\ttitle = title.replace('&', 'and').replace('Special Victims Unit', 'SVU').replace('/', ' ').replace('$', 's')\n\t\t\taliases = data['aliases']\n\t\t\tyear = data['year']\n\t\t\thdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else year\n\t\t\thdlr2 = 'S%d - %d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else year\n\n\t\t\tquery = '%s %s' % (re.sub(r'[^A-Za-z0-9\\s\\.-]+', '', title), hdlr)\n\t\t\tquery2 = '%s %s' % (re.sub(r'[^A-Za-z0-9\\s\\.-]+', '', title), hdlr2)\n\t\t\turls = []\n\t\t\turl = self.search_link % quote_plus(query)\n\t\t\turl = '%s%s' % (self.base_link, 
url)\n\t\t\turls.append(url)\n\t\t\turl2 = self.search_link % quote_plus(query2)\n\t\t\turl2 = '%s%s' % (self.base_link, url2)\n\t\t\turls.append(url2)\n\t\t\t# log_utils.log('urls = %s' % urls)\n\t\t\tundesirables = source_utils.get_undesirables()\n\t\t\tcheck_foreign_audio = source_utils.check_foreign_audio()\n\t\texcept:\n\t\t\tsource_utils.scraper_error('NYYAA')\n\t\t\treturn sources\n\n\t\tfor url in urls:\n\t\t\ttry:\n\t\t\t\tresults = client.request(url, timeout=5)\n\t\t\t\tif not results or 'magnet:' not in results: return sources\n\t\t\t\tresults = re.sub(r'[\\n\\t]', '', results)\n\t\t\t\ttbody = client.parseDOM(results, 'tbody')\n\t\t\t\trows = client.parseDOM(tbody, 'tr')\n\n\t\t\t\tfor row in rows:\n\t\t\t\t\tlinks = zip(\n\t\t\t\t\t\t\t\t\tre.findall(r'href\\s*=\\s*[\"\\'](magnet:[^\"\\']+)[\"\\']', row, re.DOTALL | re.I),\n\t\t\t\t\t\t\t\t\tre.findall(r'((?:\\d+\\,\\d+\\.\\d+|\\d+\\.\\d+|\\d+\\,\\d+|\\d+)\\s*(?:GB|GiB|Gb|MB|MiB|Mb))', row, re.DOTALL),\n\t\t\t\t\t\t\t\t\t[re.findall(r'([0-9]+)', row, re.DOTALL)])\n\t\t\t\t\tfor link in links:\n\t\t\t\t\t\turl = unquote_plus(link[0]).replace('&', '&').split('&tr')[0].replace(' ', '.')\n\t\t\t\t\t\turl = source_utils.strip_non_ascii_and_unprintable(url)\n\t\t\t\t\t\thash = re.search(r'btih:(.*?)&', url, re.I).group(1)\n\t\t\t\t\t\tname = source_utils.clean_name(url.split('&dn=')[1])\n\n\t\t\t\t\t\tif hdlr not in name and hdlr2 not in name: continue\n\t\t\t\t\t\tif source_utils.remove_lang(name, check_foreign_audio): continue\n\t\t\t\t\t\t# if undesirables and source_utils.remove_undesirables(name_info, undesirables): continue\n\n\t\t\t\t\t\tif hdlr in name:\n\t\t\t\t\t\t\tt = name.split(hdlr)[0].replace(year, '').replace('(', '').replace(')', '').replace('&', 'and').replace('.US.', '.').replace('.us.', '.')\n\t\t\t\t\t\tif hdlr2 in name:\n\t\t\t\t\t\t\tt = name.split(hdlr2)[0].replace(year, '').replace('(', '').replace(')', '').replace('&', 'and').replace('.US.', '.').replace('.us.', '.')\n\t\t\t\t\t\t# if cleantitle.get(t) != cleantitle.get(title): continue # Anime title matching is a bitch!\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tseeders = int(link[2][0])\n\t\t\t\t\t\t\tif self.min_seeders > seeders: continue\n\t\t\t\t\t\texcept: seeders = 0\n\n\t\t\t\t\t\tquality, info = source_utils.get_release_quality(name, url)\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tsize = link[1]\n\t\t\t\t\t\t\tdsize, isize = source_utils._size(size)\n\t\t\t\t\t\t\tinfo.insert(0, isize)\n\t\t\t\t\t\texcept: dsize = 0\n\t\t\t\t\t\tinfo = ' | '.join(info)\n\n\t\t\t\t\t\tappend({'provider': 'nyaa', 'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,\n\t\t\t\t\t\t\t\t\t\t'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})\n\t\t\texcept:\n\t\t\t\tsource_utils.scraper_error('NYAA')\n\t\t\t\treturn sources\n\t\treturn sources","sub_path":"script.module.cocoscrapers/lib/cocoscrapers/sources_cocoscrapers/torrents/nyaa.py","file_name":"nyaa.py","file_ext":"py","file_size_in_byte":3907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"496810429","text":"# nafd_merge.py - merge path/row data into larger mosaic\n# Chris Toney, christoney@fs.fed.us\n\n# v2, 20151118\n#\t- add option for reproject source files or not\n\nfrom osgeo import gdal\nfrom osgeo.gdalconst import *\nfrom osgeo import osr\nimport os\nimport csv\nimport subprocess\nimport glob\n\n# for dist attrib maps:\n#in_files = 
\"/nobackup/nadf3/attribution/data/national/outputs/pXXXrXXX/w2pXXXrXXX_TSNational_TSallAgent_VCTDF_TSCalValTodd_pred_agent.img\"\n# for VCT maps 2010:\n#in_files = \"/nobackup/nadf3/share/FourthNationalRun/VCTstacks/pXXXrXXX/mmu_outputs/w2pXXXrXXX_2010*_distbMap_v2\"\n# for VCT maps 2010 Albers:\nin_files = \"/nobackup/nadf3/share/FourthNationalRun/VCTstacks_albers/pXXXrXXX/w2pXXXrXXX_2010*_distbMap_v2_albers\"\n\nin_nodata = 0\nreproject = False\nout_proj4 = \"+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +datum=NAD83 +units=m +no_defs\"\nref_file = \"/nobackup/nadf3/share/VCT_National_Mosaic_v5/annual_us_mosaic_combine_mmu2_2010\"\nout_dir = \"/nobackup/nadf3/attribution/data/national/outputs/mosaic_test/\"\nout_file = \"vct_2010_conus_test_albers.img\"\nout_fmt = \"HFA\"\nout_fill_value = 0\nkeep_pathrow_reprojects = True\npath_row_reproject_fmt = 'VRT'\n\n# get the path/rows and process each one\nf = open('pathrows_ul_to_lr.txt')\npathrows = f.read().splitlines()\nf.close()\n\nerrors = {}\n\nfor pathrow in pathrows:\n\tthis_in_file = in_files.replace(\"pXXXrXXX\", pathrow)\n\n\t# for dist attrib maps:\n\t#if (not(os.path.isfile(this_in_file))):\n\t#\tthis_in_file = this_in_file.replace(\"TSNational\", \"TSWest\")\n\t# for VCT maps:\t\n\ttry:\n\t\tthis_in_file = glob.glob(this_in_file)[0]\n\texcept:\n\t\tcontinue\n\n\t# temp: fix up these file names\n\tpath_row_out_file = os.path.join(out_dir, (pathrow + \".vrt\"))\n\n\tif reproject:\n\t\targlist = ['gdalwarp']\n\t\targlist.append('-t_srs')\n\t\targlist.append(out_proj4)\n\t\targlist.append('-tr')\n\t\targlist.append('30')\n\t\targlist.append('30')\n\t\targlist.append('-of')\n\t\targlist.append(path_row_reproject_fmt)\n\t\targlist.append(this_in_file)\n\t\targlist.append(path_row_out_file)\n\telse:\n\t\targlist = ['gdal_translate']\n\t\targlist.append('-of')\n\t\targlist.append('VRT')\n\t\targlist.append(this_in_file)\n\t\targlist.append(path_row_out_file)\n\n\tret = subprocess.call(arglist)\n\tif (ret != 0):\n\t\terrors[pathrow] = ret\n\t\tprint(\"gdalwarp for \" + pathrow + \" returned \" + str(ret))\n\n# write a log file\nf = open('merge_reproj_errors.txt', 'wb')\nwriter = csv.writer(f)\nwriter.writerow(['pathrow','ret_code'])\nfor pr, e in errors.iteritems():\n\twriter.writerow([pr]+[e])\nf.close()\n\n# create the output file\nprint(\"Creating the output file...\")\nds = gdal.Open(ref_file)\nxsize = ds.RasterXSize\nysize = ds.RasterYSize\ngt = ds.GetGeoTransform()\nif not gt is None:\n\txmin = gt[0]\n\tymax = gt[3]\n\txmax = xmin + (xsize * gt[1])\n\tymin = ymax + (ysize * gt[5])\nds = None\n\nout_driver = gdal.GetDriverByName(out_fmt)\nout_filename = os.path.join(out_dir, out_file)\nds_out = out_driver.Create(out_filename, xsize, ysize, 1, GDT_Byte)\nsrs = osr.SpatialReference()\nsrs.ImportFromProj4(out_proj4)\nds_out.SetProjection(srs.ExportToWkt())\nds_out.SetGeoTransform(gt)\nds_out.GetRasterBand(1).Fill(out_fill_value)\nds_out = None\n\nprint(\"gdal_merge...\")\n# merge in the pathrows\nfor pathrow in pathrows:\n\t# temp: fix up these file names as above\n\tpath_row_file = os.path.join(out_dir, (pathrow + \".vrt\"))\n\n\targlist = ['python']\n\targlist.append('gdal_merge.py')\n\targlist.append('-v')\n\targlist.append('-o')\n\targlist.append(out_filename)\n\targlist.append('-n')\n\targlist.append(str(in_nodata))\n\targlist.append(path_row_file)\n\tprint(arglist)\n\tret = 
subprocess.call(arglist)\n\tprint(str(ret))\n\nprint(out_filename)\nprint(\"Done.\")\n\n","sub_path":"nafd_merge.py","file_name":"nafd_merge.py","file_ext":"py","file_size_in_byte":3607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"376332897","text":"\nfrom django.utils import timezone\n\nfrom website.mails import mails\nfrom website.notifications import utils\nfrom website.reviews import signals as reviews_signals\n\n# Handle email notifications including: update comment, accept, and reject of submission.\n@reviews_signals.reviews_email.connect\ndef reviews_notification(self, creator, template, context, action):\n # Avoid AppRegistryNotReady error\n from website.notifications.emails import notify_global_event\n recipients = list(action.target.contributors)\n time_now = action.created if action is not None else timezone.now()\n node = action.target\n notify_global_event(\n event='global_reviews',\n sender_user=creator,\n node=node,\n timestamp=time_now,\n recipients=recipients,\n template=template,\n context=context\n )\n\n# Handle email notifications for a new submission.\n@reviews_signals.reviews_email_submit.connect\ndef reviews_submit_notification(self, recipients, context):\n # Avoid AppRegistryNotReady error\n from website.notifications.emails import get_user_subscriptions\n from website import settings\n\n event_type = utils.find_subscription_type('global_reviews')\n if context['reviewable'].provider._id == 'osf':\n context['logo'] = settings.OSF_PREPRINTS_LOGO\n else:\n context['logo'] = context['reviewable'].provider._id\n\n for recipient in recipients:\n user_subscriptions = get_user_subscriptions(recipient, event_type)\n context['no_future_emails'] = user_subscriptions['none']\n context['is_creator'] = recipient == context['reviewable'].creator\n context['provider_name'] = context['reviewable'].provider.name\n mails.send_mail(\n recipient.username,\n mails.REVIEWS_SUBMISSION_CONFIRMATION,\n mimetype='html',\n user=recipient,\n **context\n )\n\n\n# Handle email notifications to notify moderators of new submissions.\n@reviews_signals.reviews_email_submit_moderators_notifications.connect\ndef reviews_submit_notification_moderators(self, timestamp, context):\n # imports moved here to avoid AppRegistryNotReady error\n from osf.models import NotificationSubscription\n from website.profile.utils import get_profile_image_url\n from website.notifications import emails\n from website import settings\n\n # Get NotificationSubscription instance, which contains reference to all subscribers\n provider_subscription = NotificationSubscription.load('{}_new_pending_submissions'.format(context['reviewable'].provider._id))\n # Set message\n context['message'] = u'submitted \"{}\".'.format(context['reviewable'].title)\n # Set url for profile image of the submitter\n context['profile_image_url'] = get_profile_image_url(context['referrer'])\n # Set submission url\n context['reviews_submission_url'] = '{}reviews/preprints/{}/{}'.format(settings.DOMAIN, context['reviewable'].provider._id, context['reviewable']._id)\n # Store emails to be sent to subscribers instantly (at a 5 min interval)\n emails.store_emails(provider_subscription.email_transactional.all().values_list('guids___id', flat=True),\n 'email_transactional',\n 'new_pending_submissions',\n context['referrer'],\n context['reviewable'],\n timestamp,\n abstract_provider=context['reviewable'].provider,\n **context)\n\n # Store emails to be sent to subscribers daily\n 
emails.store_emails(provider_subscription.email_digest.all().values_list('guids___id', flat=True),\n 'email_digest',\n 'new_pending_submissions',\n context['referrer'],\n context['reviewable'],\n timestamp,\n abstract_provider=context['reviewable'].provider,\n **context)\n\n\n# Handle email notifications to notify moderators of new withdrawal requests\n@reviews_signals.reviews_email_withdrawal_requests.connect\ndef reviews_withdrawal_requests_notification(self, timestamp, context):\n # imports moved here to avoid AppRegistryNotReady error\n from osf.models import NotificationSubscription\n from website.profile.utils import get_profile_image_url\n from website.notifications import emails\n from website import settings\n\n # Get NotificationSubscription instance, which contains reference to all subscribers\n provider_subscription = NotificationSubscription.load(\n '{}_new_pending_submissions'.format(context['reviewable'].provider._id))\n preprint = context['reviewable']\n preprint_word = preprint.provider.preprint_word\n\n # Set message\n context['message'] = u'has requested withdrawal of the {} \"{}\".'.format(preprint_word, preprint.title)\n # Set url for profile image of the submitter\n context['profile_image_url'] = get_profile_image_url(context['requester'])\n # Set submission url\n context['reviews_submission_url'] = '{}reviews/preprints/{}/{}'.format(settings.DOMAIN,\n preprint.provider._id,\n preprint._id)\n # Store emails to be sent to subscribers instantly (at a 5 min interval)\n emails.store_emails(provider_subscription.email_transactional.all().values_list('guids___id', flat=True),\n 'email_transactional',\n 'new_pending_submissions',\n context['requester'],\n preprint,\n timestamp,\n abstract_provider=preprint.provider,\n **context)\n\n # Store emails to be sent to subscribers daily\n emails.store_emails(provider_subscription.email_digest.all().values_list('guids___id', flat=True),\n 'email_digest',\n 'new_pending_submissions',\n context['requester'],\n preprint,\n timestamp,\n abstract_provider=preprint.provider,\n **context)\n","sub_path":"website/reviews/listeners.py","file_name":"listeners.py","file_ext":"py","file_size_in_byte":6270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"599737585","text":"# connect4.py\n\n# A simple game of connect4 with a text interface\n# based on the wordSearch code written in class.\n\ndef playConnect4():\n rows = 6\n cols = 7\n board = makeBoard(rows, cols)\n player = \"X\"\n moveCount = 0\n printBoard(board)\n while (moveCount < rows*cols):\n moveCol = getMoveCol(board, player)\n moveRow = getMoveRow(board, moveCol)\n board[moveRow][moveCol] = player\n printBoard(board)\n if checkForWin(board, player):\n print(\"*** Player %s Wins!!! ***\" % player)\n return\n moveCount += 1\n player = \"O\" if (player == \"X\") else \"X\"\n print(\"*** Tie Game!!! 
***\")\n\ndef makeBoard(rows, cols):\n return [ ([\"-\"] * cols) for row in range(rows) ]\n\ndef printBoard(board):\n rows = len(board)\n cols = len(board[0])\n print()\n # first print the column headers\n print(\" \", end=\"\")\n for col in range(cols):\n print(str(col+1).center(3), \" \", end=\"\")\n print()\n # now print the board\n for row in range(rows):\n print(\" \", end=\"\")\n for col in range(cols):\n print(board[row][col].center(3), \" \", end=\"\")\n print()\n\ndef getMoveCol(board, player):\n cols = len(board[0])\n while True:\n response = input(\"Enter player %s's move (column number) --> \" %\n (player))\n try:\n moveCol = int(response)-1 # -1 since user sees cols starting at 1\n if ((moveCol < 0) or (moveCol >= cols)):\n print(\"Columns must be between 1 and %d. \" % (cols), end=\"\")\n elif (board[0][moveCol] != \"-\"):\n print(\"That column is full! \", end=\"\")\n else:\n return moveCol\n except:\n # they did not even enter an integer!\n print(\"Columns must be integer values! \", end=\"\")\n print(\"Please try again.\")\n\ndef getMoveRow(board, moveCol):\n # find first open row from bottom\n rows = len(board)\n for moveRow in range(rows-1, -1, -1):\n if (board[moveRow][moveCol] == \"-\"):\n return moveRow\n # should never get here!\n assert(False)\n\ndef checkForWin(board, player):\n winningWord = player * 4\n return (wordSearch(board, winningWord) != None) # that was easy!\n\n##############################################\n# taken from wordSearch.py\n##############################################\n\ndef wordSearch(board, word):\n (rows, cols) = (len(board), len(board[0]))\n for row in range(rows):\n for col in range(cols):\n result = wordSearchFromCell(board, word, row, col)\n if (result != None):\n return result\n return None\n\ndef wordSearchFromCell(board, word, startRow, startCol):\n for drow in [-1, 0, +1]:\n for dcol in [-1, 0, +1]:\n if ((drow != 0) or (dcol != 0)):\n result = wordSearchFromCellInDirection(board, word,\n startRow, startCol,\n drow, dcol)\n if (result != None):\n return result\n return None\n\ndef wordSearchFromCellInDirection(board, word, startRow, startCol, drow, dcol):\n (rows, cols) = (len(board), len(board[0]))\n dirNames = [ [\"up-left\" , \"up\", \"up-right\"],\n [\"left\" , \"\" , \"right\" ],\n [\"down-left\", \"down\", \"down-right\" ] ]\n for i in range(len(word)):\n row = startRow + i*drow\n col = startCol + i*dcol\n if ((row < 0) or (row >= rows) or\n (col < 0) or (col >= cols) or\n (board[row][col] != word[i])):\n return None\n return (word, (startRow, startCol), dirNames[drow+1][dcol+1])\n\nplayConnect4()\n","sub_path":"week_5/2d_Lists/connect4.py","file_name":"connect4.py","file_ext":"py","file_size_in_byte":3747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"517676503","text":"# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom pants.backend.python.targets.python_binary import PythonBinary\nfrom pants.base.exceptions import TargetDefinitionException\nfrom pants.base.payload import Payload\nfrom pants.base.payload_field import PrimitiveField\nfrom pants.build_graph.target import Target\n\n\nclass PythonAWSLambda(Target):\n \"\"\"A self-contained Python function suitable for uploading to AWS Lambda.\n\n :API: public\n \"\"\"\n\n def __init__(self,\n binary=None,\n handler=None,\n **kwargs):\n \"\"\"\n :param string binary: Target spec of the ``python_binary`` that contains the handler.\n 
:param string handler: Lambda handler entrypoint (module.dotted.name:handler_func).\n    \"\"\"\n    payload = Payload()\n    payload.add_fields({\n      'binary': PrimitiveField(binary),\n      'handler': PrimitiveField(handler),\n    })\n    super().__init__(payload=payload, **kwargs)\n\n  @classmethod\n  def alias(cls):\n    return 'python_awslambda'\n\n  @classmethod\n  def compute_dependency_specs(cls, kwargs=None, payload=None):\n    for spec in super().compute_dependency_specs(kwargs, payload):\n      yield spec\n    target_representation = kwargs or payload.as_dict()\n    binary = target_representation.get('binary')\n    if binary:\n      yield binary\n\n  @property\n  def binary(self):\n    \"\"\"Returns the binary that builds the pex for this lambda.\"\"\"\n    dependencies = self.dependencies\n    if len(dependencies) != 1:\n      raise TargetDefinitionException(self, f'An app must define exactly one binary '\n                                      f'dependency, have: {dependencies}')\n    binary = dependencies[0]\n    if not isinstance(binary, PythonBinary):\n      raise TargetDefinitionException(self, f'Expected binary dependency to be a python_binary '\n                                      f'target, found {binary}')\n    return binary\n\n  @property\n  def handler(self):\n    \"\"\"Return the handler function for the lambda.\"\"\"\n    return self.payload.handler\n","sub_path":"contrib/awslambda/python/src/python/pants/contrib/awslambda/python/targets/python_awslambda.py","file_name":"python_awslambda.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"454297910","text":"from IO import io\nfrom channels import INPUT, OUTPUT\nfrom time import sleep\n\nclass Elevator:\n\tdef __init__(self):\n\t\tself.moving = False\n\t\tself.direction = OUTPUT.MOTOR_DOWN\n\t\tself.NUM_FLOORS = INPUT.NUM_FLOORS\n\n\t\tfor light in OUTPUT.LIGHTS:\n\t\t\tif light != -1:\n\t\t\t\tio.setBit(light, 0)\n\n\tdef setSpeed(self, speed):\n\t\tif speed > 0:\n\t\t\tself.direction = OUTPUT.MOTOR_UP\n\t\telif speed < 0:\n\t\t\tself.direction = OUTPUT.MOTOR_DOWN\n\t\telse:\n\t\t\tself.stop()\n\n\t\tio.setBit(OUTPUT.MOTORDIR, self.direction)\n\t\tio.writeAnalog(OUTPUT.MOTOR, 2048+4*abs(speed))\n\t\tself.moving = True\n\n\tdef stop(self):\n\t\tif not self.moving:\n\t\t\treturn\n\t\tif self.direction is OUTPUT.MOTOR_DOWN:\n\t\t\tio.setBit(OUTPUT.MOTORDIR, OUTPUT.MOTOR_UP)\n\t\telse:\n\t\t\tio.setBit(OUTPUT.MOTORDIR, OUTPUT.MOTOR_DOWN)\n\t\tsleep(0.02)\n\t\tio.writeAnalog(OUTPUT.MOTOR, 2048)\n\n\tdef setButtonLamp(self, floor, buttonType, value):\n\t\tassert(floor >= 0), \"ERR_ floor < 0\"\n\t\tassert(floor < self.NUM_FLOORS), \"ERR_ floor > NUM_FLOORS\"\n\t\tassert(buttonType >= 0), \"ERR_ buttonType < 0\"\n\t\tassert(buttonType < self.NUM_FLOORS - 1), \"ERR_ buttonType > NUM_FLOORS\"\n\n\t\tio.setBit(INPUT.BUTTON_FLOORS[floor][buttonType], value)\n\n\tdef setMotorDirection(self, dir):\n\t\tassert(dir in MOTOR_DIRECTION), \"ERR: Invalid motor direction!\"\n\t\tio.writeAnalog(MOTOR, dir)\n\n\tdef setFloorIndicator(self, floor):\n\t\tassert(floor >= 0), \"ERR_ floor < 0\"\n\t\tassert(floor < self.NUM_FLOORS), \"ERR_ floor > NUM_FLOORS\"\n\n\t\tif floor & 0x02:\n\t\t\tio.setBit(OUTPUT.FLOOR_IND1, 1)\n\t\telse:\n\t\t\tio.setBit(OUTPUT.FLOOR_IND1, 0)\n\n\t\tif floor & 0x01:\n\t\t\tio.setBit(OUTPUT.FLOOR_IND2, 1)\n\t\telse:\n\t\t\tio.setBit(OUTPUT.FLOOR_IND2, 0)\n\n\n\tdef getButtonSignal(self, button, floor):\n\t\tassert(floor >= 0)\n\t\tassert(floor < self.NUM_FLOORS)\n\t\tif(io.readBit(INPUT.BUTTON_FLOORS[floor][button])):\n\t\t\treturn 1\n\t\t\n\t\telse:\n\t\t\treturn 0\n\n\tdef getFloorSensorSignal(self):\n\t\tfor index, sensor in enumerate(INPUT.SENSORS):\n\t\t\tif io.readBit(sensor):\n\t\t\t\treturn index\n\n\t\treturn -1\n \n\tdef setDoorLamp(self, value):\n\t\tassert(value >= 0), \"ERR: door lamp value < 0\"\n\t\tassert(value <= 1), \"ERR: door lamp value > 1\"\n\t\tio.setBit(OUTPUT.DOOROPEN, value)\n\n\tdef getStopSignal(self):\n\t\treturn io.readBit(INPUT.STOP)\n\n\tdef getObstructionSignal(self):\n\t\treturn io.readBit(INPUT.OBSTRUCTION)\n\n","sub_path":"Ex05-Elevator-driver/py_driver/elev.py","file_name":"elev.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"89103873","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2020 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom tensorflow.core.framework import node_def_pb2\nfrom tensorflow.core.framework import attr_value_pb2\n\nfrom ..graph_base import GraphRewriterBase\nfrom ..graph_util import GraphAnalyzer\n\n\nclass StripUnusedNodesOptimizer(GraphRewriterBase):\n    def __init__(self, model, input_node_names, output_node_names):\n        super().__init__(model)\n        self.input_node_names = input_node_names\n        self.output_node_names = output_node_names\n\n    def do_transformation(self):\n        cur_graph = GraphAnalyzer()\n\n        # according to https://github.com/onnx/tensorflow-onnx/issues/77\n        for node in self.model.node:\n            if node.op == 'RefSwitch':\n                node.op = 'Switch'\n                for index in range(len(node.input)):\n                    if 'moving_' in node.input[index]:\n                        node.input[index] = node.input[index] + '/read'\n            elif node.op == 'AssignSub':\n                node.op = 'Sub'\n                if 'use_locking' in node.attr:\n                    del node.attr['use_locking']\n            elif node.op == 'AssignAdd':\n                node.op = 'Add'\n                if 'use_locking' in node.attr:\n                    del node.attr['use_locking']\n            elif node.op == 'Assign':\n                node.op = 'Identity'\n                if 'use_locking' in node.attr:\n                    del node.attr['use_locking']\n                if 'validate_shape' in node.attr:\n                    del node.attr['validate_shape']\n                if len(node.input) == 2:\n                    # input0: ref: Should be from a Variable node. 
May be uninitialized.\n                    # input1: value: The value to be assigned to the variable.\n                    node.input[0] = node.input[1]\n                    del node.input[1]\n\n        cur_graph.graph = self.model\n\n        graph_info = cur_graph.parse_graph()\n\n        for name in self.input_node_names:\n            if ':' in name:\n                self.logger.debug(\"Name {} appears to refer to a Tensor, \"\n                                  \"not a Operation.\".format(name))\n                return False\n\n        type_attr = {\"Sub\": \"T\"}\n\n        not_found = {name for name in self.input_node_names}\n        for node_name, _ in graph_info.items():\n            if node_name in not_found:\n                not_found.remove(node_name)\n                node = graph_info[node_name].node\n                original_output = graph_info[node_name].outputs\n                placeholder_node = node_def_pb2.NodeDef()\n                placeholder_node.op = \"Placeholder\"\n                placeholder_node.name = node.name\n\n                if \"dtype\" in node.attr:\n                    placeholder_node.attr[\"dtype\"].CopyFrom(\n                        attr_value_pb2.AttrValue(type=node.attr[\"dtype\"].type))\n                elif node.op in type_attr.keys():\n                    placeholder_node.attr[\"dtype\"].CopyFrom(\n                        attr_value_pb2.AttrValue(type=node.attr[type_attr[node.op]].type))\n                else:\n                    raise KeyError(\"%s op's type attribute is not found,\"\n                                   \"you should add it to type_attr dict\" % node.op)\n                if \"_output_shapes\" in node.attr:\n                    placeholder_node.attr[\"_output_shapes\"].CopyFrom(node.attr[\"_output_shapes\"])\n                if \"shape\" in node.attr:\n                    placeholder_node.attr[\"shape\"].CopyFrom(node.attr[\"shape\"])\n\n                cur_graph.remove_node(node_name)\n\n                cur_graph.replace_const_node(placeholder_node, [node_name], original_output)\n\n        import tensorflow as tf\n        return tf.compat.v1.graph_util.extract_sub_graph(cur_graph.dump_graph(),\n                                                         self.output_node_names)\n","sub_path":"lpot/adaptor/tf_utils/graph_rewriter/generic/strip_unused_nodes.py","file_name":"strip_unused_nodes.py","file_ext":"py","file_size_in_byte":4418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"118426345","text":"from setuptools import find_packages, setup\n\nrequires = [\n    'Flask==1.0.3',\n]\n\nsetup(\n    name=\"josephtuazon.com\",\n    version='0.0.1',\n    description=\"Website of Joseph Niel Tuazon\",\n    author=\"Joseph Niel Tuazon\",\n    author_email=\"josephnieltuazon@gmail.com\",\n    url=\"https://github.com/josephniel/josephtuazon.com\",\n    packages=find_packages(),\n    include_package_data=True,\n    zip_safe=False,\n    install_requires=requires,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"147028304","text":"# -- encoding:utf-8 --\n\"\"\"\nCreate by ibf on 2018/6/22\n\"\"\"\n\nimport os\nimport time\nimport re\nfrom pyspark import SparkConf, SparkContext\nfrom pyspark.sql import SQLContext, Row\nfrom pyspark.ml.feature import PolynomialExpansion, VectorAssembler, PCA\nfrom pyspark.ml.regression import LinearRegression, GBTRegressor\nfrom pyspark.ml.evaluation import RegressionEvaluator\nfrom pyspark.ml import Pipeline\n\nif 'SPARK_HOME' not in os.environ:\n    os.environ['SPARK_HOME'] = 'D:\\syl\\dev\\spark-1.6.1-bin-2.5.0-cdh5.3.6'\n\nif __name__ == '__main__':\n    name = 'Linear Regression'\n    # name = 'GBDT'\n\n    # 1. Build the context\n    conf = SparkConf() \\\n        .setMaster('local[10]') \\\n        .setAppName('boston housing app')\n    sc = SparkContext(conf=conf)\n    sqlContext = SQLContext(sparkContext=sc)\n\n    # 2. Read the data into a DataFrame\n    boston_housing_rdd = sc.textFile('../datas/boston_housing.data')\n    row_boston_housing_rdd = boston_housing_rdd.map(lambda line: re.split('\\\\s+', line.strip())) \\\n        .filter(lambda arr: len(arr) == 14) \\\n        .map(lambda arr: Row(CRIM=float(arr[0]), ZN=float(arr[1]), \\\n                             INDUS=float(arr[2]), CHAS=float(arr[3]), NOX=float(arr[4]), \\\n                             RM=float(arr[5]), AGE=float(arr[6]), DIS=float(arr[7]), \\\n                             RAD=float(arr[8]), TAX=float(arr[9]), PTRATIO=float(arr[10]), \\\n                             B=float(arr[11]), LASTAT=float(arr[12]), MEDV=float(arr[13])))\n    boston_housing_df = sqlContext.createDataFrame(row_boston_housing_rdd)\n    print(\"Schema of the data: {}\".format(boston_housing_df.schema))\n    boston_housing_df.show(truncate=False)\n\n    # 3. Feature engineering\n    # a. Split the data into training and test sets\n    train_df, test_df = boston_housing_df.randomSplit(weights=[0.7, 0.3], seed=28)\n\n    # b. Assemble the features\n    input_all_feature_names = boston_housing_df.schema.names\n    input_all_feature_names.remove('MEDV')\n    vector = VectorAssembler(inputCols=input_all_feature_names, outputCol='f1')\n\n    # c. Polynomial expansion\n    poly = PolynomialExpansion(degree=3, inputCol='f1', outputCol='f2')\n\n    # d. Dimensionality reduction\n    pca = PCA(k=75, inputCol='f2', outputCol='features')\n\n    # 4. Model training & prediction\n    # a. Model training is an iterative process, so the data is best cached\n    train_df.cache()\n\n    # b. Model construction\n    if name == 'Linear Regression':\n        \"\"\"\n        featuresCol=\"features\", name of the column holding the feature vector\n        labelCol=\"label\", name of the column holding the label y\n        predictionCol=\"prediction\", name of the column for the model's predictions; the column must not exist before training\n        maxIter=100, number of iterations\n        regParam=0.0, weight of the penalty/regularization term; 0 means no regularization, formula: regParam * ((1-p)*L2 + p*L1)\n        elasticNetParam=0.0, the value of p in the elastic net; 0 means pure L2 regularization (Ridge), 1 means pure L1 regularization (Lasso)\n        tol=1e-6,\n        fitIntercept=True, whether to fit an intercept term; True means fit it\n        standardization=True, whether to standardize the data before training; default True\n        solver=\"auto\", the underlying method used to solve the model\n        weightCol=None, optional name of the column holding per-sample weights\n        \"\"\"\n        algo = LinearRegression(featuresCol='features', labelCol='MEDV', predictionCol='prediction', maxIter=100,\n                                regParam=0.1, elasticNetParam=0.0)\n    elif name == 'GBDT':\n        algo = GBTRegressor(featuresCol=\"features\", labelCol=\"MEDV\", predictionCol=\"prediction\", maxDepth=2,\n                            maxIter=100, subsamplingRate=0.8, stepSize=0.1)\n    else:\n        raise Exception(\"This code does not support the algorithm: {}\".format(name))\n    # Build the pipeline model\n    pipline_algo = Pipeline(stages=[vector, poly, pca, algo])\n\n    # c. Model training\n    algo_model = pipline_algo.fit(train_df)\n\n    # d. Predict on the data with the trained model (predictions can be output via the DataFrame or RDD APIs)\n    train_predict_result_df = algo_model.transform(train_df)\n    train_predict_result_df.show(truncate=False)\n    train_predict_result_df.select('MEDV', 'prediction').show(truncate=False)\n    test_predict_result_df = algo_model.transform(test_df)\n    test_predict_result_df.select('MEDV', 'prediction').show(truncate=False)\n\n    # 5. Model evaluation\n    # metricName: which evaluation metric to use, default rmse; options: mse|rmse|r2|mae\n    evaluator = RegressionEvaluator(predictionCol=\"prediction\", labelCol=\"MEDV\", metricName=\"r2\")\n    print(\"R2 of the {} algorithm on the training set: {}\".format(name, evaluator.evaluate(train_predict_result_df)))\n    print(\"R2 of the {} algorithm on the test set: {}\".format(name, evaluator.evaluate(test_predict_result_df)))\n\n    # Sleep for a while so the 4040 web UI can be inspected\n    time.sleep(60)\n","sub_path":"pyspark/ml/pipline_boston_housing_regression.py","file_name":"pipline_boston_housing_regression.py","file_ext":"py","file_size_in_byte":5026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"155291511","text":"# Did this code successfully run on Leetcode: Yes\r\n# Time Complexity: O(n)\r\n# Space Complexity:\r\n# Logic: use i to traverse through the list from 2nd element & j to point at a location where removal is required. if\r\n# i-1 == i then increment count else its a new element & make count = 1. keep on replacing jth element for count <= 2\r\n\r\nclass Solution:\r\n    def removeDuplicates(self, nums):\r\n        count = 1\r\n        j = 1\r\n        for i in range(1, len(nums)):\r\n            if nums[i-1] == nums[i]:\r\n                count += 1\r\n            else:\r\n                count = 1\r\n            if count <= 2:\r\n                nums[j] = nums[i]\r\n                j += 1\r\n        return j","sub_path":"Remove Duplicates from Sorted Array II.py","file_name":"Remove Duplicates from Sorted Array II.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"539576519","text":"import numpy as np\r\nimport random\r\n\r\ndef get_input():\r\n    row = list(map(int, input().split()))\r\n    n = len(row)\r\n    D = np.zeros((n, n))\r\n    D[0] += row\r\n    for i in range(1, n):\r\n        D[i] += list(map(int, input().split()))\r\n    return n, D\r\n\r\ndef get_next_node(current, nodes, posibilities):\r\n    posib_list = []\r\n    acc = 0\r\n\r\n    # Compute the cumulative boundaries of the transition probabilities for the nodes\r\n    # Example: posib_list = [0.1, 0.4, 0.8, 1.0]\r\n    # The first node covers 0 to 0.1, the second 0.1 to 0.4, and so on.\r\n    for node in nodes:\r\n        acc += posibilities[current, node]\r\n        posib_list.append(acc)\r\n    posib_list = np.array(posib_list)\r\n    posib_list = posib_list / acc\r\n\r\n    # Return a random node using the probabilities\r\n    # Example: with r = 0.76 and posib_list = [0.1, 0.4, 0.8, 1.0]\r\n    # the value of node nodes[2] is returned (since 0.4 < 0.76 <= 0.8)\r\n    r = random.random()\r\n    for i in range(len(nodes)):\r\n        if r <= posib_list[i]:\r\n            return nodes[i]\r\n\r\n\r\ndef get_desirabilities(n, D):\r\n    \"\"\"Returns an n x n matrix N.\r\n\r\n    Each element N[i, j] equals 1 / D[i, j]\r\n    \"\"\"\r\n    N = np.zeros((n, n))\r\n    for i in range(n):\r\n        for j in range(n):\r\n            if D[i, j] != 0:\r\n                N[i, j] = 1 / D[i, j]\r\n            else:\r\n                N[i, j] = 0\r\n    return N\r\n\r\ndef update_pheromone_changes(nodes, Q, L, changes_list):\r\n    \"\"\"Appends a matrix of pheromone changes to changes_list.\r\n\r\n    Args:\r\n        nodes: list of nodes in visiting order\r\n        Q: constant parameter\r\n        L: path length\r\n    \"\"\"\r\n    n = len(nodes)\r\n    changes = np.zeros((n, n))\r\n\r\n    for i in range(n-1):\r\n        changes[nodes[i], nodes[i+1]] = Q / L\r\n    changes[nodes[-1], nodes[0]] = Q / L\r\n    changes_list.append(changes)\r\n\r\ndef update_pheromone_values(pheromone, changes_list, P):\r\n    \"\"\"Updates the matrix of pheromone values.\r\n\r\n    Args:\r\n        pheromone: matrix of pheromone values\r\n        changes_list: list containing the pheromone-change matrices\r\n        P: pheromone evaporation rate\r\n    \r\n    Returns:\r\n        Matrix with the new pheromone values\r\n    \"\"\"\r\n    pheromone = pheromone * (1 - P)\r\n    for new_pher in changes_list:\r\n        pheromone += new_pher\r\n    return pheromone\r\n\r\ndef get_posibilities(PH, N, A, B):\r\n    return (PH**A)*(N**B)\r\n\r\ndef get_path_length(D, nodes):\r\n    n = len(nodes)\r\n    L = 0\r\n    for i in range(n-1):\r\n        L += D[nodes[i], nodes[i+1]]\r\n    L += D[nodes[n-1],nodes[0]]\r\n    return L\r\n\r\ndef run(n, D, Q, P, A, B):\r\n    \"\"\"Implements the ant colony search algorithm.\r\n\r\n    Args:\r\n        n: number of nodes\r\n        D: matrix of transition lengths between nodes\r\n        Q: constant affecting the amount of pheromone deposited\r\n        P: pheromone decay rate\r\n        A: coefficient of the pheromone's influence on a transition (\"herd instinct\")\r\n        B: coefficient of the transition length's influence (\"greediness\")\r\n\r\n    Returns:\r\n        Length of the shortest route\r\n    \"\"\"\r\n    N = get_desirabilities(n, D)\r\n    ants = 2 * n\r\n    L_min = n * np.max(D)\r\n    epochs = 5\r\n\r\n    for cur_node in range(n):\r\n        PH = np.ones((n, n))\r\n        for e in range(epochs):\r\n            pheromone_changes = []\r\n            for i in range(ants):\r\n                nodes = [cur_node]\r\n                free_nodes = [j for j in range(n)]\r\n                free_nodes.remove(cur_node)\r\n                posibilities = get_posibilities(PH, N, A, B)\r\n                for j in range(n-1):\r\n                    node = get_next_node(nodes[-1], free_nodes, posibilities)\r\n                    free_nodes.remove(node)\r\n                    nodes.append(node)\r\n                assert(free_nodes == [])\r\n                L = get_path_length(D, nodes)\r\n                if L < L_min:\r\n                    L_min = L\r\n                update_pheromone_changes(nodes, Q, L, pheromone_changes)\r\n            PH = update_pheromone_values(PH, pheromone_changes, P)\r\n    return L_min\r\n\r\nif __name__ == '__main__':\r\n    n, D = get_input()\r\n    N = get_desirabilities(n, D)\r\n    Q = 3\r\n    P = 0.8\r\n    A = 0.5\r\n    B = 1.2\r\n    L = run(n, D, Q, P, A, B)\r\n    print(int(L))\r\n","sub_path":"ant_colony.py","file_name":"ant_colony.py","file_ext":"py","file_size_in_byte":4876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"515283625","text":"from .cached_compile import cached_compile\nfrom .macro import macro\nimport weakref, ast\nimport types\nfrom collections import OrderedDict\nfrom . 
import Managed\n\n#TODO: ability to inherit from _registrars for subcontexts\n#TODO: when cells are registered as ctx.registrar.x.register, they must be a\n# part of ctx (ctx._part_of)\n_registrars = {}\n\nclass RegistrarAccessor:\n def __init__(self, context, registrars=_registrars):\n self._context = weakref.ref(context)\n self._registrars = registrars\n def __getattr__(self, attr):\n return RegistrarProxy(self, self._registrars[attr])\n\nclass RegistrarProxy:\n def __init__(self, accessor, registrar):\n self._accessor = accessor\n self._registrar = registrar\n\n def __getattr__(self, attr):\n return getattr(self._registrar, attr)\n\n def update(self, update_keys):\n context = self._accessor._context()\n if context is None:\n return\n return self._registrar.update(context, update_keys)\n\n def connect(self, key, target, namespace_name = None):\n context = self._accessor._context()\n if context is None:\n return\n return self._registrar.connect(context, key, target, namespace_name)\n\nclass BaseRegistrar:\n name = None\n _register_type = None\n def __init__(self):\n cls = self.__class__\n #monkeypatch until I properly learn to get the method binding working\n #or until never, because the macro needs self, too\n self.register = types.MethodType(\n macro(\n type=OrderedDict(\n _arg1=\"self\",\n _arg2=cls._register_type,\n ),\n with_context=False,\n registrar=self\n )\n (cls.register),\n self\n )\n\n def register(self,*args, **kwargs):\n raise NotImplementedError\n\n def get(self, key):\n raise NotImplementedError\n\n def _register(self, data, data_name):\n from .context import get_active_context\n ctx = get_active_context()\n if ctx is None:\n return\n manager = ctx._manager\n manager.add_registrar_item(self.name, self._register_type, data, data_name)\n\n def _unregister(self, data, data_name):\n from .context import get_active_context\n ctx = get_active_context()\n if ctx is None:\n return\n manager = ctx._manager\n try: ###\n manager.remove_registrar_item(self.name, self._register_type, data, data_name)\n except Exception:\n pass\n\n\n def update(self, context, update_keys):\n manager = context._manager\n for key in update_keys:\n manager.update_registrar_key(self, key)\n\n def connect(self, context, key, target, namespace_name):\n from .worker import Worker\n from .context import Context, get_active_context\n from .macro import _macro_registrar\n manager = context._manager\n if isinstance(target, Worker):\n if namespace_name is None:\n namespace_name = key\n manager.add_registrar_listener(self, key, target, namespace_name)\n target.receive_registrar_update(self.name, key, namespace_name)\n elif isinstance(target, Context):\n assert namespace_name is None\n assert target is get_active_context(), (target, get_active_context)\n _macro_registrar.append((self, manager, key))\n else:\n raise TypeError(target)\n\n def get(self, key):\n raise NotImplementedError\n\nclass RegistrarObject(Managed):\n registrar = None\n registered = []\n\n def __init__(self, registrar, registered, data, data_name):\n from .macro import get_macro_mode\n from .context import get_active_context\n if get_macro_mode():\n ctx = get_active_context()\n ctx._add_new_registrar_object(self)\n assert isinstance(registrar, BaseRegistrar)\n self.registrar = registrar\n self.registered = registered\n self.data = data\n self.data_name = data_name\n super().__init__()\n\n def unregister(self):\n raise NotImplementedError\n\n def re_register(self, value):\n self.data = value\n return self\n\n def destroy(self):\n if 
self._destroyed:\n return\n self.unregister()\n super().destroy()\n\nclass SilkRegistrarObject(RegistrarObject):\n\n def unregister(self):\n from seamless import silk\n silk.unregister(self.registered)\n self.registrar._unregister(self.data, self.data_name)\n\n def re_register(self, silkcode):\n context = self.context\n if context is None:\n return self\n self.unregister()\n from seamless import silk\n registered_types = silk.register(silkcode)\n updated_keys = [k for k in registered_types]\n updated_keys += [k for k in self.registered if k not in updated_keys]\n updated_keys2 = []\n updated_keys2 += updated_keys\n for ar in 1,2,3:\n for k in updated_keys:\n updated_keys2.append(k + ar * \"Array\")\n #TODO: figure out dependent types and add them\n self.registered = registered_types\n self.registrar.update(context, updated_keys2)\n self.registrar._register(self.data,self.data_name)\n super().re_register(silkcode)\n return self\n\nclass SilkRegistrar(BaseRegistrar):\n #TODO: setting up private Silk namespaces for subcontexts\n _register_type = (\"text\", \"code\", \"silk\")\n _registrar_object_class = SilkRegistrarObject\n\n #@macro(type=(\"text\", \"code\", \"silk\"), with_context=False,_registrar=True)\n def register(self,silkcode, name=None):\n self._register(silkcode,name)\n from seamless import silk\n registered_types = silk.register(silkcode)\n return self._registrar_object_class(self, registered_types, silkcode, name)\n\n def get(self, key):\n from seamless.silk import Silk\n try:\n return getattr(Silk, key)\n except AttributeError:\n raise KeyError(key)\n\nclass EvalRegistrarObject(RegistrarObject):\n\n def unregister(self):\n namespace = self.registrar._namespace\n for t in self.registered:\n if t in namespace:\n del namespace[t]\n self.registrar._unregister(self.data, self.data_name)\n\n def re_register(self, pythoncode):\n context = self.context\n if context is None:\n return self\n self.unregister()\n namespace = self.registrar._namespace\n variables_old = list(namespace.keys())\n title = self.data_name\n if title is None:\n title = \"\"\n code = cached_compile(pythoncode, title, \"exec\")\n exec(code, namespace)\n registered_types = [v for v in namespace if v not in variables_old]\n updated_keys = [k for k in registered_types]\n updated_keys += [k for k in self.registered if k not in updated_keys and not k.startswith(\"__\")]\n self.data = pythoncode\n self.registered = registered_types\n self.registrar.update(context, updated_keys)\n super().re_register(pythoncode)\n return self\n\nclass EvalRegistrar(BaseRegistrar):\n _register_type = (\"text\", \"code\", \"python\")\n _registrar_object_class = EvalRegistrarObject\n\n def __init__(self, namespace):\n self._namespace = namespace\n BaseRegistrar.__init__(self)\n\n #@macro(type=(\"text\", \"code\", \"python\"), with_context=False,_registrar=True)\n def register(self, pythoncode, name=None):\n self._register(pythoncode, name)\n variables_old = list(self._namespace.keys())\n title = name\n if title is None:\n title = \"\"\n code = cached_compile(pythoncode, title, \"exec\")\n exec(code, self._namespace)\n registered_types = [v for v in self._namespace if v not in variables_old and not v.startswith(\"__\")]\n return self._registrar_object_class(self, registered_types, pythoncode, name)\n\n def get(self, key):\n return self._namespace[key]\n\nclass GLShaderRegistrarObject(RegistrarObject):\n\n def __init__(self, registrar, registered, data, data_name):\n super().__init__(registrar, registered, data, data_name)\n self._bound = 
False\n        self._shader_id = None\n        self._parse(data)\n\n    @property\n    def shader_id(self):\n        return self._shader_id\n\n    def unregister(self):\n        self.destroy()\n        namespace = self.registrar._namespace\n        t = self.data_name\n        if t in namespace:\n            del namespace[t]\n\n        self.registrar._unregister(self.data, t)\n\n    def re_register(self, gl_shader):\n        self.destroy()\n        self._parse(gl_shader)\n        super().re_register(gl_shader)\n        return self\n\n    def _parse(self, gl_shader):\n        #TODO: STUB!\n        self.gl_shader = gl_shader\n\n    def bind(self):\n        from .. import opengl\n        if self._bound:\n            return\n\n        self._bound = True\n\n    def destroy(self):\n        from .. import opengl\n        if self._destroyed:\n            return\n        if self._bound and opengl():\n            pass #TODO: clean up shaders\n        super().destroy()\n\nclass GLShaderRegistrar(BaseRegistrar):\n    _register_type = \"json\"\n    _registrar_object_class = GLShaderRegistrarObject\n    def register(self, gl_shader):\n        name = gl_shader[\"name\"]\n        shader_obj = self._registrar_object_class(self, [name], gl_shader, name)\n        self._namespace[name] = shader_obj\n        return shader_obj\n\ndef add_registrar(name, registrar):\n    assert isinstance(registrar, BaseRegistrar)\n    assert name not in _registrars, name\n    registrar.name = name\n    _registrars[name] = registrar\n\nadd_registrar(\"silk\", SilkRegistrar())\nadd_registrar(\"python\", EvalRegistrar({}))\n","sub_path":"seamless/core/registrar.py","file_name":"registrar.py","file_ext":"py","file_size_in_byte":9846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"33865162","text":"def start(context, log, args):\n    \"\"\"\n    Watch output of bash cmd\n    \"\"\"\n    node_name = args['node_name']\n    cmd = args['cmd']\n    name = args.get('name', '')\n\n    server = context.particular_node(node_name)\n    res = server.run(cmd, warn_only=True)\n    log.info('node={0}, monitor={1}, result={2}'.format(node_name, name, ''.join(res)))\n\n","sub_path":"lab/monitors/cmd.py","file_name":"cmd.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"355408358","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2018 zack \n#\n# Distributed under terms of the MIT license.\n\n\"\"\"\n\n\"\"\"\nfrom subprocess import Popen, PIPE\nfrom multiprocessing import Process\nimport time\nget_ini = \"-DCPSConfigFIle rtps.ini\"\npub_cmd = \"./publisher \" + get_ini\nsub_cmd = \"./subscriber \" + get_ini\nset_time_pub = [20,50,100]\nset_time_sub = [1]\npakage_size = [50,100,150,200,250]\n\ndef tpub():\n    pub = Popen(pub_cmd.split(\" \"), stdin=PIPE,stdout=PIPE)\n    get_return = \"\"\n    while(1):\n        get_return = (pub.stdout.readline())\n        print(\"pub \" + str(get_return))\n        if(get_return == \"topic name? \\n\"):\n            pub.stdin.write(\"A\\n\")\n        elif(get_return == \"delay us\\n\"):\n            delay_time = str(set_time_pub[0]* 1000) + \"\\n\"\n            pub.stdin.write(delay_time)\n        elif(get_return == \"send data\"):\n            pub.stdin.write(\"A\\n\")\n        elif(get_return == \"end\\n\"):\n            break\n        else:\n            print(get_return)\n\ndef tsub():\n    sub = Popen(sub_cmd.split(\" \"), stdin=PIPE,stdout=PIPE)\n    get_return = \"\"\n    while(1):\n        get_return = (sub.stdout.readline())\n        print(\"sub \" + str(get_return))\n        if(get_return == \"topic name? 
\\n\"):\n sub.stdin.write(\"A\\n\")\n elif(get_return == \"delay us\\n\"):\n delay_time = str(set_time_sub[0]* 1000) + \"\\n\"\n sub.stdin.write(delay_time)\n elif(get_return == \"end\\n\"):\n break\n elif(get_return ==\"file name\\n\"):\n sub.stdout.write(\"run_test\\n\")\n break\n else:\n print(get_return)\n\n\np = Process(target=tpub)\ns = Process(target=tsub)\np.start()\ns.start()\np.join()\ns.join()\n","sub_path":"input_cmd/run_test.py","file_name":"run_test.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"340737492","text":"import os\nimport numpy as np\n\nrate = 16000\n\ndef getBatches(x_train, batch_size):\n batches = 0\n\n for u in x_train:\n batches += batchGet(u)\n\n print(str(batches) + \" batches.\")\n print(\"BATCHES\", batches)\n\n return batches\n\n\ndef batchGet(path):\n\n audio = os.path.getsize(path)\n\n sampletime = 3\n samplesize = sampletime * rate\n\n audio_padding_mins = 60 * 5\n\n audio = audio - (2 * (audio_padding_mins * rate))\n\n pathBatches = audio // samplesize\n return pathBatches\n","sub_path":"__train/batchnumber.py","file_name":"batchnumber.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"432800506","text":"#import pandas as pd\nfrom flask import Flask\nimport redis\n\n\nr = redis.Redis(host='redis-server',port = 6379)\nr.set('visits',0)\n\napp = Flask(__name__)\napp.config[\"DEBUG\"] = True\n \n@app.route('/',methods=['GET'])\ndef check_for_status():\n return 'app is up and running'\n\n@app.route('/check/', methods=['GET'])\ndef check_flask():\n total_visits=fetch_and_update_redis_table()\n return f\"number of visits = {total_visits}\"\n\ndef fetch_and_update_redis_table():\n total_visits=r.get('visits')\n r.set('visits',int(total_visits)+1)\n return total_visits\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0')","sub_path":"helloworld.py","file_name":"helloworld.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"300760629","text":"from OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GLUT import *\nimport math\n\nwidth = 600\nheight = 600\n\n\nclass Point3D:\n def __init__(self, x, y, z):\n self.x = x\n self.y = y\n self.z = z\n\n \ndef draw(R,a):\n P = Point3D(0,0,0)\n Delta_U = 0.1\n Delta_V = 0.1\n Pi_2 = math.pi/2\n v = -Pi_2\n \n glBegin(GL_LINE_LOOP)\n while v < Pi_2:\n u = 0\n while u < 2*math.pi:\n P.x = (R+a*math.cos(v))*math.cos(u)\n P.y = (R+a*math.cos(v))*math.sin(u)\n P.z = a*math.sin(v)\n glVertex3f(P.x, P.y, P.z)\n u += Delta_U\n v += Delta_V\n glEnd()\n glFlush()\n\n\ndef display():\n glClear(GL_COLOR_BUFFER_BIT)\n glColor3f(0.5, 1, 1)\n draw(80,60)\n\n\nif __name__ == \"__main__\":\n glutInit()\n glutInitWindowSize(width, height)\n glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)\n glutCreateWindow(\"toroid\")\n glOrtho(-width/2, height/2, -height/2, width/2, -height/2, width/2)\n glMatrixMode(GL_PROJECTION)\n glutDisplayFunc(display)\n glMatrixMode(GL_MODELVIEW)\n gluLookAt(0, 40, 30, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0)\n glutMainLoop()\n","sub_path":"lab5_le_van_tan_19it049/toroid.py","file_name":"toroid.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"505326150","text":"from . 
import *\n\nclass Skybox(Mesh):\n def __init__(self, cube_images=None, size=1000, **kwargs):\n if cube_images is None:\n cube_images = ['images/%s.png' % side\n for side in ('px', 'nx', 'py', 'ny', 'pz', 'nz')]\n geometry = BoxGeometry(width=size, height=size, depth=size)\n shader = deepcopy(shaderlib['cube'])\n shader['uniforms']['tCube']['value'] = cube_images\n material = ShaderMaterial(side=BackSide, **shader)\n Mesh.__init__(self, geometry=geometry, material=material, **kwargs)\n def json(self):\n d = Mesh.json(self)\n d['type'] = 'Mesh'\n return d\n","sub_path":"three/skybox.py","file_name":"skybox.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"173867247","text":"from .snapshot_file import from_canvas_file\nfrom canvasapi.exceptions import ResourceDoesNotExist\n\n\nclass CanvasFileSnapshot(object):\n def __init__(self, course):\n self.course = course\n self.snapshot = {}\n\n def take_snapshot(self):\n course = self.course\n\n if 'files' not in [tab.id for tab in course.get_tabs()]:\n raise ResourceDoesNotExist(\"File tab is not supported.\")\n\n folders = {\n folder.id: folder.full_name\n for folder in course.get_folders()\n }\n\n for file in course.get_files():\n folder = folders[file.folder_id] + \"/\"\n if folder.startswith(\"course files/\"):\n folder = folder[len(\"course files/\"):]\n snapshot_file = from_canvas_file(file)\n filename = f'{folder}{snapshot_file.name}'\n if filename in self.snapshot:\n print(colored(\n f' Duplicated file found: {filename}, please download it using web browser.', 'yellow'))\n continue\n self.snapshot[filename] = snapshot_file\n\n return self.snapshot\n\n def get_snapshot(self):\n return self.snapshot\n","sub_path":"canvas_grab/snapshot/canvas_file_snapshot.py","file_name":"canvas_file_snapshot.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"459734055","text":"import cv2\nimport numpy as np\ncount = 0\npos = [(0,0),(0,0)]\ndef init_image():\n image = np.zeros((500,500,3), np.uint8)\n image[:,:] = [200, 200, 200]\n return image\ndef mouse_event(event, x, y, flags, param):\n print(\"x=\"+str(x)+\",y=\"+str(y))\n global count,img\n if event == cv2.EVENT_LBUTTONUP:\n if count == 0:\n count += 1\n pos[0] = (x,y)\n img = init_image()\n else:\n count = 0\n pos[1] = (x,y)\n cv2.rectangle(img, pos[0], pos[1], (0,0,255), 1)\n if event == cv2.EVENT_MOUSEMOVE:\n if count == 1:\n img = init_image()\n cv2.rectangle(img, pos[0], (x,y), (0,0,255), 1)\n# 画像を作る\nimg = init_image()\ncv2.namedWindow('image')\ncv2.setMouseCallback(\"image\", mouse_event)\n\nwhile True:\n cv2.imshow(\"image\",img)\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n break\ncv2.destroyAllWindows()\n","sub_path":"gui2.py","file_name":"gui2.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"412015745","text":"# -*- coding: utf-8 -*-\nimport os\nimport time\nfrom multiprocessing.dummy import Pool\nimport requests\nfrom queue import Queue\nfrom lxml import etree\n\n\nclass DouyinSpider(object):\n def __init__(self):\n self.temp_url = 'http://douyin.bm8.com.cn/'\n self.headers = {\n \"User-Agent\": \"Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1\"\n }\n self.queue = Queue()\n self.pool = Pool(5)\n self.is_running = 
True\n self.total_requests_num = 0\n self.total_response_num = 0\n\n @staticmethod\n def file_path():\n base_dir = os.path.dirname(os.path.abspath(__file__))\n _path = os.path.join(base_dir, \"music\")\n if not os.path.exists(_path):\n os.mkdir(_path)\n return _path\n\n def get_url_list(self):\n # for i in range(1, 10):\n # self.queue.put(self.temp_url.format(i))\n self.queue.put(self.temp_url)\n print(\"get_url_list\")\n self.total_requests_num += 1\n\n def parse_url(self, url):\n print(\"parse_url\")\n return requests.get(url, headers=self.headers).content.decode()\n\n def get_content_list(self, html_str):\n html = etree.HTML(html_str)\n li_list = html.xpath(\"//div[@class='pull-left']/ul/li\")\n music_list = []\n print(\"get_content_list\")\n for li in li_list:\n item = {}\n item[\"title\"] = li.xpath(\"./a/span/text()\")[0]\n print(item['title'])\n detail_url = li.xpath(\"./a/@onclick\")[0][6:-1].split(\",\")[-1]\n resp = requests.get(eval(detail_url), headers=self.headers)\n print(resp.status_code)\n html = etree.HTML(resp.content.decode())\n try:\n video_src = html.xpath(\"//video/@src\")[0]\n if video_src is None:\n continue\n print(video_src)\n except Exception:\n continue\n resp = requests.get(video_src, headers=self.headers)\n item['content'] = resp.content\n music_list.append(item)\n print(\"ok_list\")\n return music_list\n\n def download_music(self, music_list):\n for item in music_list:\n _full_name = os.path.join(self.file_path(), item['title'])\n with open(_full_name + '.mp4', \"wb\") as f:\n f.write(item['content'])\n print(\"ok\")\n\n def exetute_requests_item_save(self):\n # 1.url列表\n url = self.queue.get()\n # 2.发送请求,获取响应\n html_str = self.parse_url(url)\n # 3.处理数据\n # 3.1首页数据,取a链接\n # 3.2跳转页面,发送请求,获取数据\n music_list = self.get_content_list(html_str)\n # 4.保存\n self.download_music(music_list)\n self.total_response_num += 1\n\n def _callback(self, temp):\n if self.is_running:\n self.pool.apply_async(self.exetute_requests_item_save, callback=self._callback)\n\n def run(self):\n self.get_url_list()\n\n for i in range(2): # 控制并发\n self.pool.apply_async(self.exetute_requests_item_save, callback=self._callback)\n\n while True: # 防止主线程结束\n time.sleep(0.0001) # 避免cpu空转,浪费资源\n if self.total_response_num >= self.total_requests_num:\n self.is_running = False\n break\n\n self.pool.close() # 关闭线程池,防止新的线程开启\n # self.pool.join() #等待所有的子线程结束\n\n\nif __name__ == '__main__':\n douyin = DouyinSpider()\n douyin.run()\n","sub_path":"douyinspider_thread.py","file_name":"douyinspider_thread.py","file_ext":"py","file_size_in_byte":3699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"350046802","text":"\"\"\"\ntesting of fcntl advisory lock\nhttps://www.programcreek.com/python/example/51486/fcntl.F_GETLK\nhttps://serverfault.com/questions/531813/how-to-determine-posix-advisory-file-locks-are-working-in-simfs-in-the-vm-im-us\nhttps://www.gnu.org/software/libc/manual/html_node/File-Locks.html\n\"\"\"\nimport os\nimport sys\nimport time\nimport fcntl\nimport struct\n\n\nfd = open('/etc/mtab', 'r')\nppid = os.getpid()\nprint('parent pid: %d' % ppid)\nlockdata = struct.pack('hhllh', fcntl.F_RDLCK, 0, 0, 0, ppid)\nres = fcntl.fcntl(fd.fileno(), fcntl.F_SETLK, lockdata)\nprint('put read lock in parent process: %s' % str(struct.unpack('hhllh', res)))\nif os.fork():\n os.wait()\n lockdata = struct.pack('hhllh', fcntl.F_UNLCK, 0, 0, 0, ppid)\n res = fcntl.fcntl(fd.fileno(), fcntl.F_SETLK, lockdata)\n print('release lock: %s' % 
str(struct.unpack('hhllh', res)))\nelse:\n cpid = os.getpid()\n print('child pid: %d' % cpid)\n lockdata = struct.pack('hhllh', fcntl.F_WRLCK, 0, 0, 0, cpid)\n try:\n fcntl.fcntl(fd.fileno(), fcntl.F_SETLK, lockdata)\n except OSError:\n res = fcntl.fcntl(fd.fileno(), fcntl.F_GETLK, lockdata)\n print('fail to get lock: %s' % str(struct.unpack('hhllh', res)))\n else:\n print('succeeded in getting lock')\n","sub_path":"fcntl_test.py","file_name":"fcntl_test.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"228904668","text":"from flask import Flask\nimport models\nfrom flask import render_template\n\nDEBUG = True\nHOST = '0.0.0.0'\nPORT = 8000\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef hello():\n user = {'nickname': 'Michael'}\n title = 'Home'\n return render_template('index.html',\n user=user,\n title=title)\n\n\nif __name__ == '__main__':\n app.run(debug=DEBUG, host=HOST, port=PORT)\n models.initialize()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"598337856","text":"# -*- coding: utf-8 -*-\n\nDEFAULT_TYPES = [\n 'Collection',\n 'Document',\n 'Event',\n 'File',\n 'Folder',\n 'Image',\n 'Link',\n 'News Item',\n]\n\n\ndef replace_link_variables_by_paths(context, url):\n \"\"\"Take an `url` and replace the variables \"${navigation_root_url}\" and\n \"${portal_url}\" by the corresponding paths. `context` is the acquisition\n context.\n \"\"\"\n portal_state = context.restrictedTraverse('@@plone_portal_state')\n\n if '${navigation_root_url}' in url:\n url = _replace_variable_by_path(\n url,\n '${navigation_root_url}',\n portal_state.navigation_root()\n )\n\n if '${portal_url}' in url:\n url = _replace_variable_by_path(\n url,\n '${portal_url}',\n portal_state.portal()\n )\n\n return url\n\n\ndef _replace_variable_by_path(url, variable, obj):\n path = '/'.join(obj.getPhysicalPath())\n return url.replace(variable, path)\n","sub_path":"buildout-cache/eggs/plone.app.contenttypes-1.2a9-py2.7.egg/plone/app/contenttypes/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"257296098","text":"import logging\nimport os\nfrom datetime import timedelta\nfrom itertools import chain\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import Group\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import models\nfrom django.db.models import Min, Sum\nfrom django.db.models.signals import post_delete\nfrom django.dispatch import receiver\nfrom django.utils import timezone\nfrom django.utils.functional import cached_property\nfrom django_extensions.db.models import TitleSlugDescriptionModel\nfrom guardian.shortcuts import assign_perm, get_objects_for_group, remove_perm\nfrom jinja2 import sandbox\nfrom jinja2.exceptions import TemplateError\nfrom stdimage import JPEGField\n\nfrom grandchallenge.anatomy.models import BodyStructure\nfrom grandchallenge.components.models import (\n ComponentImage,\n ComponentInterface,\n ComponentJob,\n)\nfrom grandchallenge.core.models import RequestBase, UUIDModel\nfrom grandchallenge.core.storage import (\n get_logo_path,\n get_social_image_path,\n public_s3_storage,\n)\nfrom grandchallenge.core.templatetags.bleach import md2html\nfrom grandchallenge.evaluation.utils import 
get\nfrom grandchallenge.modalities.models import ImagingModality\nfrom grandchallenge.organizations.models import Organization\nfrom grandchallenge.publications.models import Publication\nfrom grandchallenge.subdomains.utils import reverse\nfrom grandchallenge.workstations.models import Workstation\n\nlogger = logging.getLogger(__name__)\n\nDEFAULT_INPUT_INTERFACE_SLUG = \"generic-medical-image\"\nDEFAULT_OUTPUT_INTERFACE_SLUG = \"generic-overlay\"\n\nJINJA_ENGINE = sandbox.ImmutableSandboxedEnvironment()\n\n\nclass Algorithm(UUIDModel, TitleSlugDescriptionModel):\n editors_group = models.OneToOneField(\n Group,\n on_delete=models.CASCADE,\n editable=False,\n related_name=\"editors_of_algorithm\",\n )\n users_group = models.OneToOneField(\n Group,\n on_delete=models.CASCADE,\n editable=False,\n related_name=\"users_of_algorithm\",\n )\n logo = JPEGField(\n upload_to=get_logo_path,\n storage=public_s3_storage,\n variations=settings.STDIMAGE_LOGO_VARIATIONS,\n )\n social_image = JPEGField(\n upload_to=get_social_image_path,\n storage=public_s3_storage,\n blank=True,\n help_text=\"An image for this algorithm which is displayed when you post the link for this algorithm on social media. Should have a resolution of 640x320 px (1280x640 px for best display).\",\n variations=settings.STDIMAGE_SOCIAL_VARIATIONS,\n )\n workstation = models.ForeignKey(\n \"workstations.Workstation\", on_delete=models.CASCADE\n )\n workstation_config = models.ForeignKey(\n \"workstation_configs.WorkstationConfig\",\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n )\n public = models.BooleanField(\n default=False,\n help_text=(\n \"Should this algorithm be visible to all users on the algorithm \"\n \"overview page? This does not grant all users permission to use \"\n \"this algorithm. Users will still need to be added to the \"\n \"algorithm users group in order to do that.\"\n ),\n )\n detail_page_markdown = models.TextField(blank=True)\n job_create_page_markdown = models.TextField(blank=True)\n additional_terms_markdown = models.TextField(\n blank=True,\n help_text=(\n \"By using this algortihm, users agree to the site wide \"\n \"terms of service. If your algorithm has any additional \"\n \"terms of usage, define them here.\"\n ),\n )\n result_template = models.TextField(\n blank=True,\n default=\"
<pre>{{ results|tojson(indent=2) }}</pre>
\",\n help_text=(\n \"Define the jinja template to render the content of the \"\n \"results.json to html. For example, the following template will \"\n \"print out all the keys and values of the result.json. \"\n \"Use results to access the json root. \"\n \"{% for key, value in results.metrics.items() -%}\"\n \"{{ key }} {{ value }}\"\n \"{% endfor %}\"\n ),\n )\n inputs = models.ManyToManyField(\n to=ComponentInterface, related_name=\"algorithm_inputs\"\n )\n outputs = models.ManyToManyField(\n to=ComponentInterface, related_name=\"algorithm_outputs\"\n )\n publications = models.ManyToManyField(\n Publication,\n blank=True,\n help_text=\"The publications associated with this algorithm\",\n )\n modalities = models.ManyToManyField(\n ImagingModality,\n blank=True,\n help_text=\"The imaging modalities supported by this algorithm\",\n )\n structures = models.ManyToManyField(\n BodyStructure,\n blank=True,\n help_text=\"The structures supported by this algorithm\",\n )\n organizations = models.ManyToManyField(\n Organization,\n blank=True,\n help_text=\"The organizations associated with this algorithm\",\n related_name=\"algorithms\",\n )\n credits_per_job = models.PositiveIntegerField(\n default=0,\n help_text=(\n \"The number of credits that are required for each execution of this algorithm.\"\n ),\n )\n average_duration = models.DurationField(\n null=True,\n default=None,\n editable=False,\n help_text=\"The average duration of successful jobs.\",\n )\n use_flexible_inputs = models.BooleanField(default=True)\n\n class Meta(UUIDModel.Meta, TitleSlugDescriptionModel.Meta):\n ordering = (\"created\",)\n permissions = [(\"execute_algorithm\", \"Can execute algorithm\")]\n\n def __str__(self):\n return f\"{self.title}\"\n\n def get_absolute_url(self):\n return reverse(\"algorithms:detail\", kwargs={\"slug\": self.slug})\n\n @property\n def api_url(self):\n return reverse(\"api:algorithm-detail\", kwargs={\"pk\": self.pk})\n\n def save(self, *args, **kwargs):\n adding = self._state.adding\n\n if adding:\n self.create_groups()\n self.workstation_id = (\n self.workstation_id or self.default_workstation.pk\n )\n\n super().save(*args, **kwargs)\n\n if adding:\n self.set_default_interfaces()\n\n self.assign_permissions()\n self.assign_workstation_permissions()\n\n def create_groups(self):\n self.editors_group = Group.objects.create(\n name=f\"{self._meta.app_label}_{self._meta.model_name}_{self.pk}_editors\"\n )\n self.users_group = Group.objects.create(\n name=f\"{self._meta.app_label}_{self._meta.model_name}_{self.pk}_users\"\n )\n\n def set_default_interfaces(self):\n if not self.inputs.exists():\n self.inputs.set(\n [\n ComponentInterface.objects.get(\n slug=DEFAULT_INPUT_INTERFACE_SLUG\n )\n ]\n )\n if not self.outputs.exists():\n self.outputs.set(\n [\n ComponentInterface.objects.get(slug=\"results-json-file\"),\n ComponentInterface.objects.get(\n slug=DEFAULT_OUTPUT_INTERFACE_SLUG\n ),\n ]\n )\n\n def assign_permissions(self):\n # Editors and users can view this algorithm\n assign_perm(f\"view_{self._meta.model_name}\", self.editors_group, self)\n assign_perm(f\"view_{self._meta.model_name}\", self.users_group, self)\n # Editors and users can execute this algorithm\n assign_perm(\n f\"execute_{self._meta.model_name}\", self.editors_group, self\n )\n assign_perm(f\"execute_{self._meta.model_name}\", self.users_group, self)\n # Editors can change this algorithm\n assign_perm(\n f\"change_{self._meta.model_name}\", self.editors_group, self\n )\n\n reg_and_anon = Group.objects.get(\n 
name=settings.REGISTERED_AND_ANON_USERS_GROUP_NAME\n )\n\n if self.public:\n assign_perm(f\"view_{self._meta.model_name}\", reg_and_anon, self)\n else:\n remove_perm(f\"view_{self._meta.model_name}\", reg_and_anon, self)\n\n def assign_workstation_permissions(self):\n \"\"\"Allow the editors and users group to view the workstation.\"\"\"\n perm = f\"view_{Workstation._meta.model_name}\"\n\n for group in [self.users_group, self.editors_group]:\n workstations = get_objects_for_group(\n group=group, perms=perm, klass=Workstation\n )\n\n if (\n self.workstation not in workstations\n ) or workstations.count() > 1:\n remove_perm(perm=perm, user_or_group=group, obj=workstations)\n assign_perm(\n perm=perm, user_or_group=group, obj=self.workstation\n )\n\n @property\n def latest_ready_image(self):\n \"\"\"\n Returns\n -------\n The most recent container image for this algorithm\n \"\"\"\n return (\n self.algorithm_container_images.filter(ready=True)\n .order_by(\"-created\")\n .first()\n )\n\n @property\n def default_workstation(self):\n \"\"\"\n Returns the default workstation, creating it if it does not already\n exist.\n \"\"\"\n w, created = Workstation.objects.get_or_create(\n slug=settings.DEFAULT_WORKSTATION_SLUG\n )\n\n if created:\n w.title = settings.DEFAULT_WORKSTATION_SLUG\n w.save()\n\n return w\n\n def update_average_duration(self):\n \"\"\"Store the duration of successful jobs for this algorithm\"\"\"\n self.average_duration = Job.objects.filter(\n algorithm_image__algorithm=self, status=Job.SUCCESS\n ).average_duration()\n self.save(update_fields=(\"average_duration\",))\n\n def is_editor(self, user):\n return user.groups.filter(pk=self.editors_group.pk).exists()\n\n def add_editor(self, user):\n return user.groups.add(self.editors_group)\n\n def remove_editor(self, user):\n return user.groups.remove(self.editors_group)\n\n def is_user(self, user):\n return user.groups.filter(pk=self.users_group.pk).exists()\n\n def add_user(self, user):\n return user.groups.add(self.users_group)\n\n def remove_user(self, user):\n return user.groups.remove(self.users_group)\n\n\n@receiver(post_delete, sender=Algorithm)\ndef delete_algorithm_groups_hook(*_, instance: Algorithm, using, **__):\n \"\"\"\n Deletes the related groups.\n\n We use a signal rather than overriding delete() to catch usages of\n bulk_delete.\n \"\"\"\n try:\n instance.editors_group.delete(using=using)\n except ObjectDoesNotExist:\n pass\n\n try:\n instance.users_group.delete(using=using)\n except ObjectDoesNotExist:\n pass\n\n\nclass AlgorithmImage(UUIDModel, ComponentImage):\n algorithm = models.ForeignKey(\n Algorithm,\n on_delete=models.CASCADE,\n related_name=\"algorithm_container_images\",\n )\n queue_override = models.CharField(max_length=128, blank=True)\n\n class Meta(UUIDModel.Meta, ComponentImage.Meta):\n ordering = (\"created\", \"creator\")\n\n def get_absolute_url(self):\n return reverse(\n \"algorithms:image-detail\",\n kwargs={\"slug\": self.algorithm.slug, \"pk\": self.pk},\n )\n\n @property\n def api_url(self):\n return reverse(\"api:algorithms-image-detail\", kwargs={\"pk\": self.pk})\n\n def save(self, *args, **kwargs):\n adding = self._state.adding\n\n super().save(*args, **kwargs)\n\n if adding:\n self.assign_permissions()\n\n def assign_permissions(self):\n # Editors and users can view this algorithm image\n assign_perm(\n f\"view_{self._meta.model_name}\", self.algorithm.editors_group, self\n )\n # Editors can change this algorithm image\n assign_perm(\n f\"change_{self._meta.model_name}\",\n 
self.algorithm.editors_group,\n self,\n )\n\n\nclass JobQuerySet(models.QuerySet):\n def spent_credits(self, user):\n now = timezone.now()\n period = timedelta(days=30)\n user_groups = Group.objects.filter(user=user)\n\n return (\n self.filter(creator=user, created__range=[now - period, now])\n .distinct()\n .order_by(\"created\")\n .select_related(\"algorithm_image__algorithm\")\n .exclude(algorithm_image__algorithm__editors_group__in=user_groups)\n .aggregate(\n total=Sum(\"algorithm_image__algorithm__credits_per_job\"),\n oldest=Min(\"created\"),\n )\n )\n\n\nclass Job(UUIDModel, ComponentJob):\n algorithm_image = models.ForeignKey(\n AlgorithmImage, on_delete=models.CASCADE\n )\n creator = models.ForeignKey(\n settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL\n )\n public = models.BooleanField(\n default=False,\n help_text=(\n \"If True, allow anyone to download this result along \"\n \"with the input image. Otherwise, only the job creator and \"\n \"algorithm editor(s) will have permission to download and view \"\n \"this result.\"\n ),\n )\n comment = models.TextField(blank=True, default=\"\")\n\n viewer_groups = models.ManyToManyField(\n Group,\n help_text=\"Which groups should have permission to view this job?\",\n )\n viewers = models.OneToOneField(\n Group,\n on_delete=models.CASCADE,\n related_name=\"viewers_of_algorithm_job\",\n )\n credits_set = JobQuerySet.as_manager()\n\n class Meta:\n ordering = (\"created\",)\n\n def __str__(self):\n return f\"Job {self.pk}\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._public_orig = self.public\n self._status_orig = self.status\n\n @property\n def container(self):\n return self.algorithm_image\n\n def get_path_and_value(self, inp):\n if inp.file:\n return [(inp.interface.relative_path, inp.file)]\n if inp.image:\n return [\n (\n os.path.join(\n inp.interface.relative_path,\n im_file.file.name.split(\"/\")[-1],\n ),\n im_file.file,\n )\n for im_file in inp.image.files.all()\n ]\n return [(inp.interface.relative_path, inp.value)]\n\n @property\n def input_files(self):\n return chain(\n *[self.get_path_and_value(inp) for inp in self.inputs.all()]\n )\n\n @property\n def output_interfaces(self):\n return self.algorithm_image.algorithm.outputs\n\n @cached_property\n def rendered_result_text(self):\n try:\n results = get(\n [\n o.value\n for o in self.outputs.all()\n if o.interface.slug == \"results-json-file\"\n ]\n )\n except ObjectDoesNotExist:\n return \"\"\n\n try:\n template_output = JINJA_ENGINE.from_string(\n self.algorithm_image.algorithm.result_template\n ).render(results=results)\n except (TemplateError, TypeError, ValueError):\n return \"Jinja template is invalid\"\n\n return md2html(template_output)\n\n def get_absolute_url(self):\n return reverse(\n \"algorithms:job-detail\",\n kwargs={\n \"slug\": self.algorithm_image.algorithm.slug,\n \"pk\": self.pk,\n },\n )\n\n @property\n def api_url(self):\n return reverse(\"api:algorithms-job-detail\", kwargs={\"pk\": self.pk})\n\n def save(self, *args, **kwargs):\n adding = self._state.adding\n\n if adding:\n self.init_viewers_group()\n\n super().save(*args, **kwargs)\n\n if adding:\n self.init_permissions()\n\n if adding or self._public_orig != self.public:\n self.update_viewer_groups_for_public()\n self._public_orig = self.public\n\n if self._status_orig != self.status and self.status == self.SUCCESS:\n self.algorithm_image.algorithm.update_average_duration()\n\n def init_viewers_group(self):\n self.viewers = Group.objects.create(\n 
name=f\"{self._meta.app_label}_{self._meta.model_name}_{self.pk}_viewers\"\n )\n\n def init_permissions(self):\n # By default, only the viewers can view this job\n self.viewer_groups.set([self.viewers])\n\n # If there is a creator they can view and change this job\n if self.creator:\n self.viewers.user_set.add(self.creator)\n assign_perm(f\"change_{self._meta.model_name}\", self.creator, self)\n\n def update_viewer_groups_for_public(self):\n g = Group.objects.get(\n name=settings.REGISTERED_AND_ANON_USERS_GROUP_NAME\n )\n\n if self.public:\n self.viewer_groups.add(g)\n else:\n self.viewer_groups.remove(g)\n\n def add_viewer(self, user):\n return user.groups.add(self.viewers)\n\n def remove_viewer(self, user):\n return user.groups.remove(self.viewers)\n\n\n@receiver(post_delete, sender=Job)\ndef delete_job_groups_hook(*_, instance: Job, using, **__):\n \"\"\"\n Deletes the related group.\n\n We use a signal rather than overriding delete() to catch usages of\n bulk_delete.\n \"\"\"\n try:\n instance.viewers.delete(using=using)\n except ObjectDoesNotExist:\n pass\n\n\nclass AlgorithmPermissionRequest(RequestBase):\n \"\"\"\n When a user wants to view an algorithm, editors have the option of\n reviewing each user before accepting or rejecting them. This class records\n the needed info for that.\n \"\"\"\n\n algorithm = models.ForeignKey(\n Algorithm,\n help_text=\"To which algorithm has the user requested access?\",\n on_delete=models.CASCADE,\n )\n rejection_text = models.TextField(\n blank=True,\n help_text=(\n \"The text that will be sent to the user with the reason for their \"\n \"rejection.\"\n ),\n )\n\n @property\n def base_object(self):\n return self.algorithm\n\n @property\n def object_name(self):\n return self.base_object.title\n\n @property\n def add_method(self):\n return self.base_object.add_user\n\n @property\n def remove_method(self):\n return self.base_object.remove_user\n\n @property\n def permission_list_url(self):\n return reverse(\n \"algorithms:permission-request-list\",\n kwargs={\"slug\": self.base_object.slug},\n )\n\n def __str__(self):\n return f\"{self.object_name} registration request by user {self.user.username}\"\n\n class Meta(RequestBase.Meta):\n unique_together = ((\"algorithm\", \"user\"),)\n","sub_path":"app/grandchallenge/algorithms/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":19013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"590345950","text":"import cv2\nimport numpy as np\n\ndef obtener_Imagen(Nombre_Imagen_Nueva):\n URL = \"uploads/\" + Nombre_Imagen_Nueva # Direccion donde se almacenan las imagenes\n RESPALDO = \"Pasos/\" # Direccion donde se almadenaran las imagenes como evidencia\n\n # --------- Lectura de la imagen que se encuentra almacenada en el servidor -------------\n\n img_Contorno = cv2.imread(URL) # Imagen que servira para obtner contornos, area y centro del circulo\n img_Polar = cv2.imread(URL,1) # Imagen que se transformara de Carteciano a Polar O -> ---\n\n # -------- Resaltar los detalles de la imagen mejorandola ---------\n\n madian = cv2.medianBlur(img_Contorno, 25) # Filtro\n gris = cv2.cvtColor(madian, cv2.COLOR_BGR2GRAY) # Convertir la imagen a escala de grises\n clahe = cv2.createCLAHE(clipLimit=75.0, tileGridSize=(128,128))\n gris = clahe.apply(gris)\n\n # -------- Busqueda de contornos --------\n\n gris = cv2.Canny(gris, 10, 150)\n gris = cv2.dilate(gris, None, iterations=1)\n gris = cv2.erode(gris, None,iterations=1)\n contornos, _ = 
cv2.findContours(gris, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n # ------- Busqueda de un contorno con el area mas grande ------\n\n for contorno in contornos:\n area = cv2.contourArea(contorno) # Esta variable almacena el area de cada contorno encotrado\n x,y,w,h = cv2.boundingRect(contorno)# Estas variables almacenan los valores del tamaño de la imagen\n\n if area > 3000:\n M = cv2.moments(contorno)\n if (M[\"m00\"]==0): M[\"m00\"] = 1\n xcentro = int(M[\"m10\"]/M[\"m00\"])\n ycentro = int(M[\"m01\"]/M[\"m00\"])\n radio = xcentro-x\n\n # -------- Dibujamos el centro y el contorno del croma --------\n\n cv2.circle(img_Contorno, (xcentro, ycentro), 100, (0,255,0), -1)\n cv2.drawContours(img_Contorno, contorno, -1, (0,255,0), 10)\n\n # -------- Primer Paso Terminado, Guardar evidencia -----------\n\n cv2.imwrite(RESPALDO+\"Paso_1.jpg\", img_Contorno)\n\n # -------- Convercion de carteciano a polar --------\n\n img_Polar = img_Polar.astype(np.float32)\n\n value = np.sqrt((ycentro**2.0)+(xcentro**2.0)) # Variable que almacena la redimencion de la imagen nueva\n polar_img = cv2.linearPolar(img_Polar, (xcentro, ycentro), value, cv2.WARP_FILL_OUTLIERS)\n\n polar_img = polar_img.astype(np.uint8)\n \n # -------- Segundo Paso Termiando, Guardar evidencia --------\n\n cv2.imwrite(RESPALDO+\"Paso_2.jpg\", polar_img)\n\n # -------- Ajuste de la imagen modificada, quitando lo no util --------\n \n crop_img = polar_img[0:3408, 0:2400] # Area que es util de la imagen\n\n # -------- Tercer Paso Terminado, Guardar evidencia --------\n\n cv2.imwrite(RESPALDO+\"Paso_3.jpg\", crop_img)\n\n # -------- Ajuste horizontal de la imagen --------\n\n horizontal_img = cv2.rotate(crop_img, cv2.ROTATE_90_CLOCKWISE)\n\n # -------- Cuarto Paso Terminado, Guardar evidencia --------\n\n cv2.imwrite(RESPALDO+\"Paso_4.jpg\", horizontal_img)\n\n return horizontal_img","sub_path":"Carteciano_a_Polar.py","file_name":"Carteciano_a_Polar.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"542973160","text":"\n\n#calss header\nclass _HURT():\n\tdef __init__(self,): \n\t\tself.name = \"HURT\"\n\t\tself.definitions = [u'to feel pain in a part of your body, or to injure someone or cause them pain: ', u'to cause emotional pain to someone: ', u'to cause harm or difficulty: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_hurt.py","file_name":"_hurt.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"219588340","text":"def get_text(file_name):\n with open(file_name, mode='r', encoding='utf-8') as f:\n text = f.read().split('\\n')\n return text\n\n\ndef get_alph():\n rus_alph = 'абвгдеёжзийклмнопрстуфхцчшщъыьэюя'\n eng_alph = 'abcdefghijklmnopqrstuvwxyz'\n symb = ' :;!?&—-+=()*/.,1234567890'\n alph = rus_alph + rus_alph.upper() + eng_alph + eng_alph.upper() + symb\n return alph\n\ndef code(alph, key):\n tabs = text\n with open(f'output.txt', mode='w', encoding='utf8') as w:\n for i in range(len(tabs)):\n line = ''\n for j in range(len(tabs[i])):\n line += alph[(alph.index(tabs[i][j]) + key) % len(alph)]\n if i == len(tabs) - 1:\n line = line + '^'\n else:\n line = line + '~'\n w.write(line + '\\n')\n\n\nalph = get_alph()\nkey = 149995388\ntext = 
get_text('input.txt')\ncode(alph, key)\n","sub_path":"semestr 2/lab 6/caesar.py","file_name":"caesar.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"625951470","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 25 20:55:33 2018\n\n@author: Misha\n\"\"\"\n\n### Takes a card picture and creates a top-down 200x300 flattened image\n### of it. Isolates the suit and rank and saves the isolated images.\n### Runs through A - K ranks and then the 4 suits.\n\n# Import necessary packages\nimport cv2\nimport numpy as np\nimport time\nimport Cards\nimport os\n\nimg_path = os.path.dirname(os.path.abspath(__file__)) + '/Card_Imgs/'\ndebug_path = os.path.dirname(os.path.abspath(__file__)) + '/Debug_Imgs/'\n\nIM_WIDTH = 800\nIM_HEIGHT = 600 \n\nRANK_WIDTH = 400\nRANK_HEIGHT = 580\n\nSUIT_WIDTH = 70\nSUIT_HEIGHT = 100\n\n# If using a USB Camera instead of a PiCamera, change PiOrUSB to 2\nPiOrUSB = 2\n\ndebug_pics = 1\n\nif PiOrUSB == 1:\n # Import packages from picamera library\n from picamera.array import PiRGBArray\n from picamera import PiCamera\n\n # Initialize PiCamera and grab reference to the raw capture\n camera = PiCamera()\n camera.resolution = (IM_WIDTH,IM_HEIGHT)\n camera.framerate = 10\n rawCapture = PiRGBArray(camera, size=(IM_WIDTH,IM_HEIGHT))\n\nif PiOrUSB == 2:\n # Initialize USB camera\n cap = cv2.VideoCapture(0)\n\n# Use counter variable to switch from isolating Rank to isolating Suit\ni = 1\n\nfor Name in ['reito_lantern','ornate_kanzashi', 'free_from_the_real', \n 'sakura_tribe_scout', 'plains_ben_thomposon', 'path_of_angers_flame', \n 'sift_through_sands', 'setons_desire', 'phantom_nomad', \n 'divine_light', 'ghostly_wings', 'plains_fred_fields', 'locust_mister',\n 'jugan_the_rising_star', 'whispering_shade', 'divergent_growth', \n 'ryusei_the_falling_star', 'dripping_tongue_zubera',\n 'ninja_of_the _deep_hours', 'plains_matthew_mitchell', 'plains_greg_staples',\n 'forest_quinton_hoover', 'forest_john_avon', 'ghost_lit_refeemer', \n 'kabuto_moth', 'kami_of_false_home', 'waxmane_baku', 'kami_of_tattered_shoji',\n 'ethereal_haze', 'joyous_respite', 'orochi_sustainer', 'orochi_ranger',\n 'commune_with_nature', 'petalmane_baku', 'scaled_hulk', 'harbinger_of_spring',\n 'traproot_kami', 'rending_vines', 'vital_surge', 'torrent_of_stone',\n 'descendant_of_soramaro', 'wandering_ones', 'orochi_sustainer', 'field_of_reality']:\n\n filename = Name + '.jpg'\n card = None\n \n while True:\n print('Press \"p\" to take a picture of ' + filename)\n \n if PiOrUSB == 1: # PiCamerac\n rawCapture.truncate(0)\n # Press 'p' to take a picture\n for frame in camera.capture_continuous(rawCapture, format=\"bgr\",use_video_port=True):\n \n image = frame.array\n cv2.imshow(\"Card\",image)\n key = cv2.waitKey(1) & 0xFF\n if key == ord(\"p\"):\n break\n \n rawCapture.truncate(0)\n \n if PiOrUSB == 2: # USB camera\n # Press 'p' to take a picture\n while(True):\n \n ret, frame = cap.read()\n try:\n cv2.imshow(\"Card\",frame)\n key = cv2.waitKey(1) & 0xFF\n if key == ord(\"p\"):\n image = frame\n break\n except Exception as e:\n print(e)\n \n try:\n # Pre-process image\n thresh = Cards.preprocess_image(image)\n thresh_wb = Cards.preprocess_white_image(image)\n\n if debug_pics: cv2.imwrite(debug_path + \"0_prepare.jpg\",thresh)\n \n # Find contours and sort them by size\n dummy,cnts,hier = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n dummy,cnts_wht,hier2 = 
cv2.findContours(thresh_wb,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\n cnts = sorted(cnts, key=cv2.contourArea,reverse=True)\n cnts_wht = sorted(cnts_wht, key=cv2.contourArea,reverse=True)\n \n # Assume largest contour is the card. If there are no contours, print an error\n # Decide is it card with white or black background \n if len(cnts) > 0:\n card = cnts[0]\n elif len(cnts_wht) > 0:\n card = cnts_wht[0]\n else:\n print('No contours found!')\n quit()\n \n # Approximate the corner points of the card\n peri = cv2.arcLength(card,True)\n approx = cv2.approxPolyDP(card,0.01*peri,True)\n pts = np.float32(approx)\n \n x,y,w,h = cv2.boundingRect(card)\n \n # Flatten the card and convert it to 200x300\n warp = Cards.flattener(image,pts,w,h)\n if debug_pics: cv2.imwrite(debug_path + \"1_rectangle.jpg\",warp)\n \n dummy, cnts, hier = cv2.findContours(warp, cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n cnts = sorted(cnts, key=cv2.contourArea,reverse=True)\n \n x,y,w,h = cv2.boundingRect(cnts[0])\n \n roi = warp[y:y+h, x:x+w]\n sized = cv2.resize(roi, (RANK_WIDTH, RANK_HEIGHT), 0, 0)\n final_img = sized\n \n cv2.imshow(\"Image\",final_img)\n \n # Save image\n print('Press \"c\" to save or \"n\" to proceed to next image.')\n key = cv2.waitKey(0) & 0xFF\n if key == ord('c'):\n cv2.imwrite(img_path+filename,final_img)\n break\n elif key == ord('n'):\n break\n i = i + 1\n except Exception as e:\n print(e)\n\ncv2.destroyAllWindows()\n\ncamera.close()\n","sub_path":"Card_Isolator.py","file_name":"Card_Isolator.py","file_ext":"py","file_size_in_byte":5689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"490543953","text":"def timefunction():\n a = int(input(\"Введите количество секунд:\"))\n d = (a//86400) % 24\n h = (a // 3600) % 24\n m = (a // 60) % 60\n s = a % 60\n if h < 10:\n h = str('0'+str(h))\n if m < 10:\n m = str('0' + str(m))\n else:\n m = str(m)\n if s < 10:\n s = str('0' + str(s))\n else:\n s = str(s)\n print(str(d) + ':' + str(h) + ':' + str(m) + ':' + str(s))\n\n\ntimefunction()\n","sub_path":"Just a second.py","file_name":"Just a second.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"479025470","text":"from PIL import Image, ImageTk\r\nimport sys\r\n\r\n'''\r\nOpen the source image\r\nCrop the image\r\nSave the cropped image\r\n'''\r\n\r\n#open image from user's commandline argument\r\nimg = Image.open(sys.argv[1])\r\nimgdata=list(img.getdata())#the pixels from the image\r\n\r\n#this with and height seems to be standard for all IR samples\r\nWidth=1024\r\nHeight=768\r\n\r\n#the area of each image that we want (the actual graph)\r\n #(left,right,top,bottom)\r\ntargetRect=(113,978,29,724)\r\n\r\n#copies pixels from the source image within the targetRect\r\ndef cropRect(source,rect):\r\n left,right,top,bottom=rect\r\n newImg=[]\r\n #copy specified pixels\r\n for y in range(top,bottom+1):\r\n for x in range(left,right+1):\r\n newImg+=[source[y*Width+x]]\r\n return newImg\r\n\r\n#the graph cut out of the larger image\r\ngraph=cropRect(imgdata,targetRect)\r\n\r\n#width and height of cropped graph\r\nWidth=targetRect[1]-targetRect[0]+1\r\nHeight=targetRect[3]-targetRect[2]+1\r\n\r\n#save the cropped graph image\r\nimg = Image.new('RGB', (Width, Height))\r\nimg.putdata(graph)\r\nimg.save('graph.png')\r\n\r\n'''\r\nCreate data list by reading pixels from graph\r\n -each entry in data is the range over wich each\r\n column has black pixels\r\nSave 
data to file\r\n'''\r\n\r\ndef pix(x,y):#checks if the pixel at x,y is black\r\n r,g,b=graph[y*Width+x]\r\n if r+g+b>=100:\r\n return False#not black\r\n else:\r\n return True#black\r\n\r\ndata=[]#to be filled with values from graph\r\n\r\n#For each x get the y range over which the graph has black pixels\r\n# or None if the graph is empty at that x value\r\nfor x in range(0,Width):\r\n data+=[None]\r\n foundPix=False#have you found a pixel while looping through the column\r\n for y in range(0,Height):\r\n p=pix(x,y)#is the pixel black\r\n if p and not foundPix:\r\n #record the first black pixels y value\r\n foundPix=True\r\n maxVal=y\r\n elif not p and foundPix:\r\n #record the last black pixels y value\r\n minVal=y\r\n data[-1]=(minVal,maxVal)#write these values to data\r\n break#next x\r\n\r\n#save data to file\r\nf = open(\"data.txt\", \"w\")\r\nfor element in data:\r\n f.write(str(element) + '\\n')\r\nf.close()\r\n\r\nprint('done')\r\n \r\n","sub_path":"src/IR_read_graph.py","file_name":"IR_read_graph.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"608470630","text":"#!/usr/bin/env python\nimport os\n\n\ndef split_file(infile, prefix, max_size=50*1024*1024, file_buffer=1024):\n \"\"\"\n file: the input file\n prefix: prefix of the output files that will be created\n max_size: maximum size of each created file in bytes\n buffer: buffer size in bytes\n\n Returns the number of parts created.\n \"\"\"\n with open(infile, 'r+b') as src:\n suffix = 0\n while True:\n with open(prefix + '.%s' % suffix, 'w+b') as tgt:\n written = 0\n while written < max_size:\n data = src.read(file_buffer)\n if data:\n tgt.write(data)\n written += file_buffer\n else:\n return suffix\n suffix += 1\n\n\ndef cat_files(infiles, outfile, file_buffer=1024):\n \"\"\"\n infiles: a list of files\n outfile: the file that will be created\n buffer: buffer size in bytes\n \"\"\"\n if os.path.isfile(outfile):\n os.remove(outfile)\n with open(outfile, 'w+b') as tgt:\n for infile in sorted(infiles):\n with open(infile, 'r+b') as src:\n while True:\n data = src.read(file_buffer)\n if data:\n tgt.write(data)\n else:\n break\n","sub_path":"lib/helpers/fileSplitter.py","file_name":"fileSplitter.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"510841368","text":"#!/usr/bin/env python3\n# http://rosalind.info/problems/tran/\n\n\ndef fasta_parse(raw_data):\n \"\"\"Preprocess the entry fasta file. 
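Each '>'-delimited block is split on whitespace: the first token becomes the header and the remaining tokens are joined into one sequence string.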
Return a list with header as even\n index number and sequences in a list as odd number index\"\"\"\n \n data = []\n for cell in raw_data:\n if len(cell):\n parts = cell.split()\n header = parts[0]\n seq = ''.join(parts[1:])\n data.append(header)\n data.append([seq])\n return data\n\ndef trans_i_v(seq1, seq2):\n \"\"\"From 2 sequences, calculate the transition/transversion ratio\"\"\"\n \n purine = ['A', 'G']\n pyrimidine = ['C', 'T']\n \n transition = 0\n transversion = 0\n \n for base in range(len(seq1)):\n if seq1[base] != seq2[base]:\n if seq1[base] in purine and seq2[base] in purine:\n transition += 1\n elif seq1[base] in pyrimidine and seq2[base] in pyrimidine:\n transition += 1\n else:\n transversion += 1\n \n print('%0.11f' % (transition / transversion))\n\n\nif __name__ == '__main__':\n f = open('/Users/mathias.galati/Downloads/rosalind_tran.txt', 'r')\n raw_data = f.read().strip().split('>')\n data = fasta_parse(raw_data)\n \n seq1 = data[1][0]\n seq2 = data[3][0]\n \n trans_i_v(seq1, seq2)\n \n","sub_path":"trans_i_v.py","file_name":"trans_i_v.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"134406747","text":"\"\"\"\nGiven an array of integers that is already sorted in ascending order, find two numbers such that they add up to a specific target number.\n\nThe function twoSum should return indices of the two numbers such that they add up to the target, where index1 must be less than index2.\n\nNote:\n\nYour returned answers (both index1 and index2) are not zero-based.\nYou may assume that each input would have exactly one solution and you may not use the same element twice.\n\n# Solved on 09/10/2019 by William\"\"\"\n\ndef twoSum(numbers, target):\n values = {}\n for i in range(len(numbers)):\n needed_value = target - numbers[i]\n # print(needed_value, values)\n # if needed_value < 0:\n # raise Exception(\"Match Not Found!\")\n if needed_value in values:\n return [values[needed_value]+1, i+1]\n values[numbers[i]] = i \n raise Exception(\"Match Not Found!\")\n\nnumbers = [2,7,11,15]\ntarget = 9\nprint(twoSum(numbers, target))","sub_path":"LeetCode/0167.py","file_name":"0167.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"581861923","text":"import json, io\nfrom random import shuffle\nimport shutil\n\n'''\n split the coco keypoints data into smaller parts.\n'''\n\ntry:\n to_unicode = unicode\nexcept NameError:\n to_unicode = str\n\njson_file = 'annotations/person_keypoints_val2017.json' # annotation input json file\nnum_image = 2000 # number of images to output\njson_out_name = 'person_keypoints_val2017_2000.json' #name of json output file name\n\njson_string = open(json_file)\ndata = json.load(json_string)\n\nshuffle(data[\"images\"])\nimg_id = []\ndata[\"images\"] = data[\"images\"][:num_image]\nfor image in data[\"images\"]:\n img_id.append(image[\"id\"])\n\nannotations = []\nfor annot in data[\"annotations\"]:\n if annot[\"image_id\"] in img_id:\n annotations.append(annot)\ndata[\"annotations\"] = annotations\n\nwith io.open(json_out_name, 'w', encoding='utf8') as outfile:\n str_ = json.dumps(data,\n\t indent=4, sort_keys=True,\n\t separators=(',', ': '), ensure_ascii=False)\n 
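    # Write the sorted, pretty-printed JSON dump (utf-8, non-ASCII preserved) of the sampled subset.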
outfile.write(to_unicode(str_))\n\n\n\n\n","sub_path":"coco_keypoints_handler/coco_spliter.py","file_name":"coco_spliter.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"383135338","text":"#\n# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for label.\"\"\"\n\nimport datetime\nimport os\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nfrom google.protobuf import text_format\nfrom proto.stu3 import datatypes_pb2\nfrom proto.stu3 import google_extensions_pb2\nfrom proto.stu3 import resources_pb2\nfrom py.google.fhir.labels import label\n\n\n_TESTDATA_PATH = 'com_google_fhir/testdata/stu3/labels'\n\n\nclass LabelTest(parameterized.TestCase):\n\n def setUp(self):\n super(LabelTest, self).setUp()\n self._test_data_dir = os.path.join(absltest.get_default_test_srcdir(),\n _TESTDATA_PATH)\n self._enc = resources_pb2.Encounter()\n with open(os.path.join(self._test_data_dir, 'encounter_1.pbtxt')) as f:\n text_format.Parse(f.read(), self._enc)\n self._patient = resources_pb2.Patient()\n self._patient.id.value = 'Patient/1'\n\n self._expected_label = google_extensions_pb2.EventLabel()\n with open(os.path.join(self._test_data_dir, 'label_1.pbtxt')) as f:\n text_format.Parse(f.read(), self._expected_label)\n\n def testExtractCodeBySystem(self):\n sample_codeable_concept = datatypes_pb2.CodeableConcept()\n text_format.Merge('''\n coding: {\n system: {\n value : \"urn:sample_system\"\n }\n code: {\n value : \"sample_code\"\n }\n }\n coding: {\n system: {\n value : \"urn:unrelated_system\"\n }\n code: {\n value : \"unrelated_code\"\n }\n }\n ''', sample_codeable_concept)\n\n code = label.ExtractCodeBySystem(sample_codeable_concept,\n 'urn:sample_system')\n self.assertEqual('sample_code', code)\n\n def testComposeLabel(self):\n output_label = label.ComposeLabel(self._patient, self._enc, 'label.test',\n 'true',\n datetime.datetime(2003, 1, 2, 4, 5, 6))\n self.assertEqual(self._expected_label, output_label)\n\n @parameterized.parameters(\n # February 27, 2009 11:31:31 PM UTC\n {'end_us': 1235777491000000, 'label_val': 'above_14'},\n # February 27, 2009 11:31:30 PM UTC\n {'end_us': 1235777490000000, 'label_val': '7_14'},\n # February 20, 2009 11:31:31 PM UTC\n {'end_us': 1235172691000000, 'label_val': '7_14'},\n # February 20, 2009 11:30:30 PM UTC\n {'end_us': 1235172690000000, 'label_val': '3_7'},\n # February 16, 2009 11:31:30 PM UTC\n {'end_us': 1234827091000000, 'label_val': '3_7'},\n # February 26, 2009 11:30:30 PM UTC\n {'end_us': 1234827090000000, 'label_val': 'less_or_equal_3'}\n )\n def testLengthOfStayRangeAt24Hours(self, end_us, label_val):\n enc = resources_pb2.Encounter()\n enc.CopyFrom(self._enc)\n enc.period.end.value_us = end_us\n labels = [l for l in label.LengthOfStayRangeAt24Hours(\n self._patient, enc)]\n expected_label = label.ComposeLabel(\n self._patient,\n self._enc,\n label.LOS_RANGE_LABEL,\n label_val,\n # 24 hours 
after admission\n datetime.datetime(2009, 2, 14, 23, 31, 30))\n self.assertEqual([expected_label], labels)\n\n def testLengthOfStayRangeAt24HoursLT24Hours(self):\n enc = resources_pb2.Encounter()\n enc.CopyFrom(self._enc)\n enc.period.end.value_us = 1234567891000000\n with self.assertRaises(AssertionError):\n _ = [l for l in label.LengthOfStayRangeAt24Hours(\n self._patient, enc)]\n\n\nif __name__ == '__main__':\n absltest.main()\n","sub_path":"py/google/fhir/labels/label_test.py","file_name":"label_test.py","file_ext":"py","file_size_in_byte":4023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"225163050","text":"import os\n\nAddOption('--build_dir',\n dest='blddir',\n default='build',\n type='string',\n nargs=1,\n action='store',\n metavar='BUILD_DIR',\n help='Initialize Build directory')\n\nAddOption('--use_llvm',\n dest='usellvm',\n default=False,\n action='store_true',\n help='use llvm tools')\n\nenv=Environment(ENV = {'PATH' : os.environ['PATH']},\n TRGT = 'ADSPv5MP',\n BUILD_DIR = os.path.realpath(GetOption('blddir')+ \"/${TRGT}\"),\n USE_LLVM = GetOption('usellvm'))\n\nenv.Replace(Q6_RTOS_ROOT=os.environ['Q6_RTOS_ROOT'])\nenv.VariantDir('${BUILD_DIR}', '.', duplicate=0)\n\nenv.Tool('target_tools', toolpath=[\"${Q6_RTOS_ROOT}/${TRGT}/scripts\"])\nenv.Tool('qurt_builders', toolpath = [\"${Q6_RTOS_ROOT}/scripts\"])\n\nenv.Replace(CCFLAGS=['-m${Q6VERSION}','-g','-O2'])\nenv.Replace(CPPPATH=['.','${Q6_RTOS_ROOT}/include/qurt'])\n\n#Generate example object from c files in current directory\nqurtobj = SConscript('qurt/SConscript', exports='env')\napp1obj = SConscript('app1/SConscript', exports='env')\napp2obj = SConscript('app2/SConscript', exports='env')\n\n#Generate PBN\nenv.Replace(PADDR = '0x1e000000')\nimage = env.QurtImage('${BUILD_DIR}/bootimg.pbn', [qurtobj, app1obj, app2obj])\n\nenv.Replace(OBJDUMPOPT = \"-dClx\")\nimgdump = env.ObjectDump(\"${BUILD_DIR}/bootimg.dump\", image)\n\nif os.name == 'posix':\n q6ss_timer_cfg = \"${Q6_TOOLS_ROOT}/qc/lib/iss/qtimer.so --csr_base=0xab000000 --irq_p=3 --freq=19200000 --cnttid=1\"\n q6ss_int_cfg = \"${Q6_TOOLS_ROOT}/qc/lib/iss/l2vic.so 32 0xab010000\"\n t32cfg = env.Command(\"${BUILD_DIR}/t32sim.cfg\", 't32/t32sim.cfg', \"cat $SOURCE | sed -e 's;T32_MCD.dll;${Q6_TOOLS_ROOT}/qc/lib/iss/T32_MCD.so;' >> $TARGET\")\nelif os.name == 'nt':\n q6ss_timer_cfg = \"L2timer.dll 19200000 0xab020000 3 0xab010000\"\n q6ss_int_cfg = \"l2vic.dll 32 0xab010000\"\n temppath = env.subst(\"${Q6_TOOLS_ROOT}\").replace(\"\\\\\", \"/\")\n t32cfg = env.Command(\"${BUILD_DIR}/t32sim.cfg\", 't32/t32sim.cfg', \"cat $SOURCE | sed -e 's;T32_MCD.dll;\"+temppath+\"/qc/lib/iss/T32_MCD.dll;' >> $TARGET\")\n\nosamcfg = env.Command(\"${BUILD_DIR}/osam.cfg\", '', \"echo ${QURT_MODEL} > $TARGET\")\nq6cfg = env.Command(\"${BUILD_DIR}/q6ss.cfg\", '', [\"echo \"+q6ss_timer_cfg+\" > $TARGET\",\"echo \"+q6ss_int_cfg+\" >> $TARGET\"])\n#run simulator\nt32sirccfg = env.InstallAs(\"${BUILD_DIR}/t32sim_sirc.cfg\", q6cfg)\nt32cmm = env.Command(\"${BUILD_DIR}/t32sim.cmm\", 't32/t32sim.cmm', \"cat $SOURCE | sed -e 's;HexagonV5L;${T32_SYS_CPU};' -e 's;v5l;${T32_MCD_ARCH};' >> $TARGET\")\nenv.Install(\"${BUILD_DIR}\", \"${QURT_MODEL}\")\nt32men1=env.Install(\"${BUILD_DIR}\", \"${Q6_RTOS_ROOT}/debugger/T32/qurt_model.men\")\nt32men2=env.Install(\"${BUILD_DIR}\", \"${Q6_RTOS_ROOT}/debugger/T32/qurt_model.t32\")\nt32men=t32men1+t32men2\nt32bat = env.Command(\"${BUILD_DIR}/T32.bat\", '', [\"echo set PATH=%PATH%;${Q6_TOOLS_ROOT}/qc/bin; > 
$TARGET\",\"echo c:/t32/t32mqdsp6.exe -c t32sim.cfg >> $TARGET\"])\nenv.Depends(t32bat, [t32cfg, t32sirccfg, t32cmm,t32men])\n\nenv.Replace(SIM_RTOS = \"--rtos \" + str(osamcfg[0]))\nenv.Replace(SIM_COSIM = \"--cosim_file \" + str(q6cfg[0]))\nenv.Replace(SYMFILE = \"--symfile \" + str(qurtobj[0]))\nsimout = env.Simulator(\"${BUILD_DIR}/stats\", image)\nenv.Depends(simout, [t32bat, imgdump])\nAlwaysBuild(simout)\n","sub_path":"modem_proc/core/kernel/qurt/examples/mp_qurt/SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"137480302","text":"from django.db import models\nfrom django.core.cache import cache\nfrom django.db.models.query import QuerySet\n\nNEVER_EXPIRE = 60 * 60 * 24 * 30 # 30 days\nINSTANCE_CACHE_KEY = u'{0}.{1}:{2}'\n\n\ndef instance_cache_key(instance, label=None, version=None):\n \"Creates a cache key for the instance with an optional label and token.\"\n opts = instance._meta\n key = INSTANCE_CACHE_KEY.format(opts.app_label, opts.module_name, instance.pk)\n if label:\n key = u'{0}-{1}'.format(key, label)\n if version:\n key = u'{0}-{1}'.format(key, version)\n return key\n\n\ndef cached_property(label, version=None, timeout=NEVER_EXPIRE):\n \"Wraps a function and caches the output indefinitely.\"\n def decorator(func):\n def wrapped(self):\n # Do not cache non-persisted objects\n if not self.pk:\n return func(self)\n\n # If this is a function, pass `label' and `self` in as arguments\n if callable(version):\n _version = version(self, label=label)\n else:\n _version = getattr(self, version)\n # Call if a method\n if callable(_version):\n _version = _version()\n # If no version is defined, the cache cannot be reliably stored\n if _version is None:\n data = func(self)\n else:\n key = instance_cache_key(self, label=label, version=_version)\n data = cache.get(key)\n if data is None:\n data = func(self)\n # Don't bother caching if the data is None\n if data is not None:\n cache.set(key, data, timeout=timeout)\n return data\n return property(wrapped)\n return decorator\n\n\ndef post_save_cache(sender, instance, **kwargs):\n \"\"\"General post-save handler for caching model instances. NOTE: This must\n be used in conjunction with the `pre_delete_uncache` since the cache is set\n to never expire.\n \"\"\"\n cache.set(instance_cache_key(instance), instance, timeout=NEVER_EXPIRE)\n\n\ndef pre_delete_uncache(sender, instance, **kwargs):\n \"General post-delete handler for removing cache for model instances.\"\n cache.delete(instance_cache_key(instance))\n\n\n# `pk` is used as an alias, so this is constant\nPK_LOOKUPS = ['pk', 'pk__exact']\n\n\nclass CacheQuerySet(QuerySet):\n def filter(self, *args, **kwargs):\n \"\"\"For primary-key-based lookups, instances may be cached to prevent\n excessive database hits. 
If this is a primary-key lookup, the cache\n will be checked and populate the `_result_cache` if available.\n \"\"\"\n clone = super(CacheQuerySet, self).filter(*args, **kwargs)\n\n pk = None\n opts = self.model._meta\n pk_name = opts.pk.name\n\n # Look for `pk` and the actual name of the primary key field\n for key in PK_LOOKUPS + [pk_name, u'{0}__exact'.format(pk_name)]:\n if key in kwargs:\n pk = kwargs[key]\n break\n\n if pk is not None:\n key = INSTANCE_CACHE_KEY.format(opts.app_label, opts.module_name, pk)\n obj = cache.get(key)\n if obj is not None:\n clone._result_cache = [obj]\n\n return clone\n\n\nclass CacheManager(models.Manager):\n def get_query_set(self):\n return CacheQuerySet(self.model)\n","sub_path":"avocado/core/cache/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"622140591","text":"import os\nimport tkinter\n'''\n\nversion: 1.1.0\nauthor: smy\ne-mail: 2282420654@qq.com\n'''\n\ntop = tkinter.Tk()\n\ntop.title('登C号工具')\n\n\ndef center_window(root, width, height):\n screenwidth = root.winfo_screenwidth()\n screenheight = root.winfo_screenheight()\n size = '%dx%d+%d+%d' % (width, height,\n (screenwidth - width)/2, (screenheight - height)/2)\n print(size)\n root.geometry(size)\n\n\ncenter_window(top, 300, 240)\ntop.geometry(\"250x500\")\n\n\ndef helloCallBack(file):\n #tkMessageBox.showinfo( \"Hello Python\", \"Hello Runoob\")\n raw_str = []\n raw_number = []\n usb_str = []\n user_str = []\n id_str = []\n pwd_str = []\n filename = 'data.txt'\n idpwd_name = \"idpwd.txt\"\n # grep / findstr\n os.system('adb devices -l | findstr \".product.\">{name}'.format(name=filename))\n with open(filename, 'r') as f:\n raw_str = f.readlines()\n\n with open(idpwd_name, 'r') as f:\n user_str = f.readlines()\n\n for temp in user_str:\n id_str.append(temp.split(\"----\")[0])\n pwd_str.append(temp.split(\"----\")[1])\n\n for i, item in enumerate(raw_str):\n temp = item.split(\" \")[0]\n print(temp)\n usb = item.split(\" \")[1].split(\"device \")[1].split(\" product\")[0]\n raw_number.append(temp)\n #cmd_str='adb -s {number} shell input text start'.format(number=temp)\n #cmd_str = 'adb -s {number} shell input text start'.format(number=usb)\n bat_str = '{bat} {number} {username} {password}'.format(bat=file, number=usb, username=id_str[i], password=pwd_str[i]) # call\n # bat_str='./{bat} {number} {username} {password}'.format(bat=select_bat,number=usb,username=id_str[i],password=pwd_str[i]) #call\n print(bat_str)\n # os.system(cmd_str)\n os.popen(bat_str)\n\n\nB1 = tkinter.Button(top, text=u\"加好友\", command=lambda:helloCallBack(u\"打开微信并登陆.bat\"))\nB2 = tkinter.Button(top, text=u\"登陆\", command=lambda:helloCallBack(u\"登陆.bat\"))\nB3 = tkinter.Button(top, text=u\"截图\", command=lambda:helloCallBack(u\"截图.bat\"))\nB4 = tkinter.Button(top, text=u\"加好友\", command=lambda:helloCallBack(u\"加好友.bat\"))\nB1.pack()\nB2.pack()\nB3.pack()\nB4.pack()\ntop.mainloop()\n","sub_path":"adb/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"465973763","text":"import os\n\nfrom flask import Flask, jsonify, g, request\nfrom hackajob_phone_book_api import database\nfrom hackajob_phone_book_api.auth import auth\nfrom hackajob_phone_book_api.exceptions import AppException\nfrom hackajob_phone_book_api.repositories import EntryRepository\n\napp = Flask(__name__, 
instance_path=os.path.dirname(os.path.abspath(__file__)))\n\nif os.environ.get('SENTRY'):\n from raven.contrib.flask import Sentry\n\n sentry = Sentry(app, dsn=os.environ.get('SENTRY'))\n\n\n@app.route('/entries', methods=['POST'])\n@auth.login_required\ndef post():\n model = request.get_json(force=True, silent=True)\n if not model:\n return jsonify({'err': 'Please provide payload'}), 400\n\n return jsonify(EntryRepository.create_one(model))\n\n\n@app.route('/entries/', methods=['GET'])\n@auth.login_required\ndef get(entry_id):\n return jsonify(EntryRepository.get_one(entry_id))\n\n\n@app.route('/entries/', methods=['DELETE'])\n@auth.login_required\ndef delete(entry_id):\n EntryRepository.delete_one(entry_id)\n return '', 200\n\n\n@app.before_request\ndef before_request():\n g.db = database\n g.db.connect()\n\n\n@app.after_request\ndef after_request(response):\n g.db.close()\n return response\n\n\n@app.errorhandler(AppException)\ndef handle_errors(error):\n response = jsonify({'err': error.message})\n response.status_code = error.status_code\n return response\n\n\ndef main():\n with app.app_context():\n app.run('0.0.0.0', 8080, bool(int(os.environ.get('DEBUG', 0))))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"hackajob_phone_book_api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"451444959","text":"firstLine = input()\nLEN = int(firstLine.split()[0])\nNUM = int(firstLine.split()[1])\nlst = [int(input()) for i in range(NUM)]\nresult=[]\nfor i in range(NUM):\n result.append(lst[i]%LEN)\n\nresult999=[]\nfor i in range(NUM-1):\n for j in range(i+1,NUM):\n if result[i]==result[j] and j not in result999:\n result999.append(j)\nresult999.sort()\nif len(result999)==0:\n print(-1)\nelse:\n print(result999[0]+1)\n","sub_path":"Code/CodeRecords/2812/60613/234559.py","file_name":"234559.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"548439137","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author : pureoym\n# @Contact : pureoym@163.com\n# @TIME : 2018/10/9 15:21\n# @File : utils.py\n# Copyright 2017 pureoym. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ========================================================================\nimport os\nimport jieba\nimport numpy as np\n\nBASE_DIR = '/data0/search/ai_challenger/'\n\nSEG_SPLITTER = ' '\n\n# Prepossessing parameters\nMAX_SEQUENCE_LENGTH = 300 # 一条内容最多包含词数量\nMAX_NUM_WORDS = 150000 # 词典最大词数,若语料中含词数超过该数,则取前MAX_NUM_WORDS个\nNUM_LABELS = 4 # 分类数目\nEMBEDDING_DIM = 300\n\n\n# 打开停用词表并做处理\nSTOP_WORDS_LIST = os.path.join(BASE_DIR, 'data/stop_list.txt') # 停用词表\nwith open(STOP_WORDS_LIST, 'r') as f:\n stop_words = f.readlines()\ndel stop_words[0] # 删除txt文件第一行的特殊字符\nfor word in stop_words: # 删除每行最后的回车\n stop_words[stop_words.index(word)] = word.replace('\\n', '')\n\n# 加载word_index\nWORD_INDEX = os.path.join(BASE_DIR, 'data/word_index.npy')\nword_index = np.load(WORD_INDEX)[()]\nprint('load word_index: ' + WORD_INDEX)\n\n\n\ndef get_index_sequence_from_text(text):\n \"\"\"\n 获取词序列\n :param text:文本\n :return: 词序列\n \"\"\"\n # 分词\n tokens = segment(text)\n # 转换成词序号序列\n indexes = word2index(tokens)\n # 多截少补\n sequences = indexes.split(' ')[:MAX_SEQUENCE_LENGTH]\n while len(sequences) < MAX_SEQUENCE_LENGTH:\n sequences.append('0')\n x_test = np.matrix(sequences)\n return x_test\n\n\ndef segment(input_string):\n \"\"\"\n 分词\n :param input_string:\n :return:\n \"\"\"\n seg_origin = SEG_SPLITTER.join(jieba.cut(input_string, cut_all=False))\n seg_origin_list = seg_origin.split(SEG_SPLITTER)\n seg_stop_list = [word for word in seg_origin_list if word not in stop_words]\n return SEG_SPLITTER.join(seg_stop_list)\n\n\ndef word2index(tokens):\n \"\"\"\n 将输入的tokens转换成word_index中的序号\n :param tokens:\n :return:\n \"\"\"\n word_list = tokens.split(SEG_SPLITTER)\n indexes = []\n for word in word_list:\n if word is not None:\n if word in word_index.keys():\n index = word_index[word]\n if index > MAX_NUM_WORDS:\n indexes.append('0')\n else:\n indexes.append(str(index))\n else:\n indexes.append('0')\n return SEG_SPLITTER.join(indexes).strip()\n\n\n\n\n\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"217695929","text":"import os\nimport sys\nimport glob\n\nINPUT_DIR = sys.argv[1]\nOUTPUT_DIR = sys.argv[2]\n\n# reference\nfin = open(os.path.join(INPUT_DIR, 'ref/answer.txt'))\nref = [line.strip() for line in fin.readlines()]\n\nfin = open(os.path.join(INPUT_DIR, 'res/answer.txt'))\nres = [line.strip() for line in fin.readlines()]\nfin.close()\n\nacc = 0\nfor f, s in zip(ref, res):\n if f == s: acc += 1\n\nscore = 100.0 * acc / len(ref)\nprint('accuracy: %5.2f (%d/%d)' % (score, acc, len(ref)))\nfout = open(os.path.join(OUTPUT_DIR, 'scores.txt'), 'w')\nfout.write('accuracy:{0}\\n'.format(score))\nfout.close()","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} 
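# --- illustrative sketch (not a dataset record) ----------------------------
# The utils.py record above builds fixed-length index sequences by mapping
# tokens through word_index, truncating to MAX_SEQUENCE_LENGTH and padding
# with '0'. Below is a minimal self-contained version of that pad-or-truncate
# pattern; `vocab` and `max_len` are illustrative stand-ins for the trained
# vocabulary and configured length, not values taken from the record itself.
def to_fixed_length_indexes(tokens, vocab, max_len, oov_index=0):
    # Unknown words collapse to `oov_index`, like word2index's '0' fallback.
    indexes = [vocab.get(token, oov_index) for token in tokens]
    # Truncate overlong inputs, then right-pad short ones with `oov_index`.
    indexes = indexes[:max_len]
    return indexes + [oov_index] * (max_len - len(indexes))

# Example usage: a 4-slot sequence from a 2-word vocabulary.
assert to_fixed_length_indexes(['jieba', 'cut'], {'jieba': 1, 'cut': 2}, 4) == [1, 2, 0, 0]
# ----------------------------------------------------------------------------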
+{"seq_id":"393132358","text":"#!/usr/bin/env python\n# -*- coding:gbk -*-\n\nfrom typing import List\n\nclass TimSort:\n def __init__(self):\n self.__minMerge = 16\n self.__runBase, self.__runLength = [], []\n\n def sort(self, nums: List[int]) -> List[int]:\n if not nums or len(nums) < 2:\n return nums\n\n if len(nums) < self.__minMerge:\n runLength = self.__getAscendingLength(nums, 0, len(nums))\n self.__insertSort(nums, 0, len(nums), runLength)\n return nums\n\n aux = list(nums)\n\n base, length = 0, len(nums)\n while length > 0:\n runLength = self.__getAscendingLength(nums, base, len(nums))\n if runLength < self.__minMerge:\n size = min(length, self.__minMerge)\n self.__insertSort(nums, base, base + size, base + runLength)\n runLength = size\n\n self.__pushRun(base, runLength)\n self.__mergeRuns(nums, aux)\n\n base += runLength\n length -= runLength\n self.__mergeRunsForce(nums, aux)\n return nums\n\n def __getAscendingLength(self, nums: List[int], start: int, end: int) -> int:\n if start + 1 == end:\n return 1\n\n index = start + 1\n if nums[start] > nums[index]:\n while index < end - 1 and nums[index] > nums[index + 1]:\n index += 1\n nums[start: index + 1] = reversed(nums[start: index + 1])\n else:\n while index < end - 1 and nums[index] <= nums[index + 1]:\n index += 1\n return index - start + 1\n\n def __insertSort(self, nums: List[int], start: int, end: int, index: int) -> None:\n for i in range(index, end):\n pivot = nums[i]\n if pivot >= nums[i - 1]:\n continue\n\n left, right = start, i - 1\n while left <= right:\n middle = left + ((right - left) >> 1)\n if pivot < nums[middle]:\n right = middle - 1\n else:\n left = middle + 1\n\n for j in range(i - 1, left - 1, -1):\n nums[j + 1] = nums[j]\n nums[left] = pivot\n\n def __pushRun(self, base: int, length: int) -> None:\n self.__runBase.append(base)\n self.__runLength.append(length)\n\n def __mergeRuns(self, nums: List[int], aux: List[int]) -> None:\n while len(self.__runBase) > 1:\n index = len(self.__runBase) - 2\n if index > 0 and self.__runLength[index - 1] <= self.__runLength[index] + self.__runLength[index + 1]:\n if self.__runLength[index - 1] < self.__runLength[index + 1]:\n index -= 1\n self.__merge(nums, aux, index)\n elif self.__runLength[index] <= self.__runLength[index + 1]:\n self.__merge(nums, aux, index)\n else:\n return\n\n def __mergeRunsForce(self, nums: List[int], aux: List[int]) -> None:\n while len(self.__runBase) > 1:\n self.__merge(nums, aux, len(self.__runBase) - 2)\n\n def __merge(self, nums: List[int], aux: List[int], runIndex: int) -> None:\n base1, length1, base2, length2 = self.__runBase[runIndex], self.__runLength[runIndex], self.__runBase[runIndex + 1], self.__runLength[runIndex + 1]\n\n self.__runLength[runIndex] = length1 + length2\n if runIndex == len(self.__runLength) - 3:\n self.__runBase[runIndex + 1], self.__runLength[runIndex + 1] = self.__runBase[runIndex + 2], self.__runLength[runIndex + 2]\n self.__runBase.pop()\n self.__runLength.pop()\n\n start = self.__gallopLeft(nums, base1, length1, nums[base2])\n if start >= base1 + length1:\n return\n\n end = self.__gallopRight(nums, base2, length2, nums[base1 + length1 - 1])\n if end < base2:\n return\n\n aux[start : end + 1] = nums[start : end + 1]\n\n index, left1, right1, left2, right2 = start, start, base1 + length1 - 1, base2, end\n while left1 <= right1 and left2 <= right2:\n if aux[left1] > aux[left2]:\n nums[index] = aux[left2]\n left2 += 1\n else:\n nums[index] = aux[left1]\n left1 += 1\n index += 1\n\n if left1 <= right1:\n nums[index : 
index + right1 - left1 + 1] = aux[left1 : right1 + 1]\n if left2 <= right2:\n nums[index : index + right2 - left2 + 1] = aux[left2 : right2 + 1]\n\n def __gallopLeft(self, nums: List[int], base: int, length: int, pivot: int) -> int:\n start, end = base, base + length - 1\n while start <= end:\n middle = start + ((end - start) >> 1)\n if pivot <= nums[middle]:\n end = middle - 1\n else:\n start = middle + 1\n return end + 1\n\n def __gallopRight(self, nums: List[int], base: int, length: int, pivot: int) -> int:\n start, end = base, base + length - 1\n while start <= end:\n middle = start + ((end - start) >> 1)\n if pivot >= nums[middle]:\n start = middle + 1\n else:\n end = middle - 1\n return start - 1\n","sub_path":"1.数组/2.排序/E.Tim排序/TimSort.py","file_name":"TimSort.py","file_ext":"py","file_size_in_byte":5214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"262152490","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2017, laugusto and contributors\n# For license information, please see license.txt\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe.model.document import Document\n\nfrom datetime import datetime\n\ndef datetime_now():\n \n return datetime.now().strftime('%d-%m-%Y %H:%M:%S')\n \n@frappe.whitelist()\ndef start_maintenance(docname):\n\n doc_event = frappe.get_doc('Event', docname)\n if not doc_event.iniciar_manutencao:\n doc_event.iniciar_manutencao = datetime_now()\n doc_event.save()\n else:\n frappe.throw('A manutenção já foi iniciada!')\n\n@frappe.whitelist()\ndef end_maintenance(docname):\n\n doc_event = frappe.get_doc('Event', docname)\n if doc_event.iniciar_manutencao:\n if not doc_event.finalizar_manutencao:\n doc_event.finalizar_manutencao = datetime_now()\n doc_event.color = '#ff4d4d'\n doc_event.save()\n else:\n frappe.throw('Manutenção já finalizada!')\n else:\n frappe.throw('Manutenção não iniciada!')","sub_path":"ordem_servico/ordem_servico/event_document.py","file_name":"event_document.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"475116028","text":"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport stat\nfrom pathlib import Path\n\nfrom synthtool.gcp import common\nfrom synthtool.sources import templates\n\n\nFIXTURES = Path(__file__).parent / \"fixtures\"\nNODE_TEMPLATES = Path(__file__).parent.parent / \"synthtool/gcp/templates/node_library\"\n\n\ndef test_render():\n t = templates.Templates(FIXTURES)\n result = t.render(\"example.j2\", name=\"world\")\n\n assert result.name == \"example\"\n assert result.read_text() == \"Hello, world!\\n\"\n\n\ndef test_render_group():\n t = templates.TemplateGroup(FIXTURES / \"group\")\n result = t.render(var_a=\"hello\", var_b=\"world\")\n\n assert (result / \"1.txt\").read_text() == \"hello\\n\"\n assert (result / \"subdir\" / \"2.txt\").read_text() == \"world\\n\"\n\n\ndef 
test_render_preserve_mode():\n \"\"\"\n Test that rendering templates correctly preserve file modes.\n \"\"\"\n source_file = FIXTURES / \"executable.j2\"\n source_mode = source_file.stat().st_mode\n\n # Verify source fixture has execute permission for USER\n assert source_mode & stat.S_IXUSR\n\n t = templates.Templates(FIXTURES)\n result = t.render(\"executable.j2\", name=\"executable\")\n\n assert result.stat().st_mode == source_mode\n\n\ndef test_release_quality_badge():\n t = templates.Templates(NODE_TEMPLATES)\n result = t.render(\n \"README.md\", metadata={\"repo\": {\"release_level\": \"beta\"}, \"samples\": {}}\n ).read_text()\n assert f\"https://img.shields.io/badge/release%20level-beta-yellow.svg\" in result\n assert \"This library is considered to be in **beta**\" in result\n\n\ndef test_load_samples():\n cwd = os.getcwd()\n os.chdir(FIXTURES)\n\n common_templates = common.CommonTemplates()\n metadata = {}\n common_templates._load_samples(metadata)\n # should have loaded samples.\n assert metadata[\"samples\"][1][\"name\"] == \"Requester Pays\"\n assert metadata[\"samples\"][1][\"file\"] == \"requesterPays.js\"\n assert len(metadata[\"samples\"]) == 2\n # should have loaded the special quickstart sample (ignoring header).\n assert \"ID of the Cloud Bigtable instance\" in metadata[\"quickstart\"]\n assert \"limitations under the License\" not in metadata[\"quickstart\"]\n\n os.chdir(cwd)\n\n\ndef test_syntax_highlighter():\n t = templates.Templates(NODE_TEMPLATES)\n result = t.render(\n \"README.md\",\n metadata={\"repo\": {\"language\": \"nodejs\"}, \"quickstart\": \"const foo = 'bar'\"},\n ).read_text()\n assert \"```javascript\" in result\n\n\ndef test_hide_billing():\n t = templates.Templates(NODE_TEMPLATES)\n\n result = t.render(\n \"README.md\", metadata={\"repo\": {\"requires_billing\": True, \"api_id\": \"fooapi\"}}\n ).read_text()\n assert \"Enable billing for your project\" in result\n\n result = t.render(\n \"README.md\", metadata={\"repo\": {\"requires_billing\": False}}\n ).read_text()\n assert \"Enable billing for your project\" not in result\n\n\ndef test_readme_partials():\n cwd = os.getcwd()\n os.chdir(FIXTURES)\n\n common_templates = common.CommonTemplates()\n metadata = {}\n common_templates._load_partials(metadata)\n # should have populated introduction from partial.\n assert (\n \"objects to users via direct download\" in metadata[\"partials\"][\"introduction\"]\n )\n\n os.chdir(cwd)\n","sub_path":"tests/test_templates.py","file_name":"test_templates.py","file_ext":"py","file_size_in_byte":3766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"406402999","text":"\"\"\"The module for the Nampi data entry form parser.\n\nClasses:\n Nampi_data_entry_form\n\"\"\"\nimport logging\nfrom typing import List, Optional\n\nimport pandas\nfrom pandas import Series\nfrom rdflib.term import URIRef\n\nfrom modules.appellation import Appellation, Appellation_type\nfrom modules.appellation_assignment import Appellation_assignment\nfrom modules.aspect import Aspect\nfrom modules.author import Author\nfrom modules.birth import Birth\nfrom modules.death import Death\nfrom modules.di_act import Di_act\nfrom modules.event import Event\nfrom modules.family import Family\nfrom modules.gender import Gender\nfrom modules.group import Group\nfrom modules.nampi_graph import Nampi_graph\nfrom modules.nampi_type import Nampi_type\nfrom modules.person import Person\nfrom modules.place import Place\nfrom modules.source import 
Source\nfrom modules.source_location import Source_location\nfrom modules.source_type import Source_type\nfrom modules.title import Title\nfrom parsers.nampi_data_entry_form.nampi_data_entry_form import Column\nfrom parsers.nampi_data_entry_form.nampi_data_entry_form import \\\n Nampi_data_entry_form as Sheet\nfrom parsers.nampi_data_entry_form.nampi_data_entry_form import (\n Table, added_investiture_label, family_member_label)\n\n_group_types = {\n \"Christian denomination\": Nampi_type.Mona.christian_denomination,\n \"Diocese\": Nampi_type.Mona.diocese,\n \"Family\": Nampi_type.Mona.family,\n \"Monastic community\": Nampi_type.Mona.monastic_community,\n \"Parish\": Nampi_type.Mona.parish,\n \"Polity\": Nampi_type.Mona.polity,\n \"Religious denomination\": Nampi_type.Mona.religious_denomination,\n \"Religious order\": Nampi_type.Mona.religious_order,\n \"Religious polity\": Nampi_type.Mona.religious_polity,\n \"Historic diocese\": Nampi_type.Mona.historic_diocese,\n}\n\n_status_types = {\n \"Academic degree\": Nampi_type.Mona.academic_degree,\n \"Clergy\": Nampi_type.Mona.clergy,\n \"Community subsacristan\": Nampi_type.Mona.community_subsacristan,\n \"Community superior\": Nampi_type.Mona.community_superior,\n \"Member of a religious community\": Nampi_type.Mona.member_of_a_religious_community,\n \"Member of a religious community with manual focus\": Nampi_type.Mona.member_of_a_religious_community_with_manual_focus,\n \"Member of a religious community with spiritual focus\": Nampi_type.Mona.member_of_a_religious_community_with_spiritual_focus,\n \"Procurator\": Nampi_type.Mona.procurator,\n \"Professed member of a religious community\": Nampi_type.Mona.professed_member_of_a_religious_community,\n \"Vice community superior\": Nampi_type.Mona.vice_community_superior,\n \"Visitator\": Nampi_type.Mona.visitator,\n \"Monastic office with spiritual focus\": Nampi_type.Mona.monastic_office_with_spiritual_focus,\n \"Monastic office with manual focus\": Nampi_type.Mona.monastic_office_with_manual_focus,\n \"Monastic office\": Nampi_type.Mona.monastic_office,\n \"Member of a religious community visiting\": Nampi_type.Mona.member_of_a_religious_community_visiting,\n \"Religious life outside a community\": Nampi_type.Mona.religious_life_outside_a_community,\n \"Office in a diocese\": Nampi_type.Mona.office_in_a_diocese,\n \"Secular office\": Nampi_type.Mona.secular_office,\n \"Educator\": Nampi_type.Mona.educator,\n \"Office\": Nampi_type.Mona.office,\n \"Ruler of a school\": Nampi_type.Mona.ruler_of_a_school,\n \"Status\": Nampi_type.Core.status,\n \"Aspect\": Nampi_type.Core.aspect,\n \"Unspecified aspect\": Nampi_type.Mona.unspecified_aspect,\n}\n\n_occupation_types = {\n \"Administration of a community\": Nampi_type.Mona.administration_of_a_community,\n \"Associated parish clergy\": Nampi_type.Mona.associated_parish_clergy,\n \"Clergy\": Nampi_type.Mona.clergy,\n \"Official\": Nampi_type.Mona.official,\n \"Trade\": Nampi_type.Mona.trade,\n \"Rule of a community\": Nampi_type.Mona.rule_of_a_community,\n \"Monastic office\": Nampi_type.Mona.monastic_office,\n \"Secular office\": Nampi_type.Mona.secular_office,\n \"Office in a diocese\": Nampi_type.Mona.office_in_a_diocese,\n \"Office\": Nampi_type.Mona.office,\n \"Educator\": Nampi_type.Mona.educator,\n \"Servant\": Nampi_type.Mona.servant,\n \"Visitator\": Nampi_type.Mona.visitator,\n \"Highly skilled professional\": Nampi_type.Mona.highly_skilled_professional,\n \"Rule of a school\": Nampi_type.Mona.rule_of_a_school,\n \"Occupation\": 
Nampi_type.Core.occupation,\n \"Aspect\": Nampi_type.Core.aspect,\n \"Unspecified aspect\": Nampi_type.Mona.unspecified_aspect,\n}\n\n\ndef safe_str(row: Series, column: str) -> Optional[str]:\n return str(row[column]) if column in row else None\n\n\nclass Nampi_data_entry_form_parser:\n \"\"\"A parser that parses the NAMPI input tables and transforms the data to an RDF graph.\"\"\"\n\n __sheet: Sheet\n _graph: Nampi_graph\n\n def __init__(\n self,\n graph: Nampi_graph,\n cache_path: str,\n credentials_path: str,\n cache_validity_days: int,\n ):\n \"\"\"Initialize the class.\n\n Parameters:\n graph: The data graph.\n \"\"\"\n self.__sheet = Sheet(cache_path, credentials_path, cache_validity_days)\n self._graph = graph\n\n logging.info(\"Parse the data for '{}'\".format(self.__sheet.sheet_name))\n\n self.__add_persons()\n self.__add_births()\n self.__add_deaths()\n self.__add_complex_events()\n self.__add_investiture_events_for_professions()\n self.__add_religious_names()\n self.__add_independent_titles()\n\n logging.info(\n \"Finished parsing the data for '{}'\".format(self.__sheet.sheet_name)\n )\n\n def __add_births(self):\n \"\"\"\n Add all births from the births table including names and family group memberships.\n \"\"\"\n for _, row in self.__sheet.get_table(Table.BIRTHS).iterrows():\n born_person = self.__get_person(safe_str(row, Column.person))\n if not born_person:\n continue\n mother = self.__get_person(safe_str(row, Column.mother))\n father = self.__get_person(safe_str(row, Column.father))\n birth_place = self.__get_place(safe_str(row, Column.event_place))\n family_names = [\n self.__sheet.get_from_table(\n Table.PERSONS,\n Column.name,\n born_person.label,\n Column.family_name_with_group,\n ),\n self.__sheet.get_from_table(\n Table.PERSONS,\n Column.name,\n born_person.label,\n Column.family_name_gender_neutral,\n ),\n self.__sheet.get_from_table(\n Table.PERSONS, Column.name, born_person.label, Column.family_name\n ),\n ]\n given_name_label = self.__sheet.get_from_table(\n Table.PERSONS, Column.name, born_person.label, Column.given_name\n )\n family_group_label = next((s for s in family_names if s), None)\n birth = Birth(\n self._graph,\n born_person,\n birth_place,\n exact_date=safe_str(row, Column.exact_date),\n earliest_date=safe_str(row, Column.earliest_date),\n latest_date=safe_str(row, Column.latest_date),\n family_name_label=family_names[2],\n given_name_label=given_name_label,\n family_group_label=family_group_label,\n mother=mother,\n father=father,\n )\n self.__insert_di_act(birth, row=row)\n logging.debug(\n \"Added 'birth' for person '{}'\".format(birth.main_person.label)\n )\n logging.info(\"Parsed the births\")\n\n def __add_complex_events(self):\n \"\"\"\n Add all complex events from the complex events table.\n \"\"\"\n for _, row in self.__sheet.get_table(Table.COMPLEX_EVENTS).iterrows():\n main_person = self.__get_person(safe_str(row, Column.person))\n if not main_person:\n continue\n definition = safe_str(row, Column.event_definition)\n event = None\n\n other_participant_labels: List[str] = []\n if Column.other_person_1 in row:\n other_participant_labels.append(str(row[Column.other_person_1]))\n if Column.other_person_2 in row:\n other_participant_labels.append(str(row[Column.other_person_2]))\n if Column.other_person_3 in row:\n other_participant_labels.append(str(row[Column.other_person_3]))\n if Column.other_person_4 in row:\n other_participant_labels.append(str(row[Column.other_person_4]))\n if Column.other_person_5 in row:\n 
other_participant_labels.append(str(row[Column.other_person_5]))\n other_participants: List[Event.Person_definition] = []\n for label in other_participant_labels:\n person = self.__get_person(label)\n if person:\n other_participants.append({\"person\": person})\n\n def get_def_column(column: str):\n return self.__sheet.get_from_table(\n Table.EVENT_DEFINITIONS, Column.name, definition, column\n )\n\n def merge_event():\n nonlocal event, main_person, definition\n if event:\n return event\n place = self.__get_place(safe_str(row, Column.event_place))\n assert main_person is not None\n event = Event(\n self._graph,\n main_person,\n place=place,\n earliest_date=safe_str(row, Column.earliest_date),\n exact_date=safe_str(row, Column.exact_date),\n latest_date=safe_str(row, Column.latest_date),\n label=str(definition),\n other_participants=other_participants,\n )\n return event\n\n group_label = get_def_column(Column.status_occupation_in_group)\n group = self.__get_group(group_label)\n added_status_label = get_def_column(Column.added_status)\n removed_status_label = get_def_column(Column.removed_status)\n started_occupation_label = get_def_column(Column.started_occupation)\n stopped_occupation_label = get_def_column(Column.stopped_occupation)\n religious_title_text = get_def_column(Column.assigned_religious_title)\n\n if religious_title_text:\n e = merge_event()\n title = Title(\n self._graph, religious_title_text, Nampi_type.Mona.religious_title\n )\n e.add_relationship(\n obj=main_person, pred=Nampi_type.Core.has_main_participant\n )\n e.add_relationship(obj=title, pred=Nampi_type.Core.adds_aspect)\n\n if (\n added_status_label or started_occupation_label\n ) and added_status_label == started_occupation_label:\n aspect_label = added_status_label\n status_type = self.__get_status_type(added_status_label)\n occupation_type = self.__get_occupation_type(started_occupation_label)\n types: List[URIRef] = []\n if status_type:\n types.append(status_type)\n if occupation_type:\n types.append(occupation_type)\n aspect = Aspect(self._graph, aspect_label, types)\n e = merge_event()\n e.add_relationship(\n obj=main_person, pred=Nampi_type.Core.has_main_participant\n )\n if group:\n e.add_relationship(\n obj=group, pred=Nampi_type.Core.changes_aspect_related_to\n )\n e.add_relationship(obj=aspect, pred=Nampi_type.Core.adds_aspect)\n else:\n if added_status_label:\n type = self.__get_status_type(added_status_label)\n aspect = Aspect(self._graph, added_status_label, type)\n e = merge_event()\n e.add_relationship(\n obj=main_person, pred=Nampi_type.Core.has_main_participant\n )\n if group:\n e.add_relationship(\n obj=group, pred=Nampi_type.Core.changes_aspect_related_to\n )\n e.add_relationship(obj=aspect, pred=Nampi_type.Core.adds_aspect)\n if started_occupation_label:\n type = self.__get_occupation_type(started_occupation_label)\n aspect = Aspect(self._graph, started_occupation_label, type)\n e = merge_event()\n e.add_relationship(\n obj=main_person, pred=Nampi_type.Core.has_main_participant\n )\n e.add_relationship(obj=aspect, pred=Nampi_type.Core.adds_aspect)\n if group:\n e.add_relationship(\n obj=group, pred=Nampi_type.Core.changes_aspect_related_to\n )\n\n if (\n removed_status_label or stopped_occupation_label\n ) and removed_status_label == stopped_occupation_label:\n aspect_label = removed_status_label\n status_type = self.__get_status_type(removed_status_label)\n occupation_type = self.__get_occupation_type(stopped_occupation_label)\n types: List[URIRef] = []\n if status_type:\n 
types.append(status_type)\n if occupation_type:\n types.append(occupation_type)\n aspect = Aspect(self._graph, aspect_label, types)\n e = merge_event()\n e.add_relationship(\n obj=main_person, pred=Nampi_type.Core.has_main_participant\n )\n if group:\n e.add_relationship(\n obj=group, pred=Nampi_type.Core.changes_aspect_related_to\n )\n e.add_relationship(obj=aspect, pred=Nampi_type.Core.removes_aspect)\n else:\n if removed_status_label:\n type = self.__get_status_type(removed_status_label)\n aspect = Aspect(self._graph, removed_status_label, type)\n e = merge_event()\n e.add_relationship(\n obj=main_person, pred=Nampi_type.Core.has_main_participant\n )\n if group:\n e.add_relationship(\n obj=group, pred=Nampi_type.Core.changes_aspect_related_to\n )\n e.add_relationship(obj=aspect, pred=Nampi_type.Core.removes_aspect)\n if stopped_occupation_label:\n type = self.__get_occupation_type(stopped_occupation_label)\n aspect = Aspect(self._graph, stopped_occupation_label, type)\n e = merge_event()\n e.add_relationship(\n obj=main_person, pred=Nampi_type.Core.has_main_participant\n )\n e.add_relationship(obj=aspect, pred=Nampi_type.Core.removes_aspect)\n if group:\n e.add_relationship(\n obj=group, pred=Nampi_type.Core.changes_aspect_related_to\n )\n\n if event:\n self.__insert_di_act(event, row=row)\n else:\n logging.warn(\n \"Skip event '{}' for person '{}'\".format(\n definition, main_person.label\n )\n )\n logging.info(\"Parsed the complex events\")\n\n def __add_deaths(self):\n \"\"\"\n Add all death events from the deaths table.\n \"\"\"\n for _, row in self.__sheet.get_table(Table.DEATHS).iterrows():\n died_person = self.__get_person(safe_str(row, Column.person))\n if not died_person:\n continue\n death_place = self.__get_place(safe_str(row, Column.event_place))\n death = Death(\n self._graph,\n died_person,\n place=death_place,\n earliest_date=safe_str(row, Column.earliest_date),\n exact_date=safe_str(row, Column.exact_date),\n latest_date=safe_str(row, Column.latest_date),\n )\n self.__insert_di_act(death, row=row)\n logging.info(\"Parsed the deaths\")\n\n def __add_independent_titles(self):\n title_query = \"\"\"\n PREFIX core: \n PREFIX mona: \n PREFIX rdfs: \n SELECT ?text\n WHERE {{\n \t ?event a core:event ;\n core:assigns_title/core:has_xsd_string ?text ;\n core:assigns_title_to/rdfs:label \"{}\" .\n }}\n \"\"\"\n for _, row in self.__sheet.get_table(Table.PERSONS).iterrows():\n religious_title = safe_str(row, Column.religious_title)\n person_label = safe_str(row, Column.name)\n if religious_title:\n has_existing_title = bool(\n self._graph.graph.query(title_query.format(person_label))\n )\n if not has_existing_title:\n person = self.__get_person(person_label)\n if person:\n assert person_label\n event = Event(\n self._graph,\n person,\n label=\"Assign religious title \" + religious_title,\n main_person_relationship=Nampi_type.Core.has_main_participant,\n )\n title = Title(\n self._graph,\n religious_title,\n Nampi_type.Mona.religious_title,\n )\n event.add_relationship(Nampi_type.Core.adds_aspect, title)\n self.__insert_di_act(event, row)\n logging.debug(\n \"Assigns title '{}' to '{}'\".format(\n religious_title, person_label\n )\n )\n logging.info(\"Finish adding independent titles\")\n\n def __add_investiture_events_for_professions(self):\n \"\"\"\n Add investiture events for persons that have specific profession events\n \"\"\"\n professions_query = \"\"\"\n PREFIX core: \n PREFIX mona: \n PREFIX rdfs: \n SELECT ?author ?authoring_date ?source ?source_location ?group 
?person_node ?person ?place ?exact_date ?earliest_date ?latest_date\n WHERE {\n ?event_node a core:event ;\n rdfs:label ?event_label .\n ?dia_node core:has_interpretation ?event_node ;\n core:is_authored_by/rdfs:label ?author ;\n core:is_authored_on/core:has_date_time ?authoring_date ;\n core:has_source_location ?source_node .\n ?source_node (core:has_source|core:has_online_source|mona:has_paged_source)/rdfs:label ?source ;\n (core:has_value|core:has_text|core:has_url|mona:has_page_number) ?source_location .\n ?event_node core:changes_aspect_related_to/rdfs:label ?group ;\n core:has_main_participant ?person_node .\n ?person_node rdfs:label ?person .\n OPTIONAL { ?event_node core:takes_place_at/rdfs:label ?place }\n OPTIONAL { ?event_node core:takes_place_on/core:has_date_time ?exact_date }\n OPTIONAL { ?event_node core:takes_place_not_later_than/core:has_date_time ?latest_date }\n OPTIONAL { ?event_node core:takes_place_not_earlier_than/core:has_date_time ?earliest_date }\n VALUES ?event_label { \"Profession as choir monk in Astheim\" \"Profession as choir monk in Bistra\" \"Profession as choir monk in Gaming\" \"Profession as choir monk in Žiče\" \"Profession as choir nun in Imbach\" \"Profession as choir nun in St. Jakob\" \"Profession as converse in Gaming\" \"Profession as lay sister in Imbach\" \"Profession as priest monk in Gaming\" \"Profession as choir nun in St. Laurenz\" \"Profession as choir monk in Brno\" \"Second profession as choir nun in Imbach\" \"Secret profession as choir nun in Imbach\" }\n }\n \"\"\"\n has_investiture_event_query = \"\"\"\n PREFIX core: \n PREFIX rdfs: \n ASK WHERE {{\n ?event core:has_main_participant <{}> ;\n rdfs:label ?label .\n FILTER ( CONTAINS(LCASE(?label), \"investiture\") )\n }}\n \"\"\"\n for row in self._graph.graph.query(professions_query):\n has_investiture_event = bool(\n self._graph.graph.query(\n has_investiture_event_query.format(row[\"person_node\"])\n )\n )\n if not has_investiture_event:\n person = self.__get_person(str(row[\"person\"]))\n if not person:\n continue\n status_type = self.__get_status_type(added_investiture_label)\n aspect = Aspect(self._graph, added_investiture_label, status_type)\n author_label = str(row[\"author\"])\n interpretation_date_text = str(row[\"authoring_date\"]).partition(\"T\")[0]\n source_label = str(row[\"source\"])\n source_location_label = str(row[\"source_location\"])\n group = self.__get_group(str(row[\"group\"]))\n assert group is not None\n place = self.__get_place(str(row[\"place\"]))\n exact_date = (\n str(row[\"exact_date\"]).partition(\"T\")[0]\n if row[\"exact_date\"]\n else None\n )\n earliest_date = (\n str(row[\"earliest_date\"]).partition(\"T\")[0]\n if row[\"earliest_date\"]\n else None\n )\n latest_date = (\n str(row[\"latest_date\"]).partition(\"T\")[0]\n if row[\"latest_date\"]\n else None\n )\n dates_sorted_by_specificity = [exact_date, latest_date, earliest_date]\n most_specific_date = next(\n (s for s in dates_sorted_by_specificity if s), None\n )\n event = Event(\n self._graph,\n person,\n main_person_relationship=Nampi_type.Core.has_main_participant,\n place=place,\n latest_date=most_specific_date,\n label=\"Investiture in \" + str(group.label),\n )\n event.add_relationship(\n obj=group, pred=Nampi_type.Core.changes_aspect_related_to\n )\n event.add_relationship(obj=aspect, pred=Nampi_type.Core.adds_aspect)\n self.__insert_di_act(\n event,\n author_label=author_label,\n source_label=source_label,\n source_location_label=source_location_label,\n 
interpretation_date_text=interpretation_date_text,\n )\n logging.debug(\n \"Added investiture event and interpretation for '{}'\".format(\n person.label\n )\n )\n logging.info(\"Finished adding investiture events\")\n\n def __add_persons(self):\n \"\"\"\n Add all persons from the persons table not being added in birth events.\n \"\"\"\n for _, row in self.__sheet.get_table(Table.PERSONS).iterrows():\n if not row[Column.source]:\n # Only use entries with source\n logging.warning(\n \"No source entry for 'person' table row '{}'\".format(\n row[Column.name]\n )\n )\n continue\n person_label = safe_str(row, Column.name)\n has_birth_event = self.__sheet.table_has_value(\n Table.BIRTHS,\n Column.person,\n person_label,\n )\n person = self.__get_person(safe_str(row, Column.name))\n if not person:\n continue\n if not has_birth_event:\n # Get all family name variants from the person table\n family_names = [\n safe_str(row, Column.family_name_with_group),\n safe_str(row, Column.family_name_gender_neutral),\n safe_str(row, Column.family_name),\n ]\n # Get the official family name by looking through the ordered family_names list and picking the first match\n family_group_name = next((s for s in family_names if s), None)\n # Add family name group membership\n if family_group_name:\n family = Family(self._graph, family_group_name)\n aspect = Aspect(self._graph, family_member_label)\n become_member_event = Event(\n self._graph,\n person,\n Nampi_type.Core.has_main_participant,\n label=\"Become family member\",\n )\n become_member_event.add_relationship(\n Nampi_type.Core.adds_aspect, aspect\n )\n become_member_event.add_relationship(\n Nampi_type.Core.changes_aspect_related_to, family\n )\n self.__insert_di_act(become_member_event, row=row)\n logging.debug(\n \"Added 'membership' in family '{}' for birthless person '{}'\".format(\n family.label, row[Column.name]\n )\n )\n # Add names for persons that don't have birth events\n if family_names[2]:\n # Add personal family name\n fn_assignment = Appellation_assignment(\n self._graph,\n person,\n family_names[2],\n Appellation_type.FAMILY_NAME,\n )\n self.__insert_di_act(fn_assignment, row=row)\n if row[Column.given_name]:\n # Add given name\n gn_assignment = Appellation_assignment(\n self._graph, person, str(safe_str(row, Column.given_name))\n )\n self.__insert_di_act(gn_assignment, row=row)\n logging.debug(\n \"Added 'names' for birthless person '{}'\".format(row[Column.name])\n )\n logging.info(\"Parsed the persons\")\n\n def __add_religious_names(self):\n query = \"\"\"\n PREFIX core: \n PREFIX rdfs: \n SELECT ?event ?person ?person_label\n WHERE {\n ?event a core:event ;\n rdfs:label ?label ;\n core:has_main_participant ?person .\n ?person rdfs:label ?person_label\n FILTER (CONTAINS(LCASE(?label), \"investiture\"))\n }\n ORDER BY ?label\n \"\"\"\n for row in self._graph.graph.query(query):\n person = row[\"person\"]\n person_label = str(row[\"person_label\"])\n investiture = row[\"event\"]\n religious_name = self.__sheet.get_from_table(\n Table.PERSONS, Column.name, person_label, Column.religious_name\n )\n if religious_name:\n appellation = Appellation(\n self._graph,\n appellation_type=Appellation_type.RELIGIOUS_NAME,\n text=religious_name,\n )\n self._graph.add(\n investiture, Nampi_type.Core.adds_aspect, appellation.node\n )\n self._graph.add(\n investiture, Nampi_type.Core.has_main_participant, person\n )\n logging.debug(\n \"Assigned religious name '{}' to '{}' in investiture\".format(\n religious_name, person_label\n )\n )\n 
logging.info(\"Finished adding religious names to investitures\")\n\n def __get_group(self, group_label: Optional[str]) -> Optional[Group]:\n if not group_label:\n return None\n group_type_label = self.__sheet.get_from_table(\n Table.GROUPS, Column.name, group_label, Column.type\n )\n part_of_label = self.__sheet.get_from_table(\n Table.GROUPS, Column.name, group_label, Column.part_of\n )\n group_type = (\n _group_types[group_type_label]\n if group_type_label\n else Nampi_type.Core.group\n )\n part_of_group = self.__get_group(part_of_label) if part_of_label else None\n group = Group(self._graph, group_label, group_type)\n if part_of_group:\n group.add_relationship(Nampi_type.Core.is_part_of, part_of_group)\n return group\n\n def __get_person(self, person_label: Optional[str]) -> Optional[Person]:\n gender_text = self.__sheet.get_from_table(\n Table.PERSONS, Column.name, person_label, Column.gender\n )\n gender = None\n if gender_text == \"M\":\n gender = Gender.MALE\n elif gender_text == \"F\":\n gender = Gender.FEMALE\n gnd_id = self.__sheet.get_from_table(\n Table.PERSONS, Column.name, person_label, Column.gnd_id\n )\n return Person.optional(self._graph, person_label, gender=gender, gnd_id=gnd_id)\n\n def __get_place(self, place_label: Optional[str]) -> Optional[Place]:\n geoname_id = self.__sheet.get_from_table(\n Table.PLACES, Column.name, place_label, Column.geoname_id\n )\n wikidata_id = self.__sheet.get_from_table(\n Table.PLACES, Column.name, place_label, Column.wikidata\n )\n return Place.optional(\n self._graph, place_label, geoname_id=geoname_id, wikidata_id=wikidata_id\n )\n\n def __get_source_location(\n self, source_label: str, location_text: str\n ) -> Source_location:\n source_type_text = self.__sheet.get_from_table(\n Table.SOURCES, Column.title, source_label, Column.type\n )\n source_type = None\n if source_type_text == \"Manuscript\":\n source_type = Source_type.MANUSCRIPT\n elif source_type_text == \"Online Resource\":\n source_type = Source_type.ONLINE_RESOURCE\n if not source_type:\n raise ValueError(\n \"Could not find source type for '{}'\".format(source_type_text)\n )\n source = Source(self._graph, source_label, source_type)\n return Source_location(self._graph, source, location_text)\n\n def __get_status_type(self, status_label):\n type_label = self.__sheet.get_from_table(\n Table.STATUSES, Column.name, status_label, Column.type\n )\n if type_label:\n if type_label in _status_types:\n return _status_types[type_label]\n else:\n logging.warning(\n \"No status type defined for label '{}'\".format(type_label)\n )\n return None\n\n def __get_occupation_type(self, occupation_label):\n type_label = self.__sheet.get_from_table(\n Table.OCCUPATIONS, Column.name, occupation_label, Column.type\n )\n if type_label:\n if type_label in _occupation_types:\n return _occupation_types[type_label]\n else:\n logging.warning(\n \"No occupation type defined for label '{}'\".format(type_label)\n )\n return None\n\n def __insert_di_act(\n self,\n event: Event,\n row: Series = pandas.Series(),\n author_label: str = \"\",\n source_label: str = \"\",\n source_location_label: str = \"\",\n interpretation_date_text: Optional[str] = None,\n ):\n source_label = row[Column.source] if Column.source in row else source_label\n source_location_label = (\n row[Column.source_location]\n if Column.source_location in row\n else source_location_label\n )\n source_location = self.__get_source_location(\n source_label, source_location_label\n )\n interpretation_date = (\n row[Column.interpretation_date]\n 
if Column.interpretation_date in row\n else interpretation_date_text\n )\n comment = row[Column.comment] if Column.comment in row else None\n if comment:\n event.add_comment(comment)\n Di_act(\n self._graph,\n event,\n [\"Patrick Fiska\", \"Irene Rabl\"],\n source_location,\n interpretation_date,\n )\n","sub_path":"parsers/nampi_data_entry_form/nampi_data_entry_form_parser.py","file_name":"nampi_data_entry_form_parser.py","file_ext":"py","file_size_in_byte":34137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"210993310","text":"import pytest\nimport yaml\n\n\nclass Test_Data:\n\n @pytest.mark.parametrize(\"a,b\",yaml.safe_load(open(\"./data.yaml\")))\n def test_a(self,a,b):\n\n print(a+b)\n\n\n\nif __name__ == '__main__':\n pytest.main([\"test_data.py\"])","sub_path":"test_data.py","file_name":"test_data.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"148044395","text":"# This program creates a boxplot from data stored in a csv file and saves it as a png image.\n\n# The data file must be one column of numbers - no column labels, etc.\n# It must be saved as a csv file (e.g. use \"Save As\" in Excel and choose csv format).\n# It must be saved in the same folder as this program.\n# See the file sample_boxplot_data.csv for reference.\n\n# In the next line, replace sample_boxplot_data.csv with the filename of your data:\ndata_filename = 'sample_boxplot_data.csv'\n\n# In the next line, replace boxplot with the filename you wish to save as:\noutput_filename = 'boxplot.png'\n\n# Use the next line to set figure height and width (experiment to check the scale):\nfigure_width, figure_height = 4,10\n\n# You can ignore these two lines:\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndata = np.genfromtxt(data_filename)\n\n# If there are errors importing the data, you can also copy the data in as a list.\n# e.g. data = [1.95878982, 2.59203983, 1.22704688, ...]\n\n# This line creates the figure. 
\nplt.figure(figsize=(figure_width,figure_height))\n\n# Uncomment the next three lines to set the axis limits (otherwise they will be set automatically):\n#axis_min = 0.95\n#axis_max = 4.05\n#plt.ylim([axis_min,axis_max])\n\n# The next lines create and save the plot:\nplt.xlim([0.75,1.25])\nplt.xticks([])\nplt.boxplot(data,manage_xticks=False)\nplt.savefig(output_filename)","sub_path":"boxplot_plotter.py","file_name":"boxplot_plotter.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"451355125","text":"import networkx as nx\nfrom qiskit import *\nfrom qiskit.tools.monitor import job_monitor\n\nimport os\nos.path.abspath(os.curdir)\nos.path.sys.path.append('../hamiltonian_engine/')\nfrom expectation_value import expectation_value as ex_v\nfrom hamiltonian import mixer_hamiltonian as mix_ham\nfrom hamiltonian import phase_hamiltonian as phs_ham\n\n\n\nclass skeletor:\n objective_function = None \n variables = None\n graph = None\n shots = 0\n\n def __init__(self, p: int, obj_fun: str, variables: list, boolean: bool, graph: nx.Graph = None):\n # self.shots = shots\n self.graph = graph\n self.p = p\n self.objective_function = obj_fun\n self.variables = variables\n\n self.phse_ham = phs_ham(self.objective_function, self.variables)\n\n # generate Phase Hamiltonian\n self.phse_ham.Hamify(boolean=boolean)\n\n if graph != None:\n self.expectation = ex_v(\n self.objective_function, self.variables, is_graph=True)\n else:\n self.expectation = ex_v(\n self.objective_function, self.variables, is_graph=False)\n\n self.mx_ham = mix_ham()\n\n def get_objFun(self):\n return self.phse_ham.get_objFun()\n\n def get_pHamil(self):\n return self.phse_ham.get_pHamil()\n\n def set_upMixerHamiltonian(self, func, inverse=None):\n self.mx_function = (func, inverse)\n \n def setup_device(self, run_function, function_args:dict):\n self.qpu_execution = run_function\n self.qpu_args = function_args\n\n def generate_quantumCircuit(self, hyperparams: list):\n assert len(hyperparams) == 2*self.p\n\n l = len(hyperparams)\n gammas = hyperparams[:l//2]\n betas = hyperparams[l//2:]\n\n if self.graph != None:\n self.phse_ham.perEdgeMap(gammas, self.p, self.graph, True, True)\n else:\n self.phse_ham.perQubitMap(gammas, self.p, True, True)\n\n phse_map = self.phse_ham.qubit_map\n\n self.expectation.use_qubitMap(phse_map)\n\n if self.mx_function[0] == \"general\":\n self.mx_ham.generalXMixer(betas, self.p, phse_map, True)\n elif self.mx_function[0] == \"controlled\":\n self.mx_ham.controlledXMixer(betas, self.p, self.graph, inverse=self.mx_function[1], measure=True)\n\n self.circuit = self.phse_ham / self.mx_ham\n\n return self.circuit.draw(output='mpl')\n\n def run_circuit(self):\n\n job = self.qpu_execution(self.circuit, **self.qpu_args)\n\n job_monitor(job, quiet=True)\n\n results = job.result()\n\n print('Run Complete! 
job_id : {}'.format(job.job_id()))\n\n print(\"Expectation Value : {}\".format(self.expectation.get_expectationValue(results, self.qpu_args['shots'], self.graph)))\n\n return results\n\n def run_skeletor(self, hyperparameters: list):\n \n self.generate_quantumCircuit(hyperparameters)\n\n job = self.qpu_execution(self.circuit, **self.qpu_args)\n\n job_monitor(job, quiet=True)\n\n results = job.result()\n\n res_maxcut = self.expectation.get_expectationValue(results, self.qpu_args['shots'], self.graph)\n\n return -1 * res_maxcut\n\n \n def run_QAOA(self, opt_function, **kwargs):\n\n res = opt_function(self.run_skeletor, **kwargs)\n\n opt_hyperparameter = res.x\n\n self.generate_quantumCircuit(opt_hyperparameter)\n\n results = self.run_circuit()\n\n res_maxcut = self.expectation.get_expectationValue(results,self.qpu_args['shots'],self.graph)\n\n return {'expectation': res_maxcut, 'optimal_parameters': opt_hyperparameter, 'QPU_data':results , 'optimizer_data': res }\n\n","sub_path":"predefined_problems/skeletor.py","file_name":"skeletor.py","file_ext":"py","file_size_in_byte":3648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"621027016","text":"# Make a geometric rainbow pattern.\nimport turtle\nshelly = turtle.Turtle()\n# Pick an order of colours for the hexagon.\ncolours = ['red', 'yellow', 'blue', 'orange', 'green', 'red']\nturtle.bgcolor('black') # turn the background black\n# Draw 36 hexagons, each 10 degrees apart.\nfor n in range(36):\n# Make a hexagon by repeating 6 times.\n for h in range(6):\n shelly.color(colours[h]) # Pick the colour at position h.\n shelly.forward(100)\n shelly.left(60)\n # Add a turn before the next hexagon.\n shelly.right(10)\n\n# Get ready to draw 36 circles.\nshelly.penup()\nshelly.color('white')\n# Repeat 36 times to match the 36 hexagons.\nfor i in range(36):\n shelly.forward(220)\n shelly.pendown()\n shelly.circle(5)\n shelly.penup()\n shelly.backward(220)\n shelly.right(10)\n# Hide the turtle to finish the drawing.\nshelly.hideturtle()\n# Prevent the program from quitting by asking the user a question.\nw = input()","sub_path":"coding-in-python/geometric_art.py","file_name":"geometric_art.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"298003709","text":"from selenium import webdriver\nfrom selenium.webdriver.support.ui import Select\n\n\n# Open the browser\nlink = \"http://suninjuly.github.io/selects2.html\"\nbrowser = webdriver.Chrome()\nbrowser.get(link)\n\n# Get the values of \"a\" and \"b\" from the text between the tags and add them\na_value = browser.find_element_by_id('num1')\na = a_value.text\n\nb_value = browser.find_element_by_id('num2')\nb = b_value.text\n\nx = str(int(a) + int(b))\n\n# Find the dropdown list and select the value equal to \"x\"\nselect = Select(browser.find_element_by_tag_name(\"select\"))\nselect.select_by_value(x) # select the element\n\n# Submit the completed form\nbutton = browser.find_element_by_css_selector(\"button.btn\")\nbutton.click()\n","sub_path":"2lesson2_step2.py","file_name":"2lesson2_step2.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"222015403","text":"# largest prime factor\n\nimport math\n\nnumber = 600_851_475_143 # underscore for visibility\n\ndef findPrime(n: int) -> bool:\n if n & 1 == 0:\n # check if even\n return False\n\n # only check odds up to and including the square root\n for i in range(3, int(math.sqrt(n)) + 1, 2):\n if n % i == 0:\n return False\n\n return True\n\ndef findFactors(n: int) -> list[int]:\n res = [1]\n for i in range(2, int(math.sqrt(n))):\n if n % i == 0:\n res.append(i)\n\n for i in range(len(res)-1, -1, -1):\n res.append(n // res[i])\n\n return res\n\nprint(max([e for e in findFactors(number) if findPrime(e)]))\n","sub_path":"Project Euler/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"345631486","text":"import random\nimport pygame\nimport core\nfrom pygame.locals import *\npygame.init()\nhauteur = 800\nlargeur = 600\n\nvie = 4\nbriquel1 = []\nbriquel2 = []\nbriquel3 = []\nbriquel4 = []\nraquette = []\nballs = []\n\n\ndef setup():\n pygame.init()\n print(\"setup\")\n global score,briquel1, briquel2, briquel3, briquel4, x, y, l, h, xd, yd, balls, raquette,X,Y,X1,Y1,L,H\n\n core.WINDOW_SIZE = [largeur, hauteur]\n core.fps=30\n\n for b1 in range(0,10) :\n\n l = 60\n h = 20\n x = 1\n y = 1\n briquel1 = briquel1 + [[x*(b1-1)+(l*b1), y, l, h,]]\n for b2 in range(0, 10):\n l = 60\n h = 20\n x = 1\n y = 1\n briquel2 = briquel2 + [[x * (b2 - 1) + (l * b2), y*h+2, l, h, ]]\n for b3 in range(0, 10):\n l = 60\n h = 20\n x = 1\n y = 1\n briquel3 = briquel3 + [[x * (b3 - 1) + (l * b3), 2*y*h+3, l, h, ]]\n for b4 in range(0, 10):\n l = 60\n h = 20\n x = 1\n y = 1\n briquel4 = briquel4 + [[x * (b4 - 1) + (l * b4), 3*y*h+4, l, h, ]]\n for i in range(0,1):\n L=80\n H=20\n XR=260\n YR=700\n raquette = raquette + [[XR,YR,L,H]]\n\n X = 300\n Y = 550\n\n xd = random.uniform(-10, 10)\n yd = random.uniform(-10, 10)\n balls = balls + [X, Y, xd, yd,]\n score = 0\n print(\"A game starts, you have\", vie,\"life/lives to finish the brick breaker\")\n\ndef Spring(b1,b2,k,lo):\n u = pygame.Vector2(b2[0] - b1[0], b2[1] - b1[1])\n distanceEntreB1etB2 = u.length()\n\n if u[0] == 0 and u[1]==0:\n return [0,0]\n\n u = u.normalize()\n\n Fx = u[0] * k * abs(distanceEntreB1etB2 - lo)\n Fy = u[1] * k * abs(distanceEntreB1etB2 - lo)\n\n return [Fx,Fy]\n\ndef run():\n global score,q1,q2,q3,q4\n\n for q in briquel1:\n\n pygame.draw.rect(core.screen,(255, 0, 0), (q[0], q[1], q[2], q[3]))\n\n for q2 in briquel2 :\n pygame.draw.rect(core.screen, (255, 0, 0), (q2[0], q2[1], q2[2], q2[3]))\n\n for q3 in briquel3:\n pygame.draw.rect(core.screen, (255, 0, 0), (q3[0], q3[1], q3[2], q3[3]))\n\n for q4 in briquel4:\n pygame.draw.rect(core.screen, (255, 0, 0), (q4[0], q4[1], q4[2], q4[3]))\n\n for r in raquette:\n\n pygame.draw.rect(core.screen, (255, 0, 0), (r[0], r[1], r[2], r[3]))\n\n pygame.draw.circle(core.screen, (36, 210, 78), (balls[0], balls[1]), 20)\n\n balls[0] = balls[0] + balls[2]\n balls[1] = balls[1] + balls[3]\n\n if balls[0] < 20 or balls[0] > largeur - 20:\n balls[2] = -balls[2]\n if balls[1] < 20:\n balls[3] = -balls[3]\n\n if balls[1] > hauteur :\n balls[1] = 250\n balls[0] = 250\n score = score +1\n print(\"you have\", vie - score, \"life/lives left\")\n\n if vie - score == 0 :\n print(\"game over\")\n exit()\n\n for q4 in briquel4 :\n if q4[0] < balls[0] < q4[0] + l and q4[1] < balls[1] - 20 < q4[1]+h :\n balls[3] = - balls[3]\n briquel4.remove(q4)\n\n\n\n\n for q3 in briquel3:\n if q3[0] < balls[0] < q3[0] + l and q3[1] < balls[1] - 20 < q3[1] + h:\n balls[3] = - balls[3]\n briquel3.remove(q3)\n\n for q2 in briquel2:\n if q2[0] < balls[0] < q2[0] + l and q2[1] < balls[1] - 20 < q2[1] + h:\n balls[3] = - balls[3]\n briquel2.remove(q2)\n\n for q1 in briquel1:\n if q1[0] < balls[0] < q1[0] + l and q1[1] < balls[1] - 20 < q1[1] + h:\n balls[3] = - balls[3]\n briquel1.remove(q1)\n\n if briquel1 is not None and not briquel1 and briquel2 is not None and not briquel2 and briquel3 is not None and not briquel3 and briquel4 is not None and not briquel4:\n print(\"well done, you won\")\n exit()\n\n\n if r[1] < balls[1] + 20 < r[1] + r[3] and r[0] <= balls[0] < r[0] + 20 :\n balls[3] = -balls[3]\n balls[2] = - balls[2]\n\n if r[1] < balls[1] + 20 < r[1] + r[3] and r[0] +20 <= balls[0] < r[0] + 60 :\n balls[3] = -balls[3]\n\n if r[1] < balls[1] + 20 < r[1] + r[3] and r[0] + 60 <= balls[0] < r[0] + r[2]:\n balls[3] = -balls[3]\n balls[2] = -balls[2]\n if core.getMouseLeftClick() is not None:\n if core.getMouseLeftClick()[0] < largeur - r[2] :\n r[0]=core.getMouseLeftClick()[0]\n\ncore.main(setup, run)","sub_path":"Casse brique fonctionnel.py","file_name":"Casse brique fonctionnel.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"461987322","text":"#!/usr/bin/python\n\n\nfrom itertools import imap\n\nfrom irsystem import preprocess, to_str\nfrom util import nlargest, read_lines, write_lines\n\n\ndef matches(query, document):\n for term in query:\n if term not in document:\n return False\n return True\n\n\ndef brute(docs_path, qrys_path, out_path):\n docs = preprocess(read_lines(docs_path))\n qrys = preprocess(read_lines(qrys_path))\n\n def process_queries():\n for qry_id, qry in qrys:\n candidates = (doc_id for doc_id, doc in docs if matches(qry, doc))\n retrieved = nlargest(5, candidates)\n yield qry_id, retrieved\n\n result_strs = imap(to_str, process_queries())\n write_lines(out_path, result_strs)\n\n\nif __name__ == '__main__':\n brute('docs.txt', 'qrys.txt', 'brute.top')\n","sub_path":"src/brute.py","file_name":"brute.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"102724276","text":"from flask import Flask, render_template, request, redirect\nimport jinja2\nimport os\nfrom solitaire import *\napp = Flask(__name__)\n\n@app.route('/')\ndef hello():\n\treturn render_template(\"index.html\")\n\n@app.route('/change')\ndef chance():\n\treturn redirect('/')\n\n@app.route('/post', methods=['GET','POST'])\ndef post():\n\tif request.method == 'POST':\n\t\tdeck=request.form['deck']\n\t\tcode=request.form['code']\n\t\ted = request.form['ed']\n\t\ts = Solitaire()\n\t\tsuc = s.makeDeck(deck)\n\t\tif suc == \"\":\n\t\t\tif ed == \"e\":\n\t\t\t\ta = s.encrypt(code)\n\t\t\telse:\n\t\t\t\ta = s.decrypt(code)\n\t\t\treturn a\n\t\treturn suc\n\nif __name__ == '__main__':\n\tport = int(os.environ.get('PORT', 8000))\n\tapp.run(host='0.0.0.0', port=port,debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"5765929","text":"# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\nfrom __future__ import annotations\n\nimport json\nfrom pathlib import Path\nfrom textwrap import dedent\n\nimport pytest\n\nfrom pants.backend.javascript import package_json\nfrom pants.backend.javascript.package.rules import (\n GenerateResourcesFromNodeBuildScriptRequest,\n NodePackageTarFieldSet,\n)\nfrom pants.backend.javascript.package.rules import rules as package_rules\nfrom 
pants.backend.javascript.target_types import JSSourcesGeneratorTarget, JSSourceTarget\nfrom pants.build_graph.address import Address\nfrom pants.core.goals.package import BuiltPackage\nfrom pants.core.target_types import ResourceTarget\nfrom pants.engine.internals.native_engine import EMPTY_DIGEST, Digest, Snapshot\nfrom pants.engine.rules import QueryRule\nfrom pants.engine.target import GeneratedSources\nfrom pants.testutil.rule_runner import RuleRunner\n\n\n@pytest.fixture\ndef rule_runner() -> RuleRunner:\n rule_runner = RuleRunner(\n rules=[\n *package_rules(),\n QueryRule(BuiltPackage, (NodePackageTarFieldSet,)),\n QueryRule(GeneratedSources, (GenerateResourcesFromNodeBuildScriptRequest,)),\n QueryRule(Snapshot, (Digest,)),\n ],\n target_types=[\n *package_json.target_types(),\n JSSourceTarget,\n JSSourcesGeneratorTarget,\n ResourceTarget,\n ],\n objects=dict(package_json.build_file_aliases().objects),\n )\n rule_runner.set_options([], env_inherit={\"PATH\"})\n return rule_runner\n\n\ndef test_packages_sources_as_resource_using_build_tool(rule_runner: RuleRunner) -> None:\n rule_runner.write_files(\n {\n \"src/js/BUILD\": dedent(\n \"\"\"\\\n package_json(\n scripts=[\n node_build_script(\n entry_point=\"build\",\n output_directories=[\"dist\"],\n extra_caches=[\".parcel-cache\"],\n )\n ]\n )\n \"\"\"\n ),\n \"src/js/package.json\": json.dumps(\n {\n \"name\": \"ham\",\n \"version\": \"0.0.1\",\n \"source\": \"lib/index.mjs\",\n \"scripts\": {\n \"build\": \"parcel build lib/index.mjs --dist-dir=dist --cache-dir=.parcel-cache\"\n },\n \"devDependencies\": {\"parcel\": \"2.6.2\"},\n }\n ),\n \"src/js/package-lock.json\": (Path(__file__).parent / \"package-lock.json\").read_text(),\n \"src/js/lib/BUILD\": dedent(\n \"\"\"\\\n javascript_sources(dependencies=[\":style\"])\n resource(name=\"style\", source=\"style.css\")\n \"\"\"\n ),\n \"src/js/lib/style.css\": \"\",\n \"src/js/lib/index.mjs\": \"import './style.css' \",\n }\n )\n tgt = rule_runner.get_target(Address(\"src/js\", generated_name=\"build\"))\n snapshot = rule_runner.request(Snapshot, (EMPTY_DIGEST,))\n result = rule_runner.request(\n GeneratedSources, [GenerateResourcesFromNodeBuildScriptRequest(snapshot, tgt)]\n )\n assert result.snapshot.files == (\n \"src/js/dist/index.css\",\n \"src/js/dist/index.css.map\",\n \"src/js/dist/index.js\",\n \"src/js/dist/index.js.map\",\n )\n","sub_path":"src/python/pants/backend/javascript/package/rules_integration_test.py","file_name":"rules_integration_test.py","file_ext":"py","file_size_in_byte":3490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"488937829","text":"import math\n\nN = int(input())\n\narr = list(map(int,input().split()))\norder = list(map(int,input().split()))\n\nmax_val = -1000000000\nmin_val = 1000000000\ndef DFS(index,N,plus,minus,mul,div,val):\n global max_val,min_val\n if(index == N):\n max_val = max(max_val,val)\n min_val = min(min_val,val)\n return\n \n if(plus > 0):\n DFS(index+1,N,plus-1,minus,mul,div,val+arr[index])\n if(minus > 0):\n DFS(index+1,N,plus,minus-1,mul,div,val-arr[index])\n if(mul > 0):\n DFS(index+1,N,plus,minus,mul-1,div,val*arr[index])\n if(div > 0):\n DFS(index+1,N,plus,minus,mul,div-1,int(val/arr[index]))\n\nDFS(1,N,order[0],order[1],order[2],order[3],arr[0])\nprint(max_val)\nprint(min_val)","sub_path":"boj/14888.py","file_name":"14888.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} 
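The record above closes with a depth-first search for BOJ 14888: place the given +, -, *, / operators between fixed numbers, evaluate strictly left to right with division truncating toward zero, and track the maximum and minimum results. As a minimal self-contained sketch of the same computation — the helper name min_max_results and the hard-coded sample call are illustrative assumptions, not part of the dataset — the operator multiset can also be enumerated as deduplicated permutations:

from itertools import permutations

def min_max_results(nums, op_counts):
    # op_counts holds the available counts of +, -, *, / in that order,
    # exactly as the record's DFS receives them.
    pool = "".join(op * count for op, count in zip("+-*/", op_counts))
    best_max, best_min = float("-inf"), float("inf")
    for order in set(permutations(pool)):  # set() drops duplicate orderings of repeated operators
        val = nums[0]
        for op, n in zip(order, nums[1:]):
            if op == "+": val += n
            elif op == "-": val -= n
            elif op == "*": val *= n
            else: val = int(val / n)  # truncate toward zero, like int(val/arr[index]) in the record
        best_max = max(best_max, val)
        best_min = min(best_min, val)
    return best_max, best_min

print(min_max_results([1, 2, 3, 4, 5, 6], [2, 1, 1, 1]))  # BOJ 14888 sample: (54, -24)

Deduplicating the permutations keeps the scan at the same cost as the record's pruned recursion while avoiding explicit operator counters.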
+{"seq_id":"33746562","text":"import torch.optim as optim\nfrom torch.optim.lr_scheduler import LambdaLR\nimport numpy as np\nimport torch\nimport math\n\n\ndef get_finetune_optimizer(args, model):\n lr = args.lr\n weight_list = []\n bias_list = []\n last_weight_list = []\n last_bias_list = []\n for name, value in model.named_parameters():\n if 'cls' in name:\n if 'weight' in name:\n last_weight_list.append(value)\n elif 'bias' in name:\n last_bias_list.append(value)\n else:\n if 'weight' in name:\n weight_list.append(value)\n elif 'bias' in name:\n bias_list.append(value)\n\n opt = optim.SGD([{'params': weight_list, 'lr': lr},\n {'params': bias_list, 'lr': lr * 2},\n {'params': last_weight_list, 'lr': lr * 10},\n {'params': last_bias_list, 'lr': lr * 20}], momentum=0.9, weight_decay=0.0001, nesterov=True)\n\n return opt\n\n\ndef decrease_lr_by_epoch(epoch, model, args, fine_tune=False):\n cur_lr = args.lr * (0.1 ** (epoch // 20))\n\n weight_list = []\n bias_list = []\n last_weight_list = []\n last_bias_list = []\n for name, value in model.named_parameters():\n if 'features' in name:\n if 'weight' in name:\n weight_list.append(value)\n elif 'bias' in name:\n bias_list.append(value)\n else:\n if 'weight' in name:\n last_weight_list.append(value)\n elif 'bias' in name:\n last_bias_list.append(value)\n\n if not fine_tune:\n opt = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=0.0001)\n else:\n opt = torch.optim.SGD([{'params': weight_list, 'lr': cur_lr / 10},\n {'params': bias_list, 'lr': cur_lr * 2 / 10},\n {'params': last_weight_list, 'lr': cur_lr},\n {'params': last_bias_list, 'lr': cur_lr * 2}], lr=cur_lr, momentum=0.9,\n weight_decay=0.0001, nesterov=False)\n\n return opt\n\n\ndef get_regular_optimizer(args, model):\n return torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=1.0e-4)\n","sub_path":"BackgroundImage/Main/custom_optim.py","file_name":"custom_optim.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"380150698","text":"import requests\nimport logging\nimport time\nimport os\nfrom datetime import datetime\nfrom atomium.models.data import CODES\nfrom itertools import combinations\nfrom atomium.structures import Chain, Residue\n\nCODES = CODES.copy()\nCODES[\"HOH\"] = \"w\"\n\nclass RcsbError(Exception):\n \"\"\"Error raised if there's a problem talking to the RCSB web services.\"\"\"\n pass\n\n\n\ndef get_log():\n \"\"\"Creates a logger object that writes to a file at data/logs.\"\"\"\n\n logger = logging.getLogger(\"Build Script\")\n logger.setLevel(logging.INFO)\n os.environ[\"TZ\"] = \"Europe/London\"\n time.tzset()\n handler = logging.FileHandler(\n \"data/logs/\" + datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S.log\")\n )\n handler.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger\n\n\ndef get_zinc_pdb_codes():\n \"\"\"Gets PDB codes for all structures with a zinc atom in them.\n\n If the response returned has an error code of 500, or if there are fewer\n than 10,000 PDB codes sent back, an RcsbError will be thrown.\"\"\"\n\n query = \"\"\\\n \"org.pdb.query.simple.ChemCompFormulaQuery\"\\\n \"ZN\"\n url = \"https://www.rcsb.org//pdb/rest/search/\"\n response = requests.post(url, data=query.encode(), headers={\n \"Content-Type\": \"application/x-www-form-urlencoded\"\n })\n if response.status_code == 200:\n codes = 
response.text.split()\n if len(codes) > 10000:\n return response.text.split()\n raise RcsbError(\"RCSB didn't send back PDB codes\")\n\n\ndef model_is_skeleton(model):\n \"\"\"Returns ``True`` if the model given only contains backbone atoms.\"\"\"\n\n for residue in model.residues():\n atom_names = set([atom.name for atom in residue.atoms()])\n for name in atom_names:\n if name not in [\"C\", \"N\", \"CA\", \"O\"]:\n return False\n return True\n\n\ndef cluster_zincs_with_residues(metals):\n \"\"\"This function takes a set of atoms - all the metal atoms found in an\n atomium model.\n\n For each atom it will identify the binding residues for that atom and\n associate them with each other.\n\n From this dictionary, a list of cluster dictionaries will be made. In the\n simplest case, each metal will be a cluster, but two metal atoms will be\n merged into one cluster if they share binding residues.\n\n Then, duplicates will be removed. Two clusters are duplicates if they have\n the same metal atom IDs - this is usually created from symmetry operations.\n\n Finally clusters with no zinc in are removed.\"\"\"\n\n metals = {metal: [] for metal in metals}\n for metal in metals:\n metals[metal] = get_atom_binding_residues(metal)\n clusters = merge_metal_groups(metals)\n for cluster in clusters: remove_duplicates_from_cluster(cluster)\n aggregate_clusters(clusters)\n return [c for c in clusters if \"ZN\" in [a.element for a in c[\"metals\"]]]\n\n\ndef get_atom_binding_residues(metal):\n \"\"\"Takes an atom and gets all residues within 3Å - including ligands.\n In the case of zinc atoms, it will only use nitrogen, oxygen or sulphur\n atoms. For other atoms it will take everything except carbons.\n\n It will also mark atoms as 'liganding' or otherwise.\"\"\"\n\n kwargs = {\n \"cutoff\": 3, \"is_metal\": False,\n \"element_regex\": \"[NOS]\" if metal.element == \"ZN\" else \"[^C]\"\n }\n nearby_residues = metal.nearby_residues(ligands=True, **kwargs)\n for residue in nearby_residues:\n for atom in residue.atoms():\n atom.liganding = False\n for atom in metal.nearby_atoms(**kwargs): atom.liganding = True\n return nearby_residues\n\n\ndef merge_metal_groups(metals):\n \"\"\"Takes a dictionary in which the keys are metal atoms and the values are\n the set of residues that bind to them.\n\n It then creates a list of clusters from this, where each cluster is a dict\n object with metals and residues. 
Two metals and their residues will be\n merged together if they share residues.\"\"\"\n\n clusters = [{\"metals\": {metal}, \"residues\": residues, \"count\": 1}\n for metal, residues in metals.items()]\n while not check_clusters_have_unique_residues(clusters):\n for cluster1, cluster2 in combinations(clusters, 2):\n if cluster1[\"residues\"].intersection(cluster2[\"residues\"]):\n cluster1[\"metals\"].update(cluster2[\"metals\"])\n cluster1[\"residues\"].update(cluster2[\"residues\"])\n clusters.remove(cluster2)\n break\n return clusters\n\n\ndef check_clusters_have_unique_residues(clusters):\n \"\"\"Takes a list of clusters and returns True if they don't share any\n residues in common.\"\"\"\n\n residue_count = sum([len(cluster[\"residues\"]) for cluster in clusters])\n unique_residue_count = len(set.union(\n *[cluster[\"residues\"] for cluster in clusters]\n ))\n return residue_count == unique_residue_count\n\n\ndef remove_duplicates_from_cluster(cluster):\n \"\"\"Takes a cluster representing a binding site, and removes a metal if its\n ID is duplicated in the cluster.\"\"\"\n\n identifiers = set([m.id for m in cluster[\"metals\"]])\n new_metals = set()\n for id_ in identifiers:\n for metal in cluster[\"metals\"]:\n if metal.id == id_:\n new_metals.add(metal)\n break\n cluster[\"metals\"] = new_metals\n\n\ndef aggregate_clusters(clusters):\n \"\"\"Takes a list of cluster dictionaries and merges those with the same metal\n IDs.\"\"\"\n\n while not check_clusters_have_unique_sites(clusters):\n for cluster1, cluster2 in combinations(clusters, 2):\n if set([m.id for m in cluster1[\"metals\"]]) ==\\\n set([m.id for m in cluster2[\"metals\"]]):\n cluster1[\"count\"] += 1\n clusters.remove(cluster2)\n break\n\n\ndef check_clusters_have_unique_sites(clusters):\n \"\"\"Takes a list of clusters and returns True if they have any equivalent\n sites.\"\"\"\n\n cluster_ids = [frozenset([\n m.id for m in cluster[\"metals\"]\n ]) for cluster in clusters]\n unique_ids = set(cluster_ids)\n return len(cluster_ids) == len(unique_ids)\n\n\ndef create_site_code(residues):\n codes = [CODES.get(r.name, \"X\") for r in residues if r.__class__.__name__ == \"Residue\"]\n return \"\".join([f\"{code}{codes.count(code)}\" for code in sorted(set(codes))])\n","sub_path":"core/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":6425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"124934134","text":"class Car:\n def __init__(self, name='General', model='GM', type='Benz', speed=0):\n self.name = name\n self.model = model\n self.type = type\n self.speed = speed\n\n def num_of_doors(self, doors = 2):\n if self.name == 'Porshe' or self.name == 'Koenigsegg':\n result = doors\n else:\n result = doors + 2\n return result\n\n def num_of_wheels(self, wheels=4):\n if self.type == 'trailer':\n result = wheels + 4\n else:\n result = wheels\n return result\n\n def is_saloon(self):\n if self.type == 'trailer':\n return True\n else:\n self.type = 'saloon'\n return self.type","sub_path":"car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"578380457","text":"# Викторина со столицами.\n\n# Модуль для выбора случайной страны из словаря.\nimport random\n\ndef main():\n \"\"\"Основная функция.\"\"\"\n\n contry = country_capital()\n\n choice = ''\n while choice != '0':\n quiz_counrys(contry)\n choice = input('Для выхода - 0, для продолжения 
любая клавиша: ')\n\ndef quiz_counrys(contry):\n \"\"\"Узнаю у пользователя какая столица загаданной страны.\"\"\"\n conceived = choice_country(contry)\n\n # Счетчики правильных и не правильных ответов.\n correct_count = 0\n wrong_count = 0\n\n print('Я загадала страну:', conceived)\n answer = ''\n while answer != contry[conceived]:\n answer = input('А её столица: ')\n if answer in contry[conceived]:\n print('Вы правы столица', conceived, '- это', contry[conceived])\n correct_count += 1\n else:\n print('Не правильно!')\n wrong_count += 1\n print('Еще раз, столица', conceived)\n answer = input('- ')\n print('Правильных ответов:', correct_count)\n print('Не правильных ответов:', wrong_count)\n\ndef choice_country(country):\n \"\"\"Выбирает случайным образом страну и её столицу.\"\"\"\n\n # В список записываю случайную страну.\n enigmatic_country = []\n\n # Генератор случайной цифры\n num_key = random.randrange(len(country))\n # Счетчик для сверки со случайным числом.\n count = 0\n\n for c in country.keys():\n if count == num_key:\n if len(enigmatic_country) < 1:\n enigmatic_country.append(c)\n else:\n count += 1\n\n\n return enigmatic_country[0]\n\ndef country_capital():\n \"\"\"Словарь со странами и их столицами.\"\"\"\n\n county = {\n 'Россия': 'Москва',\n 'Руанда': 'Кигали',\n 'Румыния': 'Бухарест',\n 'Сальвадор': 'Сан - Сальвадор',\n 'Саудовская Аравия': 'Эр-Рияд',\n 'Северная Корея': 'Пхеньян',\n 'Сербия': 'Белград',\n 'Сирия': 'Дамаск',\n 'Словакия': 'Братислава',\n 'Словения': 'Любляна',\n 'Соединенные Штаты Америки': 'Вашингтон',\n }\n\n return county\n\nmain()","sub_path":"9/tasks/9.2.py","file_name":"9.2.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"377216597","text":"#-*- coding:utf-8 -*-\n# 通过回溯法求解0-1背包问题\ninfo = [\n [3,8],\n [2,5],\n [5,12]\n]\n\nselects = []\nmax_selects =[]\nmax_value = 0\ndef search(depth,rest):\n if depth == 3:\n print(selects)\n values = []\n for index,select in enumerate(selects):\n values.append(select*info[index][1])\n global max_value\n if sum(values) > max_value:\n max_value = sum(values)\n print(max_value)\n global max_selects\n max_selects = selects[:]\n print(max_selects)\n else:\n # 1.不放\n # 1.设置现场\n selects.append(0)\n # 2.递归\n search(depth+1,rest)\n # 3.恢复现场\n selects.pop()\n\n # 2.放\n if rest >= info[depth][0]:\n # 1.设置相缠\n selects.append(1)\n # 2.递归\n search(depth+1,rest-info[depth][0])\n # 3.恢复现场\n selects.pop()\nif __name__ == \"__main__\":\n search(0,5)\n","sub_path":"chapter03/beibao_search.py","file_name":"beibao_search.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"537068169","text":"# encoding=utf8\nfrom pyspark import SparkContext\nfrom pyspark import SparkConf\nfrom pyspark.streaming import StreamingContext, dstream\nfrom pyspark.streaming.kafka import KafkaUtils\nimport time\nimport json\nimport redis\nfrom config import config\nconfig = config('../example.ini')\n\n\"\"\"\n总pv allpv redis 6379 hash chatlog_pv botname\n 总量pv 相关pv之和\n当天pv todaypv redis 6379 hash chatlog_pv_per_day botname:datetime\n 相关hash botname:datetime 之和\n总uv alluser redis 6379 set botname\tuser_id\n 相关 scard 之和\n当天新增uv todayuser redis 6379 set botname:datetime\t\n 相关set scard总量之和\n当天uv todayuv redis 6380 set botname:datetime\n 当天总量uv 相关uv之和\n今日qps qps redis 6381 zset botname:datetime num datetime\nchildren全量qps redis 6381 zset children_qps_per_day num 
datetime\nadult 全量 qps redis 6381 zset adult_qps_per_day\ntopqps topqps redis 6381 zset 获取score 最高的值 \n\"\"\"\n\n\n\ndef data_filter(rdd):\n \"\"\"\n :param rdd: list\n :return: True or False\n \"\"\"\n if len(rdd) >= 4:\n if rdd[1] != \"REQ\":\n return False\n try:\n json_col = json.loads(rdd[3])\n except Exception:\n return False\n if json_col.get(\"bot_name\"):\n if json_col.get(\"user_id\"):\n if \"trio_test\" in str(json_col.get(\"user_id\")).lower():\n return False\n if 'monitor' in str(json_col.get(\"user_id\")).lower():\n return False\n else:\n return False\n else:\n return False\n else:\n return False\n return True\n\n\ndef sequence_user_id_col(rdd):\n \"\"\"\n :param rdd: a list\n :return: botname_col, user_id_col\n \"\"\"\n json_col = json.loads(rdd[3])\n botname_col = json_col[\"bot_name\"]\n user_id_col = json_col[\"user_id\"]\n return botname_col, user_id_col\n\n\ndef sequence_pv_col(rdd):\n \"\"\"\n :param rdd: a list\n :return: botname column\n \"\"\"\n json_col = json.loads(rdd[3])\n botname_col = json_col[\"bot_name\"]\n return botname_col\n\n\ndef sequence_qps_col(rdd):\n \"\"\"\n :param rdd: a list\n :return: time_col,botname\n \"\"\"\n json_col = json.loads(rdd[3])\n botname_col = json_col[\"bot_name\"]\n time_col = rdd[0]\n return time_col, botname_col\n\n\ndef user_id_output_to_redis(rdd):\n \"\"\"\n :param rdd: [(botname,user_id),num]\n :return: to redis set ,uv store\n \"\"\"\n today = time.strftime(\"%Y-%m-%d\", time.localtime())\n try:\n pool79 = redis.ConnectionPool(host=config['redis-for-uv-pv']['ip'], port=config['redis-for-uv-pv']['port'],\n password=config['redis-for-uv-pv']['password'])\n redis_connect79 = redis.Redis(connection_pool=pool79)\n except Exception as e:\n raise e\n try:\n pool80 = redis.ConnectionPool(host=config['redis-day-uv']['ip'], port=config['redis-day-uv']['port'],\n password=config['redis-day-uv']['password'])\n redis_connect80 = redis.Redis(connection_pool=pool80)\n except Exception as e:\n raise e\n for bot_user, num in rdd:\n botname, user_id = bot_user\n redis_connect80.sadd(str(botname) + \":\" + today, str(user_id))\n status = redis_connect79.sadd(str(botname), str(user_id))\n if status:\n redis_connect79.sadd(str(botname) + \":\" + today, str(user_id))\n redis_connect79.sadd(\"botname_list\", str(botname))\n\n\ndef pv_output_to_redis(rdd):\n \"\"\"\n :param rdd: botname, count\n :return: to redis hash store pv\n \"\"\"\n try:\n pool79 = redis.ConnectionPool(host=config['redis-for-uv-pv']['ip'], port=config['redis-for-uv-pv']['port'],\n password=config['redis-for-uv-pv']['password'])\n redis_connect79 = redis.Redis(connection_pool=pool79)\n except Exception as e:\n raise e\n today = time.strftime(\"%Y-%m-%d\", time.localtime())\n for botname, num in rdd:\n redis_connect79.hincrby(\"chatlog_pv\", str(botname), num)\n redis_connect79.hincrby(\"chatlog_pv_per_day\", str(botname) + \":\" + today, num)\n\n\ndef qps_output_to_redis(rdd, rdd_type):\n \"\"\"\n :param rdd: botname, count\n :return: to redis hash store pv\n \"\"\"\n try:\n pool81 = redis.ConnectionPool(host=config['redis-qps']['ip'], port=config['redis-qps']['port'],\n password=config['redis-qps']['password'])\n redis_connect81 = redis.Redis(connection_pool=pool81)\n except Exception as e:\n raise e\n today = time.strftime(\"%Y-%m-%d\", time.localtime())\n for time_bot, num in rdd:\n time_col, botname_col = time_bot\n redis_connect81.zincrby(str(botname_col) + \":\" + today, num, str(time_col))\n redis_connect81.zincrby(rdd_type + \":\" + today, num, str(time_col))\n\n\ndef 
create_context(children_topic, brokers, children_qps_store_name):\n # If you do not see this printed, that means the StreamingContext has been loaded\n # from the new checkpoint\n print(\n \"Creating new context-------------------------------------------------------------------------------------------------------------------------------\")\n sc = SparkContext(appName=\"PythonStreamingRecoverableNetworkWordCount\")\n sc.setLogLevel(\"WARN\")\n ssc = StreamingContext(sc, 1)\n line_children = KafkaUtils.createDirectStream(ssc, [children_topic, ],\n kafkaParams={\"metadata.broker.list\": brokers})\n line_children.checkpoint(15)\n children_filter_data = line_children.map(lambda x: x[1].split(\"\\t\")).filter(data_filter)\n children_filter_data.cache()\n # children todayuv:redis80 totaluser:redis79 todayuser:redis79\n children_filter_data.map(sequence_user_id_col).map(lambda x: (x, 1)).reduceByKey(lambda a, b: a + b).foreachRDD(\n lambda rdd: rdd.foreachPartition(user_id_output_to_redis))\n # children 发送至redis 81,zset botname + today , sore 排序,计算 qps,topqps\n children_filter_data.map(sequence_pv_col).map(lambda x: (x, 1)).reduceByKey(lambda a, b: a + b).foreachRDD(\n lambda rdd: rdd.foreachPartition(pv_output_to_redis))\n # children 发送至redis79,botname 哈希,用于计算全部pv,当天pv等\n children_filter_data.map(sequence_qps_col).map(lambda x: (x, 1)).reduceByKey(lambda a, b: a + b).foreachRDD(\n lambda rdd: rdd.foreachPartition(lambda x: qps_output_to_redis(x, children_qps_store_name)))\n return ssc\n\n\nif __name__ == '__main__':\n children_topic = \"chatlog_children\"\n children_qps_store_name = \"children_qps_per_day\"\n brokers = ','.join(config['kafka-broker'].values())\n checkpoint = \"hdfs://triohdfs/user/yefei/sparkstreaming/chatlog/children/checkpoint\"\n ssc = StreamingContext.getOrCreate(checkpoint, lambda: create_context(children_topic=children_topic, brokers=brokers,\n children_qps_store_name=children_qps_store_name))\n ssc.start()\n ssc.awaitTermination()\n","sub_path":"streaming/chatlog_children_streaming_with_checkpoint.py","file_name":"chatlog_children_streaming_with_checkpoint.py","file_ext":"py","file_size_in_byte":7108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"417334206","text":"import pandas as pd\r\n\r\ncalorias = {\"day1\": 420, \"day2\": 380, \"day3\": 390}\r\n\r\n\r\n#Transformas as chaves do dicionario em rotulos correspondentes a cada numero\r\ndados = pd.Series(calorias)\r\nprint(dados)\r\n\r\nselecionados = pd.Series(calorias, index = [\"day2\",\"day3\"])\r\nprint(\"dados selecionados:\\n\",selecionados)","sub_path":"Pandas/chaves.py","file_name":"chaves.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"593692478","text":"#! 
/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom PyQt4.QtGui import *\r\nfrom PyQt4.QtCore import *\r\nimport pypyodbc\r\nfrom utils import *\r\n\r\n\r\nclass NewPirzeDialog(QDialog):\r\n\r\n def __init__(self, parent=None):\r\n QDialog.__init__(self, parent)\r\n self.setWindowTitle('添加奖品')\r\n self.resize(300, 400)\r\n\r\n dl_layout = QVBoxLayout(self)\r\n account_layout = QGridLayout()\r\n btn_layout = QHBoxLayout()\r\n\r\n account_layout.setSpacing(12)\r\n account_layout.setMargin(8)\r\n # 奖品姓名\r\n name_label = QLabel('名称: ')\r\n self.name_lineEdit = QLineEdit()\r\n # 奖品描述\r\n des_label = QLabel('描述: ')\r\n self.des_textEdit = QTextEdit()\r\n # 兑换积分\r\n level_label = QLabel('兑换积分: ')\r\n self.level_lineEdit = QLineEdit()\r\n self.level_lineEdit.setMaximumWidth(100)\r\n # 兑换等级\r\n point_label = QLabel('兑换等级: ')\r\n self.point_combobox = QComboBox()\r\n self.point_combobox.addItem('白金会员')\r\n self.point_combobox.addItem('黄金会员')\r\n self.point_combobox.addItem('钻石会员')\r\n\r\n # addWidget(row, column, takenrow, takencolumn)\r\n account_layout.addWidget(name_label, 1, 0, 1, 1)\r\n account_layout.addWidget(self.name_lineEdit, 1, 1, 1, 4)\r\n account_layout.addWidget(des_label, 2, 0, 1, 1)\r\n account_layout.addWidget(self.des_textEdit, 2, 1, 3, 4)\r\n account_layout.addWidget(level_label, 6, 0, 1, 1)\r\n account_layout.addWidget(self.level_lineEdit, 6, 1, 1, 1)\r\n account_layout.addWidget(point_label, 7, 0, 1, 1)\r\n account_layout.addWidget(self.point_combobox, 7, 1, 1, 1)\r\n\r\n # 确定按钮\r\n ok_btn = QPushButton('确定')\r\n # 取消按钮\r\n cancel_btn = QPushButton('取消')\r\n\r\n # 绑定事件\r\n ok_btn.clicked.connect(self.insert_prize)\r\n cancel_btn.clicked.connect(self.close)\r\n\r\n btn_layout.addStretch()\r\n btn_layout.addWidget(ok_btn)\r\n btn_layout.addWidget(cancel_btn)\r\n\r\n dl_layout.addLayout(account_layout)\r\n dl_layout.addStretch()\r\n dl_layout.addLayout(btn_layout)\r\n\r\n @pyqtSlot()\r\n def insert_prize(self):\r\n # check\r\n # 电话号码, 年龄是数字\r\n if not is_num(self, self.level_lineEdit):\r\n toastWarning(self, '请输入正确积分数字!')\r\n return False\r\n\r\n # sql 插入员工\r\n conn = pypyodbc.connect(\r\n 'driver={SQL Server};server=localhost;database=market;UID=sa;PWD=1234')\r\n\r\n cur = conn.cursor()\r\n level = 1\r\n if self.point_combobox.currentText() == '白金会员':\r\n level = 1\r\n elif self.point_combobox.currentText() == '黄金会员':\r\n level = 2\r\n else:\r\n # 钻石会员\r\n level = 3\r\n\r\n cur.execute('''insert into prize values (?, ?, ?, ?);''', (self.name_lineEdit.text(),\r\n self.des_textEdit.toPlainText(),\r\n int(self.level_lineEdit.text()),\r\n level))\r\n cur.commit()\r\n cur.close()\r\n conn.close()\r\n # 插入成功, 关闭对话框\r\n self.accept()\r\n","sub_path":"new_prize.py","file_name":"new_prize.py","file_ext":"py","file_size_in_byte":3448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"183501916","text":"# ----------A----------\ns = 0\nt = 1\nn = 1\nfor n in range(1, 21):\n t *= n\n s += t\nprint(s)\n\n# -----------B---------\nl = range(1, 21)\ndef op(x):\n t = 1\n for i in range(1, x+1):\n t *= i\n return t\ns = sum(map(op, l)) # map() 会根据提供的函数对指定序列做映射\nprint(s)","sub_path":"阶乘.py","file_name":"阶乘.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"275968142","text":"grouped_birds = birddata.groupby(\"bird_name\")\r\nmean_speeds = grouped_birds.speed_2d.mean()\r\nmean_altitudes = grouped_birds.altitude.mean()\r\n\r\n# Convert 
birddata.date_time to the `pd.datetime` format.\r\nbirddata.date_time = pd.to_datetime(birddata.date_time)\r\n\r\nbirddata[\"date\"] = birddata.date_time.dt.date\r\ngrouped_bydates = birddata.groupby(\"date\")\r\nmean_altitudes_perday = grouped_bydates.altitude.mean()\r\n\r\ngrouped_birdday = birddata.groupby([\"bird_name\", \"date\"])\r\nmean_altitudes_perday = grouped_birdday.altitude.mean()\r\n\r\neric_daily_speed = grouped_birdday.speed_2d.mean()[\"Eric\"]\r\nsanne_daily_speed = grouped_birdday.speed_2d.mean()[\"Sanne\"]\r\nnico_daily_speed = grouped_birdday.speed_2d.mean()[\"Nico\"]\r\n\r\neric_daily_speed.plot(label=\"Eric\")\r\nsanne_daily_speed.plot(label=\"Sanne\")\r\nnico_daily_speed.plot(label=\"Nico\")\r\nplt.legend(loc=\"upper left\")\r\nplt.show()","sub_path":"flight_patterns.py","file_name":"flight_patterns.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"274353521","text":"#Code by Neil\n\nimport sympy as sp\nimport numpy as np\nfrom scipy import signal\nimport matplotlib.pyplot as plt\n\n\nA = np.array([\n\t[0,1],\n\t[-2,-3]\n])\n\nB = np.array([\n\t[0],\n\t[1]\n])\n\nC = np.array([\n\t[1,0]\n])\n\nD = 0\nsys = signal.StateSpace(A,B,C,D) #State space to time domain conversion\n\nt1,y1= signal.impulse2(sys) # time and output axis for natural(impulse) response\n\na,b = signal.ss2tf(A,B,C,D) #Importing Num. and Den. of T.F. from State Space Rep.\n\n#rounding off\nnum = np.around(a[0],decimals = 0)\nden = np.around(b,decimals = 0)\ns = sp.symbols('s')\nH_s = sp.Poly(num,s)/sp.Poly(den,s) # Getting polynomial expressions\n\nprint(\"THE TRANSFER FUNCTION OF THE SYSTEM \")\nprint(\"H(s) =\",H_s)\nplt.plot(t1,y1,label='impulse response')\nplt.legend()\nplt.grid()\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"manual/codes/ee18btech11031/ee18btech11031.py","file_name":"ee18btech11031.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"291251611","text":"from django.contrib import admin\nfrom .models import book,hero,testmanage\n#一个功能强大的后台管理功能\n# Register your models here.\nclass heroInine(admin.StackedInline):\n model = hero\n extra = 1\nclass bookAdmin(admin.ModelAdmin):\n list_display = [\"title\",'pub_date']\n list_filter = [\"title\",'pub_date']\n list_per_page = 10\n search_fields = [\"title\",'pub_date']\n inlines = [heroInine]\nclass heroAdmin(admin.ModelAdmin):\n list_display = ['name','gender','skill','wj']\n list_filter = ['name','gender','skill','wj']\n list_per_page = 10\n search_fields = ['name','gender','skill','wj']\nadmin.site.register(book,bookAdmin)\nadmin.site.register(hero,heroAdmin)\n\n\n","sub_path":"demo1/booktest/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"143634797","text":"from socket import *\nfrom tfjfbvDHT.net_functions import *\nfrom tfjfbvDHT.hash_functions import getHash\n\n# Class to interact with DHT via network protocol\nclass DHTInterface:\n '''\n DHTInterface is utilized to interact with a DHT\n under the protocol set in Networks and Distributed Systems\n with Dr. 
Nathan Backman at Buena Vista University in Spring 2019.\n \n Initialization:\n x = DHTInterface(ipport_file='/path/to/file/example.txt')\n example.txt contains ['IP.IP.IP.IP:PORT']\n y = DHTInterface(peerIP='123.456.789.101', peerPort=12345)\n z = DHTInterface()\n z.peerIP = '987.654.321.000'\n z.peerPort = 65532\n \n With objects x, y, or z, the user can now call\n methods in DHTInterface to interact with the DHT.\n such as\n x.insert('asdf', 'fdsaasdf')\n x.get('asdf')\n x.exists('asdf')\n print(x.help())\n '''\n def __init__(self, **kwargs):\n self.conn = None\n self.peerIP = None\n self.peerPort = None\n self.PEER_FILE_IP_PORT = None\n\n passed_keys = kwargs.keys() # Grab keys passed\n\n # Set peer file\n if \"ipport_file\" in passed_keys:\n self.PEER_FILE_IP_PORT = kwargs[\"ipport_file\"]\n\n # Set ip/port\n if \"peerIP\" in passed_keys and \"peerPort\" in passed_keys:\n self.peerIP = kwargs['peerIP']\n self.peerPort = kwargs['peerPort']\n\n # Help message\n def help(self):\n r = \"Available methods:\\n\"\n methods = [\n \"get(key)\",\n \"insert(key, value)\",\n \"remove(key)\",\n \"exists(key)\",\n \"owns(key)\",\n ]\n for method in methods:\n r += '\\t'+method+'\\n'\n return r\n\n # Make sure key is sendable\n def prepKey(self, key):\n if type(key) is str:\n return getHash(key.encode())\n else:\n return getHash(key)\n\n # Make sure value is sendable\n def prepVal(self, val):\n if type(val) is str:\n return val.encode()\n else:\n return val\n\n # Decide how to connect to a peer\n def read_and_set_connection(self, file_name):\n '''\n Decides if we connect to an already known peerIp and peerPort,\n or read from provided file name to get a connection.\n Calls set_connection to set the connection.\n '''\n # If file has already been read\n # set connection with known IP and Port\n if self.peerIP is not None and self.peerPort is not None:\n self.set_connection(self.peerIP, self.peerPort)\n return\n\n # Need to read from file\n IPPORT = None\n try:\n with open(file_name, 'r') as f:\n line = f.readline()\n try:\n IPPORT = line.split(':')\n except:\n raise Exception(\"IPPORT file was not proper.\")\n except:\n raise Exception(\"IPPORT file DNE\")\n self.peerIP = IPPORT[0]\n self.peerPort = IPPORT[1]\n self.set_connection(IPPORT[0], int(IPPORT[1]))\n\n # Set connection\n def set_connection(self, peerIP, peerPort):\n '''\n Sets the self.conn to a connection\n to parameters peerIP and peerPort.\n self.conn is useb by methods that\n contact the DHT.\n Internal methods rely on an already\n established connection to interact with the DHT.\n '''\n if self.conn is not None:\n self.close_connection()\n\n self.conn = socket(AF_INET, SOCK_STREAM)\n self.peerIP = peerIP\n self.peerPort = peerPort\n try:\n self.conn.connect((peerIP, int(peerPort)))\n except:\n self.close_connection()\n raise BaseException(\"Could not connect to\",peerIP,peerPort)\n\n # Close connection\n def close_connection(self):\n '''\n Closes the connection and sets the conn\n object to None.\n '''\n if self.conn is not None:\n try:\n self.conn.close()\n except:\n print(\"Couldn't close connection. Hard reset\")\n self.conn = None\n\n # Find who we are supposed to be connected to and connect to them\n def set_true_connection(self, key):\n '''\n First, set the connection to a peer.\n Obtain the true owner to a key\n passed. 
A connection will then be \n set to that owner.\n '''\n self.read_and_set_connection(self.PEER_FILE_IP_PORT)\n\n TO = self.trueOwner(key).split(':')\n self.set_connection(TO[0], TO[1])\n\n # Inserting value into DHT\n def insert(self, key, value):\n self.set_true_connection(key)\n\n if self.conn is None:\n raise BaseException(\"Not connected to a peer\")\n self.conn.send(\"INS\".encode())\n sendKey(self.conn, self.prepKey(key))\n\n response1 = recvAll(self.conn, 1)\n if response1.decode() == 'F':\n return response1.decode()\n\n sendVal(self.conn, self.prepVal(value))\n \n response2 = recvAll(self.conn, 1)\n\n self.close_connection()\n return response2.decode()\n\n # Removing value from DHT\n def remove(self, key):\n self.set_true_connection(key)\n\n if self.conn is None:\n raise BaseException(\"Not connected to a peer\")\n self.conn.send(\"REM\".encode())\n sendKey(self.conn, self.prepKey(key))\n response1 = recvAll(self.conn, 1)\n\n self.close_connection()\n return response1.decode()\n\n # Getting value from the DHT by key\n def get(self, key):\n self.set_true_connection(key)\n\n if self.conn is None:\n raise BaseException(\"Not connected to a peer\")\n self.conn.send(\"GET\".encode())\n sendKey(self.conn, self.prepKey(key))\n response1 = recvAll(self.conn, 1)\n data = None\n if response1.decode() == 'T':\n data = recvVal(self.conn)\n \n self.close_connection()\n return (response1.decode(), data)\n\n # Checking for existence of a key\n def exists(self, key):\n self.set_true_connection(key)\n\n if self.conn is None:\n raise BaseException(\"Not connected to a peer\")\n self.conn.send(\"EXI\".encode())\n\n key_to_send = self.prepKey(key)\n\n sendKey(self.conn, key_to_send)\n response1 = recvAll(self.conn, 1)\n\n self.close_connection()\n return response1.decode()\n\n # Ask for who owns the space\n def owns(self, key):\n '''\n In this interface with the DHT,\n an owns request means to obtain the\n TRUE owner of a key. This is not the DHT \n protocol, a true owner is returned here.\n This is obtained by setting the true connection\n to the key passed.\n '''\n self.set_true_connection(key)\n\n self.close_connection()\n return self.peerIP + ':' + str(self.peerPort)\n\n # Finding true owner\n def trueOwner(self, key):\n '''\n Algorithm to obtain the true owner of\n a key. It is necessary for a DHT interface\n to interact with a true owner of a key\n because they are the only ones who can\n interact with the real data.\n Why is this needed here?\n Because a peer in the DHT is not responsible\n for delegating work.\n A peer in the DHT is only responsible for the\n data they own. 
So we need to interact with who really\n owns the file.\n '''\n if self.conn is None:\n raise Exception(\"Not connected to a peer.\")\n # Contact who we know right now with an OWNS query\n self.conn.send(\"OWN\".encode())\n key_to_send = self.prepKey(key)\n sendKey(self.conn, key_to_send)\n response1 = recvAddress(self.conn)\n # Obtain the first candidate to compare to\n candidate = response1[0] +\":\"+ str(response1[1])\n\n temp = candidate # Temp is needed as a third variable in the while loop\n returned_peer = ''\n # Algorithm that searches for the true owner of data\n # While who we ask, is not who is returned,\n # query the candidate for who owns the data\n while candidate != returned_peer:\n candidate = temp # Temp is next person to contact\n _conn = socket(AF_INET, SOCK_STREAM)\n connIP = candidate.split(':')[0]\n connPort = int(candidate.split(':')[1])\n\n # Send an owns query to the candidate\n _conn.connect((connIP, connPort))\n _conn.send(\"OWN\".encode())\n sendKey(_conn, self.prepKey(key))\n\n returned_peer = recvAddress(_conn)\n returned_peer = returned_peer[0] + \":\" + str(returned_peer[1])\n # Reset temp to who we received\n temp = returned_peer\n _conn.close()\n\n return temp # If here, temp is the true owner\n","sub_path":"tfjfbvDHT/dhti.py","file_name":"dhti.py","file_ext":"py","file_size_in_byte":9073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"423842855","text":"\n# coding: utf-8\n\n# In[ ]:\n\n\nimport numpy as np\nimport cv2 as cv\n\n#음식 사진에서 가장 큰 원 추출, 매개변수 -> 이미지\ndef test(shape_img, filename):\n #이미지 불러온뒤 224*224로 resize\n shape_img = cv.resize(shape_img,(112,112),interpolation = cv.INTER_LINEAR_EXACT)\n \n #이미지 불러온뒤 gray 스케일로 변경 및 resize\n img_gray = cv.cvtColor(shape_img,cv.COLOR_BGR2GRAY)\n img_gray = cv.resize(img_gray,(112,112),interpolation = cv.INTER_LINEAR_EXACT)\n\n img_gray = cv.medianBlur(img_gray,5)\n img_color = cv.cvtColor(img_gray,cv.COLOR_GRAY2BGR)\n\n #이미지에서 원을 찾는 함수\n circles = cv.HoughCircles(img_gray,cv.HOUGH_GRADIENT,1,20,\n param1=50,param2=35,minRadius=20,maxRadius=56)\n \n #원들의 정보를 리스트로 저장\n circles = np.uint16(np.around(circles))\n \n\n #찾은 원들중에 가장 큰 원의 정보 저장\n c = circles[0,len(circles)-1]\n\n #원의 중심 좌표\n center = (c[0],c[1])\n \n #원의 반지름\n radius = c[2]\n print(c[0], c[1], c[2])\n # 바깥원\n cv.circle(img_color,center,radius,(0,255,0),2)\n\n # 중심원\n cv.circle(img_color,center,2,(0,0,255),3)\n \n # 원 밖의 이미지부분 제거\n for i in range(112):\n for j in range(112):\n if (c[0] - i)*(c[0] - i) + (c[1] - j)*(c[1] - j) > c[2]*c[2] :\n shape_img[j][i] = 0\n\n # 자른 이미지 저장\n# path = 'C:/Users/jhkim/OneDrive/Desktop/' + filename+'.jpg'\n# cv.imwrite(path, shape_img)\n\n # 자른 이미지 리턴\n return shape_img\n\n","sub_path":"cut.py","file_name":"cut.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"324280308","text":"\"\"\" This program allows the player t play a 'Battleship' style game against the computer.\r\n The player and computer each has a 3 x 3 grid where they can place their battleship\r\n in one space. 
The computer's battleship placement and bombings are all random.\r\n\"\"\"\r\nfrom random import randint\r\nfrom random import choice\r\nimport time\r\n\r\nprint(\"\\nWelcome to Python Battleship!\\n\")\r\n\r\nplayer_grid = {\r\n\r\n 'A1' : 0, 'A2' : 0, 'A3' : 0,\r\n 'B1' : 0, 'B2' : 0, 'B3' : 0,\r\n 'C1' : 0, 'C2' : 0, 'C3' : 0,\r\n}\r\n\r\ncomputer_grid = {\r\n\r\n 'A1' : 0, 'A2' : 0, 'A3' : 0,\r\n 'B1' : 0, 'B2' : 0, 'B3' : 0,\r\n 'C1' : 0, 'C2' : 0, 'C3' : 0,\r\n}\r\n\r\ndef start():\r\n#asks for user input on battleship placement and has the computer automatically place theirs.\r\n print(\"Where do you want to place your battleship? You have a 3x3 grid, A, B, C - 1, 2, 3!\")\r\n for key in player_grid:\r\n player_grid[key] = 0\r\n player_battleship = input(\"> \")\r\n player_battleship = player_battleship.title()\r\n player_grid[player_battleship] = 1\r\n# print(player_grid)\r\n\r\n print(\"The computer will now place it's battleship...\")\r\n for key in computer_grid:\r\n computer_grid[key] = 0\r\n computer_battleship = choice(['A1', 'A2', 'A3', 'B1', 'B2', 'B3', 'C1', 'C2', 'C3'])\r\n computer_grid[computer_battleship] = 1\r\n# print(computer_grid)\r\n\r\n print(\"\\nLET'S BATTLE!\\n\")\r\n time.sleep(1)\r\n battle()\r\n\r\ndef battle():\r\n#this is where thegame takes place. it will loop until the player or computer sinks the other's battleship\r\n player_bomb = input(\"Choose a grid square to bomb.\\n> \")\r\n player_bomb = player_bomb.title()\r\n# print(computer_grid)\r\n\r\n if computer_grid[player_bomb] == 1:\r\n print(\"\\nBOOOOOOOOOOOOOOOOOOM!!!\\n\")\r\n time.sleep(2)\r\n print(f\"YOU BOMBED {player_bomb} AND SUNK THE COMPUTER'S BATTLESHIP!!!\")\r\n time.sleep(1)\r\n ending()\r\n\r\n else:\r\n print(f\"You bombed {player_bomb} and missed...\")\r\n time.sleep(1)\r\n\r\n print(\"Watch out! The computer is bombing...\")\r\n time.sleep(1)\r\n computer_bomb = choice(['A1', 'A2', 'A3', 'B1', 'B2', 'B3', 'C1', 'C2', 'C3'])\r\n\r\n if player_grid[computer_bomb] == 1:\r\n print(\"\\nBOOOOOOOOOOOOOOOOOOM!!!\\n\")\r\n time.sleep(2)\r\n print(f\"The computer bombed {computer_bomb} and SUNK YOUR BATTLESHIP!\")\r\n ending()\r\n\r\n else:\r\n print(f\"The computer bombed {computer_bomb} and missed...\")\r\n time.sleep(1)\r\n battle()\r\n\r\ndef ending():\r\n#this allows the player to play another game after winning or losing.\r\n again = input(\"Would you like to play again? 
'y' or 'n'?\\n> \")\r\n\r\n if again == 'y':\r\n start()\r\n\r\n else:\r\n print(\"Ok, BYE!\")\r\n exit(0)\r\n\r\nstart()\r\n","sub_path":"Basic/battleship.py","file_name":"battleship.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"504176419","text":"items = [1, 2, 3, 4, 5]\n\ndataset = [ [1, 2, 3, 4],\n [1, 2, 3, 4, 5],\n [2, 3, 4],\n [1, 2, 4],\n [2, 3, 5],\n [1, 3, 4],\n [2, 3, 4, 5],\n [1, 3, 4, 5],\n [3, 4, 5],\n [1, 2, 3, 5]\n ]\n\ndef getFreq(s):\n count = 0\n for i in dataset:\n if all(elem in i for elem in s):\n count = count + 1\n return count\n\ndef subsets(s):\n sets = []\n for i in range(1 << len(s)):\n subset = [s[bit] for bit in range(len(s)) if is_bit_set(i, bit)]\n sets.append(subset)\n return sets\n\ndef is_bit_set(num, bit):\n return num & (1 << bit) > 0\n\ndef apriori(minSupport):\n L = []\n L.append([[x] for x in items if getFreq([x]) >= minSupport])\n p = 0\n for k in range(1, len(items)):\n C = []\n S = L[k-1]\n for i in range(len(S) - 1):\n for j in range(i+1, len(S)):\n l = S[i]\n h = S[j]\n if l[:p] == h[:p] and l[-1] < h[-1]:\n sets = subsets(l[:p])\n temp = []\n for s in sets:\n x = [l[-1], h[-1]]\n x.extend(s)\n temp.append(x)\n if all(getFreq(t) >= minSupport for t in temp):\n tmp = l[:p]\n tmp.append(l[-1])\n tmp.append(h[-1])\n C.append(tmp)\n p = p + 1\n if not C:\n break\n L.append(C)\n return L\n\ndef getRules(L, minConfidence):\n for k in L[1:]:\n for l in k:\n maxDen = getFreq(l) * 100 / minConfidence\n for x in range(1, len(l)):\n left = l[:x]\n right = l[x:]\n if getFreq(left) <= maxDen:\n print(str(left).replace('[', '').replace(']', '').replace(' ', '') + '->' + str(right).replace('[', '').replace(']', ''))\n if getFreq(right) <= maxDen:\n print(str(right).replace('[', '').replace(']', '').replace(' ', '') + '->' + str(left).replace('[', '').replace(']', ''))\n\nif __name__ == '__main__':\n L = apriori(4)\n print(L)\n print()\n getRules(L, 80)\n","sub_path":"apriori-rules.py","file_name":"apriori-rules.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"570735777","text":"\nimport re\n\nfrom django.utils.deprecation import MiddlewareMixin\nfrom django.shortcuts import HttpResponse\nfrom django.conf import settings\n\n\nclass RbacMiddleware(MiddlewareMixin):\n\t\"\"\"\n\t用户权限信息校验\n\t\"\"\"\n\tdef process_request(self, request):\n\t\t\"\"\"\n\t\t当用户请求刚进入时触发执行\n\t\t:param request:\n\t\t:return:\n\t\t\"\"\"\n\t\t# 1.获取当前用户请求的URL\n\t\t# 2.获取当前用户session中保存的权限列表(luffy_permission_url_list_key)(URL)\n\t\t# 3.权限信息的匹配\n\n\t\t# 访问:http://127.0.0.1:8000/customer/list/ --path_info 的值为--> /customer/list/\n\t\t# 访问:http://127.0.0.1:8000/customer/list/?id=1&pk=5 --path_info 的值为--> /customer/list/\n\t\t# 访问的URL\n\t\tcurrent_url = request.path_info\n\n\t\t# 增加白名单url\n\n\t\tfor valid_url in settings.VALID_URL_LIST:\n\t\t\tif re.match(valid_url,current_url):\n\t\t\t\t# 白名单中的URL无需权限验证即可访问\n\t\t\t\treturn None # 中间件返回None 可以执行后面的视图函数(return None 中间件不拦截)\n\n\t\t# 获取session信息\n\t\tpermissions_list = request.session.get(settings.PERMISSION_SESSION_KEY)\n\n\t\tif not permissions_list:\n\t\t\treturn HttpResponse('未获取到用户权限信息,请登录!')\n\n\t\tflag = False\n\t\tfor url in permissions_list: # 此时的url必须包含起始和终止符,不然,匹配有bug\n\t\t\treg = \"^%s$\" % url\n\t\t\tif re.match(reg,current_url):\n\t\t\t\tflag = True\n\t\t\t\tbreak\n\n\t\tif not flag: # 有权访问\n\t\t\treturn 
HttpResponse(\"无权访问\")\n\n\n\n\n\n\n","sub_path":"seventh_module/CRM/2.用户登陆权限控制/luffy_permission/rbac/middlewares/rbac.py","file_name":"rbac.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"87186024","text":"from googleapiclient.discovery import build\nfrom extract_plain_text import extract\nimport sys\nfrom sortedcontainers import SortedSet\nfrom stanfordnlp.server import CoreNLPClient\n\n\ndef main():\n google_api_key = sys.argv[1]\n google_engine_id = sys.argv[2]\n\n # integer indicating the relation to extract\n # 1 is for Schools_Attended\n # 2 is for Work_For\n # 3 is for Live_In\n # 4 is for Top_Member_Employees\n r = int(sys.argv[3])\n # extraction confidence threshold\n t = float(sys.argv[4])\n # seed query\n q = sys.argv[5]\n # the number of tuples that we request in the output\n k = int(sys.argv[6])\n\n if r == 1:\n str_r = \"per:schools_attended\"\n elif r == 2:\n str_r = \"per:employee_or_member_of\"\n elif r == 3:\n str_r = \"per:cities_of_residence\"\n elif r == 4:\n str_r = \"org:top_members_employees\"\n\n print(\"____\")\n print(\"Parameters\")\n print(\"Client key = \" + google_api_key)\n print(\"Engine key = \" + google_engine_id)\n print(\"Relation = \" + str_r)\n print(\"Threshold = \" + str(t))\n print(\"Query = \" + q)\n print(\"# of Tuples = \" + str(k))\n print(\"Loading necessary libraries; This should take a minute or so ...\")\n\n service = build(\"customsearch\", \"v1\", developerKey=google_api_key)\n\n X = set()\n conf = dict()\n tuples_for_query_augment = set()\n iteration = 0\n\n while True:\n with CoreNLPClient(annotators=['tokenize', 'ssplit', 'pos', 'lemma', 'ner'], timeout=300000, memory='4G',\n endpoint=\"http://localhost:9000\", be_quiet=True) as pipeline_ner:\n with CoreNLPClient(annotators=['tokenize', 'ssplit', 'pos', 'lemma', 'ner', 'depparse', 'coref', 'kbp'],\n timeout=300000, memory='4G', endpoint=\"http://localhost:9001\",\n be_quiet=True) as pipeline_kbp:\n\n # send query to gcse api and get results\n results = service.cse().list(q=q,\n cx=google_engine_id,\n ).execute()\n\n print(\"=========== Iteration: \" + str(iteration) + \" - Query: \" + q + \" ===========\")\n URL_index = 1\n for item in results['items']:\n print(\"URL (\" + str(URL_index) + \" / \" + str(len(results['items'])) + \"): \" + item['link'])\n extracted_text = extract(item['link'])\n # first, annotate with NER to construct sentences\n ann_ner = pipeline_ner.annotate(extracted_text)\n # should not run depparse over sentences that do not contain named entities of the right type for the relation of interest\n selected_sentences = select_sentences(r, ann_ner)\n for sentence in selected_sentences:\n sentence_ann_doc = pipeline_kbp.annotate(sentence)\n for kbp_sentence in sentence_ann_doc.sentence:\n for kbp_triple in kbp_sentence.kbpTriple:\n status = \"add\"\n if r == 1:\n if kbp_triple.relation == \"per:schools_attended\" and kbp_triple.confidence >= t:\n tup = (kbp_triple.subject, kbp_triple.object)\n if tup in X:\n if conf[tup] < kbp_triple.confidence:\n status = \"update\"\n conf[tup] = kbp_triple.confidence\n else:\n status = \"higher\"\n else:\n X.add(tup)\n conf[tup] = kbp_triple.confidence\n print_relation(kbp_triple, sentence, status)\n elif kbp_triple.relation == \"per:schools_attended\" and kbp_triple.confidence < t:\n status = \"lower\"\n print_relation(kbp_triple, sentence, status)\n elif r == 2:\n if kbp_triple.relation == \"per:employee_or_member_of\" and 
kbp_triple.confidence >= t:\n tup = (kbp_triple.subject, kbp_triple.object)\n if tup in X:\n if conf[tup] < kbp_triple.confidence:\n status = \"update\"\n conf[tup] = kbp_triple.confidence\n else:\n status = \"higher\"\n else:\n X.add(tup)\n conf[tup] = kbp_triple.confidence\n print_relation(kbp_triple, sentence, status)\n elif kbp_triple.relation == \"per:employee_or_member_of\" and kbp_triple.confidence < t:\n status = \"lower\"\n print_relation(kbp_triple, sentence, status)\n elif r == 3:\n if kbp_triple.relation == \"per:cities_of_residence\" and kbp_triple.confidence >= t:\n tup = (kbp_triple.subject, kbp_triple.object)\n if tup in X:\n if conf[tup] < kbp_triple.confidence:\n status = \"update\"\n conf[tup] = kbp_triple.confidence\n else:\n status = \"higher\"\n else:\n X.add(tup)\n conf[tup] = kbp_triple.confidence\n print_relation(kbp_triple, sentence, status)\n elif kbp_triple.relation == \"per:cities_of_residence\" and kbp_triple.confidence < t:\n status = \"lower\"\n print_relation(kbp_triple, sentence, status)\n # print(\n # f\"\\t Confidence: {kbp_triple.confidence};\\t Subject: {kbp_triple.subject};\\t Relation: {kbp_triple.relation}; Object: {kbp_triple.object}\")\n elif r == 4:\n if kbp_triple.relation == \"org:top_members_employees\" and kbp_triple.confidence >= t:\n tup = (kbp_triple.subject, kbp_triple.object)\n if tup in X:\n if conf[tup] < kbp_triple.confidence:\n status = \"update\"\n conf[tup] = kbp_triple.confidence\n else:\n status = \"higher\"\n else:\n X.add(tup)\n conf[tup] = kbp_triple.confidence\n print_relation(kbp_triple, sentence, status)\n elif kbp_triple.relation == \"org:top_members_employees\" and kbp_triple.confidence < t:\n status = \"lower\"\n print_relation(kbp_triple, sentence, status)\n # print(\n # f\"\\t Confidence: {kbp_triple.confidence};\\t Subject: {kbp_triple.subject};\\t Relation: {kbp_triple.relation}; Object: {kbp_triple.object}\")\n\n URL_index += 1\n iteration += 1\n\n sorted_x = SortedSet()\n for key, v in conf.items():\n sorted_x.add((v,) + key)\n\n # the reference implementation prints the top relations at the end of every iteration\n print(\"================== ALL RELATIONS (\" + str(len(sorted_x)) + \") =================\")\n for i in range(len(sorted_x) - 1, -1, -1):\n print(\"Confidence: {} \t | Subject: {} \t | Object: {}\".format(sorted_x[i][0], sorted_x[i][1],\n sorted_x[i][2]))\n\n # if there are at least k tuples,\n # then we're done!\n if len(X) >= k:\n break\n # otherwise, select a tuple from X\n # 1. has not been used for querying yet\n # 2. 
as an extraction confidence that is highest among the tuples in X that have not yet been used for querying\n else:\n for i in range(len(sorted_x) - 1, -1, -1):\n if (sorted_x[i][1], sorted_x[i][2]) not in tuples_for_query_augment:\n tuples_for_query_augment.add((sorted_x[i][1], sorted_x[i][2]))\n q = sorted_x[i][1] + \" \" + sorted_x[i][2]\n\n\ndef select_sentences(relation, doc):\n selected_sentences = [] # list of sentences which contain the entities needed\n for sentence in doc.sentence:\n if relation == 1:\n if any(men.ner == 'PERSON' for men in sentence.token) and any(\n men.ner == 'ORGANIZATION' for men in sentence.token):\n selected_sentences.append(' '.join([t.word for t in sentence.token]))\n elif relation == 2:\n if any(men.ner == 'PERSON' for men in sentence.token) and any(\n men.ner == 'ORGANIZATION' for men in sentence.token):\n selected_sentences.append(' '.join([t.word for t in sentence.token]))\n elif relation == 3:\n if any(men.ner == 'PERSON' for men in sentence.token) and (\n any(men.ner == 'LOCATION' for men in sentence.token) or\n any(men.ner == 'CITY' for men in sentence.token) or\n any(men.ner == 'STATE_OR_PROVINCE' for men in sentence.token) or\n any(men.ner == 'COUNTRY' for men in sentence.token)\n ):\n selected_sentences.append(' '.join([t.word for t in sentence.tokens]))\n\n elif relation == 4:\n if any(men.ner == 'PERSON' for men in sentence.token) and any(\n men.ner == 'ORGANIZATION' for men in sentence.token):\n selected_sentences.append(' '.join([t.word for t in sentence.tokens]))\n return selected_sentences\n\n\ndef print_relation(kbp_triple, sentence, status):\n print(\"=== Extracted Relation ===\")\n transcript = \"\"\n transcript += \"Sentence: \"\n transcript += sentence # sentence\n transcript += \"\\n\"\n transcript += \"Confidence: \"\n transcript += str(kbp_triple.confidence)\n transcript += \";\"\n transcript += \"Subject: \"\n transcript += kbp_triple.subject\n transcript += \";\"\n transcript += \"Object: \"\n transcript += kbp_triple.object\n transcript += \";\\n\"\n if status == \"add\":\n transcript += \"Adding to set of extracted relations\"\n elif status == \"update\":\n transcript += \"The same relation is already present but with a lower confidence. Just updating the confident value.\"\n elif status == \"higher\":\n transcript += \"The same relation is already present with higher (or equal) confidence. Ignoring this.\"\n elif status == \"lower\":\n transcript += \"Confidence is lower than threshold confidence. 
Ignoring this.\"\n transcript += \"\\n==========\"\n print(transcript)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":12283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"578318149","text":"# This parser looks for keyword \"The patient\" and converts it into a unique name\n\nimport re\nimport fileinput\nimport os, sys\nimport shutil\n\n# pattern = re.compile(\"The patient | patient\")\n# match = re.search(pattern, \"The patient gave informed\")\n# print(match.group(0))\n\n# generate a unique name\n\n# process files and find keyword \"The patient\"\n# compile file in one directory\n\n# create directory for keyword \"The patient\"\ndef create_dir(child_dir, parent_dir):\n path1 = parent_dir+\"/\"+child_dir\n if not os.path.exists(path1):\n #print(\"directory not exist\")\n # use octal number 0755 for python 2 & 0o755 for python 3\n os.mkdir(path1,0o755);\n else:\n print(\"directory exists\")\n\n\ndef find_in_dir(parent_dir):\n new_dir = \"male\"\n create_dir(new_dir, parent_dir)\n for index in range(2375):\n filename = parent_dir+\"/\"+str(index)+\".txt\"\n try:\n f = open(filename,'r')\n filedata = f.read()\n f.close()\n except OSError:\n continue\n\n m = re.findall('He\\s', filedata)\n print(m)\n if m: \n # if (filedata.find(\"he\") > 0):\n src = parent_dir+\"/\"+str(index)+\".txt\"\n dst = parent_dir+\"/\"+new_dir+\"/\"+str(index)+\".txt\"\n shutil.move(src,dst)\n\n\ntarget_dir = input(\"input directory:\")\nfind_in_dir(target_dir)\n\n","sub_path":"letters/gender_finder.py","file_name":"gender_finder.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"83538296","text":"from django.shortcuts import render, redirect\nfrom django.views.generic import ListView\nfrom Annotation.models import Vod, Post\nfrom Annotation.forms import PostForm, VodForm\nimport datetime\nfrom django.http import HttpResponseRedirect, Http404, HttpResponse\nfrom django.urls import reverse\nimport math\n\ndef post(request, key):\n #retrieves the specific vod object\n try:\n target_vod = Vod.objects.get(pk=int(key))\n except Vod.DoesNotExist:\n raise Http404(\"Vod does not exist\")\n\n #handles commenting\n if request.method == \"POST\" and 'comment' in request.POST:\n form = PostForm(request.POST)\n user = request.user\n if user is not None:\n if form.is_valid():\n current_post = form.save(commit=False)\n current_post.user = user\n current_post.save()\n target_vod.posts.add(current_post)\n return HttpResponseRedirect(reverse(\"Annotation-post\",args=[key]))\n else:\n print(form.errors)\n\n #loads the form for input if not POST\n else:\n form = PostForm()\n\n #using vod object, retrieves all its posts and renders\n target_posts = Vod.objects.get(pk=int(key)).posts.all().order_by(\"-date\")\n context = {'target_vod': target_vod, 'target_posts': target_posts, 'form':form}\n\n return render(request, 'Annotation/post.html', context)\n\ndef vod(request, page):\n #acquires 24 vods\n page = int(page)\n start = page * 24\n end = start + 24\n queryset = Vod.objects.all().order_by(\"-date\")[start:end]\n\n\n if request.method == \"POST\":\n form = VodForm(request.POST)\n user = request.user\n if user is not None:\n if form.is_valid():\n current_vod = form.save(commit=False)\n current_vod.user = user\n current_vod.save()\n return HttpResponseRedirect(reverse(\"Annotation-feed\", 
kwargs={'page':0}))\n else:\n print(form.errors)\n else:\n form = VodForm()\n\n #used to determine whether to display pages\n pages = Vod.objects.all().count()\n previous = False\n after = False\n if (page - 1) >= 0:\n previous = True\n if pages > (page + 1) * 24:\n after = True\n\n context = {'object_list':queryset, 'form':form, 'page':page, 'previous':previous, 'after':after}\n\n return render(request, 'Annotation/home.html', context)\n\ndef vote(request,vkey, key):\n user = request.user\n #retrieves specific post\n try:\n target_post = Post.objects.get(pk=int(key))\n except Post.DoesNotExist:\n raise Http404(\"Post does not exist\")\n\n if user is not None:\n #user can only upvote or remove previous vote\n if target_post.votes.exists(user.id):\n target_post.votes.down(user.id)\n else:\n target_post.votes.up(user.id)\n target_post.vote_count = target_post.votes.count()\n target_post.save()\n\n return HttpResponse(target_post.vote_count)\n\ndef delete(request, vkey, key):\n user = request.user\n try:\n target_post = Post.objects.get(pk=int(key))\n except Post.DoesNotExist:\n raise Http404(\"Post does not exist\")\n\n if user is not None:\n target_post.delete()\n\n return redirect(post, key=int(vkey))\n\ndef delete_vod(request, vkey):\n user = request.user\n try:\n target_vod = Vod.objects.get(pk=int(vkey))\n except Vod.DoesNotExist:\n raise Http404(\"Vod does not exist\")\n\n if user is not None:\n all_posts = target_vod.posts.all()\n all_posts.delete()\n target_vod.delete()\n\n return redirect(\"Annotation-feed\", page=0)\n\ndef search(request, search_text):\n search_vods = Vod.objects.filter(title__contains=search_text).order_by(\"-date\")\n form = VodForm()\n context = {'object_list':search_vods, 'form':form}\n\n return render(request, 'Annotation/search.html', context)\n","sub_path":"SmashSite/Annotation/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"374859936","text":"\nimport re\n\ndef get_list_tags(saveName):\n with open(saveName) as f:\n results = f.read()\n\n x = re.findall('',results)\n x += re.findall('<(br).*?>',results)\n return set(x)\n\ndef write_script(results):\n saveName = 'venice.MD'\n with open(saveName,'a') as file:\n file.write(results)\n\ndef write_tagnum(saveName):\n with open(saveName) as f:\n results = f.read()\n x = re.findall('', results)\n x += re.findall('<(meta).*?>', results)\n x += re.findall('<(LINK).*?>', results)\n x += re.findall('<(br).*?>', results)\n x += re.findall('<(table).*?>', results)\n\n for each in x:\n each = each.lower()\n if each not in alltag:\n alltag[each] = 1\n else:\n alltag[each] += 1\n\ndef get_scene_script(actName, saveName):\n results = '## ' + actName +'\\n'\n with open(saveName) as f:\n html = f.readlines()\n for each in html:\n if re.search('(.*?)',each):\n results +='\\n*'+re.search('(.*?)',each).group(1)+'*\\n'\n\n if re.search('
(.*?)<',each):\n            results +='\n### ' + re.search('
(.*?)<',each).group(1)+'\\n'\n\n if re.search('(.*?)', each):\n results += re.search('(.*?)', each).group(1) + '\\n'\n\n if re.search('(.*?)',each):\n results +='\\n**'+re.search('(.*?)',each).group(1)+'**\\n\\n'\n\n return results\n\n\ndef get_list_scene():\n name = '../data/Merchant of Venice_ List of Scenes.html'\n with open(name) as f:\n results = f.read()\n act_and_name = re.findall('(Act .*?),.*?\"(.*?)\">',results,re.S)\n\n return act_and_name\n\nalltag = {}\nact_and_name_list = get_list_scene()\nfor act_and_name in act_and_name_list:\n actName = act_and_name[0]\n saveName = '../data/'+act_and_name[1][2:]\n # print(saveName)\n results = get_scene_script(actName, saveName)\n write_script(results)\n get_list_tags(saveName)\n write_tagnum(saveName)\n\nname_list = []\nfor item in sorted(alltag.values(),reverse=True):\n for each in alltag:\n if each not in name_list:\n if alltag[each] == item:\n with open('venice.Tag','a') as f:\n print('标签:'+each +' 值:'+str(alltag[each]))\n f.write('标签:'+each +' 值:'+str(alltag[each])+'\\n')\n\n name_list.append(each)","sub_path":"201906/pdf2markdown/projectvenice7/The Merchant of Venice/document/fixMarkdown.py","file_name":"fixMarkdown.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"295643516","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n-----------------------------------------------------------------------\r\nMLP with tensorflow and keras\r\nkeras is build up either TensorFlow or Theano.\r\n\r\nHere is an example on how to use keras and TensorFlow.\r\nkeras simpler, but TensorFlow give access to more details.\r\n\r\nDr Kamel Saadi\r\n- 21/12/2017\r\n\r\nData can be download from \r\n\r\nhttp://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/\r\n( winequality-white.csv, winequality-red.csv )\r\n----------------------------------------------------------------------\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\ndatFile = \"C:\\\\Users\\saadik1\\\\Downloads\\\\winequality-white.csv\"\r\nwhite = pd.read_csv(datFile, sep=';', header=0)\r\nwhite['type']='white'\r\ndatFile = \"C:\\\\Users\\saadik1\\\\Downloads\\\\winequality-red.csv\"\r\nred = pd.read_csv(datFile, sep=';', header=0)\r\nred['type']='red'\r\ncomb =red.append(white)\r\n\r\n#----------------------- few graphs ----------------------------\r\nplt.scatter(red['quality'], red['sulphates'])\r\n\r\nnp.random.seed(123)\r\n\r\nredlabels = np.unique(red['quality'])\r\n\r\nredcolors = np.random.rand(6,4)\r\n\r\nfor i in range(len(redcolors)):\r\n redy = red['alcohol'][red.quality == redlabels[i]]\r\n redx = red['volatile acidity'][red.quality == redlabels[i]]\r\n plt.scatter(redx, redy, c=redcolors[i]) \r\nplt.title(\"Red Wine\")\r\nplt.xlim([0,1.3])\r\nplt.ylim([7,15])\r\nplt.xlabel(\"Volatile Acidity\")\r\nplt.ylabel(\"Alcohol\")\r\nplt.legend(redlabels, loc='best', bbox_to_anchor=(1.3, 1))\r\nplt.show()\r\n\r\nimport seaborn as sns\r\ncorr = red.corr()\r\nsns.heatmap(corr, \r\n xticklabels=corr.columns.values,\r\n yticklabels=corr.columns.values)\r\nsns.plt.show()\r\n#---------------------------------------------------------------\r\n\r\n#----------- Preprocessing the data ============================# \r\n# quality is excluded \r\nX = comb.iloc[:,:-2]\r\nY = comb.iloc[:,-1]\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import StandardScaler, LabelEncoder\r\n\r\nlb = 
LabelEncoder().fit(Y)\r\ny = lb.transform(Y)\r\n\r\nXtr,Xte,ytr,yte = train_test_split(X,y, test_size = 0.33, \r\n random_state = 234)\r\nscaler = StandardScaler().fit(Xtr)\r\n# if we want the value scaler.mean_, scaler.scale_ this is the std\r\nXtr = scaler.transform(Xtr)\r\nXte = scaler.transform(Xte) # use same scaler\r\n\r\n \r\n#========================== Keras ============================# \r\n \r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nimport keras\r\n\r\nin_dim = Xtr.shape[1] # 11\r\n\r\n \r\n#tbCallBack = keras.callbacks.TensorBoard(log_dir='./finishedCode/Graph', \r\n# histogram_freq=0, write_graph=True, write_images=True)\r\n\r\n \r\n \r\nmodel = Sequential()\r\n# Add an input layer \r\nmodel.add(Dense(12,activation='relu', input_shape=(in_dim,)))\r\nmodel.add(Dense(8, activation='relu'))\r\nmodel.add(Dense(1, activation='sigmoid'))\r\n#from keras.losses import \r\nmodel.compile(loss='binary_crossentropy',\r\n optimizer='adam',\r\n metrics=['accuracy'])\r\n \r\nh=model.fit(Xtr, ytr,\r\n epochs=20, \r\n batch_size=1, \r\n #validation_data=(Xte, yte),\r\n validation_split = 0.30,\r\n #shuffle=True,\r\n verbose=2, \r\n #callbacks=[tbCallBack] \r\n )\r\n# if use of tensorboard incomment collbacks=...\r\nprint(\"test accuracy : \", model.evaluate(Xte,yte)) \r\n\r\nplt.plot(h.history['loss'],label='train loss')\r\nplt.plot(h.history['val_loss'],label='validation loss')\r\nplt.legend()\r\nplt.grid()\r\nplt.show()\r\n\r\n\r\n\r\n\r\n#===================== TensorFlow ============================# \r\n\r\nimport tensorflow as tf\r\n\r\nin_dim = Xtr.shape[1]\r\nn_class = 2\r\n\r\nx_ = tf.placeholder(tf.float32,[None,in_dim])\r\ny_ = tf.placeholder(tf.float32,[None,n_class])\r\n\r\n# weights \r\nw = {\r\n 'h1': tf.Variable(tf.truncated_normal([in_dim, 12])),\r\n 'h2': tf.Variable(tf.truncated_normal([12, 8])), \r\n 'out' : tf.Variable(tf.truncated_normal([8,n_class]))\r\n} \r\n# biases\r\nb = {\r\n 'h1': tf.Variable(tf.truncated_normal([12])),\r\n 'h2': tf.Variable(tf.truncated_normal([8])), \r\n 'out' : tf.Variable(tf.truncated_normal([n_class]))\r\n} \r\n\r\ndef mlp_model(x,w,b):\r\n layer1 = tf.nn.relu(tf.add(tf.matmul(x,w['h1']),b['h1'])) \r\n layer2 = tf.nn.relu(tf.add(tf.matmul(layer1,w['h2']),b['h2'])) \r\n layer_o = tf.nn.sigmoid(tf.add(tf.matmul(layer2,w['out']),b['out'])) \r\n return layer_o\r\n\r\n# ----------- hot encoding ----------------------------\r\nytr_h = np.hstack((ytr.reshape(-1,1),1-ytr.reshape(-1,1))) \r\nyte_h = np.hstack((yte.reshape(-1,1),1-yte.reshape(-1,1))) \r\n\r\nmodel = mlp_model(x_,w,b)\r\n\r\neta = 0.01\r\nloss_func = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(\r\n logits=model, labels=y_))\r\ntrain_steps = tf.train.AdamOptimizer(eta).minimize(loss_func)\r\ncorr_pred = tf.equal(tf.argmax(model,1),tf.argmax(y_,1))\r\naccuracy = tf.reduce_mean(tf.cast(corr_pred,tf.float32))\r\n\r\n#tf.train.batch(batch_size=20)\r\n\r\ninit = tf.global_variables_initializer()\r\n \r\nsess = tf.Session()\r\nsess.run(init)\r\n\r\nepochs=200\r\n\r\n#------- to visualise ...\r\nlogs_path = \"C:\\\\Users\\\\saadik1\\\\Desktop\\\\pythonWork\\\\finishedCode\\\\tfGraph\\\\\"\r\nwriter = tf.summary.FileWriter(logs_path, sess.graph)\r\n# after the session is closed go tot he directory above 'tfGraph'\r\n# type tensorboard --logdir=tfGraph\r\n# http address 0.0.0.0:6006 will display the graph\r\n\r\nfor epoch in range(epochs):\r\n \r\n sess.run(train_steps, feed_dict={x_: Xtr, y_: ytr_h }) \r\n \r\n loss = sess.run(loss_func, feed_dict={x_: 
Xtr, y_: ytr_h })\r\n    \r\n    pred_tr = sess.run(model, feed_dict={x_: Xtr, y_: ytr_h })\r\n    acc_tr = sess.run(accuracy, feed_dict={x_: Xtr, y_:ytr_h} )    \r\n    \r\n    tePred = sess.run(model, feed_dict={x_:Xte})\r\n    acc_te = sess.run(accuracy, feed_dict={x_: Xte, y_:yte_h} )    \r\n\r\n    print(\"epoch = \", epoch,\"loss =\",loss,\" tr_acc= \", acc_tr, \" test_acc= \", acc_te)\r\n\r\nsess.close()\r\n\r\n    \r\n\r\n\r\n\r\n","sub_path":"tensFlow_py/keras_red_wine.py","file_name":"keras_red_wine.py","file_ext":"py","file_size_in_byte":6170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"103626025","text":"import json\nimport re  # needed by re.findall below; missing from the original imports\nimport spacy\n\n# Author: Balaji Ganesan\n# Email: balajinix@gmail.com\n\n# we can load the dataset filename like below\ndataset_filename = \"./documents.json\"\nwith open(dataset_filename, 'r') as f:\n    data = json.load(f)\n\n# this is not a complete list of patterns that you may find\npatterns = [r' XXXX XXXX XXXX XXXX', r'XXXX XXXX XXXX', r'XXXX XXXX', r'XXXX', r'XX/XX/XXXX', r'XX/XX/']\nregex_patterns = '|'.join(patterns)\n\n# we'll use spacy to get the sentences from the text https://pypi.org/project/spacy/\nnlp = spacy.load('en')\njson_output = []\nfor doc in data:\n    json_element = {}\n    sentence = nlp(doc)\n    for sent in sentence.sents:\n        text = sent.text\n        # sanity check\n        if 'XX' not in text:\n            continue\n        # we don't want too short sentences\n        if (len(text) < 80):\n            continue\n        # apostrophes create problems in tokenization, removing is fine for this hackathon\n        if '\\'' in text:\n            continue\n        # we don't want unnecessary linefeeds\n        text = text.replace(\"\\n\", \" \")\n        text = text.strip()\n        # let's see if the last character in the sentence is a period\n        ch = text[-1]\n        if '.' != ch:\n            continue\n        # let's see if the first character in the sentence is uppercase\n        ch = text[0]\n        if not ch.isupper():\n            continue\n        # at least one of the redaction patterns above should be present in the sentence\n        mentions = re.findall(regex_patterns, text)\n        if (len(mentions) != 1):\n            continue\n        json_output.append(text)\n\noutput_filename = \"./dataset.json\"\nwith open(output_filename, \"w\") as f:\n    json.dump(json_output, f, indent=2, separators=(',', ': '))\n    f.write('\\n')\n","sub_path":"additional_data/create_dataset.py","file_name":"create_dataset.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"480385197","text":"from openpyxl import load_workbook\nfrom openpyxl.utils import get_column_letter\nimport datetime\nfrom datetime import date\n\nclass ExcelManager(object):\n\n\n    def __init__(self, targhet):\n        self.dataA5 = date(2018, 12, 16)\n        self.targhet = targhet\n        self.file_name = targhet + '.xlsx'\n        self.wb = self.load_excel(self.file_name)\n\n    def load_excel(self, player):\n        try:\n            wb = load_workbook(player)\n        except Exception as e:\n            template = load_workbook('template.xlsx')\n            template.save(player)\n            template.close()\n            wb = load_workbook(player)\n        return wb\n\n    def select_sheet(self, wb, coords, isMoon):\n        sheet_name = coords.replace(\":\", \"-\");\n        if isMoon:\n            sheet_name = sheet_name + ' (Luna)'\n\n        try:\n            ws = wb.get_sheet_by_name(sheet_name)\n        except Exception as e:\n            ws = wb.get_sheet_by_name(\"Empty\")\n            ws = wb.copy_worksheet(ws)\n            ws.title = sheet_name\n        return ws\n\n    def findRow(self):\n        now = datetime.datetime.now()\n        oggi = date(now.year, now.month, now.day)\n        delta = oggi - self.dataA5\n        return delta.days + 5\n\n\n    def findCol(self, 
ws):\n now = datetime.datetime.now()\n time = str(now.hour).zfill(2) + str(now.minute).zfill(2)\n nOra = int(time)\n\n i = 2\n\n while i < 49:\n i = i + 1\n\n cella = get_column_letter(i) + str(4)\n calla2 = get_column_letter(i+1) + str(4)\n\n _min_t = int(str(ws[cella].value).replace(':', ''))\n _max_t = int(str(ws[calla2].value).replace(':', ''))\n\n if (nOra > _min_t) and (nOra < _max_t):\n return get_column_letter(i)\n\n return get_column_letter(i)\n\n def scrivi(self, ws, row, col, time):\n val = ws[col + str(row)].value\n\n if val == None:\n ws[col + str(row)] = int(time)\n else:\n if int(time) < int(val):\n ws[col + str(row)] = int(time)\n\n def write_time(self, coords, isMoon, time):\n if(time == \">60\"):\n time = 999\n\n ws = self.select_sheet(self.wb, coords, isMoon)\n row = self.findRow()\n col = self.findCol(ws)\n self.scrivi(ws, row, col, time);\n\n ws = self.wb.get_sheet_by_name(\"Attivita Generale\")\n self.scrivi(ws, row, col, time);\n\n self.wb.save(self.file_name)\n\nif __name__ == '__main__':\n excelManager = ExcelManager(\"Prova\")\n excelManager.write_time(\"Ciao\", True, 10)\n","sub_path":"excelManager.py","file_name":"excelManager.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"631809088","text":"import sys\n\n#In this veraible stores size of matrixes in a list of them\nmatrix_size = 0\n\ndef bad_print(message, matrix = None, matrix_print = None):\n print('Error: ' + message)\n if matrix is not None:\n matrix_print(matrix)\n print('Exiting...')\n sys.exit()\n\ndef validate_matrix(matrix, matrix_print):\n global matrix_size\n\n if len(matrix) == 0:\n bad_print('Matrix can not be zero sized!', matrix, matrix_print)\n\n #Square check\n row_len = len(matrix[0])\n for row in matrix:\n if len(row) != row_len:\n bad_print('Matrix should be squared!', matrix, matrix_print)\n \n if len(matrix) != row_len:\n bad_print('Matrix should be squared!', matrix, matrix_print)\n\n #Same with other matrixes\n if matrix_size == 0:\n matrix_size = len(matrix)\n else: \n if len(matrix) != matrix_size:\n bad_print('This matrix size is not the same with others!', matrix, matrix_print)\n\n #Checking matrix's elements\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n if i == j:\n if matrix[i][j] is not None:\n bad_print('Elements on the main diagonal should be None!', matrix, matrix_print)\n else:\n continue\n \n if matrix[i][j] != 1 and matrix[i][j] != 0 and matrix[i][j] != 0.5:\n bad_print('Matrix should be filled with 1, 0 or 0.5', matrix, matrix_print)\n\n if matrix[i][j] + matrix[j][i] != 1:\n bad_print('Opposite elements in sum should be one!', matrix, matrix_print)\n\ndef validate_precision(precision):\n #Checking type \n if type(precision) is not float:\n bad_print('Precision should be float number!')\n sys.exit()\n\n #Checking value\n if precision < 0 or precision > 1:\n bad_print(\"Precision's value should be between zero and one!\")\n sys.exit()\n\ndef validate_iterations(iterations):\n #Checking type\n if type(iterations) is not int:\n bad_print('Iterations should be integer number!')\n sys.exit()\n\n #Checking value\n if iterations < 1:\n bad_print(\"Iteration's value should be more then one!\")\n sys.exit()\n\ndef validate_experts(experts_num):\n #Checking value\n if experts_num < 2:\n print('Too few experts matrix! 
(Minimum 2)')\n        sys.exit()\n","sub_path":"LR_1/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"463090889","text":"# i7-4870HQ 4 cores, 8 threads, 2.5 GHz -> 3.7 GHz\n\n# You are given the following information, but you may prefer to do some research for yourself.\n\n# 1 Jan 1900 was a Monday.\n# Thirty days has September,\n# April, June and November.\n# All the rest have thirty-one,\n# Saving February alone,\n# Which has twenty-eight, rain or shine.\n# And on leap years, twenty-nine.\n# A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400.\n# How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?\n\ndef first_sundays(first_day, month_dict):\n\n\tsunday_counter = 0\n\n\tif first_day == 0:\n\t\tsunday_counter += 1\n\n\tfor month in range(1,13):\n\t\tfirst_day = (first_day + month_dict[month]) % 7\n\t\t# skip the 13th first-of-month (1 Jan of the next year): the next call counts it,\n\t\t# so counting it here as well double-counted Sundays that fall on 1 Jan\n\t\tif month < 12 and first_day == 0:\n\t\t\tsunday_counter += 1\n\n\treturn sunday_counter, first_day\n\nif __name__ == '__main__':\n\n\tmonth_dict = { 1: 31, 2: 28, 3: 31, 4: 30, 5: 31, 6: 30, 7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}\n\tleap_month_dict = { 1: 31, 2: 29, 3: 31, 4: 30, 5: 31, 6: 30, 7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}\n\n\tdays_counter = 0\n\tfirst_day = 2 # January 1 1901 was a Tuesday (We're using 0 for Sunday)\n\n\tfor year in range(1901, 2001):\n\t\tprint(first_day, days_counter)\n\t\tif (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):\n\t\t\tsundays, first_day = first_sundays(first_day, leap_month_dict)\n\t\telse:\n\t\t\tsundays, first_day = first_sundays(first_day, month_dict)\n\t\tdays_counter += sundays\n\n\tprint(days_counter)","sub_path":"euler19.py","file_name":"euler19.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"613751857","text":"import time\n\n\ninicio = time.time()\n\narq = open('dataset-2-a.csv', 'r')\ntexto = arq.read().split('\\n')\n\n# compare numerically and skip blank lines: max() on raw strings compares lexicographically\nresposta = max((n for n in texto if n.strip()), key=int)\nprint(resposta+\"--------test-------\")\narq.close()\narquivoFinal=open(\"resposta 2-A.txt\",\"w\")\n\n\nsegundos = time.time()\n# elapsed time since 'inicio', in milliseconds (the original wrote the absolute timestamp)\nmilissegundos = (segundos - inicio) * 1000\nprint(milissegundos)\narquivoFinal.write(\"Largest number:\" + str(resposta)+ \"\\n\"+ \"Time elapsed in milliseconds:\"+str(milissegundos))\narquivoFinal.close()\n","sub_path":"BuscarNumero.py","file_name":"BuscarNumero.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"188756307","text":"# coding: utf-8\n\nimport collections\n\n\ndef n_gram(seq, n):\n    s = list(seq)\n    s.append(\"\")\n    return collections.Counter([tuple(s[i:i+n]) for i in range(len(s) - n + 1)])\n\n\ndef _main() -> int:\n    s1 = \"paraparaparadise\"\n    s2 = \"paragraph\"\n    cb1 = n_gram(s1, 2)\n    cb2 = n_gram(s2, 2)\n    X = set(cb1.keys())\n    Y = set(cb2.keys())\n\n    print(\"X | Y: \" + str(sorted(X | Y)))\n    print(\"X & Y: \" + str(sorted(X & Y)))\n    print(\"X - Y: \" + str(sorted(X - Y)))\n    print(\"se in X: \" + str((\"s\", \"e\") in X))\n    print(\"se in Y: \" + str((\"s\", \"e\") in Y))\n\n    return 0\n\n\nif __name__ == \"__main__\":\n    import sys\n    sys.exit(_main())\n","sub_path":"Python/chapter1/06.py","file_name":"06.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"22759048","text":"#!/usr/bin/env python\n# coding=utf-8\n\n'''\nRecursive call stack example: factorial, 5! = 5*4*3*2*1\n\\"Grokking Algorithms\\" (算法图解), p. 36\n'''\n\n\ndef fact(x):\n    if x == 1:\n        return 1\n    else:\n        return x * fact(x-1)\n\n\nif __name__ == '__main__':\n    print(fact(3))","sub_path":"algorithm_python_suanfatujie/3_3_2_recurisive_call.py","file_name":"3_3_2_recurisive_call.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"132981481","text":"from pygame import *\nfrom math import sqrt  # Vector3 below calls sqrt(); it is not guaranteed to be re-exported by the star imports\nfrom doodle_engine_core.matrix44 import *\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\nclass Game:\n    def __init__(self, size, title=\"\", fullscreen=False):\n        if fullscreen:\n            self.display=display.set_mode(size, FULLSCREEN|OPENGL)\n        else:\n            self.display=display.set_mode(size, OPENGL)\n        display.set_caption(title)\n        glEnable(GL_DEPTH_TEST)\n        glShadeModel(GL_FLAT)\n        glClearColor(1.0, 1.0, 1.0, 0.0)\n        glEnable(GL_COLOR_MATERIAL)\n        glEnable(GL_LIGHTING)\n        glEnable(GL_LIGHT0)\n        glLight(GL_LIGHT0, GL_POSITION,  (0, 1, 1, 0))\n        w,h=size\n        Game.resize(w,h)\n        self.title=title\n        self.size=size\n    def resize(width, height):\n        glViewport(0, 0, width, height)\n        glMatrixMode(GL_PROJECTION)\n        glLoadIdentity()\n        gluPerspective(60.0, float(width)/height, .1, 1000.)\n        glMatrixMode(GL_MODELVIEW)\n        glLoadIdentity()\n    def __str__(self):\n        return('main Game object title='+self.title+' size='+str(self.size)+' - doodle3D.Game instance')\n\nclass Cube(object):\n\n\n    def __init__(self, position, color):\n\n        self.position = position\n        self.color = color\n\n    # Cube information\n\n    num_faces = 6\n\n    vertices = [ (0.0, 0.0, 1.0),\n                 (1.0, 0.0, 1.0),\n                 (1.0, 1.0, 1.0),\n                 (0.0, 1.0, 1.0),\n                 (0.0, 0.0, 0.0),\n                 (1.0, 0.0, 0.0),\n                 (1.0, 1.0, 0.0),\n                 (0.0, 1.0, 0.0) ]\n\n    normals = [ (0.0, 0.0, +1.0),  # front\n                (0.0, 0.0, -1.0),  # back\n                (+1.0, 0.0, 0.0),  # right\n                (-1.0, 0.0, 0.0),  # left\n                (0.0, +1.0, 0.0),  # top\n                (0.0, -1.0, 0.0) ]  # bottom\n\n    vertex_indices = [ (0, 1, 2, 3),  # front\n                       (4, 5, 6, 7),  # back\n                       (1, 5, 6, 2),  # right\n                       (0, 4, 7, 3),  # left\n                       (3, 2, 6, 7),  # top\n                       (0, 1, 5, 4) ]  # bottom\n\n    def render(self):\n\n        # Set the cube color, applies to all vertices till next call\n        glColor( self.color )\n\n        # Adjust all the vertices so that the cube is at self.position\n        vertices = []\n        for v in self.vertices:\n            vertices.append( tuple(Vector3(v)+ self.position) )\n\n\n        # Draw all 6 faces of the cube\n        glBegin(GL_QUADS)\n\n        for face_no in range(self.num_faces):\n\n            glNormal3dv( self.normals[face_no] )\n\n            v1, v2, v3, v4 = self.vertex_indices[face_no]\n\n            glVertex( vertices[v1] )\n            glVertex( vertices[v2] )\n            glVertex( vertices[v3] )\n            glVertex( vertices[v4] )\n\n        glEnd()\nclass Mosaic(object):\n    def __init__(self, pic):\n\n        map_surface = image.load(pic)\n        map_surface.lock()\n\n        w, h = map_surface.get_size()\n\n        self.cubes = []\n\n        # Create a cube for every non-transparent pixel\n        for y in range(h):\n            for x in range(w):\n\n                r, g, b, a = map_surface.get_at((x, y))\n\n                if (r,g,b) != (255,255,255):\n\n                    gl_col = (r/255.0, g/255.0, b/255.0)\n                    position = (float(x), 0.0, float(y))\n                    cube = Cube( position, gl_col )\n                    self.cubes.append(cube)\n\n\n        map_surface.unlock()\n\n        self.display_list = None\n\n    def render(self):\n\n        if self.display_list is None:\n\n            # Create a display list\n            self.display_list = glGenLists(1)\n            glNewList(self.display_list, GL_COMPILE)\n\n            # Draw the cubes\n            for cube in self.cubes:\n                cube.render()\n\n            # End the display list\n            glEndList()\n\n        else:\n\n            # Render the display list\n            
glCallList(self.display_list)\n####VECTOR3 CLASS####\nclass Vector3(object):\n \n __slots__ = ('_v',)\n \n _gameobjects_vector = 3\n \n \n def __init__(self, *args):\n \"\"\"Creates a Vector3 from 3 numeric values or a list-like object\n containing at least 3 values. No arguments result in a null vector.\n \n \"\"\"\n if len(args) == 3:\n self._v = map(float, args[:3])\n return\n \n if not args:\n self._v = [0., 0., 0.]\n elif len(args) == 1:\n self._v = map(float, args[0][:3])\n else:\n raise ValueError(\"Vector3.__init__ takes 0, 1 or 3 parameters\")\n \n \n @classmethod\n def from_points(cls, p1, p2):\n \n v = cls.__new__(cls, object)\n ax, ay, az = p1\n bx, by, bz = p2\n v._v = [bx-ax, by-ay, bz-az]\n \n return v\n \n @classmethod\n def from_floats(cls, x, y, z):\n \"\"\"Creates a Vector3 from individual float values.\n Warning: There is no checking (for efficiency) here: x, y, z _must_ be\n floats.\n \n \"\"\"\n v = cls.__new__(cls, object)\n v._v = [x, y, z]\n return v\n \n \n @classmethod\n def from_iter(cls, iterable):\n \"\"\"Creates a Vector3 from an iterable containing at least 3 values.\"\"\"\n next = iter(iterable).next\n v = cls.__new__(cls, object)\n v._v = [ float(next()), float(next()), float(next()) ]\n return v\n \n @classmethod\n def _from_float_sequence(cls, sequence):\n v = cls.__new__(cls, object)\n v._v = list(sequence[:3])\n return v\n \n def copy(self):\n \"\"\"Returns a copy of this vector.\"\"\"\n \n v = self.__new__(self.__class__, object)\n v._v = self._v[:]\n return v\n #return self.from_floats(self._v[0], self._v[1], self._v[2])\n \n __copy__ = copy\n \n def _get_x(self):\n return self._v[0]\n def _set_x(self, x):\n try:\n self._v[0] = 1.0 * x\n except:\n raise TypeError(\"Must be a number\")\n x = property(_get_x, _set_x, None, \"x component.\")\n \n def _get_y(self):\n return self._v[1]\n def _set_y(self, y):\n try:\n self._v[1] = 1.0 * y\n except:\n raise TypeError(\"Must be a number\")\n y = property(_get_y, _set_y, None, \"y component.\")\n \n def _get_z(self):\n return self._v[2]\n def _set_z(self, z):\n try:\n self._v[2] = 1.0 * z\n except:\n raise TypeError(\"Must be a number\")\n z = property(_get_z, _set_z, None, \"z component.\")\n \n def _get_length(self):\n x, y, z = self._v\n return sqrt(x*x + y*y + z*z)\n \n def _set_length(self, length):\n v = self._v\n try:\n x, y, z = v\n l = length / sqrt(x*x + y*y +z*z)\n except ZeroDivisionError:\n v[0] = 0.\n v[1] = 0.\n v[2] = 0.\n return self\n \n v[0] = x*l\n v[1] = y*l\n v[2] = z*l\n \n length = property(_get_length, _set_length, None, \"Length of the vector\")\n \n def unit(self):\n \"\"\"Returns a unit vector.\"\"\"\n x, y, z = self._v\n l = sqrt(x*x + y*y + z*z)\n return self.from_floats(x/l, y/l, z/l)\n \n \n def set(self, x, y, z):\n \"\"\"Sets the components of this vector.\n x -- x component\n y -- y component\n z -- z component\n \n \"\"\"\n \n v = self._v\n try:\n v[0] = x * 1.0\n v[1] = y * 1.0\n v[2] = z * 1.0\n except TypeError:\n raise TypeError(\"Must be a number\")\n return self\n \n \n\n \n \n def __repr__(self):\n \n x, y, z = self._v\n return \"Vector3(%s, %s, %s)\" % (x, y, z)\n \n \n def __len__(self):\n \n return 3\n \n def __iter__(self):\n \"\"\"Iterates the components in x, y, z order.\"\"\"\n return iter(self._v[:])\n \n def __getitem__(self, index):\n \"\"\"Retrieves a component, given its index.\n \n index -- 0, 1 or 2 for x, y or z\n \n \"\"\"\n try:\n return self._v[index]\n except IndexError:\n raise IndexError(\"There are 3 values in this object, index should be 0, 1 
or 2!\")\n \n def __setitem__(self, index, value):\n \"\"\"Sets a component, given its index.\n \n index -- 0, 1 or 2 for x, y or z\n value -- New (float) value of component\n \n \"\"\"\n \n try:\n self._v[index] = 1.0 * value\n except IndexError:\n raise IndexError(\"There are 3 values in this object, index should be 0, 1 or 2!\")\n except TypeError:\n raise TypeError(\"Must be a number\")\n \n \n def __eq__(self, rhs):\n \n \"\"\"Test for equality\n \n rhs -- Vector or sequence of 3 values\n \n \"\"\"\n \n x, y, z = self._v\n xx, yy, zz = rhs\n return x==xx and y==yy and z==zz\n \n def __ne__(self, rhs):\n \n \"\"\"Test of inequality\n \n rhs -- Vector or sequenece of 3 values\n \n \"\"\"\n \n x, y, z = self._v\n xx, yy, zz = rhs\n return x!=xx or y!=yy or z!=zz\n \n def __hash__(self):\n \n return hash(self._v)\n \n def __add__(self, rhs):\n \"\"\"Returns the result of adding a vector (or collection of 3 numbers)\n from this vector.\n \n rhs -- Vector or sequence of 2 values\n \n \"\"\"\n \n x, y, z = self._v\n ox, oy, oz = rhs\n return self.from_floats(x+ox, y+oy, z+oz)\n \n \n def __iadd__(self, rhs):\n \"\"\"Adds another vector (or a collection of 3 numbers) to this vector.\n \n rhs -- Vector or sequence of 2 values\n \n \"\"\"\n ox, oy, oz = rhs\n v = self._v\n v[0] += ox\n v[1] += oy\n v[2] += oz\n return self\n \n \n def __radd__(self, lhs):\n \n \"\"\"Adds vector to this vector (right version)\n \n lhs -- Left hand side vector or sequence\n \n \"\"\"\n \n x, y, z = self._v\n ox, oy, oz = lhs\n return self.from_floats(x+ox, y+oy, z+oz)\n \n \n \n def __sub__(self, rhs):\n \"\"\"Returns the result of subtracting a vector (or collection of\n 3 numbers) from this vector.\n \n rhs -- 3 values\n \n \"\"\"\n \n x, y, z = self._v\n ox, oy, oz = rhs\n return self.from_floats(x-ox, y-oy, z-oz)\n \n \n def _isub__(self, rhs):\n \"\"\"Subtracts another vector (or a collection of 3 numbers) from this\n vector.\n \n rhs -- Vector or sequence of 3 values\n \n \"\"\"\n \n ox, oy, oz = rhs\n v = self._v\n v[0] -= ox\n v[1] -= oy\n v[2] -= oz\n return self\n \n def __rsub__(self, lhs):\n \n \"\"\"Subtracts a vector (right version)\n \n lhs -- Left hand side vector or sequence\n \n \"\"\"\n \n x, y, z = self._v\n ox, oy, oz = lhs\n return self.from_floats(ox-x, oy-y, oz-z)\n \n def scalar_mul(self, scalar):\n \n v = self._v\n v[0] *= scalar\n v[1] *= scalar\n v[2] *= scalar\n \n def vector_mul(self, vector):\n \n x, y, z = vector\n v= self._v\n v[0] *= x\n v[1] *= y\n v[2] *= z\n \n def get_scalar_mul(self, scalar):\n \n x, y, z = self._v\n return self.from_floats(x*scalar, y*scalar, z*scalar)\n \n def get_vector_mul(self, vector):\n \n x, y, z = self._v\n xx, yy, zz = vector\n return self.from_floats(x * xx, y * yy, z * zz)\n \n def __mul__(self, rhs):\n \"\"\"Return the result of multiplying this vector by another vector, or\n a scalar (single number).\n \n \n rhs -- Vector, sequence or single value.\n \n \"\"\"\n \n x, y, z = self._v\n if hasattr(rhs, \"__getitem__\"):\n ox, oy, oz = rhs\n return self.from_floats(x*ox, y*oy, z*oz)\n else:\n return self.from_floats(x*rhs, y*rhs, z*rhs)\n \n \n def __imul__(self, rhs):\n \"\"\"Multiply this vector by another vector, or a scalar\n (single number).\n \n rhs -- Vector, sequence or single value.\n \n \"\"\"\n \n v = self._v\n if hasattr(rhs, \"__getitem__\"):\n ox, oy, oz = rhs\n v[0] *= ox\n v[1] *= oy\n v[2] *= oz\n else:\n v[0] *= rhs\n v[1] *= rhs\n v[2] *= rhs\n \n return self\n \n def __rmul__(self, lhs):\n \n x, y, z = self._v\n if hasattr(lhs, 
\"__getitem__\"):\n ox, oy, oz = lhs\n return self.from_floats(x*ox, y*oy, z*oz)\n else:\n return self.from_floats(x*lhs, y*lhs, z*lhs)\n \n \n def __div__(self, rhs):\n \"\"\"Return the result of dividing this vector by another vector, or a scalar (single number).\"\"\"\n \n x, y, z = self._v\n if hasattr(rhs, \"__getitem__\"):\n ox, oy, oz = rhs\n return self.from_floats(x/ox, y/oy, z/oz)\n else:\n return self.from_floats(x/rhs, y/rhs, z/rhs)\n \n \n def __idiv__(self, rhs):\n \"\"\"Divide this vector by another vector, or a scalar (single number).\"\"\"\n \n v = self._v\n if hasattr(rhs, \"__getitem__\"):\n v[0] /= ox\n v[1] /= oy\n v[2] /= oz\n else:\n v[0] /= rhs\n v[1] /= rhs\n v[2] /= rhs\n \n return self\n \n \n def __rdiv__(self, lhs):\n \n x, y, z = self._v\n if hasattr(lhs, \"__getitem__\"):\n ox, oy, oz = lhs\n return self.from_floats(ox/x, oy/y, oz/z)\n else:\n return self.from_floats(lhs/x, lhs/y, lhs/z)\n \n def scalar_div(self, scalar):\n \n v = self._v\n v[0] /= scalar\n v[1] /= scalar\n v[2] /= scalar\n \n def vector_div(self, vector):\n \n x, y, z = vector\n v= self._v\n v[0] /= x\n v[1] /= y\n v[2] /= z\n \n def get_scalar_div(self, scalar):\n \n x, y, z = self.scalar\n return self.from_floats(x / scalar, y / scalar, z / scalar)\n \n def get_vector_div(self, vector):\n \n x, y, z = self._v\n xx, yy, zz = vector\n return self.from_floats(x / xx, y / yy, z / zz)\n \n def __neg__(self):\n \"\"\"Returns the negation of this vector (a vector pointing in the opposite direction.\n eg v1 = Vector(1,2,3)\n print -v1\n >>> (-1,-2,-3)\n \n \"\"\"\n x, y, z = self._v\n return self.from_floats(-x, -y, -z)\n \n def __pos__(self):\n \n return self.copy()\n \n \n def __nonzero__(self):\n \n x, y, z = self._v\n return bool(x or y or z)\n \n \n def __call__(self, keys):\n \"\"\"Returns a tuple of the values in a vector\n \n keys -- An iterable containing the keys (x, y or z)\n eg v = Vector3(1.0, 2.0, 3.0)\n v('zyx') -> (3.0, 2.0, 1.0)\n \n \"\"\"\n ord_x = ord('x')\n v = self._v\n return tuple( v[ord(c)-ord_x] for c in keys )\n \n \n def as_tuple(self):\n \"\"\"Returns a tuple of the x, y, z components. A little quicker than\n tuple(vector).\"\"\"\n \n return tuple(self._v)\n \n \n def scale(self, scale):\n \"\"\"Scales the vector by onther vector or a scalar. Same as the\n *= operator.\n \n scale -- Value to scale the vector by\n \n \"\"\"\n v = self._v\n if hasattr(scale, \"__getitem__\"):\n ox, oy, oz = scale\n v[0] *= ox\n v[1] *= oy\n v[2] *= oz\n else:\n v[0] *= scale\n v[1] *= scale\n v[2] *= scale\n \n return self\n \n \n def get_length(self):\n \"\"\"Calculates the length of the vector.\"\"\"\n \n x, y, z = self._v\n return sqrt(x*x + y*y +z*z)\n get_magnitude = get_length\n \n def set_length(self, new_length):\n \"\"\"Sets the length of the vector. 
(Normalises it then scales it)\n \n new_length -- The new length of the vector.\n \n \"\"\"\n v = self._v\n try:\n x, y, z = v\n l = new_length / sqrt(x*x + y*y + z*z)\n except ZeroDivisionError:\n v[0] = 0.0\n v[1] = 0.0\n v[2] = 0.0\n return self\n \n v[0] = x*l\n v[1] = y*l\n v[2] = z*l\n \n return self\n \n \n def get_distance_to(self, p):\n \"\"\"Returns the distance of this vector to a point.\n \n p -- A position as a vector, or collection of 3 values.\n \n \"\"\"\n ax, ay, az = self._v\n bx, by, bz = p\n dx = ax-bx\n dy = ay-by\n dz = az-bz\n return sqrt( dx*dx + dy*dy + dz*dz )\n \n \n def get_distance_to_squared(self, p):\n \"\"\"Returns the squared distance of this vector to a point.\n \n p -- A position as a vector, or collection of 3 values.\n \n \"\"\"\n ax, ay, az = self._v\n bx, by, bz = p\n dx = ax-bx\n dy = ay-by\n dz = az-bz\n return dx*dx + dy*dy + dz*dz\n \n \n def normalise(self):\n \"\"\"Scales the vector to be length 1.\"\"\"\n v = self._v\n x, y, z = v\n l = sqrt(x*x + y*y + z*z)\n try:\n v[0] /= l\n v[1] /= l\n v[2] /= l\n except ZeroDivisionError:\n v[0] = 0.0\n v[1] = 0.0\n v[2] = 0.0\n return self\n normalize = normalise\n \n def get_normalised(self):\n \n x, y, z = self._v\n l = sqrt(x*x + y*y + z*z)\n return self.from_floats(x/l, y/l, z/l)\n get_normalized = get_normalised\n \n \n def in_sphere(self, sphere):\n \"\"\"Returns true if this vector (treated as a position) is contained in\n the given sphere.\n \n \"\"\"\n \n return distance3d(sphere.position, self) <= sphere.radius\n \n \n def dot(self, other):\n \n \"\"\"Returns the dot product of this vector with another.\n \n other -- A vector or tuple\n \n \"\"\"\n x, y, z = self._v\n ox, oy, oz = other\n return x*ox + y*oy + z*oz\n \n def cross(self, other):\n \n \"\"\"Returns the cross product of this vector with another.\n \n other -- A vector or tuple\n \n \"\"\"\n \n x, y, z = self._v\n bx, by, bz = other\n return self.from_floats( y*bz - by*z,\n z*bx - bz*x,\n x*by - bx*y )\n \n def cross_tuple(self, other):\n \n \"\"\"Returns the cross product of this vector with another, as a tuple.\n This avoids the Vector3 construction if you don't need it.\n \n other -- A vector or tuple\n \n \"\"\"\n \n x, y, z = self._v\n bx, by, bz = other\n return ( y*bz - by*z,\n z*bx - bz*x,\n x*by - bx*y )\n \n \ndef distance3d_squared(p1, p2):\n x, y, z = p1\n xx, yy, zz = p2\n dx = x - xx\n dy = y - yy\n dz = z - zz\n return dx*dx + dy*dy +dz*dz\ndef distance3d(p1, p2):\n x, y, z = p1\n xx, yy, zz = p2\n dx = x - xx\n dy = y - yy\n dz = z - zz\n return sqrt(dx*dx + dy*dy +dz*dz)\ndef centre_point3d(points):\n return sum( Vector3(p) for p in points ) / len(points)\n","sub_path":"doodle_engine_core/doodle3D.py","file_name":"doodle3D.py","file_ext":"py","file_size_in_byte":21222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"95312913","text":"#!/usr/bin/env python\nimport MySQLdb as mysql\nfrom flask import Flask,request,render_template\nimport json\n\napp = Flask(__name__)\n\n#connect to mysql\ncon = mysql.connect(user='root',passwd='lyao36843',db='liaoyao')\ncon.autocommit(True)\ncur = con.cursor()\n\n\n@app.route('/')\ndef index():\n\treturn render_template('index.html')\n\n@app.route('/list')\ndef list():\n\tsql = 'select * from server'\n\tcur.execute(sql)\n\treturn json.dumps(cur.fetchall())\n\n@app.route('/add')\ndef add():\n\tname = request.args.get('name')\n\tmem = request.args.get('mem')\n\tsql = 'insert into server (server,memory) values (\"%s\",%s)' % 
(name,mem)\n\tcur.execute(sql)\n\treturn 'ok'\n\n@app.route('/del')\ndef delete():\n\tserver_id = request.args.get('id')\n\tsql = 'delete from server where id=' + server_id\n\tcur.execute(sql)\n\treturn 'ok'\n\t\nif __name__ == '__main__':\n\tapp.run(host='0.0.0.0',debug=True,port=9092)","sub_path":"08/liaoyao/flask_web.py","file_name":"flask_web.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"16097695","text":"#classify.py\n# -*- coding: utf-8 -*-\n\n\"\"\"\n@author Falko Benthin\n@Date 24.01.2014\n@brief classificator for alarms\n\"\"\"\n\nimport numpy as np\n#import scipy as sp\nimport logging\nimport readConfig as rc\n\nclass Classify():\n\tdef __init__(self):\n\t\tself.thresholdProbability = rc.config.getfloat('classification','thresholdProbability')\n\t\tself.thresholdCosSimilarity = rc.config.getfloat('classification','thresholdCosSimilarity')\n\t\t\n\t\n\t\"\"\"\n\tcheck, if recent bahavior is conspicuous\n\tthis is the case, if there no activity or a lot activity\n\twhen there is no activity, the senior could lie anywhere and we have to check, if no activity at this time is normal\n\twhen there is a lot activity, more than 3*interval lenght, the senior could fallen in shower, so we also have to check, if a lot aktivity is normal\n\tIs there only a litte bit activity, in one or two time slices, everything seems ok and we have to do nothing\n\t@param np.array recentBehavior\n\t@return bool\n\t\"\"\"\n\tdef suspiciousBehavior(self, recentBehavior):\n\t\t\"\"\"\n\t\tlength l (euclidic Norm, Skalarprodukt) of current behavior vector \n\t\tif 0<||v||_2<||(1.0,1.0,1.0)||_2 everthing is fine\n\t\t||v||_2 = sqrt((v_1)^2 + (v_2)^2 + ... + (v_n)^2)\n\t\t\n\t\t\"\"\"\n\t\tlenghtV = np.linalg.norm(recentBehavior)\n\t\tlogging.debug(\"aktueller Vektor: %s La:nge: %s\" % (recentBehavior, lenghtV))\n\t\tif(lenghtV == 0.0 or lenghtV == np.sqrt(1.0*recentBehavior.shape[0])):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\t\n\t\"\"\"\n\tcheck, if usually bahavior was different from recent, clean ans smooth values first\n\t@param numpy array recentBehavior\n\t@param numpy array usuallyBehavior\n\treturn bool\n\t\"\"\"\n\tdef behaviorDiffCos(self, recentBehavior, usuallyBehavior):\n\t\tthresholdProbability = rc.config.getfloat('classification','thresholdProbability')\n\t\t#clean data, remove noise, means everything < thresholdProbability, because this were rare events \n\t\tusuallyBehavior[usuallyBehavior < thresholdProbability] = 0.0\n\t\tusuallyBehavior[usuallyBehavior >= thresholdProbability] = 1.0\n\t\tlenghtRB = np.linalg.norm(recentBehavior)\n\t\tresult = False\t#per default thers no strange behavior\t\n\t\t#checks, if senior normally has water consumption\n\t\tif(lenghtRB == 0.0):\n\t\t\tnorms = []\n\t\t\tfor behavior in usuallyBehavior:\n\t\t\t\tnorms.append(np.linalg.norm(behavior))\n\t\t\tif(max(norms) > lenghtRB):\n\t\t\t\tresult = True\n\t\t#otherwise checks, if senior normally has water consumption over long duration\t\t\n\t\telse:\n\t\t\tsimilarity = [] # otherwise\n\t\t\tfor behavior in usuallyBehavior:\n\t\t\t\tif(np.linalg.norm(behavior) > 0.0):\n\t\t\t\t\tsimilarity.append(self.cosSimilarity(recentBehavior,behavior))\n\t\t\t\telse: #avoid division through zero\n\t\t\t\t\tsimilarity.append(0.0)\n\t\t\tif(max(similarity) < self.thresholdCosSimilarity):\n\t\t\t\tresult = True\n\t\treturn result\n\n\t\n\t\"\"\"\n\tcalculate cosinus similarity between two vectors\n\t@param numpy 
array\n\t@param numpy array\n\t@return float\n\t\"\"\"\n\tdef cosSimilarity(self, vector1, vector2):\n\t\treturn np.dot(vector1,vector2) / (np.linalg.norm(vector1) * np.linalg.norm(vector2))\n\t\t#return sp.spatial.distance.cosine(vector1, vector2)\n\t\n\t\"\"\"\n\tcalculate euclidian distance between two points\n\t@param numpy array\n\t@param numpy array\n\t@return float \n\t\"\"\"\n\tdef distance(self, pt1, pt2):\n\t\tpass\n\t\n","sub_path":"program/classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":3186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"138038254","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('customers', '0024_auto_20170412_1944'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='shoppingcart',\n name='voucher',\n field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, blank=True, to='customers.Voucher'),\n ),\n ]\n","sub_path":"customers/migrations/0025_auto_20170419_0421.py","file_name":"0025_auto_20170419_0421.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"420154377","text":"from lookcells import *\nfrom cell import *\n\nclass drone(cell):\n\n def __init__(self,lattice,x,y):\n cell.__init__(self,lattice,x,y)\n self.species = 'drone'\n lattice.cellMap[x][y] = True\n self.birthometer = 0\n self.deathsdoor = 0.0\n \n def eatFood(self,lattice,foodList):\n \n if lattice.foodMap[self.x][self.y]:\n self.hunger = 0.0\n lattice.foodMap[self.x][self.y] = False\n for tik in range(len(foodList.foodList)):\n if foodList.foodList[tik].x == self.x and foodList.foodList[tik].y == self.y:\n foodList.foodList.pop(tik)\n break \n\n def updateCell(self,lattice,foodList,cellList):\n \n self.eatFood(lattice,foodList)\n self.moveCell(lattice)\n self.hunger = self.hunger + 0.01\n self.birthometer = self.birthometer - 1\n \n if self.birthometer < 1:\n self.birthometer = 0\n if self.hunger > 1.0:\n self.deathsdoor = self.deathsdoor + 0.1\n\n def printCell(self,screen,lattice):\n \n green = round((1-self.hunger)*255) \n blue = round(self.hunger*255)\n green = keepInBoundaries(0,green,255)\n blue = keepInBoundaries(0,blue,255)\n\n pygame.draw.circle(screen,(0,green,blue),(lattice.points[self.x][self.y].location[0],lattice.points[self.x][self.y].location[1]),10)\n\n\n","sub_path":"drone.py","file_name":"drone.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"370423753","text":"# -*- coding: utf-8 -*-\n\n# Copyright 2015 LIP - Lisbon\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\nimport testtools\n\nfrom os_restfulcli.client import cli\nfrom os_restfulcli.client.controller import ControllerResource\nfrom os_restfulcli.driver import parsers\nfrom os_restfulcli.tests.test_integration import configure_env\n\nimport os_restfulcli.tests\n\n\nclass TestIntegrationRolesommand(os_restfulcli.tests.TestCaseCommandLine):\n\n def setUp(self):\n super(TestIntegrationRolesommand, self).setUp()\n self.user_id = \"3a8b1c4387664d9488dee661df025b80\"\n self.project_id = \"484d3a7eeb4f4462b329c1d0463cf324\"\n self.anotherrole = \"e80fa7ab6cfa45d39be195878350853d\"\n self.admin_role=\"1f53a6862bdb4625930a1083bc675c99\"\n configure_env(self.project_id)\n\n def test_rol_show(self):\n result = self.runner.invoke(cli.roles, ['show','admin'])\n self.assertEqual(result.exit_code,0)\n self.assertIsNone(result.exception)\n\n def test_rol_show_wrong(self):\n result = self.runner.invoke(cli.roles, ['show','adminee'])\n self.assertEqual(result.exit_code,2)\n # self.assertIsNone(result.exception)\n\n def test_rol_list(self):\n result = self.runner.invoke(cli.roles, ['list'])\n self.assertEqual(result.exit_code,0)\n self.assertIsNone(result.exception)\n\n def test_grant_list(self):\n result = self.runner.invoke(cli.roles, ['list_grants', 'admin', 'admin'])\n self.assertEqual(result.exit_code,0)\n self.assertIsNone(result.exception)\n\n def test_roles_link_unlink(self):\n result = self.runner.invoke(cli.roles, ['create_grant', 'demo', 'demo','anotherrole'])\n self.assertEqual(result.exit_code,0)\n self.assertIsNone(result.exception)\n result = self.runner.invoke(cli.roles, ['delete_grant', 'demo', 'demo','anotherrole'])\n self.assertEqual(result.exit_code,0)\n self.assertIsNone(result.exception)\n\n #\n # def test_user_create_delete(self):\n # result = self.runner.invoke(cli.users, ['create', '--attributes={\"name\":\"name53\"}'])\n # self.assertEqual(result.exit_code,0)\n # self.assertIsNone(result.exception)\n # #delete\n # #id = str(result.output_bytes).strip().split(\"\\n\")[2].split(\"|\")[5].strip()\n # ids = parsers.json_load_from_client(result.output_bytes)\n # for id in ids:\n # result_delete = self.runner.invoke(cli.users, ['delete', '--id=%s' % id])\n # self.assertEqual(result_delete.exit_code,0)\n # self.assertIsNone(result_delete.exception)\n #\n # def test_user_create_delete_bunch(self):\n # result = self.runner.invoke(cli.users, ['create', '--file=../user_json_file_example.json'])\n # self.assertEqual(result.exit_code,0)\n # self.assertIsNone(result.exception)\n # #delete\n # ids = parsers.json_load_from_client(result.output_bytes)\n # # var = \"[{u'project': {u'description': u'', u'links': {u'self': u'http://localhost/v3/projects/e2b42b2aa5d5444f833b94d973571b63'}, u'enabled': True, u'id': u'e2b42b2aa5d5444f833b94d973571b63', u'parent_id': None, u'domain_id': u'default', u'name': u'name3'}}]\"\n # # result_dict = parsers.json_load_from_os_string(var)\n # for id in ids:\n # result_delete = self.runner.invoke(cli.users, ['delete', '--id=%s' % id])\n # self.assertEqual(result_delete.exit_code,0)\n # self.assertIsNone(result_delete.exception)\n\n\n def test_list_by_project(self):\n result = self.runner.invoke(cli.roles, ['grants_by_project', 'admin'])\n self.assertEqual(result.exit_code,0)\n self.assertIsNone(result.exception)\n\n def test_list_by_user(self):\n result = self.runner.invoke(cli.roles, ['grants_by_user', 'admin'])\n self.assertEqual(result.exit_code,0)\n 
self.assertIsNone(result.exception)\n\n# class TestIntegrationUserController(testtools.TestCase):\n#\n#     def setUp(self):\n#         super(TestIntegrationUserController, self).setUp()\n#         self.project_id = \"484d3a7eeb4f4462b329c1d0463cf324\"\n#         configure_env(self.project_id)\n#         self.controller = ControllerResource('projects')\n#\n#     def test_index(self):\n#         result = self.controller.index()\n#         self.assertIsNotNone(result)\n#\n#     # def test_show(self):\n#     #     result = self.controller.index()\n#     #     self.assertIsNotNone(result)\n#\n#","sub_path":"os_restfulcli/tests/test_integration/test_roles.py","file_name":"test_roles.py","file_ext":"py","file_size_in_byte":4929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"346961493","text":"import time\nqueryTemplate = \"INSERT INTO `Przyjazd` (`kierunek`, `godzina`, `kolejność`, `id_linia`, `id_przyst`) VALUES ({0},'{1}',{2},{3},{4});\\n\"\nactfile = open(\"insert_przyjazd.sql\",\"at\")\nfor linia in range(1,4):\n    for przystanek in range(1 + linia,16 + linia):\n        for cykl in range(5):\n            acttime = time.localtime(time.mktime((1970, 1, 1, 9 + cykl, linia + przystanek, 0, 0, 0, 0)))\n            newQuery = queryTemplate.format(0,time.strftime(\"%H:%M:%S\",acttime),przystanek - linia,linia,przystanek)\n            actfile.write(newQuery)\n    for przystanek in range(15 + linia,1 + linia,-1):\n        for cykl in range(5):\n            acttime = time.localtime(time.mktime((1970, 1, 1, 9 + cykl, linia + ( 15 + linia - przystanek) + 30, 0, 0, 0, 0)))\n            newQuery = queryTemplate.format(1,time.strftime(\"%H:%M:%S\",acttime),przystanek - linia,linia,przystanek)\n            actfile.write(newQuery)","sub_path":"project/src/sql/generate_przyjazd_mysql.py","file_name":"generate_przyjazd_mysql.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"376893652","text":"import sys\n\n\nprint (len(sys.argv))\nprint (sys.argv)\nfor arg in sys.argv:\n    print (arg)\nprint (\"Hello\" * 2)\n\nfor x in range(10, 20):\n    print (x)\nfor char in \"Python\":\n    if char == \"h\": continue\n    else: print (\"Current Letter: \",char)\n\n# the while loop\nvar = 10\nwhile var > 0:\n    print (\"Current loop variable is \", var)\n    var -= 1\nnumber = 2\nprimeCount = 0\n\n\n\nwhile number < 100:\n    j = 2\n    while j <= (number/j):\n        if number % j == 0:\n            break\n        j = j + 1\n    if (j >= number/j):\n        print (number,\" is a prime\")\n        primeCount += 1\n\n    number += 1\n\nprint (\"Primes under 100 = \", primeCount + 1)\n\n# Defining a function\ndef mySweetFunction():\n    for x in range(0, 12):\n        print('Log from mySweetFunction')\n\n# calling mySweet def function\nmySweetFunction()\n    \n","sub_path":"Old Memories/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"79399431","text":"'''\r\nCreated on 2018-11-19\r\nSolve linear or curved regression with a neural network that has one hidden layer of 10 units\r\n'''\r\n\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n# number of features\r\nfeaturenum = 1\r\n# number of data points\r\ndatano = 100\r\n\r\ntrain_X = np.linspace(-1, 1, datano)[:,np.newaxis]\r\nprint(train_X.shape)\r\n# linear y\r\n# train_Y = (3 * train_X + 2)+np.random.ranf(datano).T.reshape(datano,1)\r\n# curved y\r\ntrain_Y = np.square(train_X) + np.random.normal(0,0.1,train_X.shape)\r\nprint(train_Y.shape)\r\n\r\n# x --- 100:1\r\nx = tf.placeholder(tf.float32,[None,featurenum])\r\ny = tf.placeholder(tf.float32,[None,featurenum])\r\n\r\n# fully connected; shape afterwards 100:10\r\nw1 = tf.Variable(tf.random_normal([featurenum, 10])) # hidden layer with 10 units\r\nb1 = tf.Variable(tf.zeros([1, featurenum]))\r\ny1 = tf.matmul(x, w1)+b1\r\nprediction1 = tf.nn.relu(y1) # hidden layer output\r\n\r\n# output layer 10:1\r\nwout = tf.Variable(tf.random_normal([10,featurenum]))\r\nbout = tf.Variable(tf.zeros([1,1]))\r\nyout = tf.matmul(prediction1,wout)+bout\r\n\r\n# loss = tf.reduce_mean(tf.square(y - yout))\r\nloss = tf.losses.mean_squared_error(y, yout)\r\n\r\ntrain_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss) \r\n\r\nwith tf.Session() as sess:\r\n    sess.run(tf.global_variables_initializer())  # tf.initialize_all_variables() was removed from TF1\r\n    for i in range(100000):\r\n        _ ,lossout,prediction1_out= sess.run([train_op,loss,yout],feed_dict={x:train_X,y:train_Y})\r\n        if i % 10000 == 0:\r\n            print(lossout)\r\n    plt.scatter(train_X,train_Y)\r\n    plt.plot(train_X,prediction1_out)\r\n    plt.show()","sub_path":"tf/_01_base/_06_nn.py","file_name":"_06_nn.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"357211358","text":"# Welcome to my calculations program\r\n# Alex Stomberg\r\n    # Period 7 #\r\n        # 10/5/2018\r\n            #year_income\r\n\r\nprint(\"welcome to my Tax calculations program\")  # assigning to print would shadow the builtin and break every call below\r\n#asks the user their income\r\nyear_income = int(input(\"\\nWhat is your yearly income:\"))\r\n\r\n\r\n#this is the if statement that decides if the income is greater than or equal to 0 but less than or equal to 8925\r\nif year_income >= 0 and year_income <= 8925:\r\n    print(\"Based on your annual income of ${0:.2f}\".format(year_income),\r\n          \"you owe the IRS $0 Please make your checks payable to Mr. Van Diepen.\")\r\n\r\n#elif statements that print the same message for each bracket; the program falls through to an elif when the earlier conditions don't match\r\nelif year_income > 8925 and year_income <= 36250:\r\n    print(\"Based on your annual income of ${0:.2f}\".format(year_income),\r\n          \"you owe the IRS ${0:.2f}\".format((year_income*0.15)+892.50), \"Please make your checks payable to Mr. Van Diepen.\")\r\n\r\nelif year_income > 36250 and year_income <= 87850:\r\n    print(\"Based on your annual income of ${0:.2f}\".format(year_income),\r\n          \"you owe the IRS ${0:.2f}\".format((year_income*0.25)+4991.25), \"Please make your checks payable to Mr. Van Diepen.\")\r\n\r\nelif year_income > 87850 and year_income <= 183250:\r\n    print(\"Based on your annual income of ${0:.2f}\".format(year_income),\r\n          \"you owe the IRS ${0:.2f}\".format((year_income*0.28)+17891.25), \"Please make your checks payable to Mr. Van Diepen.\")\r\n\r\nelif year_income > 183250 and year_income <= 298350:\r\n    print(\"Based on your annual income of ${0:.2f}\".format(year_income),\r\n          \"you owe the IRS ${0:.2f}\".format((year_income*0.33)+44603.25), \"Please make your checks payable to Mr. Van Diepen.\")\r\n\r\nelif year_income > 298350 and year_income <= 400000:\r\n    print(\"Based on your annual income of ${0:.2f}\".format(year_income),\r\n          \"you owe the IRS ${0:.2f}\".format((year_income*0.35)+82586.25), \"Please make your checks payable to Mr. Van Diepen.\")\r\n\r\nelif year_income > 400000:\r\n    print(\"Based on your annual income of ${0:.2f}\".format(year_income),\r\n          \"you owe the IRS ${0:.2f}\".format((year_income*0.396)+118163.75), \"Please make your checks payable to Mr. Van Diepen.\")\r\n\r\nelse:\r\n    print(\"You entered an invalid number. 
Please run the program again\")","sub_path":"Python Programs/Tax TIme.py","file_name":"Tax TIme.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"17584187","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\n tkGAME - all-in-one Game library for Tkinter\r\n\r\n Generic Game Grid and subcomponents\r\n\r\n Copyright (c) 2014+ Raphaël Seban \r\n\r\n This program is free software: you can redistribute it and/or\r\n modify it under the terms of the GNU General Public License as\r\n published by the Free Software Foundation, either version 3 of\r\n the License, or (at your option) any later version.\r\n\r\n This program is distributed in the hope that it will be useful,\r\n but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\r\n General Public License for more details.\r\n\r\n You should have received a copy of the GNU General Public License\r\n along with this program.\r\n\r\n If not, see http://www.gnu.org/licenses/\r\n\"\"\"\r\n\r\n# lib imports\r\n\r\nimport tkinter as TK\r\n\r\n\r\n# module utility function\r\n\r\ndef normalize(value, minimum=1):\r\n r\"\"\"\r\n normalizes value along constraints;\r\n\r\n returns UINT of @value or at least @minimum;\r\n \"\"\"\r\n\r\n return max(abs(int(minimum)), abs(int(value)))\r\n\r\n\r\n# end def\r\n\r\n\r\n# main class def\r\n\r\nclass GameGrid(TK.Canvas):\r\n r\"\"\"\r\n Generic Game Grid component;\r\n \"\"\"\r\n\r\n # background color\r\n\r\n BGCOLOR = \"white\"\r\n\r\n # foreground color\r\n\r\n FGCOLOR = \"grey\"\r\n\r\n # nb of rows and columns in grid\r\n\r\n ROWS = 3\r\n\r\n COLUMNS = 3\r\n\r\n # thickness of a line stroke\r\n\r\n THICKNESS = 8 # pixels\r\n\r\n # default global config values\r\n\r\n CONFIG = {\r\n\r\n \"background\": BGCOLOR,\r\n\r\n \"highlightthickness\": 0,\r\n\r\n \"width\": 500, # pixels\r\n\r\n \"height\": 500, # pixels\r\n\r\n } # end of CONFIG\r\n\r\n def __init__(self, master, **kw):\r\n\r\n # member inits\r\n\r\n self.CONFIG = self.CONFIG.copy()\r\n\r\n self.CONFIG.update(kw)\r\n\r\n # super class inits\r\n\r\n TK.Canvas.__init__(self, master)\r\n\r\n self.configure(**self._only_tk(self.CONFIG))\r\n\r\n # public members\r\n\r\n self.rows = kw.get(\"rows\", self.ROWS)\r\n\r\n self.columns = kw.get(\"columns\", self.COLUMNS)\r\n\r\n self.thickness = kw.get(\"thickness\", self.THICKNESS)\r\n\r\n self.bgcolor = kw.get(\"bgcolor\", self.BGCOLOR)\r\n\r\n self.fgcolor = kw.get(\"fgcolor\", self.FGCOLOR)\r\n\r\n # private member inits\r\n\r\n self.__tk_owner = master\r\n\r\n self.__tiles = dict()\r\n\r\n self.__matrix = GridMatrix(self.rows, self.columns)\r\n\r\n self.__cell_size = GridCellSize(self)\r\n\r\n # widget inits\r\n\r\n self.init_widget(**self.CONFIG)\r\n\r\n # end def\r\n\r\n def get_tiles(self):\r\n return self.__tiles\r\n\r\n def _only_tk(self, kw):\r\n r\"\"\"\r\n private method def;\r\n\r\n filters external keywords to suit tkinter init options;\r\n\r\n returns filtered dict() of keywords;\r\n \"\"\"\r\n\r\n # inits\r\n\r\n _dict = dict()\r\n\r\n # $ 2014-03-24 RS $\r\n # Caution:\r\n # TK widget *MUST* be init'ed before calling _only_tk() /!\\\r\n # self.configure() needs self.tk to work well\r\n\r\n if hasattr(self, \"tk\") and hasattr(self, \"configure\"):\r\n\r\n _attrs = set(self.configure().keys()) & set(kw.keys())\r\n\r\n for _key in _attrs:\r\n _dict[_key] = kw.get(_key)\r\n\r\n # end for\r\n\r\n # end 
if\r\n\r\n return _dict\r\n\r\n # end def\r\n\r\n @property\r\n def cell_size(self):\r\n r\"\"\"\r\n returns internal GridCellSize object instance;\r\n \"\"\"\r\n\r\n return self.__cell_size\r\n\r\n # end def\r\n\r\n def clear_all(self, tk_event=None, *args, **kw):\r\n r\"\"\"\r\n clears up all critical members;\r\n \"\"\"\r\n\r\n # clear grid\r\n\r\n self.clear_grid()\r\n\r\n # clear tiles collection\r\n\r\n self.clear_tiles()\r\n\r\n # clear matrix\r\n\r\n self.matrix.reset_matrix()\r\n\r\n # end def\r\n\r\n def clear_grid(self, tk_event=None, *args, **kw):\r\n r\"\"\"\r\n clears up grid canvas entirely;\r\n \"\"\"\r\n\r\n # clear grid\r\n\r\n self.delete(TK.ALL)\r\n\r\n # end def\r\n\r\n def clear_tiles(self, tk_event=None, *args, **kw):\r\n r\"\"\"\r\n clears up tiles collection entirely;\r\n \"\"\"\r\n\r\n # clear tiles\r\n\r\n self.tiles.clear()\r\n\r\n # end def\r\n\r\n @property\r\n def columns(self):\r\n r\"\"\"\r\n returns grid's current nb of columns;\r\n \"\"\"\r\n\r\n return self.__columns\r\n\r\n # end def\r\n\r\n @columns.setter\r\n def columns(self, value):\r\n\r\n self.__columns = normalize(value)\r\n\r\n # end def\r\n\r\n @columns.deleter\r\n def columns(self):\r\n\r\n del self.__columns\r\n\r\n # end def\r\n\r\n def get_coords(self, row, column, centered=False):\r\n r\"\"\"\r\n calculates canvas (x, y) coordinates from grid matrix\r\n (row, column) pair;\r\n \"\"\"\r\n\r\n # get (left, top) coordinates\r\n\r\n _x, _y = self.cell_size.xy_left_top(row, column)\r\n\r\n # center coords?\r\n\r\n if centered:\r\n _x += self.cell_size.width // 2\r\n\r\n _y += self.cell_size.height // 2\r\n\r\n # end if\r\n\r\n # new coordinates\r\n\r\n return (_x, _y)\r\n\r\n # end def\r\n\r\n @property\r\n def grid_height(self):\r\n r\"\"\"\r\n returns grid's height;\r\n \"\"\"\r\n\r\n return self.winfo_reqheight()\r\n\r\n # end def\r\n\r\n @property\r\n def grid_size(self):\r\n r\"\"\"\r\n returns (real_width, real_height) pair;\r\n \"\"\"\r\n\r\n # must adjust along thickness\r\n\r\n return (\r\n\r\n (self.grid_width - self.half_high),\r\n\r\n (self.grid_height - self.half_high)\r\n )\r\n\r\n # end def\r\n\r\n @property\r\n def grid_width(self):\r\n r\"\"\"\r\n returns grid's width;\r\n \"\"\"\r\n\r\n return self.winfo_reqwidth()\r\n\r\n # end def\r\n\r\n @property\r\n def half_high(self):\r\n r\"\"\"\r\n returns half thickness, high value;\r\n \"\"\"\r\n\r\n return round(0.1 + self.thickness / 2)\r\n\r\n # end def\r\n\r\n @property\r\n def half_low(self):\r\n r\"\"\"\r\n returns half thickness, low value;\r\n \"\"\"\r\n\r\n return self.thickness // 2\r\n\r\n # end def\r\n\r\n def init_widget(self, **kw):\r\n r\"\"\"\r\n widget's main inits;\r\n \"\"\"\r\n\r\n # put your own code in subclass\r\n\r\n pass\r\n\r\n # end def\r\n\r\n def is_full(self):\r\n r\"\"\"\r\n evaluates available room in grid;\r\n \"\"\"\r\n\r\n return len(self.tiles) >= self.max_tiles\r\n\r\n # end def\r\n\r\n def is_tile(self, row, column):\r\n r\"\"\"\r\n determines whether canvas item at (row, column) is of\r\n tile type or not;\r\n \"\"\"\r\n\r\n # inits coordinates\r\n\r\n _x, _y = self.get_coords(row, column, centered=True)\r\n\r\n # get canvas item id\r\n\r\n _item_id = self.find_overlapping(_x, _y, _x, _y)\r\n\r\n # is a tile?\r\n\r\n return bool(_item_id in self.tiles)\r\n\r\n # end def\r\n\r\n @property\r\n def matrix(self):\r\n r\"\"\"\r\n returns internal matrix object;\r\n \"\"\"\r\n\r\n return self.__matrix\r\n\r\n # end def\r\n\r\n @matrix.setter\r\n def matrix(self, value):\r\n self.__matrix = 
value\r\n\r\n @property\r\n def max_tiles(self):\r\n r\"\"\"\r\n returns maximum number of tiles currently admitted;\r\n \"\"\"\r\n\r\n return self.rows * self.columns\r\n\r\n # end def\r\n\r\n @property\r\n def owner(self):\r\n r\"\"\"\r\n returns ref to private tk_owner;\r\n \"\"\"\r\n\r\n return self.__tk_owner\r\n\r\n # end def\r\n\r\n def register_tile(self, tile_id, tile_object, raise_error=False):\r\n r\"\"\"\r\n registers new tile in tiles dict();\r\n \"\"\"\r\n\r\n # new tile id?\r\n\r\n if tile_id not in self.tiles:\r\n\r\n # register tile object\r\n\r\n self.tiles[tile_id] = tile_object\r\n\r\n elif raise_error:\r\n\r\n # should *NOT* override already existing tile\r\n\r\n raise KeyError(\r\n\r\n \"tile id '{tid}' is already registered.\"\r\n\r\n .format(tid=tile_id)\r\n )\r\n\r\n # end if\r\n\r\n # end def\r\n\r\n def remove_tile(self, tile_id):\r\n r\"\"\"\r\n removes silently if exists;\r\n \"\"\"\r\n\r\n self.tiles.pop(tile_id, None)\r\n\r\n # end def\r\n\r\n def reset_grid(self, tk_event=None, *args, **kw):\r\n r\"\"\"\r\n clears up and redraws grid entirely;\r\n \"\"\"\r\n\r\n # clear all\r\n\r\n self.clear_all()\r\n\r\n # canvas dims\r\n\r\n _grid_width, _grid_height = self.grid_size\r\n\r\n # point of origin\r\n\r\n _x0, _y0 = self.xy_origin\r\n\r\n # thickness\r\n\r\n _thickness = self.thickness\r\n\r\n # foreground color\r\n\r\n _fg = self.fgcolor\r\n\r\n # draw rectangle\r\n\r\n self.create_rectangle(\r\n\r\n _x0, _y0, _grid_width, _grid_height,\r\n\r\n outline=_fg, width=_thickness,\r\n )\r\n\r\n # draw vertical lines\r\n\r\n for _column in range(1, self.columns):\r\n _x = _x0 + _column * (self.cell_size.width + _thickness)\r\n\r\n self.create_line(\r\n\r\n _x, 0, _x, _grid_height,\r\n\r\n fill=_fg, width=_thickness,\r\n )\r\n\r\n # end for\r\n\r\n # draw horizontal lines\r\n\r\n for _row in range(1, self.rows):\r\n _y = _y0 + _row * (self.cell_size.height + _thickness)\r\n\r\n self.create_line(\r\n\r\n 0, _y, _grid_width, _y,\r\n\r\n fill=_fg, width=_thickness,\r\n )\r\n\r\n # end for\r\n\r\n # end def\r\n\r\n @property\r\n def rows(self):\r\n r\"\"\"\r\n returns grid's current nb of rows;\r\n \"\"\"\r\n\r\n return self.__rows\r\n\r\n # end def\r\n\r\n @rows.setter\r\n def rows(self, value):\r\n\r\n self.__rows = normalize(value)\r\n\r\n # end def\r\n\r\n @rows.deleter\r\n def rows(self):\r\n\r\n del self.__rows\r\n\r\n # end def\r\n\r\n @property\r\n def thickness(self):\r\n r\"\"\"\r\n returns grid's line stroke thickness;\r\n \"\"\"\r\n\r\n return self.__thickness\r\n\r\n # end def\r\n\r\n @thickness.setter\r\n def thickness(self, value):\r\n\r\n self.__thickness = normalize(value, minimum=0)\r\n\r\n # end def\r\n\r\n @thickness.deleter\r\n def thickness(self):\r\n\r\n del self.__thickness\r\n\r\n # end def\r\n\r\n @property\r\n def tiles(self):\r\n r\"\"\"\r\n returns internal tiles collection;\r\n \"\"\"\r\n\r\n return self.__tiles\r\n\r\n # end def\r\n\r\n @property\r\n def xy_origin(self):\r\n r\"\"\"\r\n returns (x0, y0) point of origin of grid drawings;\r\n \"\"\"\r\n\r\n # must adjust along thickness\r\n\r\n _x0 = _y0 = self.half_low\r\n\r\n return (_x0, _y0)\r\n\r\n # end def\r\n\r\n @property\r\n def xy_center(self):\r\n r\"\"\"\r\n returns (x, y) coordinates of canvas' center point;\r\n \"\"\"\r\n\r\n return (self.grid_width // 2, self.grid_height // 2)\r\n\r\n # end def\r\n\r\n\r\n# end class GameGrid\r\n\r\n\r\n# subcomponent class def\r\n\r\nclass GridAnimation(TK.Frame):\r\n r\"\"\"\r\n GridAnimation - GameGrid subcomponent;\r\n \"\"\"\r\n\r\n def 
__init__(self, master=None):\r\n\r\n # super class inits\r\n\r\n TK.Frame.__init__(self, master)\r\n\r\n # public member inits\r\n\r\n self.owner = master\r\n\r\n # private member inits\r\n\r\n self.__pid = 0\r\n\r\n self.__animation_kw = dict()\r\n\r\n self.__callback = None\r\n\r\n self.__callback_args = tuple()\r\n\r\n self.__callback_kw = dict()\r\n\r\n # end def\r\n\r\n @property\r\n def keywords(self):\r\n r\"\"\"\r\n returns internal animation's keywords;\r\n \"\"\"\r\n\r\n return self.__animation_kw\r\n\r\n # end def\r\n\r\n def register(self, callback, *args, **kw):\r\n r\"\"\"\r\n registers callback function/method with its own\r\n arguments and keywords;\r\n\r\n returns True on success, False otherwise;\r\n \"\"\"\r\n\r\n if callable(callback):\r\n\r\n # init callback\r\n\r\n self.__callback = callback\r\n\r\n # init args and kw\r\n\r\n self.__callback_args = args\r\n\r\n self.__callback_kw = kw\r\n\r\n # success\r\n\r\n return True\r\n\r\n else:\r\n\r\n raise TypeError(\r\n\r\n \"callback object *MUST* be a callable one.\"\r\n )\r\n\r\n # end if - callable\r\n\r\n # failure\r\n\r\n return False\r\n\r\n # end def\r\n\r\n def resume(self):\r\n r\"\"\"\r\n resumes animation with current param values;\r\n\r\n returns newly created process id (pid) on success,\r\n integer zero (0 - no pid) otherwise;\r\n \"\"\"\r\n\r\n return self.run_sequencer()\r\n\r\n # end def\r\n\r\n def run_sequencer(self, animation_kw=None):\r\n r\"\"\"\r\n runs animation loop itself with some cool features;\r\n\r\n returns newly created process id (pid) on success,\r\n integer zero (0 - no pid) otherwise;\r\n \"\"\"\r\n\r\n # stops previous pending process, if any;\r\n # resets self.__pid = 0 whatever happens;\r\n\r\n self.stop()\r\n\r\n # first of all\r\n\r\n if callable(self.__callback):\r\n\r\n # param controls\r\n\r\n if isinstance(animation_kw, dict):\r\n\r\n # set new keywords\r\n\r\n self.__animation_kw = _anim_kw = animation_kw\r\n\r\n else:\r\n\r\n # get previously stored keywords\r\n\r\n _anim_kw = self.__animation_kw\r\n\r\n # end if - animation_kw\r\n\r\n # param inits\r\n\r\n _sequence = _anim_kw.get(\"sequence\")\r\n\r\n # indexed and iterable sequence?\r\n\r\n if isinstance(_sequence, (list, tuple)):\r\n\r\n # get other inits\r\n\r\n _interval = int(_anim_kw.get(\"interval\", 100))\r\n\r\n _step = int(_anim_kw.get(\"step\", 0))\r\n\r\n # should we run a new step?\r\n\r\n if _step < len(_sequence):\r\n # update values in callback keywords\r\n\r\n self.__callback_kw.update(\r\n\r\n value=_sequence[_step]\r\n )\r\n\r\n # call callback with args and kw\r\n\r\n self.__callback(\r\n\r\n *self.__callback_args, **self.__callback_kw\r\n )\r\n\r\n # schedule next step\r\n\r\n self.__animation_kw[\"step\"] = _step + 1\r\n\r\n # go further\r\n\r\n self.__pid = self.after(\r\n\r\n _interval, self.run_sequencer\r\n )\r\n\r\n # end if - new step\r\n\r\n # end if - sequence\r\n\r\n # end if - callable\r\n\r\n # current process id (pid) or 0 on failure\r\n\r\n return self.__pid\r\n\r\n # end def\r\n\r\n def start(self, interval=100, step=0, sequence=None):\r\n r\"\"\"\r\n starts animation loop along params;\r\n\r\n returns newly created process id (pid) on success,\r\n integer zero (0 - no pid) otherwise;\r\n \"\"\"\r\n\r\n return self.run_sequencer(\r\n\r\n dict(interval=interval, step=step, sequence=sequence)\r\n )\r\n\r\n # end def\r\n\r\n def start_after(self, delay=500, interval=100, step=0, sequence=None):\r\n r\"\"\"\r\n runs deferred animation after @delay (in milliseconds);\r\n\r\n returns newly 
created process id (pid) of deferred call;\r\n \"\"\"\r\n\r\n self.__pid = self.after(\r\n\r\n delay, self.start, interval, step, sequence\r\n )\r\n\r\n return self.__pid\r\n\r\n # end def\r\n\r\n def stop(self, pid=None):\r\n r\"\"\"\r\n stops a deferred process along @pid or along internal\r\n pid if omitted;\r\n\r\n no return value (void);\r\n \"\"\"\r\n\r\n # specific pid to cancel?\r\n\r\n if pid:\r\n\r\n self.after_cancel(pid)\r\n\r\n # internal pid\r\n\r\n else:\r\n try:\r\n\r\n self.after_cancel(self.__pid)\r\n\r\n self.__pid = 0\r\n\r\n except ValueError:\r\n pass\r\n\r\n # end if - pid\r\n\r\n # end def\r\n\r\n\r\n# end class GridAnimation\r\n\r\n\r\n# subcomponent class def\r\n\r\nclass GridCellSize:\r\n r\"\"\"\r\n GridCellSize - GameGrid subcomponent;\r\n \"\"\"\r\n\r\n def __init__(self, grid_owner):\r\n\r\n # private member inits\r\n\r\n self.__tk_owner = grid_owner\r\n\r\n self.__width = None\r\n\r\n self.__height = None\r\n\r\n # end def\r\n\r\n def _real_size(self, size, count, thickness):\r\n r\"\"\"\r\n adjusts calculations to meet real GridCellSize size;\r\n \"\"\"\r\n\r\n # adjust to correct size\r\n\r\n _size = size - (count + 1) * thickness\r\n\r\n # return real size\r\n\r\n return round(abs(_size // count))\r\n\r\n # end def\r\n\r\n @property\r\n def height(self):\r\n r\"\"\"\r\n gets GridCellSize's real height;\r\n \"\"\"\r\n\r\n # missing pre-computed dimension?\r\n\r\n if not self.__height:\r\n # get cell's real height dimension\r\n\r\n self.__height = self._real_size(\r\n\r\n size=self.owner.grid_height,\r\n\r\n count=self.owner.rows,\r\n\r\n thickness=self.owner.thickness,\r\n )\r\n\r\n # end if\r\n\r\n return self.__height\r\n\r\n # end def\r\n\r\n @property\r\n def owner(self):\r\n r\"\"\"\r\n returns ref to private tk_owner;\r\n \"\"\"\r\n\r\n return self.__tk_owner\r\n\r\n # end def\r\n\r\n @property\r\n def size(self):\r\n r\"\"\"\r\n returns a (width, height) pair;\r\n \"\"\"\r\n\r\n return (self.width, self.height)\r\n\r\n # end def\r\n\r\n @property\r\n def size_hxw(self):\r\n r\"\"\"\r\n returns a (height, width) pair;\r\n \"\"\"\r\n\r\n return (self.height, self.width)\r\n\r\n # end def\r\n\r\n @property\r\n def size_wxh(self):\r\n r\"\"\"\r\n returns a (width, height) pair;\r\n \"\"\"\r\n\r\n return (self.width, self.height)\r\n\r\n # end def\r\n\r\n @property\r\n def width(self):\r\n r\"\"\"\r\n gets GridCellSize's real width;\r\n \"\"\"\r\n\r\n # missing pre-computed dimension?\r\n\r\n if not self.__width:\r\n # get cell's real width dimension\r\n\r\n self.__width = self._real_size(\r\n\r\n size=self.owner.grid_width,\r\n\r\n count=self.owner.columns,\r\n\r\n thickness=self.owner.thickness,\r\n )\r\n\r\n # end if\r\n\r\n return self.__width\r\n\r\n # end def\r\n\r\n def x_center(self, column):\r\n r\"\"\"\r\n returns only centered x coordinate;\r\n \"\"\"\r\n\r\n return self.x_left(column) + self.width // 2\r\n\r\n # end def\r\n\r\n def x_left(self, column):\r\n r\"\"\"\r\n returns only x_left coordinate;\r\n \"\"\"\r\n\r\n # rebind location\r\n\r\n _column = min(abs(int(column)), self.owner.columns)\r\n\r\n _thickness = self.owner.thickness\r\n\r\n # make calculations\r\n\r\n _x = _thickness + _column * (self.width + _thickness)\r\n\r\n # new coordinate\r\n\r\n return _x\r\n\r\n # end def\r\n\r\n def xy_center(self, row, column):\r\n r\"\"\"\r\n returns (x, y) centered coordinates;\r\n \"\"\"\r\n\r\n return (self.x_center(column), self.y_center(row))\r\n\r\n # end def\r\n\r\n def xy_left_top(self, row, column):\r\n r\"\"\"\r\n returns (x_left, 
y_top) coordinates;\r\n \"\"\"\r\n\r\n return (self.x_left(column), self.y_top(row))\r\n\r\n # end def\r\n\r\n def y_center(self, row):\r\n r\"\"\"\r\n returns only centered y coordinate;\r\n \"\"\"\r\n\r\n return self.y_top(row) + self.height // 2\r\n\r\n # end def\r\n\r\n def y_top(self, row):\r\n r\"\"\"\r\n returns only y_top coordinate;\r\n \"\"\"\r\n\r\n # rebind location\r\n\r\n _row = min(abs(int(row)), self.owner.rows)\r\n\r\n _thickness = self.owner.thickness\r\n\r\n # make calculations\r\n\r\n _y = _thickness + _row * (self.height + _thickness)\r\n\r\n # new coordinate\r\n\r\n return _y\r\n\r\n # end def\r\n\r\n\r\n# end class GridCellSize\r\n\r\n\r\n# subcomponent class def\r\n\r\nclass GridError(Exception):\r\n r\"\"\"\r\n Exception handler for convenience;\r\n \"\"\"\r\n\r\n pass\r\n\r\n\r\n# end class GridError\r\n\r\n\r\n# subcomponent class def\r\n\r\nclass GridMatrix:\r\n r\"\"\"\r\n GridMatrix - GameGrid subcomponent;\r\n \"\"\"\r\n\r\n def __init__(self, rows, columns):\r\n\r\n # member inits\r\n\r\n self.rows = rows\r\n\r\n self.columns = columns\r\n\r\n # first time: reset matrix\r\n\r\n self.reset_matrix()\r\n\r\n # end def\r\n\r\n def add(self, object_, row, column, raise_error=False):\r\n r\"\"\"\r\n adds an object at (row, column) in matrix;\r\n\r\n raises error if @raise_error and busy location;\r\n\r\n returns True on success, False otherwise;\r\n \"\"\"\r\n\r\n # all is OK?\r\n\r\n if self.matrix.get((row, column)) is None:\r\n\r\n # add object to matrix\r\n\r\n self.matrix[(row, column)] = object_\r\n\r\n # succeeded\r\n\r\n return True\r\n\r\n elif raise_error:\r\n\r\n raise GridError(\r\n\r\n \"cannot add object at (row, column) = \"\r\n\r\n \"({row}, {col}): busy location.\"\r\n\r\n .format(row=row, col=column)\r\n )\r\n\r\n # end if\r\n\r\n # failed\r\n\r\n return False\r\n\r\n # end def\r\n\r\n @property\r\n def columns(self):\r\n r\"\"\"\r\n returns number of columns in matrix;\r\n \"\"\"\r\n\r\n return self.__columns\r\n\r\n # end def\r\n\r\n @columns.setter\r\n def columns(self, value):\r\n\r\n self.__columns = normalize(value)\r\n\r\n # end def\r\n\r\n @columns.deleter\r\n def columns(self):\r\n\r\n del self.__columns\r\n\r\n # end def\r\n\r\n def duplicate_object(self, from_row_column, to_row_column):\r\n r\"\"\"\r\n duplicates the object located at @from_row_column into\r\n @to_row_column if exists;\r\n\r\n raises errors otherwise;\r\n \"\"\"\r\n\r\n # get object if exists\r\n\r\n _object = self.get_object_at(*from_row_column, raise_error=True)\r\n\r\n # add copy to new location\r\n\r\n self.add(_object, *to_row_column, raise_error=True)\r\n\r\n # end def\r\n\r\n def get_object_at(self, row, column, raise_error=False):\r\n r\"\"\"\r\n returns the object located at (row, column) in the\r\n matrix or None on failure;\r\n\r\n raises an error if @raise_error and empty location;\r\n \"\"\"\r\n\r\n # get object\r\n\r\n _object = self.matrix.get((row, column))\r\n\r\n # no object found?\r\n\r\n if raise_error and _object is None:\r\n raise GridError(\r\n\r\n \"no object found at (row, column) = \"\r\n\r\n \"({row}, {col}): empty location.\"\r\n\r\n .format(row=row, col=column)\r\n )\r\n\r\n # end if\r\n\r\n return _object\r\n\r\n # end def\r\n\r\n @property\r\n def matrix(self):\r\n r\"\"\"\r\n returns internal matrix object;\r\n \"\"\"\r\n\r\n return self.__matrix\r\n\r\n # end def\r\n\r\n def move_object(self, from_row_column, to_row_column):\r\n r\"\"\"\r\n moves the object located at @from_row_column to\r\n @to_row_column if exists;\r\n\r\n raises 
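# --- editor's note: illustrative sketch, not part of the original record.
# GridMatrix above stores a sparse grid as a plain dict keyed by (row, column):
# get() yields None for empty cells and pop(key, None) removes silently, which
# is exactly what add/move/remove rely on:
grid = {}
grid[(0, 0)] = "tile-A"            # add
assert grid.get((0, 0)) == "tile-A"
assert grid.get((5, 5)) is None    # empty location, no KeyError
grid[(1, 1)] = grid[(0, 0)]        # duplicate: same object under two keys
grid.pop((0, 0), None)             # move completed: old location cleared
grid.pop((9, 9), None)             # removing a free cell is a silent no-op
assert grid == {(1, 1): "tile-A"}
# --- end of editor's note ---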
errors otherwise;\r\n \"\"\"\r\n\r\n # get object if exists\r\n\r\n _object = self.get_object_at(*from_row_column, raise_error=True)\r\n\r\n # add it to new location\r\n\r\n self.add(_object, *to_row_column, raise_error=True)\r\n\r\n # and then remove it from old location\r\n\r\n self.remove_object_at(*from_row_column)\r\n\r\n # end def\r\n\r\n def remove_object_at(self, row, column):\r\n r\"\"\"\r\n removes the object located at (row, column) from the\r\n matrix, if any;\r\n \"\"\"\r\n\r\n # remove object\r\n\r\n self.matrix.pop((row, column), None)\r\n\r\n # end def\r\n\r\n def reset_matrix(self):\r\n r\"\"\"\r\n resets matrix;\r\n \"\"\"\r\n\r\n self.__matrix = dict()\r\n\r\n # end def\r\n\r\n @property\r\n def rows(self):\r\n r\"\"\"\r\n returns number of rows in matrix;\r\n \"\"\"\r\n\r\n return self.__rows\r\n\r\n # end def\r\n\r\n @rows.setter\r\n def rows(self, value):\r\n\r\n self.__rows = normalize(value)\r\n\r\n # end def\r\n\r\n @rows.deleter\r\n def rows(self):\r\n\r\n del self.__rows\r\n\r\n # end def\r\n\r\n def swap_objects(self, row_column1, row_column2):\r\n r\"\"\"\r\n swaps two objects located at @row_column1 and\r\n @row_column2 if they do exist;\r\n\r\n raises errors otherwise;\r\n \"\"\"\r\n\r\n # get objects if exist\r\n\r\n _object1 = self.get_object_at(*row_column1, raise_error=True)\r\n\r\n _object2 = self.get_object_at(*row_column2, raise_error=True)\r\n\r\n # clear locations\r\n\r\n self.remove_object_at(*row_column1)\r\n\r\n self.remove_object_at(*row_column2)\r\n\r\n # swap locations\r\n\r\n self.add(_object1, *row_column2, raise_error=True)\r\n\r\n self.add(_object2, *row_column1, raise_error=True)\r\n\r\n # end def\r\n\r\n\r\n# end class GridMatrix\r\n\r\n\r\n# subcomponent class def\r\n\r\nclass GridTile:\r\n r\"\"\"\r\n GridTile - GameGrid subcomponent;\r\n \"\"\"\r\n\r\n def __init__(self, grid_owner, value, row, column, tile_animation=True):\r\n # private member inits\r\n\r\n self.__tk_owner = grid_owner\r\n\r\n self.__cell_size = grid_owner.cell_size\r\n\r\n # unique tag id for canvas tags management\r\n\r\n self.tag = \"GridTile{}\".format(id(self))\r\n\r\n # public member inits\r\n\r\n self.id = None\r\n\r\n self.value = value\r\n\r\n self.row = row\r\n\r\n self.column = column\r\n\r\n self.tile_animation = tile_animation\r\n\r\n # end def\r\n\r\n @property\r\n def cell_size(self):\r\n r\"\"\"\r\n returns object's GridCellSize structure;\r\n \"\"\"\r\n\r\n return self.__cell_size\r\n\r\n # end def\r\n\r\n @property\r\n def column(self):\r\n r\"\"\"\r\n returns object's normalized column;\r\n \"\"\"\r\n\r\n return self.__column\r\n\r\n # end def\r\n\r\n @column.setter\r\n def column(self, value):\r\n self.__column = normalize(value, minimum=0)\r\n\r\n # end def\r\n\r\n @column.deleter\r\n def column(self):\r\n del self.__column\r\n\r\n # end def\r\n\r\n @property\r\n def row_column(self):\r\n r\"\"\"\r\n returns a (row, column) pair;\r\n \"\"\"\r\n\r\n return (self.row, self.column)\r\n\r\n # end def\r\n\r\n @property\r\n def owner(self):\r\n r\"\"\"\r\n returns ref to private tk_owner;\r\n \"\"\"\r\n\r\n return self.__tk_owner\r\n\r\n # end def\r\n\r\n @property\r\n def row(self):\r\n r\"\"\"\r\n returns object's normalized row;\r\n \"\"\"\r\n\r\n return self.__row\r\n\r\n # end def\r\n\r\n @row.setter\r\n def row(self, value):\r\n self.__row = normalize(value, minimum=0)\r\n\r\n # end def\r\n\r\n @row.deleter\r\n def row(self):\r\n del self.__row\r\n\r\n # end def\r\n\r\n @property\r\n def size(self):\r\n r\"\"\"\r\n returns object's (width, height) 
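# --- editor's note: illustrative sketch, not part of the original record.
# GridCellSize._real_size() above deducts (count + 1) grid lines of the given
# thickness from the canvas size (both borders plus the separators between
# cells), then splits the remainder evenly; worked out with concrete numbers:
def real_size(size, count, thickness):
    # e.g. 3 columns need 4 vertical lines: 2 borders + 2 separators
    return round(abs((size - (count + 1) * thickness) // count))

assert real_size(400, 3, 1) == 132   # (400 - 4) // 3
assert real_size(400, 4, 2) == 97    # (400 - 10) // 4
# --- end of editor's note ---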
cell size;\r\n \"\"\"\r\n\r\n return self.cell_size.size_wxh\r\n\r\n # end def\r\n\r\n @property\r\n def value(self):\r\n r\"\"\"\r\n returns object's value;\r\n \"\"\"\r\n\r\n return self.__value\r\n\r\n # end def\r\n\r\n @value.setter\r\n def value(self, new_value):\r\n self.__value = new_value\r\n\r\n # end def\r\n\r\n @value.deleter\r\n def value(self):\r\n del self.__value\r\n\r\n # end def\r\n\r\n @property\r\n def xy_center(self):\r\n r\"\"\"\r\n returns tile's (x, y) center point on canvas;\r\n \"\"\"\r\n\r\n return self.cell_size.xy_center(self.row, self.column)\r\n\r\n # end def\r\n\r\n @property\r\n def xy_origin(self):\r\n r\"\"\"\r\n returns tile's (x_left, y_top) point of origin on canvas;\r\n \"\"\"\r\n\r\n return self.cell_size.xy_left_top(self.row, self.column)\r\n\r\n # end def\r\n\r\n# end class GridTile\r\n","sub_path":"game_grid.py","file_name":"game_grid.py","file_ext":"py","file_size_in_byte":28033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"493715701","text":"# coding=utf-8\n\n# coding=utf-8\nimport struct\nimport numpy as np\n#import pylab as pl\n\nclass mnistData:\n\n def __init__(self):\n dir = \"../Data/mnist/\"\n self._testDataSetPath = dir + \"t10k-images.idx3-ubyte\"\n self._testLabelSetPath = dir + \"t10k-labels.idx1-ubyte\"\n self._trainDataSetPath = dir + \"train-images.idx3-ubyte\"\n self._trainLabelSetPath = dir + \"train-labels.idx1-ubyte\"\n self._load_data_header()\n\n def _load_data_header(self):\n buf = open(self._testDataSetPath, 'rb').read()\n magic, self.testSetSize = struct.unpack_from(\">II\", buf, 0)\n buf = open(self._trainLabelSetPath, 'rb').read()\n magic, self.trainSetSize = struct.unpack_from(\">II\", buf, 0)\n\n def _load_image_set(self, path, num):\n binFile = open(path, 'rb')\n buf = binFile.read()\n\n # '>IIII'是说使用大端法读取4个unsinged int32\n index = 0\n magic, numImages, numRows, numCols = struct.unpack_from(\">IIII\", buf, index)\n if num == -1: num = numImages\n else : num = min(numImages, num)\n index += struct.calcsize(\">IIII\")\n\n images = []\n for i in range(num):\n im, index = self._load_image(buf, index, numRows, numCols)\n images.append(im)\n return images\n\n # 读取一张图片 (numRows * numCols byte) 28 * 28 = 784 byte\n def _load_image(self, buf, index, numRows, numCols):\n # '>784B'的意思就是用大端法读取784个unsigned byte\n fmt = \">\" + str(numRows * numCols) + \"B\"\n im = struct.unpack_from(fmt, buf, index)\n index += struct.calcsize(fmt)\n im = np.array(im).reshape((numRows, numCols))\n #pl.imshow(im)\n #pl.show()\n return im, index\n\n def _load_label_set(self, path, num):\n buf = open(path,'rb').read()\n index = 0\n\n magic, numLabels = struct.unpack_from('>II', buf, index)\n if num == -1: num = numLabels\n else: num = min(num, numLabels)\n index += struct.calcsize('>II')\n\n labels = []\n for i in range(num):\n label, = struct.unpack_from('>B', buf, index) # 返回的是tuple, 后面加逗号表明只需要第一个元素\n index += struct.calcsize('>B')\n labels.append(label)\n\n return labels\n\n def load_train_set(self, num=-1):\n images = self._load_image_set(self._trainDataSetPath, num)\n labels = self._load_label_set(self._trainLabelSetPath, num)\n return images, labels\n\n def load_test_set(self, num=-1):\n images = self._load_image_set(self._testDataSetPath, num)\n labels = self._load_label_set(self._testLabelSetPath, num)\n return images, 
labels\n","sub_path":"MLInAction/code/mnistData.py","file_name":"mnistData.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"131348916","text":"# -*- coding: utf-8 -*-\nfrom selenium import webdriver\nimport unittest\nfrom selenium.webdriver.common.keys import Keys\n\nclass NewVisitorTest(unittest.TestCase):\n\n def setUp(self):\n self.browser = webdriver.Firefox()\n self.browser.implicitly_wait(3)\n\n def tearDown(self):\n self.browser.quit()\n\n def test_can_start_a_list_and_retrieve_it_later(self):\n # 张小胖听说有个在线待办事项应用很不错\n # 他搜索去登录了这个应用的首页\n self.browser.get('http://localhost:8000')\n\n #他看到网页标题和头部都包含\"TO-DO\"关键词\n self.assertIn('To-Do' , self.browser.title)\n header_text = self.browser.find_element_by_tag_name('h1').text\n self.assertIn('To-Do' , header_text)\n\n #应用邀请她输入一个待办事项\n inputbox = self.browser.find_element_by_id('id_new_item')\n self.assertEqual(\n inputbox.get_attribute('placeholder'),\n 'Enter a to-do item'\n )\n\n #她在文本框中输入了\"Buy Box\"\n inputbox.send_keys('Buy a Box')\n #因为她的爱好是收藏盒子\n #输入后按回车,页面更新了\n inputbox.send_keys(Keys.ENTER)\n table = self.browser.find_element_by_id('id_list_table')\n rows = table.find_elements_by_tag_name('tr')\n self.assertTrue(\n any(row.text == '1:Buy a Box' for row in rows),\n \"New to-do litem did not appear in table\"\n )\n #待办事项表格中显示了\"1:Buy Box\"\n #同时页面又出现一个文本框可以输入其他待办事项\n #他有输入了\"Use Box\"\n #页面再次更新,她的清单中显示两个待办事项\n #他想知道下次登录还有没有,应用是否保存\n #他看到网站为他生成了一个唯一的URL\n #而且网页中有一些文字解说这个功能\n #他访问了这个唯一的URL发现待办事项都在\n #他很满意之后去睡觉了\n self.fail('Finisfh the test!')\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"functional_tests.py","file_name":"functional_tests.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"55502682","text":"#!/usr/bin/env python\nfrom imp import *\nfrom inspect import *\nfrom ass import *\nfrom import_module import *\nfrom import_path import *\nfrom inspection import *\nfrom ospath import *\nfrom public import *\n\nclass IOError(IOError):\n pass\n\n@public\ndef mod_requirements(module):\n result = []\n for name,object in getmembers(module):\n pkg = getpackage(object)\n #pkg = getmodule(object)\n result.append(pkg)\n result = list(set(filter(None,result)))\n result = filter(\n lambda m:not issystem(m),\n result\n )\n return sorted(\n result,\n key=lambda m:m.__name__.lower()\n )\n\ndef is_exists(path):\n if not exists(path):\n err = \"%s not exists\" % path\n raise IOError(err)\n\n@public\ndef path_requirements(path):\n path = unicode(path)\n is_exists(path)\n module = import_path(path)\n return mod_requirements(module)\n\n@public\ndef files_requirements(path,include=\"*.py\",exclude=[\"*.pyc\",\"*test*\"]):\n path = unicode(path)\n is_exists(path)\n result = []\n for file in files(path,\n include=include,\n exclude=exclude\n ):\n result+=file_requirements(file)\n return list(set(result))\n\n@public\ndef requirements(object,include=\"*.py\",exclude=[\"*.pyc\",\"*test*\"]):\n if ismodule(object):\n return mod_requirements(object)\n path = unicode(object)\n if exists(path):\n return path_requirements(path)\n return pkg_requirements(object,\n include=include,\n exclude=exclude\n )\n\n","sub_path":"py_modules/requirements.py","file_name":"requirements.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"6067468","text":"import 
re\r\n\r\n\r\nclass WordCount:\r\n def __init__(self, s):\r\n \"\"\"\r\n Args:\r\n s: 待处理字符串\r\n \"\"\"\r\n if not type(s) == str:\r\n raise TypeError(\"请传入一个字符串\")\r\n self.str = s\r\n self.words = []\r\n\r\n def charCount(self):\r\n \"\"\"\r\n Returns:\r\n 字符数量\r\n \"\"\"\r\n return len(self.str)\r\n\r\n def wordCount(self):\r\n \"\"\"\r\n Returns:\r\n 单词数量\r\n \"\"\"\r\n s = self.str\r\n s = s.lower()\r\n if not self.words:\r\n self.words = re.findall(\"([a-z]{4,}[a-z0-9]*)\", s)\r\n return len(self.words)\r\n\r\n def topWord(self):\r\n \"\"\"\r\n Returns:\r\n [[单词,频率]...]\r\n \"\"\"\r\n if not self.words:\r\n self.wordCount()\r\n count = []\r\n keys = []\r\n for k in self.words:\r\n if k not in keys:\r\n keys.append(k)\r\n for key in keys:\r\n count.append([key, self.words.count(key)])\r\n count2 = count.copy()\r\n\r\n count.sort(key=lambda a: a[1], reverse=True)\r\n result = []\r\n for c in count:\r\n for c2 in count2:\r\n if c[1] == c2[1] and c2 not in result:\r\n result.append(c2)\r\n count2.remove(c2)\r\n break\r\n if len(result) == 10:\r\n break\r\n\r\n return result\r\n\r\n def lineCount(self):\r\n \"\"\"\r\n Returns:\r\n 行数\r\n \"\"\"\r\n s = self.str\r\n # 去除空白字符\\f\\n\\r\\t\\v\\b\r\n spaces = \"\\t \"\r\n for space in spaces:\r\n s = s.replace(space, \"\")\r\n\r\n # 将\\r\\n和\\r式的换行转化为\\n式\r\n s = s.replace(\"\\r\", \"\\n\")\r\n while True:\r\n tmp = s.replace(\"\\n\\n\", \"\\n\")\r\n if tmp == s:\r\n break\r\n s = tmp\r\n\r\n # 标准化完成,行数计算\r\n s = s.strip()\r\n return s.count(\"\\n\") + 1\r\n","sub_path":"S-Java/运行结果/221801209/src/Lib.py","file_name":"Lib.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"347830126","text":"from sqlalchemy.testing.assertions import eq_, assert_raises\nfrom sqlalchemy.testing import fixtures\nfrom sqlalchemy import testing\nfrom sqlalchemy.dialects.mysql import insert\nfrom sqlalchemy import Table, Column, Integer, String\n\n\nclass OnDuplicateTest(fixtures.TablesTest):\n __only_on__ = 'mysql',\n __backend__ = True\n run_define_tables = 'each'\n\n @classmethod\n def define_tables(cls, metadata):\n Table(\n 'foos', metadata,\n Column('id', Integer, primary_key=True, autoincrement=True),\n Column('bar', String(10)),\n Column('baz', String(10)),\n )\n\n def test_bad_args(self):\n assert_raises(\n ValueError,\n insert(self.tables.foos, values={}).on_duplicate_key_update\n )\n\n def test_on_duplicate_key_update(self):\n foos = self.tables.foos\n with testing.db.connect() as conn:\n conn.execute(insert(foos, dict(id=1, bar='b', baz='bz')))\n stmt = insert(foos, [dict(id=1, bar='ab'), dict(id=2, bar='b')])\n stmt = stmt.on_duplicate_key_update(bar=stmt.values.bar)\n result = conn.execute(stmt)\n eq_(result.inserted_primary_key, [2])\n eq_(\n conn.execute(foos.select().where(foos.c.id == 1)).fetchall(),\n [(1, 'ab', 'bz')]\n )\n\n def test_last_inserted_id(self):\n foos = self.tables.foos\n with testing.db.connect() as conn:\n stmt = insert(foos, {\"bar\": \"b\", \"baz\": \"bz\"})\n result = conn.execute(\n stmt.on_duplicate_key_update(\n bar=stmt.values.bar, baz=\"newbz\")\n )\n eq_(result.inserted_primary_key, [1])\n\n stmt = insert(foos, {\"id\": 1, \"bar\": \"b\", \"baz\": \"bz\"})\n result = conn.execute(\n stmt.on_duplicate_key_update(\n bar=stmt.values.bar, baz=\"newbz\")\n )\n eq_(result.inserted_primary_key, 
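# --- editor's note: illustrative sketch, not part of the original record.
# WordCount.topWord() in the record above hand-rolls a frequency table and a
# top-10 selection; the standard-library equivalent is collections.Counter
# (note most_common() does not reproduce the record's first-seen tie order in
# every case; sample text below is made up for this sketch):
import re
from collections import Counter

text = "Alpha beta beta gamma gamma gamma delta".lower()
words = re.findall(r"[a-z]{4,}[a-z0-9]*", text)   # same pattern as the record
print(Counter(words).most_common(10))
# expected: [('gamma', 3), ('beta', 2), ('alpha', 1), ('delta', 1)]
# --- end of editor's note ---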
[1])\n\n\n\n","sub_path":"test/dialect/mysql/test_on_duplicate.py","file_name":"test_on_duplicate.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"339819432","text":"from tksheet import Sheet\nimport tkinter as tk\n\n\nclass demo(tk.Tk):\n def __init__(self):\n tk.Tk.__init__(self)\n self.grid_columnconfigure(0, weight=1)\n self.grid_rowconfigure(0, weight=1)\n self.frame = tk.Frame(self)\n self.frame.grid_columnconfigure(0, weight=1)\n self.frame.grid_rowconfigure(0, weight=1)\n self.sheet = Sheet(\n self.frame,\n page_up_down_select_row=True,\n #empty_vertical = 0,\n column_width=120,\n startup_select=(0, 1, \"rows\"),\n data=[[\n f\"Row {r}, Column {c}\\nnewline1\\nnewline2\" for c in range(50)\n ] for r in range(1000)], #to set sheet data at startup\n height=500, #height and width arguments are optional\n width=1200 #For full startup arguments see DOCUMENTATION.md\n )\n\n self.sheet.enable_bindings((\n \"single_select\", #\"single_select\" or \"toggle_select\"\n # \"drag_select\", #enables shift click selection as well\n # \"column_drag_and_drop\",\n # \"row_drag_and_drop\",\n \"column_select\",\n \"row_select\",\n # \"column_width_resize\",\n # \"double_click_column_resize\",\n #\"row_width_resize\",\n #\"column_height_resize\",\n \"arrowkeys\",\n \"row_height_resize\",\n \"double_click_row_resize\",\n \"right_click_popup_menu\",\n \"rc_select\",\n \"rc_insert_column\",\n \"rc_delete_column\",\n \"rc_insert_row\",\n \"rc_delete_row\",\n # \"hide_columns\",\n # \"copy\",\n # \"cut\",\n # \"paste\",\n # \"delete\",\n # \"undo\",\n \"edit_cell\"))\n #self.sheet.disable_bindings() #uses the same strings\n #self.sheet.enable_bindings()\n\n self.frame.grid(row=0, column=0, sticky=\"nswe\")\n self.sheet.grid(row=0, column=0, sticky=\"nswe\")\n \"\"\"_________________________ EXAMPLES _________________________ \"\"\"\n \"\"\"_____________________________________________________________\"\"\"\n\n # __________ CHANGING THEME __________\n\n #self.sheet.change_theme(\"light green\")\n\n # __________ DISPLAY SUBSET OF COLUMNS __________\n\n self.sheet.display_subset_of_columns(indexes=[0, 1, 2, 3, 4, 5],\n enable=True)\n #self.sheet.display_columns(enable = False)\n self.sheet.insert_column(idx=0)\n self.sheet.insert_columns(columns=5,\n idx=10,\n mod_column_positions=False)\n\n # __________ HIGHLIGHT / DEHIGHLIGHT CELLS __________\n\n self.sheet.highlight_cells(row=5, column=5, fg=\"red\")\n self.sheet.highlight_cells(row=5, column=1, fg=\"red\")\n self.sheet.highlight_cells(row=5,\n bg=\"#ed4337\",\n fg=\"white\",\n canvas=\"row_index\")\n self.sheet.highlight_cells(column=0,\n bg=\"#ed4337\",\n fg=\"white\",\n canvas=\"header\")\n\n # __________ CELL / ROW / COLUMN ALIGNMENTS __________\n\n self.sheet.align_cells(row=1, column=1, align=\"e\")\n self.sheet.align_rows(rows=3, align=\"e\")\n self.sheet.align_columns(columns=4, align=\"e\")\n\n # __________ ADDITIONAL BINDINGS __________\n\n #self.sheet.bind(\"\", self.mouse_motion)\n\n \"\"\"\n\n UNTIL DOCUMENTATION IS COMPLETE, PLEASE BROWSE THE FILE\n _tksheet.py FOR A FULL LIST OF FUNCTIONS AND THEIR PARAMETERS\n\n \"\"\"\n\n def all_extra_bindings(self, event):\n print(event)\n\n def begin_edit_cell(self, event):\n print(event) # event[2] is keystroke\n return event[\n 2] # return value is the text to be put into cell edit window\n\n def window_resized(self, event):\n pass\n #print (event)\n\n def deselect(self, event):\n print(event, 
self.sheet.get_selected_cells())\n\n def rc(self, event):\n print(event)\n\n def ctrl_a(self, response):\n print(response)\n\n def row_select(self, response):\n print(response)\n\n def column_select(self, response):\n print(response)\n #for i in range(50):\n # self.sheet.create_dropdown(i, response[1], values=[f\"{i}\" for i in range(200)], set_value=\"100\",\n # destroy_on_select = False, destroy_on_leave = False, see = False)\n #print (self.sheet.get_cell_data(0, 0))\n #self.sheet.refresh()\n\n\napp = demo()\napp.mainloop()","sub_path":"tksheetdemo.py","file_name":"tksheetdemo.py","file_ext":"py","file_size_in_byte":4705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"9317185","text":"\"\"\"\n신윤중\n2017-5-28\nlab02-1 - linear_regression\n\"\"\"\n\nimport tensorflow as tf\ntf.set_random_seed(777)\n\n# X and Y data\nx_train = [1, 2, 3]\ny_train = [1, 2, 3]\n\n# Try to find values for W and b to compute y_data = x_data * W + b\n# 위 데이터를 입력한다면 W = 1, b = 0일 것이다.\n# tensorflow 를 이용하여 알아보자\nW = tf.Variable(tf.random_normal([1]), name='weight')\nb = tf.Variable(tf.random_normal([1]), name='bias')\n# W와 b에 크기 [1]의 초기 랜덤값이 저장된다\n\n# hypothesis XW+b\nhypothesis = x_train * W + b\n# 회귀 방정식 가중치 W와 편차 b\n\n# cost/loss function\ncost = tf.reduce_mean(tf.square(hypothesis - y_train))\n# reduce_mean : tensor 데이터의 rank 를 1로 감소 시킨다. 행, 열에 따른 처리도 가능하다.\n# square : 제곱식\n# 추정값과 실제값(y_train)간 차이의 제곱을 평균내어 cost 에 저장한다.\n\n# Minimize\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)\n# 학습률 0.01의 gradient Descent 함수를 optimizer 에 저장\ntrain = optimizer.minimize(cost)\n# cost 를 최소화시킨다. train 에 저장\n# 이 과정에서 W와 b의 최적화가 이루어진다.\n\n# Launch the graph in a session.\nsess = tf.Session()\n# Initializes global variables in the graph\nsess.run(tf.global_variables_initializer())\n# tensorflow variable 을 사용할 때 반드시 우선 사용되야하는 초기화 함수\n\n# Fit the line\nfor step in range(2001):\n sess.run(train)\n if step%20 == 0:\n print(step, sess.run(cost), sess.run(W), sess.run(b))\n# Learns best fit W:[1.], b:[1.]\n","sub_path":"lab02-1-linear_regression.py","file_name":"lab02-1-linear_regression.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"106569282","text":"from __future__ import division\nfrom __future__ import absolute_import\nimport numpy as np\n\nfrom libensemble.message_numbers import EVAL_GEN_TAG\nfrom libensemble.alloc_funcs.support import \\\n avail_worker_ids, sim_work, gen_work, count_persis_gens\n\nfrom libensemble.gen_funcs.aposmm import \\\n initialize_APOSMM, decide_where_to_start_localopt, update_history_dist\n\n\ndef start_persistent_local_opt_gens(W, H, sim_specs, gen_specs, persis_info):\n \"\"\"\n This allocation function will:\n\n - Start up a persistent generator that is a local opt run at the first point\n identified by APOSMM's decide_where_to_start_localopt.\n - It will only do this if at least one worker will be left to perform\n simulation evaluations.\n - If multiple starting points are available, the one with smallest function\n value is chosen.\n - If no candidate starting points exist, points from existing runs will be\n evaluated (oldest first).\n - If no points are left, call the generation function.\n\n :See:\n ``/libensemble/tests/regression_tests/test_6-hump_camel_uniform_sampling_with_persistent_localopt_gens.py``\n \"\"\"\n\n Work = {}\n gen_count = count_persis_gens(W)\n task_avail = ~H['given']\n\n # If a persistent localopt 
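# --- editor's note: illustrative sketch, not part of the original record.
# The lab02-1 record above fits y = W*x + b with TF1's GradientDescentOptimizer
# (learning rate 0.01); the same update rule written out by hand in numpy,
# to make the minimized gradient explicit:
import numpy as np

x = np.array([1.0, 2.0, 3.0])
y = np.array([1.0, 2.0, 3.0])
W, b = np.random.randn(), np.random.randn()

for step in range(2001):
    err = W * x + b - y                # hypothesis minus target
    cost = np.mean(err ** 2)           # same cost as reduce_mean(square(...))
    W -= 0.01 * np.mean(2 * err * x)   # d(cost)/dW
    b -= 0.01 * np.mean(2 * err)       # d(cost)/db

print(W, b, cost)                      # converges toward W ~ 1, b ~ 0
# --- end of editor's note ---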
run has just finished, use run_order to update H\n # and then remove other information from persis_info\n for i in persis_info.keys():\n if 'done' in persis_info[i]:\n H['num_active_runs'][persis_info[i]['run_order']] -= 1\n if 'x_opt' in persis_info[i]:\n opt_ind = np.all(H['x'] == persis_info[i]['x_opt'], axis=1)\n assert sum(opt_ind) == 1, \"There must be just one optimum\"\n H['local_min'][opt_ind] = True\n persis_info[i] = {'rand_stream': persis_info[i]['rand_stream']}\n\n # If i is idle, but in persistent mode, and its calculated values have\n # returned, give them back to i. Otherwise, give nothing to i\n for i in avail_worker_ids(W, persistent=True):\n gen_inds = (H['gen_worker'] == i)\n if np.all(H['returned'][gen_inds]):\n last_time_pos = np.argmax(H['given_time'][gen_inds])\n last_ind = np.nonzero(gen_inds)[0][last_time_pos]\n gen_work(Work, i,\n sim_specs['in'] + [n[0] for n in sim_specs['out']],\n persis_info[i], np.atleast_1d(last_ind), persistent=True)\n persis_info[i]['run_order'].append(last_ind)\n\n for i in avail_worker_ids(W, persistent=False):\n # Find candidates to start local opt runs if a sample has been evaluated\n if np.any(np.logical_and(~H['local_pt'], H['returned'])):\n _, n_s, _, _, rk_const, lhs_divisions, mu, nu = initialize_APOSMM(H, gen_specs)\n update_history_dist(H, gen_specs, c_flag=False)\n starting_inds = decide_where_to_start_localopt(H, n_s, rk_const, lhs_divisions, mu, nu)\n else:\n starting_inds = []\n\n # Start persistent generator for local opt run unless it would use all workers\n if starting_inds and gen_count + 1 < len(W):\n # Start at the best possible starting point\n ind = starting_inds[np.argmin(H['f'][starting_inds])]\n gen_work(Work, i,\n sim_specs['in'] + [n[0] for n in sim_specs['out']],\n persis_info[i], np.atleast_1d(ind), persistent=True)\n\n H['started_run'][ind] = 1\n H['num_active_runs'][ind] += 1\n\n persis_info[i]['run_order'] = [ind]\n gen_count += 1\n\n elif np.any(task_avail):\n\n # Perform sim evaluations from existing runs\n q_inds_logical = np.logical_and(task_avail, H['local_pt'])\n if not np.any(q_inds_logical):\n q_inds_logical = task_avail\n sim_ids_to_send = np.nonzero(q_inds_logical)[0][0] # oldest point\n sim_work(Work, i, sim_specs['in'], np.atleast_1d(sim_ids_to_send))\n task_avail[sim_ids_to_send] = False\n\n elif (gen_count == 0\n and not np.any(np.logical_and(W['active'] == EVAL_GEN_TAG,\n W['persis_state'] == 0))):\n\n # Finally, generate points since there is nothing else to do\n gen_count += 1\n gen_work(Work, i, gen_specs['in'], persis_info[i], [])\n\n return Work, persis_info\n","sub_path":"libensemble/alloc_funcs/start_persistent_local_opt_gens.py","file_name":"start_persistent_local_opt_gens.py","file_ext":"py","file_size_in_byte":4372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"500284884","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport matplotlib\nif not hasattr(sys, \"ps1\"):\n matplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\n\n# plt.style.use(\"seaborn-ticks\")\nplt.style.use(\"fast\")\nplt.rcParams[\"xtick.direction\"] = \"in\"\nplt.rcParams[\"ytick.direction\"] = \"in\"\nplt.rcParams[\"font.size\"] = 11.0\nplt.rcParams[\"figure.figsize\"] = (9, 6)\nplt.rcParams[\"legend.frameon\"] = False\n\n\ndef line_plot(df, title=None, xlabel=None, ylabel=\"Cases\",\n v=None, h=None,\n xlim=(None, None), ylim=(0, None),\n math_scale=True, x_logscale=False, y_logscale=False, y_integer=False,\n show_legend=True, 
bbox_to_anchor=(1.02, 0), bbox_loc=\"lower left\",\n colormap=None, color_dict=None,\n filename=None):\n \"\"\"\n Show chronological change of the data.\n\n Args:\n df (pandas.DataFrame): target data\n\n Index\n reset index\n Columns\n field names\n Values:\n data values\n title (str): title of the figure\n xlabel (str): x-label\n ylabel (str): y-label\n v (list[int/float]): list of x values of vertical lines or None\n h (list[int/float]): list of y values of horizontal lines or None\n xlim (tuple(int or float, int or float)): limit of x dimain\n ylim (tuple(int or float, int or float)): limit of y dimain\n math_scale (bool): whether use LaTEX or not\n x_logscale (bool): whether use log-scale in x-axis or not\n y_logscale (bool): whether use log-scale in y-axis or not\n y_integer (bool): whether force to show the values as integer or not\n show_legend (bool): whether show legend or not\n bbox_to_anchor (tuple(int or float, int or float)): distance of legend and plot\n bbox_loc (str): location of legend\n colormap (str, matplotlib colormap object or None): colormap, please refer to https://matplotlib.org/examples/color/colormaps_reference.html\n color_dict (dict[str, str] or None): dictionary of column names (keys) and colors (values)\n filename (str): filename of the figure, or None (show figure)\n\n Note:\n If None is included in xlim/ylim, the values will be automatically determined by Matplotlib\n \"\"\"\n # Color\n if color_dict is None:\n color_args = {\"colormap\": colormap}\n else:\n colors = [color_dict.get(col) for col in df.columns]\n color_args = {\"colormap\": colormap, \"color\": colors}\n try:\n ax = df.plot(**color_args)\n except ValueError as e:\n raise ValueError(e.args[0]) from None\n # Scale\n if math_scale:\n ax.yaxis.set_major_formatter(\n matplotlib.ticker.ScalarFormatter(useMathText=True)\n )\n ax.ticklabel_format(style=\"sci\", axis=\"y\", scilimits=(0, 0))\n if x_logscale:\n ax.set_xscale(\"log\")\n if xlim[0] == 0:\n xlim = (None, None)\n if y_logscale:\n ax.set_yscale(\"log\")\n if ylim[0] == 0:\n ylim = (None, None)\n if y_integer:\n fmt = matplotlib.ticker.ScalarFormatter(useOffset=False)\n fmt.set_scientific(False)\n ax.yaxis.set_major_formatter(fmt)\n # Set metadata of figure\n ax.set_title(title or \"\")\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.set_xlim(*xlim)\n ax.set_ylim(*ylim)\n if show_legend:\n ax.legend(bbox_to_anchor=bbox_to_anchor, loc=bbox_loc, borderaxespad=0)\n else:\n ax.legend().set_visible(False)\n if h is not None:\n ax.axhline(y=h, color=\"black\", linestyle=\":\")\n if v is not None:\n if not isinstance(v, list):\n v = [v]\n for value in v:\n ax.axvline(x=value, color=\"black\", linestyle=\":\")\n plt.tight_layout()\n # Save figure or show figure\n if filename is None:\n plt.show()\n return None\n plt.savefig(\n filename, bbox_inches=\"tight\", transparent=False, dpi=300\n )\n plt.clf()\n return None\n\n\ndef box_plot(df, title, xlabel=None, ylabel=None,\n v=None, h=None,\n show_legend=True, bbox_to_anchor=(1.02, 0), bbox_loc=\"lower left\",\n filename=None):\n \"\"\"\n Show box plot of the data.\n\n Args:\n df (pandas.DataFrame): target data\n\n Index\n reset index\n Columns\n field names\n Values:\n data values\n title (str): title of the figure\n xlabel (str): x-label\n ylabel (str): y-label\n v (list[int/float]): list of x values of vertical lines or None\n h (list[int/float]): list of y values of horizontal lines or None\n show_legend (bool): whether show legend or not\n bbox_to_anchor (tuple(int or float, int or float)): 
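# --- editor's note: illustrative sketch, not part of the original record.
# line_plot() above toggles math-style scientific tick labels; the minimal
# matplotlib calls behind its math_scale=True branch are below (the output
# path is a placeholder for this sketch):
import matplotlib
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([1, 2, 3], [1e6, 2e6, 3e6])
ax.yaxis.set_major_formatter(
    matplotlib.ticker.ScalarFormatter(useMathText=True))
ax.ticklabel_format(style="sci", axis="y", scilimits=(0, 0))  # 1x10^6 offset
plt.savefig("/tmp/sketch.png", dpi=300, bbox_inches="tight")
plt.clf()
# --- end of editor's note ---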
distance of legend and plot\n bbox_loc (str): location of legend\n filename (str): filename of the figure, or None (show figure)\n \"\"\"\n df.plot.bar(title=title)\n plt.xticks(rotation=0)\n if h is not None:\n plt.axhline(y=h, color=\"black\", linestyle=\":\")\n plt.legend(\n bbox_to_anchor=bbox_to_anchor, loc=bbox_loc, borderaxespad=0\n )\n plt.tight_layout()\n if filename is None:\n plt.show()\n return None\n plt.savefig(\n filename, bbox_inches=\"tight\", transparent=False, dpi=300\n )\n plt.clf()\n return None\n","sub_path":"covsirphy/util/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":5370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"484805259","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue May 19 12:51:26 2020\r\n\r\n@author: rfuchs\r\n\"\"\"\r\n\r\nfrom lik_functions import ord_loglik_j, log_py_zM_ord, \\\r\n log_py_zM_bin, binom_loglik_j, log_py_zM_cont, cont_loglik_j,\\\r\n log_py_zM_categ, categ_loglik_j\r\nfrom lik_gradients import ord_grad_j, bin_grad_j, cont_grad_j, categ_grad_j\r\n\r\nfrom scipy.optimize import minimize\r\nfrom scipy.optimize import LinearConstraint\r\nfrom sklearn.preprocessing import OneHotEncoder\r\nfrom scipy.stats import multivariate_normal as mvnorm\r\n\r\nfrom copy import deepcopy\r\nimport autograd.numpy as np \r\nfrom autograd.numpy import newaxis as n_axis\r\n\r\nimport warnings\r\n#=============================================================================\r\n# MC Step functions\r\n#=============================================================================\r\n\r\ndef draw_zl1_ys(z_s, py_zl1, M):\r\n ''' Draw from p(z1 | y, s) proportional to p(y | z1) * p(z1 | s) for all s \r\n z_s (list of nd-arrays): zl | s^l for all s^l and all l.\r\n py_zl1 (nd-array): p(y | z1_M) \r\n M (list of int): The number of MC points on all layers\r\n ------------------------------------------------------------------------\r\n returns ((M1, numobs, r1, S1) nd-array): z^{(1)} | y, s\r\n '''\r\n epsilon = 1E-16\r\n \r\n numobs = py_zl1.shape[1]\r\n L = len(z_s) - 1\r\n S = [z_s[l].shape[2] for l in range(L)]\r\n r = [z_s[l].shape[1] for l in range(L + 1)]\r\n\r\n norm_cste = np.sum(py_zl1, axis = 0, keepdims = True) \r\n norm_cste = np.where(norm_cste <= epsilon, epsilon, norm_cste) \r\n \r\n py_zl1_norm = py_zl1 / norm_cste\r\n \r\n zl1_ys = np.zeros((M[0], numobs, r[0], S[0]))\r\n for s in range(S[0]):\r\n qM_cum = py_zl1_norm[:,:, s].T.cumsum(axis=1)\r\n u = np.random.rand(numobs, 1, M[0])\r\n \r\n choices = u < qM_cum[..., np.newaxis]\r\n idx = choices.argmax(1)\r\n \r\n zl1_ys[:,:,:,s] = np.take(z_s[0][:,:, s], idx.T, axis=0)\r\n \r\n return zl1_ys\r\n\r\n#=============================================================================\r\n# E Step functions\r\n#=============================================================================\r\n\r\ndef fy_zl1(lambda_bin, y_bin, nj_bin, lambda_ord, y_ord, nj_ord, lambda_categ,\\\r\n y_categ, nj_categ, y_cont, lambda_cont, zl1_s):\r\n ''' Compute log p(y | z1) = sum_{s= 1}^S[0] p(y, s| z1) as in Cagnone and \r\n Viroli (2014)\r\n lambda_bin (nb_bin x (1 + r1) nd-array): The binomial coefficients\r\n y_bin (numobs x nb_bin nd-array): The binary/count data\r\n nj_bin (list of int): The number of modalities for each bin variable\r\n lambda_ord (list of nb_ord_j x (nj_ord + r1) elements): The ordinal coefficients\r\n y_ord (numobs x nb_ord nd-array): The ordinal data\r\n nj_ord (list of int): The number of modalities for 
each ord variable\r\n zl1_s ((M1, r1, s1) nd-array): z1 | s \r\n ------------------------------------------------------------------------------\r\n returns ((M1, numobs, S1) nd-array):log p(y | z1_M)\r\n '''\r\n M0 = zl1_s.shape[0]\r\n S0 = zl1_s.shape[2] \r\n numobs = len(y_bin)\r\n \r\n nb_ord = len(nj_ord)\r\n nb_bin = len(nj_bin)\r\n nb_categ = len(nj_categ)\r\n nb_cont = y_cont.shape[1]\r\n \r\n log_py_zl1 = np.zeros((M0, numobs, S0), dtype = np.float) # l1 standing for the first layer\r\n \r\n if nb_bin: # First the Count/Binomial variables\r\n log_py_zl1 += log_py_zM_bin(lambda_bin, y_bin, zl1_s, S0, nj_bin) \r\n \r\n if nb_ord: # Then the ordinal variables \r\n log_py_zl1 += log_py_zM_ord(lambda_ord, y_ord, zl1_s, S0, nj_ord)[:,:,:,0] \r\n \r\n if nb_categ:\r\n log_py_zl1 += log_py_zM_categ(lambda_categ, y_categ, zl1_s, S0, nj_categ) \r\n \r\n if nb_cont:\r\n log_py_zl1 += log_py_zM_cont(lambda_cont, y_cont, zl1_s, S0)\r\n\r\n \r\n py_zl1 = np.exp(log_py_zl1)\r\n py_zl1 = np.where(py_zl1 == 0, 1E-50, py_zl1)\r\n \r\n return py_zl1\r\n\r\n\r\ndef E_step_GLLVM(zl1_s, mu_l1_s, sigma_l1_s, w_s, py_zl1):\r\n ''' Compute the distributions involved involved in the E step of \r\n the GLLVM coefficients estimations\r\n zl1_s ((M1, r1, s1) nd-array): z1 | s \r\n mu_l1_s (nd-array): mu_s for all s in S1 (mu_s starting from the 1st layer)\r\n sigma_l1_s (nd-array): sigma_s for all s in S1 (sigma_s starting from the 1st layer)\r\n w_s (list of length s1): The path probabilities for all s in S1\r\n py_zl1 (nd-array): p(y | z1_M)\r\n ----------------------------------------------------------------------------\r\n returns (tuple of len 3): p(z1 |y, s), p(s |y) and p(y)\r\n '''\r\n epsilon = 1E-16\r\n\r\n M0 = zl1_s.shape[0]\r\n S0 = zl1_s.shape[2] \r\n pzl1_s = np.zeros((M0, 1, S0))\r\n \r\n for s in range(S0): # Have to retake the function for DGMM to parallelize or use apply along axis\r\n pzl1_s[:,:, s] = mvnorm.pdf(zl1_s[:,:,s], mean = mu_l1_s[s].flatten(order = 'C'), \\\r\n cov = sigma_l1_s[s])[..., n_axis] \r\n # Compute p(y | s_i = 1)\r\n norm_cste = np.sum(pzl1_s, axis = 0, keepdims = True) \r\n norm_cste = np.where(norm_cste <= epsilon, epsilon, norm_cste) \r\n \r\n pzl1_s_norm = pzl1_s / norm_cste\r\n py_s = (pzl1_s_norm * py_zl1).sum(axis = 0)\r\n \r\n # Compute p(z |y, s) and normalize it\r\n norm_cste = py_s[n_axis]\r\n norm_cste = np.where(norm_cste <= epsilon, epsilon, norm_cste) \r\n pzl1_ys = pzl1_s * py_zl1 / norm_cste\r\n \r\n norm_cste = np.sum(pzl1_ys, axis = 0, keepdims = True) \r\n norm_cste = np.where(norm_cste <= epsilon, epsilon, norm_cste) \r\n pzl1_ys = pzl1_ys / norm_cste\r\n\r\n # Compute unormalized (18)\r\n ps_y = w_s[n_axis] * py_s\r\n\r\n norm_cste = np.sum(ps_y, axis = 1, keepdims = True) \r\n norm_cste = np.where(norm_cste <= epsilon, epsilon, norm_cste) \r\n\r\n ps_y = ps_y / norm_cste \r\n p_y = py_s @ w_s[..., n_axis]\r\n \r\n return pzl1_ys, ps_y, p_y\r\n\r\n#=============================================================================\r\n# M Step functions\r\n#=============================================================================\r\n\r\ndef bin_params_GLLVM(y_bin, nj_bin, lambda_bin_old, ps_y, pzl1_ys, zl1_s, AT,\\\r\n tol = 1E-5, maxstep = 100):\r\n ''' Determine the GLLVM coefficients related to binomial coefficients by \r\n optimizing each column coefficients separately.\r\n y_bin (numobs x nb_bin nd-array): The binomial data\r\n nj_bin (list of int): The number of modalities for each count/binary variable\r\n lambda_bin_old (list of nb_ord_j x 
(nj_ord + r1) elements): The binomial coefficients\r\n of the previous iteration\r\n ps_y ((numobs, S) nd-array): p(s | y) for all s in Omega\r\n pzl1_ys (nd-array): p(z1 | y, s)\r\n zl1_s ((M1, r1, s1) nd-array): z1 | s \r\n AT ((r1 x r1) nd-array): Var(z1)^{-1/2}\r\n tol (int): Control when to stop the optimisation process\r\n maxstep (int): The maximum number of optimization step.\r\n ----------------------------------------------------------------------\r\n returns (list of nb_bin_j x (nj_ord + r1) elements): The new bin coefficients\r\n '''\r\n \r\n r0 = zl1_s.shape[1] \r\n S0 = zl1_s.shape[2] \r\n nb_bin = len(nj_bin)\r\n \r\n new_lambda_bin = [] \r\n \r\n for j in range(nb_bin):\r\n if j < r0 - 1: # Constrained columns\r\n nb_constraints = r0 - j - 1\r\n lcs = np.hstack([np.zeros((nb_constraints, j + 2)), np.eye(nb_constraints)])\r\n linear_constraint = LinearConstraint(lcs, np.full(nb_constraints, 0), \\\r\n np.full(nb_constraints, 0), keep_feasible = True)\r\n \r\n opt = minimize(binom_loglik_j, lambda_bin_old[j] , \\\r\n args = (y_bin[:,j], zl1_s, S0, ps_y, pzl1_ys, nj_bin[j]), \r\n tol = tol, method='trust-constr', jac = bin_grad_j, \\\r\n constraints = linear_constraint, hess = '2-point', \\\r\n options = {'maxiter': maxstep})\r\n \r\n else: # Unconstrained columns\r\n opt = minimize(binom_loglik_j, lambda_bin_old[j], \\\r\n args = (y_bin[:,j], zl1_s, S0, ps_y, pzl1_ys, nj_bin[j]), \\\r\n tol = tol, method='BFGS', jac = bin_grad_j, \r\n options = {'maxiter': maxstep})\r\n\r\n res = opt.x \r\n if not(opt.success):\r\n res = lambda_bin_old[j]\r\n warnings.warn('One of the binomial optimisations has failed', RuntimeWarning)\r\n \r\n new_lambda_bin.append(deepcopy(res)) \r\n\r\n # Last identifiability part\r\n if nb_bin > 0:\r\n new_lambda_bin = np.stack(new_lambda_bin)\r\n new_lambda_bin[:,1:] = new_lambda_bin[:,1:] @ AT[0] \r\n \r\n return new_lambda_bin\r\n\r\n\r\n\r\ndef ord_params_GLLVM(y_ord, nj_ord, lambda_ord_old, ps_y, pzl1_ys, zl1_s, AT,\\\r\n tol = 1E-5, maxstep = 100):\r\n ''' Determine the GLLVM coefficients related to ordinal coefficients by \r\n optimizing each column coefficients separately.\r\n y_ord (numobs x nb_ord nd-array): The ordinal data\r\n nj_ord (list of int): The number of modalities for each ord variable\r\n lambda_ord_old (list of nb_ord_j x (nj_ord + r1) elements): The ordinal coefficients\r\n of the previous iteration\r\n ps_y ((numobs, S) nd-array): p(s | y) for all s in Omega\r\n pzl1_ys (nd-array): p(z1 | y, s)\r\n zl1_s ((M1, r1, s1) nd-array): z1 | s \r\n AT ((r1 x r1) nd-array): Var(z1)^{-1/2}\r\n tol (int): Control when to stop the optimisation process\r\n maxstep (int): The maximum number of optimization step.\r\n ----------------------------------------------------------------------\r\n returns (list of nb_ord_j x (nj_ord + r1) elements): The new ordinal coefficients\r\n '''\r\n #****************************\r\n # Ordinal link parameters\r\n #**************************** \r\n \r\n r0 = zl1_s.shape[1] \r\n S0 = zl1_s.shape[2] \r\n nb_ord = len(nj_ord)\r\n \r\n new_lambda_ord = []\r\n \r\n for j in range(nb_ord):\r\n enc = OneHotEncoder(categories='auto')\r\n y_oh = enc.fit_transform(y_ord[:,j][..., n_axis]).toarray() \r\n \r\n # Define the constraints such that the threshold coefficients are ordered\r\n nb_constraints = nj_ord[j] - 2 \r\n nb_params = nj_ord[j] + r0 - 1\r\n \r\n lcs = np.full(nb_constraints, -1)\r\n lcs = np.diag(lcs, 1)\r\n np.fill_diagonal(lcs, 1)\r\n \r\n lcs = np.hstack([lcs[:nb_constraints, :], \\\r\n 
np.zeros([nb_constraints, nb_params - (nb_constraints + 1)])])\r\n \r\n linear_constraint = LinearConstraint(lcs, np.full(nb_constraints, -np.inf), \\\r\n np.full(nb_constraints, 0), keep_feasible = True)\r\n \r\n opt = minimize(ord_loglik_j, lambda_ord_old[j] ,\\\r\n args = (y_oh, zl1_s, S0, ps_y, pzl1_ys, nj_ord[j]), \r\n tol = tol, method='trust-constr', jac = ord_grad_j, \\\r\n constraints = linear_constraint, hess = '2-point',\\\r\n options = {'maxiter': maxstep})\r\n \r\n res = opt.x\r\n if not(opt.success): # If the program fail, keep the old estimate as value\r\n res = lambda_ord_old[j]\r\n warnings.warn('One of the ordinal optimisations has failed', RuntimeWarning)\r\n \r\n # Ensure identifiability for Lambda_j\r\n new_lambda_ord_j = (res[-r0: ].reshape(1, r0) @ AT[0]).flatten() \r\n new_lambda_ord_j = np.hstack([deepcopy(res[: nj_ord[j] - 1]), new_lambda_ord_j]) \r\n new_lambda_ord.append(new_lambda_ord_j)\r\n \r\n return new_lambda_ord\r\n \r\n\r\n# This last function could be refactored with bin_params_GLLVM as they are very similar\r\ndef cont_params_GLLVM(y_cont, lambda_cont_old, ps_y, pzl1_ys, zl1_s, AT,\\\r\n tol = 1E-5, maxstep = 100):\r\n ''' Determine the GLLVM coefficients related to binomial coefficients by \r\n optimizing each column coefficients separately.\r\n y_cont (numobs x nb_bin nd-array): The binomial data\r\n lambda_cont_old (list of nb_ord_j x (nj_ord + r1) elements): The continuous coefficients\r\n of the previous iteration\r\n ps_y ((numobs, S) nd-array): p(s | y) for all s in Omega\r\n pzl1_ys (nd-array): p(z1 | y, s)\r\n zl1_s ((M1, r1, s1) nd-array): z1 | s \r\n AT ((r1 x r1) nd-array): Var(z1)^{-1/2}\r\n tol (int): Control when to stop the optimisation process\r\n maxstep (int): The maximum number of optimization step.\r\n ----------------------------------------------------------------------\r\n returns (list of nb_bin_j x (nj_ord + r1) elements): The new bin coefficients\r\n '''\r\n \r\n r0 = zl1_s.shape[1] \r\n S0 = zl1_s.shape[2] \r\n nb_cont = y_cont.shape[1]\r\n \r\n new_lambda_cont = [] \r\n \r\n for j in range(nb_cont):\r\n if j < r0 - 1: # Constrained columns\r\n nb_constraints = r0 - j - 1\r\n lcs = np.hstack([np.zeros((nb_constraints, j + 2)), np.eye(nb_constraints)])\r\n linear_constraint = LinearConstraint(lcs, np.full(nb_constraints, 0), \\\r\n np.full(nb_constraints, 0), keep_feasible = True)\r\n \r\n opt = minimize(cont_loglik_j, lambda_cont_old[j] , \\\r\n args = (y_cont[:,j], zl1_s, S0, ps_y, pzl1_ys), \r\n tol = tol, method='trust-constr', jac = cont_grad_j, \\\r\n constraints = linear_constraint, hess = '2-point', \\\r\n options = {'maxiter': maxstep})\r\n \r\n else: # Unconstrained columns\r\n opt = minimize(cont_loglik_j, lambda_cont_old[j], \\\r\n args = (y_cont[:,j], zl1_s, S0, ps_y, pzl1_ys), \\\r\n tol = tol, method='BFGS', jac = cont_grad_j, \r\n options = {'maxiter': maxstep})\r\n\r\n res = opt.x \r\n if not(opt.success):\r\n res = lambda_cont_old[j]\r\n warnings.warn('One of the continuous optimisations has failed', RuntimeWarning)\r\n \r\n new_lambda_cont.append(deepcopy(res)) \r\n\r\n # Last identifiability part\r\n if nb_cont > 0:\r\n new_lambda_cont = np.stack(new_lambda_cont)\r\n new_lambda_cont[:,1:] = new_lambda_cont[:,1:] @ AT[0] \r\n \r\n return new_lambda_cont\r\n\r\n\r\ndef categ_params_GLLVM(y_categ, nj_categ, lambda_categ_old, ps_y, pzl1_ys, zl1_s, AT,\\\r\n tol = 1E-5, maxstep = 100):\r\n ''' Determine the GLLVM coefficients related to categ coefficients by \r\n optimizing each column coefficients 
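# --- editor's note: illustrative sketch, not part of the original record.
# ord_params_GLLVM() above keeps ordinal thresholds ordered via a
# LinearConstraint whose rows encode x[k] - x[k+1] <= 0; a tiny self-contained
# instance of the same trick (target values are made up):
import numpy as np
from scipy.optimize import LinearConstraint, minimize

target = np.array([3.0, 1.0, 2.0])           # unordered target
A = np.array([[1.0, -1.0, 0.0],
              [0.0, 1.0, -1.0]])             # x[k] - x[k+1] <= 0
lc = LinearConstraint(A, -np.inf, 0.0, keep_feasible=True)
res = minimize(lambda x: np.sum((x - target) ** 2),
               np.array([0.0, 1.0, 2.0]),    # feasible (ordered) start
               method="trust-constr", constraints=lc,
               jac=lambda x: 2 * (x - target))
print(res.x)   # the ordered projection of target, here ~ [2, 2, 2]
# --- end of editor's note ---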
separately.\r\n y_categ (list of numobs x nb_categ nd-array): The categorical data\r\n nj_categ (list of int): The number of modalities for each categorical variable\r\n lambda_categ_old (list of nb_categ_j x (nj_categ + r1) elements): The categorical coefficients\r\n of the previous iteration\r\n ps_y ((numobs, S) nd-array): p(s | y) for all s in Omega\r\n pzl1_ys (nd-array): p(z1 | y, s)\r\n zl1_s ((M1, r1, s1) nd-array): z1 | s \r\n AT ((r1 x r1) nd-array): Var(z1)^{-1/2}\r\n tol (int): Control when to stop the optimisation process\r\n maxstep (int): The maximum number of optimization step.\r\n ----------------------------------------------------------------------\r\n returns (list of nb_ord_j x (nj_ord + r1) elements): The new ordinal coefficients\r\n '''\r\n #****************************\r\n # Categorical link parameters\r\n #**************************** \r\n \r\n r0 = zl1_s.shape[1] \r\n S0 = zl1_s.shape[2] \r\n nb_categ = len(nj_categ)\r\n \r\n new_lambda_categ = []\r\n \r\n for j in range(nb_categ):\r\n enc = OneHotEncoder(categories='auto')\r\n y_oh = enc.fit_transform(y_categ[:,j][..., n_axis]).toarray() \r\n \r\n opt = minimize(categ_loglik_j, lambda_categ_old[j], \\\r\n args = (y_oh, zl1_s, S0, ps_y, pzl1_ys, nj_categ[j]), \\\r\n tol = tol, method='BFGS', jac = categ_grad_j, \r\n options = {'maxiter': maxstep})\r\n \r\n res = opt.x\r\n if not(opt.success): # If the program fail, keep the old estimate as value\r\n res = lambda_categ_old[j]\r\n warnings.warn('One of the categorical optimisations has failed', RuntimeWarning)\r\n \r\n res = res.reshape(nj_categ[j], r0 + 1, order = 'C')\r\n\r\n # Ensure identifiability for Lambda_j\r\n new_lambda_categ_j = res[:, -r0: ] @ AT[0]\r\n new_lambda_categ_j = np.hstack([deepcopy(res[:, 0][..., n_axis]), new_lambda_categ_j]) \r\n new_lambda_categ.append(new_lambda_categ_j)\r\n \r\n return new_lambda_categ","sub_path":"M1DGMM/MCEM_GLLVM.py","file_name":"MCEM_GLLVM.py","file_ext":"py","file_size_in_byte":16969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"326057982","text":"from sys import argv\n\ndef suma(*numeros):\n resultado = 0\n for numero in numeros:\n resultado = resultado + numero\n return resultado\n\ndef resta(*numeros):\n resultado = numeros[0]\n for numero in numeros[1:]:\n resultado = resultado - numero\n return resultado\n\ndef multiplicacion(*numeros):\n resultado = 1\n for numero in numeros:\n resultado = resultado * numero\n return resultado\n\ndef division(*numeros):\n resultado = numeros[0]\n try:\n for numero in numeros[1:]:\n resultado = resultado / numero\n return resultado\n except ZeroDivisionError:\n print(\"Error: División por 0\")\n return 0\n\n# Eliminar el primer argumento y tomar solo los numeros\nargumentos = argv[2:]\noperacion = argv[1]\n\nnumeritos = []\n\nfor argumento in argumentos:\n numeritos.append(int(argumento))\n\nif operacion == \"+\":\n print(suma(*numeritos))\nelif operacion == \"-\":\n print(resta(*numeritos))\nelif operacion == \"*\":\n print(multiplicacion(*numeritos))\nelif operacion == \"/\":\n print(division(*numeritos))\nelse:\n print(\"Operación invalida\")","sub_path":"sesion06/operaciones_cli.py","file_name":"operaciones_cli.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"120924435","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 18 20:58:10 2018\n\n@author: Dean\n\"\"\"\n\ndef getdp(str1,str2):\n if not str1 or not 
str2:\n return None\n len1 = len(str1)\n len2 = len(str2)\n #dp[i][j]表示str1[0..i]和str2[0..j]的最长公共子序列的长度\n dp = [[0 for j in range(len2)] for i in range(len1)]\n dp[0][0] = 1 if str1[0] == str2[0] else 0\n for j in range(1,len2):\n if dp[0][j-1] == 1 or str1[0] == str2[j]:\n dp[0][j] = 1\n for i in range(1,len1):\n if dp[i-1][0] == 1 or str1[i] == str2[0]:\n dp[i][0] = 1\n \n for i in range(1,len1):\n for j in range(1,len2):\n dp[i][j] = max(dp[i-1][j],dp[i][j-1])\n if str1[i] == str2[j]:\n dp[i][j] = max(dp[i][j], dp[i-1][j-1] + 1)\n return dp\n\ndef lcs(str1,str2):\n if not str1 or not str2:\n return None\n len1 = len(str1)\n len2 = len(str2)\n dp = getdp(str1,str2)\n result = []\n N = dp[len1-1][len2-1]\n m = len1 - 1\n n = len2 - 1\n while(N > 0):\n if n >0 and dp[m][n] == dp[m][n-1]:\n n -= 1\n elif m > 0 and dp[m][n] == dp[m-1][n]:\n m -= 1\n else:\n result.insert(0,str1[m])\n N -= 1\n m -= 1\n n -= 1\n \n return \"\".join(result)\n\nif __name__ == \"__main__\":\n str1 = \"1A2C3D4B56\"\n str2 = \"B1D23CA45B6A\"\n print(lcs(str1,str2))\n \n \n \n \n \n \n \n\n\n\n\n","sub_path":"算法题/程序员面试指南/python/递归和动态规划/最长公共子序列问题.py","file_name":"最长公共子序列问题.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"459453781","text":"\"\"\"\nFilter out raw log records that would cause low Radar availability by provider.\n\nExample::\n\n $ source ~/pythonenv/analyze_cedexis_logs_p3/bin/activate\n $ python scripts/analyze_provider_availability.py --input-dir ~/work/cedexis/logs/jsdelivr/radar-pingdom --provider-id 275 --output-file /tmp/output.json\n\"\"\"\n\nimport sys\nimport argparse\nimport os\nimport datetime\nimport logging\nimport tempfile\nfrom pprint import pprint\nimport json\n\nlogger = logging.getLogger(__name__)\n\nimport common.processing\nimport common.app_logging\nimport common\n\nargs = None\naggregated = {\n 'market': {},\n 'country': {},\n 'asn': {},\n}\n\ndef proc_radar_file(row_data):\n\n def _nearest_min(value):\n result = datetime.datetime.utcfromtimestamp(value)\n return result.strftime('%Y-%m-%d %H:%M:00')\n\n def _increment(level, value, status):\n data = aggregated[level][value] = aggregated[level].get(value, {})\n status_data = data[status] = data.get(status, {})\n if report_timestamp not in status_data:\n status_data[report_timestamp] = 0\n status_data[report_timestamp] += 1\n\n global aggregated\n #print(row_data)\n\n if not args.provider_id is None:\n provider_id = row_data[common.legacy_fields['provider_id']['index']]\n if provider_id != args.provider_id:\n return\n\n if not args.country_num is None:\n resolver_country_num = row_data[common.legacy_fields['resolver_country_num']['index']]\n if resolver_country_num != args.country_num:\n return\n\n response_code = int(row_data[common.legacy_fields['response_code']['index']])\n avail = 'available'\n if 0 != response_code:\n avail = 'not available'\n\n report_timestamp = _nearest_min(int(row_data[common.legacy_fields['report_timestamp']['index']]))\n resolver_market_num = row_data[common.legacy_fields['resolver_market_num']['index']]\n resolver_country_num = row_data[common.legacy_fields['resolver_country_num']['index']]\n resolver_asn = row_data[common.legacy_fields['resolver_asn']['index']]\n #print(report_timestamp, provider_id, response_code, resolver_market_num, resolver_country_num, resolver_asn)\n\n _increment('market', resolver_market_num, avail)\n _increment('country', resolver_country_num, avail)\n _increment('asn', 
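# --- editor's note: illustrative sketch, not part of the original record.
# The LCS record above fills dp[i][j] with the longest-common-subsequence
# length of str1[0..i] / str2[0..j] and then walks the table backwards to
# rebuild the sequence; a compact length-only variant with a sentinel row
# and column avoids the record's separate first-row/first-column setup:
def lcs_len(a, b):
    dp = [[0] * (len(b) + 1) for _ in range(len(a) + 1)]
    for i, ca in enumerate(a, 1):
        for j, cb in enumerate(b, 1):
            dp[i][j] = (dp[i - 1][j - 1] + 1 if ca == cb
                        else max(dp[i - 1][j], dp[i][j - 1]))
    return dp[-1][-1]

print(lcs_len("1A2C3D4B56", "B1D23CA45B6A"))   # 6, e.g. "123456"
# --- end of editor's note ---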
resolver_asn, avail)\n #pprint(aggregated)\n\ndef main():\n global args\n\n log_file_path = os.path.join(tempfile.gettempdir(), 'analyze_provider_availability.log')\n if os.path.isfile(log_file_path):\n os.unlink(log_file_path)\n common.app_logging.setup_logging(log_file_path)\n\n arg_parser = argparse.ArgumentParser()\n arg_parser.add_argument('--input-dir')\n arg_parser.add_argument('--input-file')\n arg_parser.add_argument('--provider-id')\n arg_parser.add_argument('--max-files', type=int)\n arg_parser.add_argument('--output-file', '-o')\n arg_parser.add_argument('--country-num')\n args = arg_parser.parse_args()\n print(args)\n\n kwargs = {\n 'max_files': args.max_files,\n 'legacy_processing_fn': proc_radar_file,\n }\n\n if not args.input_dir is None:\n kwargs['input_dir'] = args.input_dir\n elif not args.input_file is None:\n kwargs['input_file'] = args.input_file\n\n common.processing.go(**kwargs)\n pprint(aggregated)\n\n if not args.output_file is None:\n with open(args.output_file, 'w') as fp:\n json.dump(aggregated, fp, sort_keys=True, indent=4, separators=(',', ': '))\n\nif __name__ == '__main__':\n print(sys.version)\n main()\n","sub_path":"scripts/analyze_provider_availability.py","file_name":"analyze_provider_availability.py","file_ext":"py","file_size_in_byte":3548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"496979539","text":"from openpyxl import load_workbook\nfrom openpyxl import Workbook\nimport datetime\nwb=load_workbook('courses.xlsx')\nwc = wb.create_sheet(title='combine')\nws=wb['students']\nwt=wb['time']\ndef combine():\n\twc.append(['创建时间', '课程名称', '学习人数', '学习时间'])\n\tfor i in ws.values:\n\t\tif i[1] != '课程名称':\n\t\t\tfor j in wt.values:\n\t\t\t\tif i[1] == j[1]:\n\t\t\t\t\twc.append(list(i) + [j[2]])\n\twb.save('sourse')\n\ndef split():\n\twc = wb['combine']\n\tyears = {}\n\tfor time in wc.values:\n\t\tif time[1] != '课程名称':\n\t\t\tyear = time[0].strftime('%Y')\n\t\t\tif year in years.keys():\n\t\t\t\tyears[year].append(time)\n\t\t\telse:\n\t\t\t\tyears[year] = [time]\n\tfor item in years.keys():\n\t\twn = Workbook()\n\t\twn.remove(wn.active)\n\t\twn2 = wn.create_sheet(title=item)\n\t\tfor row in years[item]:\n\t\t\twn2.append(row)\n\t\twn.save(item + '.xlsx')\n\nif __name__==\"__main__\":\n\tcombine()\n\tsplit()","sub_path":"challenge11/challenge.py","file_name":"challenge.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"569413160","text":"import math\nfrom SiCmaker import SiC\nfrom numpy import array\npattern = \"abcb\"\n\n#a1 = 3.070027\n#a2 = 5.31744299\n#a3 = 2.5066667\nd = 3.070027\na1 = d\na2 = math.sqrt(3)*d\na3 = math.sqrt(6)/3.0*d\nslope = math.radians(4.0)\n\nny, nz = 5,2\nnx = int(math.ceil(len(pattern)*nz*a3/(2.0*math.sin(slope)*math.cos(slope)*a1)))\ndz = nz*a3*len(pattern)/2.0\nlx = dz/math.sin(slope)\nlz = dz/math.cos(slope)\n\nRy = array([[math.cos(slope),0,math.sin(slope)],[0,1,0],[-1*math.sin(slope),0,math.cos(slope)]])\n\nwafer = SiC(pattern, nx, ny, nz)\nsiArry = wafer.getSiArry()\ncArry = wafer.getCArry()\n\nrot_siArry = [(si-array([0,0,dz])).dot(Ry) for si in siArry]\nrot_cArry = [(c-array([0,0,dz])).dot(Ry) for c in cArry]\nnew_siArry = []\nnew_cArry = []\n\nfor si in rot_siArry:\n\tif (si[0] >= 0)and(si[0]<=lx)and(si[2]>=0)and(si[2]<=lz):\n\t\tnew_siArry.append(si)\nfor c in rot_cArry:\n\tif (c[0] 
>=0)and(c[0]<=lx)and(c[2]>=a3/4)and(c[2]<=lz+a3/4):\n\t\tnew_cArry.append(c)\n\nwafer.setSiArry(new_siArry)\nwafer.setCArry(new_cArry)\nwafer.setBox(lx, a2*ny, lz)\n\n#wafer.outLammps()\nwafer.outAtomeye()\n","sub_path":"4depi_wafer2.py","file_name":"4depi_wafer2.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"629447629","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport numpy.linalg as alg\nimport pickle\nimport scipy, scipy.optimize\nimport scipy.special as special\nimport pandas as pd\nimport clipboard\n#Get the i-th bit from 'x'\ndef get_bit(x, i):\n    return (x >> i)&1\n#Returns a new integer with the i-th bit = 1\ndef set_bit(value, i):\n    return value | (1<<i)\n#Counts the set bits of 'x' over the first 'n' positions\ndef count_bits(x, n):\n    cnt = 0\n    for i in range(n):\n        cnt += (x >> i)%2\n    return cnt\n#Computes the vector local z-magnetizations on the state 'state'\ndef sigma_z(state, n_particles):\n    mgn = np.zeros(n_particles)\n    for i in range(n_particles):\n        if(get_bit(state,i)==0):\n            mgn[i] = + 1\n        else:\n            mgn[i] = - 1\n    return mgn\n#Computes the total z-magnetization on the state 'state'\ndef magnetization_z(state, n_particles):\n    mgn = 0\n    for i in range(n_particles):\n        if(get_bit(state,i)==0):\n            mgn = mgn + 1\n        else:\n            mgn = mgn - 1\n    return mgn\n#Returns all magnetizations from all the possible 'n_particles' many body states\ndef create_magnetizations(n_particles):\n    sz = 2**n_particles\n    mgns = np.zeros(sz)\n    for state in range(sz):\n        mgns[state] = magnetization_z(state, n_particles)\n    return mgns\n#Local spin for each state and site\ndef get_spin_map(n_particles):\n    full_sz = 1 << n_particles\n    bit_map = np.zeros((full_sz, n_particles))\n    for state in range(full_sz):\n        bit_map[state,:] = np.array([(state>>i) & 1 for i in range(n_particles)])\n    return 1-2*bit_map\n#Given x_i returns d_ij = x_i-x_j\ndef diff_matrix(x):\n    temp = np.repeat(x.reshape(1,-1), len(x), axis=0)\n    return (temp - x.reshape(-1,1)).T\n#Simple curve intersection -> best estimate given by data\ndef curve_intersection(f,g):\n    delta = np.abs(f-g)\n    return np.argmin(delta)\ndef random_heseinberg_diag_sector(n_particles,h_strength, n_samples, times, verbosity=1, verbosity_s=1, pbc=True, sector_magn=0, compute_Ct=False):\n    ####################################################\n    ##############       HAMILTONIAN      ##############\n    ####################################################\n\n    #recovers the magnetizations for all possible many body states of 'n_particles'\n    mgns = create_magnetizations(n_particles)\n    #takes all the states (sector) with a given magnetization. Default = 0\n    
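#e.g. with n_particles=2 and sector_magn=0 the sector holds the states 0b01 and 0b10\n    #(integers 1 and 2), i.e. one spin up and one spin down\n    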
sector = np.argwhere(mgns==sector_magn).reshape(-1).astype('int')\n    #amount of states in the chosen sector\n    sz = len(sector)\n    #dictionary to map sector states to indices\n    sect_dict = {sector[i]: i for i in range(sz)}\n    #create the Hamiltonian using only sector states\n    H0 = np.zeros((sz,sz))\n    #decide whether to use PBC or OBC\n    if(pbc):\n        max_site = n_particles\n    else:\n        max_site = n_particles-1\n    spin_map = get_spin_map(n_particles)\n    #for each state in the sector it contains the local magnetizations\n    spin_sector = spin_map[sector,:]\n    #I need to address both the site and the index -> same values as in 'sect_dict'\n    for idx,state in enumerate(sector):\n        #build the zz interaction part of the Hamiltonian\n        for site in range(max_site):\n            next_site = (site+1)%n_particles\n            #if site and the next have same magnetization we have a positive contribution (assuming J_zz==1)\n            if(get_bit(state, site)==get_bit(state,next_site)):\n                H0[idx, idx] += 1/4\n            #otherwise it is negative\n            else:\n                H0[idx, idx] += -1/4\n\n        #build the xy interaction part of the Hamiltonian\n        for i in range(max_site):\n            #next particle site\n            j = (i+1)%n_particles\n            #if the state bits i and i+1 are different the contribution is non-zero\n            if(get_bit(state,i) != get_bit(state,j)):\n                #i need to flip the two spins; the flipped state stays in the sector and\n                #the (S+S- + S-S+)/2 term gives an off-diagonal 1/2 (amplitude assumed)\n                mask = (1<<i)|(1<<j)\n                H0[idx, sect_dict[state ^ mask]] += 1/2\n\n    #number of random-field strengths to probe\n    n_fields = len(h_strength)\n    #keep only the central eigenstates -> following original paper we focus on the middle one third\n    start_idx = int(sz/3)\n    stop_idx = int(2*sz/3)\n\n    ####################################################\n    ##############     GLOBAL RESULTS     ##############\n    ####################################################\n\n    #spectrum for all samples and fields\n    E = np.zeros((n_fields, n_samples, stop_idx-start_idx))\n    #local magnetizations\n    m = np.zeros((n_fields, n_samples, stop_idx-start_idx, n_particles))\n\n    ####################################################\n    ##############     STATE RESULTS      ##############\n    ####################################################\n    #alternating sign state\n    #-+-+-+...-+\n    state0 = 0\n    for i in range(n_particles):\n        if(i%2==1):\n            state0 += 1<<i\n    #psi0 is this product state written in the sector basis\n    psi0 = np.zeros(sz)\n    psi0[sect_dict[state0]] = 1.0\n    #plane-wave coefficients used by the spin-wave factor below (normalization assumed)\n    wave_cfs = np.exp(2.0j*np.pi*np.arange(n_particles)/n_particles)/np.sqrt(n_particles)\n    #observables: inverse participation ratio, spin-wave factor and two-point correlator\n    ipr = np.zeros((n_fields, n_samples))\n    M_wave = np.zeros((n_fields, n_samples))\n    C_t = np.zeros((n_fields, n_samples, len(times)))\n    #cached einsum contraction paths\n    path = None\n    c_path = None\n\n    for k, strg in enumerate(h_strength):\n        if(verbosity > 0 and k % verbosity == 0):\n            print((\"%2.f\" % (100*k/n_fields)) + \"%\")\n\n        for s in range(n_samples):\n            if(n_particles > 10 and verbosity_s > 0 and s % verbosity_s == 0):\n                print((\"Sample: %2.f\" % (100*s/n_samples)) + \"%\")\n            #the random field\n            z_field = np.random.rand(n_particles)*(2*strg)-strg\n            #start from the deterministic hamiltonian\n            H = H0.copy()\n            #loop over all states in the sector and their index\n            for idx,state in enumerate(sector):\n                #the function sigma_z recovers all local magnetizations\n                H[idx, idx] += 0.5*np.sum(sigma_z(state, n_particles)*z_field) #1/2 sum_i sigma_i^z h_i\n            spectrum, vecs = alg.eigh(H)\n            c_vecs = np.conj(vecs) #actually, vecs are real since H is real and symmetric\n            #matrix elements of the local sigma_z operators in the eigenbasis\n            sjz_mn = np.einsum('sm,sn,sj', c_vecs, vecs, spin_sector, optimize=['einsum_path',(0,2),(0,1)]) #indices are nmj\n            #psi0 in the eigenbasis\n            coefs = np.matmul(c_vecs.T,psi0)\n            #ipr of the state\n            ipr[k,s] = 1/np.sum(np.abs(coefs)**4)\n            if(compute_Ct):\n                if(n_particles > 10 and verbosity_s > 0 and s % verbosity_s == 0):\n                    print(\"Time evolution...\")\n                for t_idx,t in enumerate(times):\n                    #time evolution\n                    u_nt = np.exp(-1.0j*spectrum*t)\n                    if(c_path is None):\n                        c_path, _ = np.einsum_path('jlm, jln, l', sjz_mn, sjz_mn, np.conj(u_nt))\n\n                    #two-point function of the local spins at time t\n                    two_point = np.einsum('jlm, jln, l', sjz_mn, sjz_mn, np.conj(u_nt), optimize=c_path)/n_particles #indices are nm\n                    if(path is None):\n                        path, _ = np.einsum_path('n,m,nm,n', coefs, np.conj(coefs), two_point, u_nt)\n                    C_t[k,s,t_idx] = 
np.abs(np.einsum('n,m,nm,n', coefs, np.conj(coefs), two_point, u_nt, optimize=path))\n #stores spectrum\n E[k,s,:] = spectrum[start_idx:stop_idx]\n\n #probabilities\n p = np.abs(vecs)**2\n\n #computes local magnetizations\n local_m = np.matmul(spin_sector.T, p).T\n m[k, s, :, :] = local_m[start_idx:stop_idx,:]\n\n #wave of spin part (see paper)\n f_njk = np.einsum('sn,sj,sk',p, spin_sector, spin_sector) #(L,L,sz)\n f_nj = np.einsum('sn,sj', p, spin_sector) #(L,sz)\n M_wave[k,s] = np.mean(1-np.abs(np.matmul(wave_cfs, f_nj))**2/np.einsum('jkn,j,k', f_njk, wave_cfs, np.conj(wave_cfs))).real\n\n \n return E,m,times,ipr, M_wave, C_t\n\nLOAD_SUMMARY = False\nSAVE_SUMMARY = False\ncompute_Ct = False #leave false for just computing static quantities -> much faster\nt_steps = 50\ntimes = np.logspace(-2,4,t_steps)\n\nif(LOAD_SUMMARY):\n summary = pickle.load(open('summary.pkl', 'rb'))\n if(len(summary)==8):\n [h_strg, L_s, n_samples, dm_s, r_s, IPR_s, M_s, C_s] = summary\n else:\n [h_strg, L_s, n_samples, dm_s, r_s, IPR_s, M_s] = summary\n print('Samples present:', n_samples)\nelse:\n #actual work\n h_strg = [ 0.6, 1.0, 2.0, 2.7, 3.6, 5.0, 8.0 ]\n #h_strg = np.linspace(2.7, 3.7, 10)\n print('Field strengths to probe:')\n print(h_strg)\n L_s = [8,10,12,14]\n n_samples = [10000,10000,1000,50]\n dm_s = []\n r_s = []\n IPR_s = []\n M_s = []\n C_s = []\n for idx, L in enumerate(L_s):\n print('L:',L)\n E,m,times,ipr, M_wave, C_t = random_heseinberg_diag_sector(L,h_strg,n_samples[idx],times,pbc=True,verbosity=1, compute_Ct=compute_Ct)\n deltas = np.abs(np.diff(E, axis=2))\n #these are delta^n and delta^(n+1) where n is the eigenstate\n seq1, seq2 = deltas[:,:,0:-1],deltas[:,:,1:]\n #ratios\n r = np.minimum(seq1, seq2)/np.maximum(seq1,seq2)\n #average ratios (we average over the realizations of disorder and on the eigenstates)\n avg_r = np.mean(r, axis=(1,2))\n avg_dm = np.mean(np.abs(np.diff(m, axis=2)), axis=(1,2,3))\n r_s.append(avg_r)\n dm_s.append(avg_dm)\n IPR_s.append(ipr)\n M_s.append(M_wave)\n C_s.append(C_t)\n print('--------------------')\n if(SAVE_SUMMARY):\n summary = [h_strg, L_s, n_samples, dm_s, r_s, IPR_s, M_s, C_s]\n pickle.dump(summary, open('summary.pkl', 'wb'))\n\n\n\n\nif(compute_Ct):\n f_t = lambda t,a,tau,w1,phi,b,z,c,eta,w2, : a*np.exp(-t/tau)*np.cos(w1*t+phi)+b*(t**(-z))*(1+c*(t**(-eta))*np.sin(w2*t+phi))\n fig, ax = plt.subplots(figsize=(16,10), nrows=len(L_s))\n try:\n len(ax)\n except:\n ax = [ax]\n curve_fits = []\n error_fits = []\n for idx, L in enumerate(L_s):\n avg_Ct = np.mean(C_s[idx], axis=1)\n for idx2, h in enumerate(h_strg):\n ax[idx].plot(times, avg_Ct[idx2,:], '-o', label='h: %2.1f' % h)\n try:\n p_opt, p_cov = scipy.optimize.curve_fit(f_t, times, avg_Ct[idx2,:], bounds=(0,[1,np.inf,np.inf,2*np.pi,1,np.inf,1,np.inf,np.inf]), maxfev=10000)\n p_err = np.sqrt(np.diag(p_cov))\n curve_fits.append(np.concatenate(([L,h], np.array(p_opt))))\n error_fits.append(np.concatenate(([L,h], p_err)))\n except:\n pass\n ax[idx].set_xscale('log')\n ax[idx].set_yscale('log')\n ax[idx].grid()\n ax[idx].legend()\n ax[idx].set_xlabel('t')\n ax[idx].set_ylabel('$C_t, L=%i$'%L)\n plt.savefig('Images/corr.png')\n plt.show()\n curve_fits = pd.DataFrame(data=curve_fits, columns=['L','h','a','tau','w1','phi','b','z','c','eta','w2'])\n error_fits = pd.DataFrame(data=error_fits, columns=['L','h','a','tau','w1','phi','b','z','c','eta','w2'])\n #clipboard.copy(curve_fits.to_latex(index=False)+'\\n\\r' +error_fits.to_latex(index=False))\n #exit()\n\n#IPR plot\nfig, ax = plt.subplots()\nfor idx, L in 
enumerate(L_s):\n ax.plot(h_strg, np.mean(IPR_s[idx],axis=1)/scipy.special.binom(L,L/2), '-o', label='L: %i' %L)\nplt.grid()\nplt.xlabel('$h$')\nplt.ylabel('$Inverse Participation Ratio$')\nplt.legend()\n#plt.title('IPR for a $T=\\\\infty$ state')7\nplt.savefig('Images/ipr.png')\nplt.show()\n\n#DeltaM plot\navg_dm = np.array(dm_s)\nfig, ax = plt.subplots()\nfor idx2, h in enumerate(h_strg):\n ax.plot(L_s, np.log(avg_dm[:,idx2]) , '-o', label='h: %2.1f' % h)\nplt.grid()\nplt.xlabel('$L$')\nplt.ylabel('$\\log < |m^{(n+1)}_{i \\\\alpha}-m^{(n)}_{i \\\\alpha}|>$')\nplt.legend()\nplt.savefig('Images/dm.png')\nplt.show()\n\n#Spin wave\nfig, ax = plt.subplots()\nfor idx, L in enumerate(L_s):\n ax.plot(h_strg, np.mean(M_s[idx],axis=1), '-o', label='L: %i' %L)\nplt.grid()\nplt.xlabel('$h$')\nplt.ylabel('$< f^{(n)}_\\\\alpha >$')\nplt.legend()\nplt.savefig('Images/m_factor.png')\nplt.show()\n\n#DeltaE ratios\nfig, ax = plt.subplots()\nfor idx, L in enumerate(L_s):\n ax.plot(h_strg, r_s[idx], '-o', label='L: %i' %L)\nplt.grid()\nplt.xlabel('$h$')\nplt.ylabel(\"$< r^{(n)}_\\\\alpha >$\")\nplt.legend()\nplt.savefig('Images/energy_ratio.png')\nplt.show()\n\n","sub_path":"FinalProject/heisenberg_sector.py","file_name":"heisenberg_sector.py","file_ext":"py","file_size_in_byte":12838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"299186600","text":"from flask import Flask\nfrom flask import render_template\nfrom deep_daze import Imagine\nfrom flask import request\nimport os\nimport threading\n\ndef threaded_function(requested_text):\n parent_dir = os.environ.get('STATIC_URL')\n path = os.path.join(parent_dir,requested_text)\n os.mkdir(path)\n os.chdir(path)\n imagine = Imagine(\n text=requested_text,\n save_every=4,\n save_progress=True,\n epochs=1,\n open_folder=False,\n )\n imagine()\n\napp = Flask(__name__, static_url_path='/static')\n\n@app.route(\"/\")\ndef index():\n return render_template('index.html')\n\n\n@app.route(\"/new_image\", methods=['GET', 'POST'])\ndef requests():\n text = request.form.get('text')\n print(text)\n x = threading.Thread(target=threaded_function, args=(text,))\n x.start()\n return 'Done!'","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"239667300","text":"# -*- coding: utf-8 -*-\nfrom django.db import models\nimport datetime\nimport json\nimport random\nimport calendar\n\n\nMAX_SIZE_IMG = 1048576\n\nmounths = {\n\t\"Січень\":1,\n\t\"Лютий\":2,\n\t\"Березень\":3,\n\t\"Квітень\":4,\n\t\"Травень\": 5,\n\t\"Червень\":6,\n\t\"Липень\":7,\n\t\"Серпень\":8,\n\t\"Вересень\":9,\n\t\"Жовтень\":10,\n\t\"Листопад\":11,\n\t\"Грудень\":12}\n\nyears = 2015\n\ndef iter():\n\ts = '{'\n\tfor year in xrange(years,years+2): \n\t\ts+='\"'+ str(year)+'\":{'\n\t\tfor mounth, id in mounths.items():\n\t\t\ts+=' \"'+str(id)+'\":' + str(list(random.choice((0,1)) for q in range(0,calendar.monthrange(year,id)[1] ) ))+','\n\t\ts=s[:-1]+\"},\"\n\ts=s[:-1]+\"}\"\n\treturn s\n\n\n# Create your models here.\nclass Student(models.Model):\n\t\"\"\"Student model\"\"\"\n\tclass Meta(object):\n\t\tverbose_name = u'Студент'\n\t\tverbose_name_plural = u\"Студенти\"\n\n\tfirst_name = models.CharField(\n\t\tmax_length = 256,\n\t\tblank = False,\n\t\tverbose_name = u\"Ім'я\")\n\n\tlast_name = models.CharField(\n\t\tmax_length = 256,\n\t\tblank = False,\n\t\tverbose_name = u'Прізвище')\n\n\tmiddle_name = 
models.CharField(\n\t\tmax_length = 256,\n\t\tblank = True,\n\t\tverbose_name = u\"По-батькові\",\n\t\tdefault = \"\")\n\n\tbirthday = models.DateField(\n\t\tblank = False,\n\t\tverbose_name = u\"Дата народження\",\n\t\tnull = True)\n\n\tphoto = models.ImageField(\n\t\tblank = True,\n\t\tverbose_name = u\"Фото\",\n\t\tnull = True,\n\t\tmax_length=MAX_SIZE_IMG)\n\n\tticket = models.CharField(\n\t\tmax_length = 256,\n\t\tblank = False,\n\t\tverbose_name = u\"Білет\")\n\n\tnotes = models.TextField(\n\t\tblank = True,\n\t\tverbose_name = u\"Додаткові нотатки\")\n\n\tstudent_group = models.ForeignKey('Group',\n\t\tverbose_name = u\"Група\",\n\t\tblank = False,\n\t\tnull = True,\n\t\ton_delete = models.PROTECT)\n\n\tvisit = models.TextField(\n\t\tblank = True,\n\t\tdefault=iter,\n\t\tverbose_name = u\"Відвідування\")\n\n\tdef _visit_json(self):\n\t\treturn json.loads(self.visit)\n\tvisit_json = property(_visit_json)\n\n\n\tentered = models.DateField(\n\t\tdefault = datetime.date(2015, 1, 1),\n\t\tverbose_name = u\"Дата вступу\",\n\t\tnull = True)\n\n\tdef _day_on_unix(self):\n\t\treturn self.entered.toordinal()\n\tday_on_unix = property(_day_on_unix)\n\n\n\tdef __unicode__(self):\n\t\treturn u\"%s %s\" % (self.first_name, self.last_name)","sub_path":"students/models/students.py","file_name":"students.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"199461923","text":"\"\"\"\n Copyright (c) 2023, NVIDIA CORPORATION.\n \n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport hugectr\nfrom mpi4py import MPI\n\nsolver = hugectr.CreateSolver(\n max_eval_batches=1,\n batchsize_eval=1024,\n batchsize=1024,\n lr=0.01,\n end_lr=0.0001,\n warmup_steps=8000,\n decay_start=48000,\n decay_steps=24000,\n vvgpu=[[0]],\n repeat_dataset=True,\n i64_input_key=True,\n)\nreader = hugectr.DataReaderParams(\n data_reader_type=hugectr.DataReaderType_t.Parquet,\n source=[\"./multi_cross/data/train/_file_list.txt\"],\n eval_source=\"./multi_cross/data/test/_file_list.txt\",\n check_type=hugectr.Check_t.Sum,\n slot_size_array=[10001, 10001, 10001, 10001],\n)\noptimizer = hugectr.CreateOptimizer(\n optimizer_type=hugectr.Optimizer_t.Adam,\n update_type=hugectr.Update_t.Local,\n beta1=0.9,\n beta2=0.999,\n epsilon=0.0000001,\n)\nmodel = hugectr.Model(solver, reader, optimizer)\nnum_gpus = 1\nworkspace_size_per_gpu_in_mb = int(40004 * 16 * 4 * 3 / 1000000) + 10\nmodel.add(\n hugectr.Input(\n label_dim=3,\n label_name=\"label\",\n dense_dim=3,\n dense_name=\"dense\",\n data_reader_sparse_param_array=[\n hugectr.DataReaderSparseParam(\n \"data1\",\n [1, 1, 1, 1],\n False,\n 4,\n )\n ],\n )\n)\nmodel.add(\n hugectr.SparseEmbedding(\n embedding_type=hugectr.Embedding_t.LocalizedSlotSparseEmbeddingHash,\n workspace_size_per_gpu_in_mb=workspace_size_per_gpu_in_mb,\n embedding_vec_size=16,\n combiner=\"mean\",\n sparse_embedding_name=\"sparse_embedding1\",\n bottom_name=\"data1\",\n optimizer=optimizer,\n )\n)\nmodel.add(\n 
hugectr.DenseLayer(\n        layer_type=hugectr.Layer_t.InnerProduct,\n        bottom_names=[\"dense\"],\n        top_names=[\"fc1\"],\n        num_output=16,\n    )\n)\nmodel.add(\n    hugectr.DenseLayer(\n        layer_type=hugectr.Layer_t.ReLU,\n        bottom_names=[\"fc1\"],\n        top_names=[\"relu1\"],\n    )\n)\nmodel.add(\n    hugectr.DenseLayer(\n        layer_type=hugectr.Layer_t.Interaction,\n        bottom_names=[\"relu1\", \"sparse_embedding1\"],\n        top_names=[\"interaction1\"],\n    )\n)\nmodel.add(\n    hugectr.DenseLayer(\n        layer_type=hugectr.Layer_t.InnerProduct,\n        bottom_names=[\"interaction1\"],\n        top_names=[\"fc4\"],\n        num_output=32,\n    )\n)\nmodel.add(\n    hugectr.DenseLayer(\n        layer_type=hugectr.Layer_t.ReLU,\n        bottom_names=[\"fc4\"],\n        top_names=[\"relu4\"],\n    )\n)\nmodel.add(\n    hugectr.DenseLayer(\n        layer_type=hugectr.Layer_t.InnerProduct,\n        bottom_names=[\"relu4\"],\n        top_names=[\"fc8\"],\n        num_output=3,\n    )\n)\nmodel.add(\n    hugectr.DenseLayer(\n        layer_type=hugectr.Layer_t.MultiCrossEntropyLoss,\n        bottom_names=[\"fc8\", \"label\"],\n        top_names=[\"loss\"],\n        target_weight_vec=[0.2, 0.4, 0.4],\n    )\n)\nmodel.compile()\nmodel.summary()\nmodel.graph_to_json(graph_config_file=\"/dump_infer/multi_cross_entropy_loss.json\")\n\nmodel.fit(\n    max_iter=1001,\n    display=100,\n    eval_interval=1000,\n    snapshot=1000,\n    snapshot_prefix=\"/dump_infer/multi_cross_entropy_loss\",\n)\n\nmodel.export_predictions(\n    \"/dump_infer/multi_cross_entropy_loss_pred_\" + str(1000),\n    \"/dump_infer/multi_cross_entropy_loss_label_\" + str(1000),\n)\n\n\nfrom hugectr.inference import InferenceModel, InferenceParams\nfrom mpi4py import MPI\nimport hugectr\nimport pandas as pd\nimport numpy as np\n\ninference_params = InferenceParams(\n    model_name=\"multi_cross_entropy_loss\",\n    max_batchsize=1024,\n    hit_rate_threshold=1.0,\n    dense_model_file=\"/dump_infer/multi_cross_entropy_loss_dense_1000.model\",\n    sparse_model_files=[\"/dump_infer/multi_cross_entropy_loss0_sparse_1000.model\"],\n    device_id=0,\n    use_gpu_embedding_cache=True,\n    cache_size_percentage=0.5,\n    use_mixed_precision=False,\n    i64_input_key=True,\n)\n\ninference_model = InferenceModel(\"/dump_infer/multi_cross_entropy_loss.json\", inference_params)\n\npreds = inference_model.predict(\n    num_batches=1,\n    source=\"./multi_cross/data/test/_file_list.txt\",\n    data_reader_type=hugectr.DataReaderType_t.Parquet,\n    check_type=hugectr.Check_t.Sum,\n    slot_size_array=[10001, 10001, 10001, 10001],\n)\n\nground_truth = np.loadtxt(\"/dump_infer/multi_cross_entropy_loss_pred_1000\")\npredictions = preds.flatten()\ndiff = predictions - ground_truth\nmse = np.mean(diff * diff)\nif mse > 1e-3:\n    raise RuntimeError(\n        \"Too large mse between multi_cross_entropy_loss inference and training: {}\".format(mse)\n    )\nelse:\n    print(\n        \"multi_cross_entropy_loss inference results are consistent with those during training, mse: {}\".format(\n            mse\n        )\n    )\n","sub_path":"test/inference/inference_model/multi_cross_entropy_loss.py","file_name":"multi_cross_entropy_loss.py","file_ext":"py","file_size_in_byte":5228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"258779292","text":"# -*- coding: utf-8 -*-\nimport db\nimport time\nimport logging\nfrom db import TableError\n\n\nclass ModelMetaclass(type):\n    def __new__(cls, name, bases, attrs):\n        # leave the Model base class itself untouched\n        if name == 'Model':\n            return super(ModelMetaclass, cls).__new__(cls, name, bases, attrs)\n\n        # collect the Field attributes declared on the class\n        mappings = dict()\n        primary_key = None\n        for k, v in attrs.iteritems():\n            if isinstance(v, Field):\n                if not v.name:\n
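                    # no explicit column name given, fall back to the attribute name\n                    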
v.name = k\n                # check duplicate primary_key\n                if v.primary_key:\n                    if primary_key:\n                        raise TableError('cannot define more than 1 primary key in one table: %s' % name)\n                    if v.updatable:\n                        v.updatable = False\n                    if v.nullable:\n                        v.nullable = False\n                    primary_key = v\n                mappings[k] = v\n        # ensure a primary_key exists\n        if not primary_key:\n            raise TableError('Primary key not defined in table: %s' % name)\n        # drop the Field attributes from the class so they do not shadow instance values\n        for k in mappings.iterkeys():\n            attrs.pop(k)\n\n        # add the __table__ attribute, defaulting to the lowercased class name\n        if '__table__' not in attrs.keys():\n            attrs['__table__'] = name.lower()\n\n        # attach the mapping metadata to the class\n        attrs['__mapping__'] = mappings\n        attrs['__primary_key__'] = primary_key\n\n        return super(ModelMetaclass, cls).__new__(cls, name, bases, attrs)\n\n    def ff(cls, name, bases, attrs):\n\n        # store all subclasses info:\n        if not hasattr(cls, 'subclasses'):\n            cls.subclasses = {}\n        if name not in cls.subclasses:\n            cls.subclasses[name] = name\n        else:\n            logging.warning('Redefine class: %s' % name)\n\n        logging.info('Scan ORMapping %s...' % name)\n        mappings = dict()\n        primary_key = None\n        for k, v in attrs.iteritems():\n            if isinstance(v, Field):\n                # this check lets a Field defined inside a Model omit an explicit name\n                if not v.name:\n                    v.name = k\n                logging.info('[MAPPING] Found mapping: %s => %s' % (k, v))\n                # check duplicate primary key:\n                if v.primary_key:\n                    if primary_key:\n                        raise TableError('Cannot define more than 1 primary key in class: %s' % name)\n                    if v.updatable:\n                        logging.warning('NOTE: change primary key to non-updatable.')\n                        v.updatable = False\n                    if v.nullable:\n                        logging.warning('NOTE: change primary key to non-nullable.')\n                        v.nullable = False\n                    primary_key = v\n                mappings[k] = v\n        # ensure a primary key exists:\n        if not primary_key:\n            raise TypeError('Primary key not defined in class: %s' % name)\n        for k in mappings.iterkeys():\n            attrs.pop(k)\n        if '__table__' not in attrs:\n            attrs['__table__'] = name.lower()\n        attrs['__mappings__'] = mappings\n        attrs['__primary_key__'] = primary_key\n        # attrs['__sql__'] = lambda self: _gen_sql(attrs['__table__'], mappings)\n        # for trigger in _triggers:\n        #     if not trigger in attrs:\n        #         attrs[trigger] = None\n        return type.__new__(cls, name, bases, attrs)\n\n\nclass Model(dict):\n    \"\"\"\n    Note: since we do not generate SQL from the Model, many fields are not spelled out on the Model.\n    \"\"\"\n    __metaclass__ = ModelMetaclass\n\n    def __init__(self, **kwargs):\n        super(Model, self).__init__(**kwargs)\n\n    def __getattr__(self, key):\n        try:\n            return self[key]\n        except KeyError:\n            raise AttributeError('Dict object has no attribute %s' % key)\n\n    def __setattr__(self, key, value):\n        self[key] = value\n\n    @classmethod\n    def find(cls, pk):\n        \"\"\"\n        get by primary_key\n        :param pk:\n        :return:\n        \"\"\"\n        d = db.select_first('select * from %s where %s = ?' % (cls.__table__, cls.__primary_key__.name), pk)\n        return cls(**d) if d else None\n\n
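    # a minimal usage sketch (hypothetical model and data; assumes the db module was initialized):\n    #   class User(Model):\n    #       id = IntegerField(primary_key=True)\n    #       email = EmailField()\n    #   u = User.find(1)                  # select * from user where id = ?\n    #   u2 = User.first('email = ?', 'x@y.com')\n\n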
    @classmethod\n    def first(cls, where, *args):\n        d = db.select_first('select * from %s where %s' % (cls.__table__, where), *args)\n        return cls(**d) if d else None\n\n\nclass Field(object):\n    \"\"\"\n    A homemade class describing one database column; it mainly exists so a Model could generate SQL directly.\n    \"\"\"\n    # _count = 0\n    def __init__(self, **kwargs):\n        self.name = kwargs.get('name', None)\n        self._default = kwargs.get('default', None)\n        self.primary_key = kwargs.get('primary_key', False)\n        self.nullable = kwargs.get('nullable', False)\n        self.updatable = kwargs.get('updatable', True)\n        self.insertable = kwargs.get('insertable', True)\n        self.ddl = kwargs.get('ddl', '')\n        # self._order = Field._count\n        # Field._count += 1\n\n    @property\n    def default(self):\n        \"\"\"\n        No setter is defined, so this property is read-only.\n        :return:\n        \"\"\"\n        d = self._default\n        return d() if callable(d) else d\n\n    def __str__(self):\n        \"\"\"\n        Return a human-readable description of the instance.\n        :return:\n        \"\"\"\n        s = ['<%s:%s,%s,default(%s),' % (self.__class__.__name__, self.name, self.ddl, self._default)]\n        self.nullable and s.append('N')\n        self.updatable and s.append('U')\n        self.insertable and s.append('I')\n        s.append('>')\n        return ''.join(s)\n\n\nclass StringField(Field):\n    \"\"\"\n    Stores char/varchar columns.\n    \"\"\"\n    def __init__(self, **kwargs):\n        if 'default' not in kwargs.keys():\n            kwargs['default'] = ''\n        if 'ddl' not in kwargs.keys():\n            kwargs['ddl'] = 'varchar(255)'\n        super(StringField, self).__init__(**kwargs)\n\n\nclass IntegerField(Field):\n    \"\"\"\n    Stores the integer types: tinyint, smallint, int, bigint.\n    Note: the maximum Python int is 2 ** 63 - 1.\n    \"\"\"\n    def __init__(self, **kwargs):\n        if 'default' not in kwargs.keys():\n            kwargs['default'] = 0\n        if 'ddl' not in kwargs.keys():\n            kwargs['ddl'] = 'bigint'\n        super(IntegerField, self).__init__(**kwargs)\n\n\nclass BooleanField(Field):\n    \"\"\"\n    Stores bool columns.\n    \"\"\"\n    def __init__(self, **kwargs):\n        if 'default' not in kwargs.keys():\n            kwargs['default'] = False\n        if 'ddl' not in kwargs.keys():\n            kwargs['ddl'] = 'bool'\n        super(BooleanField, self).__init__(**kwargs)\n\n\nclass TextField(Field):\n    \"\"\"\n    Stores text columns.\n    \"\"\"\n    def __init__(self, **kwargs):\n        if 'default' not in kwargs.keys():\n            kwargs['default'] = ''\n        if 'ddl' not in kwargs.keys():\n            kwargs['ddl'] = 'text'\n        super(TextField, self).__init__(**kwargs)\n\n\nclass BlobField(Field):\n    \"\"\"\n    Stores blob columns.\n    \"\"\"\n    def __init__(self, **kwargs):\n        if 'default' not in kwargs:\n            kwargs['default'] = ''\n        if 'ddl' not in kwargs:\n            kwargs['ddl'] = 'blob'\n        super(BlobField, self).__init__(**kwargs)\n\n\nclass PhoneField(StringField):\n    \"\"\"\n    Stores a phone number.\n    \"\"\"\n    def __init__(self, **kwargs):\n        # TODO add some phone-number validation\n        super(PhoneField, self).__init__(**kwargs)\n\n\nclass EmailField(StringField):\n    \"\"\"\n    Stores an email address.\n    \"\"\"\n    def __init__(self, **kwargs):\n        # TODO add some email-address validation\n        super(EmailField, self).__init__(**kwargs)\n","sub_path":"app/models/database/orm.py","file_name":"orm.py","file_ext":"py","file_size_in_byte":7695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"545274349","text":"import sys\nsys.path.append('/home/zhouyj/software/PAD')\nimport data_pipelines as dps\nimport pickers\n\nclass Config(object):\n    def __init__(self):\n\n        # Template cut\n        self.win_len = 45 # cut window length\n        self.t_blank = 15 # time before P in the cut window\n        self.min_sta = 4 # min sta num for a template event\n        self.chn_dict = {'ZSY':['HHE','HHN','HHZ'],\n                         'YN': ['HHE','HHN','HHZ'],\n                         'XLS':['HHE','HHN','HHZ']}\n        
self.get_data_dict = dps.Data(None).get_data_dict\n\n # MFT params\n self.temp_win_trig = [1., 9.] # win fr trig temp cut, rel p\n self.temp_win_p = [0.5,1.5] # win for p temp cut, rel p \n self.temp_win_s = [0, 2.] # win for s temp cut, rel s\n self.trig_thres = 0.25 # cc thres for det & mask\n self.mask_len = 1. # win len for cc mask\n self.det_gap = 5. # gap sec for detection\n self.ppk_win_p = [1., 1.] # win for p pick\n self.ppk_win_s = [2., 2.] # win for s pick\n\n # data process\n self.resp_dict = {'ZSY': 3.02e8,\n 'YN': 1.67785e9,\n 'XLS': 1/1.6e-9} # instrumental gain (cnt/m/s)\n self.samp_rate = 50\n self.freq_band = ['bandpass', [1., 40.]]\n self.picker = pickers.Trad_PS(self.samp_rate)\n self.num_workers = 5\n","sub_path":"run_msms/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"229093393","text":"list = [9, 4, 6, 2, 3, 7, 8, 5, 1, 0]\n\ndef swap(array, index1, index2):\n\ttemp = array[index1]\n\tarray[index1] = array[index2]\n\tarray[index2] = temp\n\n# selection sort\n# for i in range(0, len(list) - 1, 1):\n# \tminIndex = i\n# \tfor k in range(i + 1, len(list), 1):\n# \t\tif(list[k] < list[minIndex]):\n# \t\t\tminIndex = k\n# \tswap(list, i, i - 1)\n\n# insertion sort\n# for i in range(1, len(list), 1):\n# \tk = i\n# \twhile(list[k] < list[k - 1] and k > 0):\n# \t\tswap(list, k, k - 1)\n# \t\tk = k - 1\n\n# bubble sort\n# swapped = True\n# while(swapped):\n# \tswapped = False\n# \tfor i in range(0, len(list) - 1, 1):\n# \t\tif(list[i] > list[i + 1]):\n# \t\t\tswap(list, i, i+ 1);\n# \t\t\tswapped = True\n# \tprint(list)\n\n# shell sort\nfor j in [10, 4, 1]:\n\tfor i in range(1, len(list), j):\n\t\tk = i\n\t\twhile(list[k] < list[k - j] and k > 0):\n\t\t\tswap(list, k, k - j)\n\t\t\tk = k - j\n\n\nprint(list)","sub_path":"Must Know/Algorithms/sorts.py","file_name":"sorts.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"244551801","text":"root = '/opt/ycsb' # root of YCSB installation\n# TIME_DURATION = 20*60 #seconds\n\n\nconf = {\n ## Key YSCB properties: ##\n 'operationcount': 1000, # Total operations (writes or reads during a workload)\n 'insertcount': 1000, # How many objects to write during load phase\n 'recordcount': 1000, # Specifies the keyspaace for reads\n 'fieldcount': 30, #the value is a list of this many byte arrays\n 'fieldlength': 100, #either exactly or approximately this length depending on fieldlengthdistribution\n 'fieldlengthdistribution': 'constant', # can also be zipfian or uniform but zipfian isn't working currently\n # 'maxexecutiontime': 90, #Stops the run after a set amount of time\n\n\n #Only used for the query extension:\n # 'valuegenerator': 'queryable',\n # 'queryfield': 'field0',\n # 'cardinality': 1000 / 10, #set as a fraction of the insertcount\n\n 'threadcount': 5,\n 'workload': 'com.yahoo.ycsb.workloads.CoreWorkload',\n 'exportmeasurementsinterval': 30000,\n 'insertretrycount': 10,\n 'ignoreinserterrors': 'true',\n 'readretrycount': 1000,\n 'updateretrycount': 1000,\n 'measurementtype': 'timeseries',\n 'timeseries.granularity': 100,\n 'reconnectiontime': 1000,\n}\n\nworkloads = {\n 'A': { # Heavy Update workload\n 'name': 'workloada', # name of the workload to be part of the log files\n 'propertyfiles': [root + '/workloads/workloada'], #workload properties files\n },\n 'B': { # Mostly Read workload\n 'name': 
'workloadb',\n 'propertyfiles': [root + '/workloads/workloadb'],\n },\n 'C': { # Read Only workload\n 'name': 'workloadc',\n 'propertyfiles': [root + '/workloads/workloadc'],\n 'properties': { # additional workload properties, overrides the global ones\n #'maxexecutiontime': 60000,\n },\n },\n 'G': { # Mostly Update workload\n 'name': 'workloadg',\n 'propertyfiles': [root + '/workloads/workloadg'],\n }, 'H': { # Query workload\n 'name': 'workloadh',\n 'propertyfiles': [root + '/workloads/workloadh'],\n },\n}\n","sub_path":"fabfile/conf/workloads.py","file_name":"workloads.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"91179376","text":"# -*- coding: utf-8 -*-\nimport logging\nimport random\nimport json\nimport hashlib\nfrom urllib import quote\n\nimport MySQLdb\nimport scrapy\nfrom scrapy import Request\n\n\nlogging.basicConfig(\n # level=logging.DEBUG if env == \"dev\" else logging.INFO,\n level=logging.INFO,\n format='%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S'\n)\n\n\nclass BaseSpider(scrapy.Spider):\n\n def _format(self, _str):\n return _str.replace(\",\", \"\").replace(\"/\", \"\").replace(\"&\", \"\").replace(\" \", \"\")\n\n def get_is_equal(self, text1, text2):\n return (\n self._format(text1) == self._format(text2)\n )\n\n def __init__(self, *args, **kwargs):\n super(scrapy.Spider, self).__init__(*args, **kwargs)\n conn = MySQLdb.connect(\n host=\"192.168.0.13\",\n user=\"root\",\n passwd=\"root\",\n db=\"2017_dsp_release\",\n charset=\"utf8\"\n )\n cur = conn.cursor()\n # NOTE self.name\n #cur.execute(\"select album_name, artist_name, upc from {}_album\".format(self.name))\n #self.album_list = cur.fetchall()\n result_table_name = \"{}_crawler_result\".format(self.name)\n create_table_sql = \"\"\"\nCREATE TABLE `2017_dsp_release`.`{}` (\n`id` INT NOT NULL AUTO_INCREMENT,\n`album_name` VARCHAR(1024) NULL,\n`artist_name` VARCHAR(1024) NULL,\n`upc` VARCHAR(24) NULL,\n`url` VARCHAR(512) NULL,\n`result` VARCHAR(512) NULL,\n`release_company` VARCHAR(512) NULL,\nPRIMARY KEY (`id`),\nUNIQUE INDEX `upc_UNIQUE` (`upc` ASC));\n\n\"\"\".format(result_table_name)\n\n cur.execute(\"show tables\")\n all_table = \"\".join(i[0] for i in cur.fetchall())\n if all_table.find(result_table_name) == -1:\n cur.execute(create_table_sql)\n cur.execute(\"commit\")\n self.cursor = cur\n\n def save(self, album_name, artist_name, upc, url, result, release_company):\n # import ipdb; ipdb.set_trace()\n result_table_name = \"{}_crawler_result\".format(self.name)\n insert_sql = (\n \"INSERT INTO {} (album_name, artist_name, upc, url, result, release_company)\"\n \" values (%s, %s, %s, %s, %s, %s)\"\n ).format(result_table_name)\n #if upc == '6930043310290':\n #import ipdb\n #ipdb.set_trace()\n try:\n self.cursor.execute(\n insert_sql,\n (album_name, artist_name, upc, url, result, release_company)\n )\n self.cursor.execute(\"commit\")\n except Exception as e:\n self.cursor.execute(\"rollback\")\n logging.warning(e)\n","sub_path":"xiaomi/spiders/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"326958565","text":"from django import forms\n\nfrom apps.evento.models import Evento\n\nclass EventoForm(forms.ModelForm):\n\n class Meta:\n model = Evento\n\n fields = [\n 'descripcion_evento',\n 'fecha_evento',\n 'lugar_evento',\n 'hora_evento',\n 
#'id_ministerio_encargado'\n ]\n labels = {\n 'descripcion_evento': 'Descripcion del evento', #evento queda mejor que actividad especial\n 'fecha_evento': 'Dia del evento',\n 'lugar_evento': 'Lugar del evento',\n 'hora_evento': 'Hora del evento',\n }\n","sub_path":"apps/evento/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"234567306","text":"import sys\nfrom collections import deque\nimport heapq\n\n# to run code: python3 blockmaze.py examples/maze1.txt\n\n#git commands \n#git fetch --all \n#git reset --hard origin/master\n#git reset --hard origin/\n#git add blockmaze.py\n#git commit -m “note”\n#git push\n\n\nclass Node:\n def __init__(self, parent = None, location = None, isVertical = None):\n \n # Keep track of x and y coordinates: [(x1,y1), (x2,y2)]\n self.location = location\n\n # Boolean to store orientation of the block (vertical/horizontal)\n self.isVertical = isVertical\n\n # Is the next node reachable or is it a wall/obstacle?\n # self.isReachable = isReachable\n\n self.parent = None\n\n # Heuristic to store move cost so far to reach goal\n self.g = 0\n\n # Heuristic to store the estimated distance from current square to goal square\n self.h = 0\n\n # The sum of the moveCost and distEstimate heuristics\n self.f = 0\n def __lt__(self, other):\n return self.f < other.f\n\n def __eq__(self, other):\n return self.location == other.location\n \n\ndef read_file(filename):\n # Read in maze files\n print(\"Reading maze: \" + filename)\n\n maze_list = []\n\n maze = open(filename)\n\n # read in each row from the maze\n for row in maze:\n row = row.rstrip('\\n')\n maze_list.append(row)\n print (row)\n \n maze.close()\n\n return maze_list\n\n\ndef find_path(maze_list):\n # getting coordinates of start position, obstacles, and goal position\n obstacles = []\n x = 0\n y = 0\n grid = []\n maze1 = maze_list\n for row in (maze_list):\n for char in row:\n if char == 'S':\n start = ([x,y],[x,y])\n \n if char == '*':\n obstacles.append([x,y])\n \n if char == 'G':\n #goal = ([x,y],[x,y])\n goal = ((x,y),(x,y))\n y += 1\n y = 0\n x += 1\n \n return start, goal\n\n\n#returning a list of the succesors of the selected node\ndef get_succesor_nodes(currentNode, maze_list):\n successors = []\n #check if its vertical\n if currentNode.isVertical == True: \n #the possible moves that the block can make\n nodePos = [((0,1),(0,2)), ((0,-1),(0,-2)), ((-1,0),(-2,0)), ((1,0),(2,0))]\n #iterates through the possible move and checks if out of bound or an obstacle\n for positions in nodePos:\n \n newPosition = ((currentNode.location[0][0] + positions[0][0], currentNode.location[0][1] + positions[0][1]), (currentNode.location[1][0] + positions[1][0], currentNode.location[1][1] + positions[1][1]))\n \n if (newPosition[0][0] > newPosition[1][0] + 1) or (newPosition[0][1] > newPosition[1][1] + 1):\n continue \n\n if (newPosition[0][0] < newPosition[1][0]-1) or (newPosition[0][1] < newPosition[1][1] - 1):\n continue \n\n if newPosition[0][0] > (len(maze_list) - 1) or newPosition[0][0] < 0 or newPosition[0][1] > (len(maze_list[len(maze_list) -1]) - 1) or newPosition[0][1] < 0:\n continue\n if newPosition[1][0] > (len(maze_list) - 1) or newPosition[1][0] < 0 or newPosition[1][1] > (len(maze_list[len(maze_list) -1]) - 1) or newPosition[1][1] < 0:\n continue\n\n if maze_list[newPosition[0][0]][newPosition[0][1]] == '*':\n continue\n if maze_list[newPosition[1][0]][newPosition[1][1]] == '*':\n 
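# the second cell of the moved block must not sit on an obstacle either\n                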
continue\n            #create the node once it has passed every check\n            newNode = Node(currentNode, newPosition, False)\n            newNode.parent = currentNode\n            #add it to the successors list\n            successors.append(newNode)\n    else:\n        #meaning it's horizontal\n        #checking the orientation of the horizontal block\n        if currentNode.location[0][0] == currentNode.location[1][0]:\n            #or currentNode.location[0][1] == currentNode.location[1][1]:\n            nodePos = [((1,0),(1,0)), ((-1,0),(-1,0)), ((0,2), (0,1)), ((0,-1),(0,-2))]\n        else:\n            nodePos = [((2,0),(1,0)), ((-1,0),(-2,0)), ((0,1),(0,1)), ((0,-1),(0,-1))]\n        #iterates through the possible moves and checks if out of bound or an obstacle\n        for positions in nodePos:\n            newPosition = ((currentNode.location[0][0] + positions[0][0], currentNode.location[0][1] + positions[0][1]), (currentNode.location[1][0] + positions[1][0], currentNode.location[1][1] + positions[1][1]))\n\n            if (newPosition[0][0] > newPosition[1][0] + 1) or (newPosition[0][1] > newPosition[1][1] + 1):\n                continue\n            if (newPosition[0][0] < newPosition[1][0] - 1) or (newPosition[0][1] < newPosition[1][1] - 1):\n                continue\n\n            if newPosition[0][0] > (len(maze_list) - 1) or newPosition[0][0] < 0 or newPosition[0][1] > (len(maze_list[len(maze_list) -1]) - 1) or newPosition[0][1] < 0:\n                continue\n            if newPosition[1][0] > (len(maze_list) - 1) or newPosition[1][0] < 0 or newPosition[1][1] > (len(maze_list[len(maze_list) -1]) - 1) or newPosition[1][1] < 0:\n                continue\n\n            if maze_list[newPosition[0][0]][newPosition[0][1]] == '*':\n                continue\n            if maze_list[newPosition[1][0]][newPosition[1][1]] == '*':\n                continue\n            #create the node once it has passed every check\n            newNode = Node(currentNode, newPosition, False)\n            newNode.parent = currentNode\n            #before appending it we want to check if it is horizontal now or if it is vertical\n            if newNode.location[0][0] == newNode.location[1][0] and newNode.location[0][1] == newNode.location[1][1]:\n                newNode.isVertical = True\n            successors.append(newNode)\n    numNodes = len(successors)\n    return successors, numNodes\n\n\ndef Asearch(maze_list): # A* search:\n\n    # Create start and end node\n    startPos, goalPos = find_path(maze_list)\n\n    startNode = Node(None, startPos, True)\n    goalNode = Node(None, goalPos, True)\n    #init our heuristics\n    startNode.g = startNode.h = startNode.f = 0\n    goalNode.g = goalNode.h = goalNode.f = 0\n\n    # Initialize both open and closed list\n    frontier = []\n    heapq.heapify(frontier)\n    visited = []\n    #keeping track of nodes generated\n    totalNodes = 0\n\n    heapq.heappush(frontier, startNode)\n    #Loop until the frontier is empty\n    while len(frontier) > 0:\n        # Pop the node with the lowest f off the heap (Node.__lt__ orders by f) and close it\n        currentNode = heapq.heappop(frontier)\n        visited.append(currentNode)\n        # Found the goal\n        if currentNode.location[0][0] == goalNode.location[0][0] and currentNode.location[0][1] == goalNode.location[0][1]:\n            if currentNode.isVertical:\n                print ('Goal')\n                path = []\n                current = currentNode\n                while current is not None:\n                    path.append(current.location)\n                    current = current.parent\n                print('Length of Path: ')\n                print (len(path))\n
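                # search-effort statistics: 'visited' counts nodes expanded so far\n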
                print('Number of nodes visited: ')\n                print (len(visited))\n                for a in path:\n                    print(a)\n                return path[::-1]\n                #return path.reverse()\n\n        successors, numNodes = get_succesor_nodes(currentNode, maze_list)\n        totalNodes = totalNodes + numNodes\n        print('Total Nodes: ')\n        print (totalNodes)\n        # Loop through children\n        for child in successors:\n            # Skip a child whose position was already expanded (closed list)\n            if any(child.location == visitedChild.location for visitedChild in visited):\n                continue\n            # Create and update the f, g, and h values\n            child.g = currentNode.g + 1\n            child.h = (((child.location[0][0] - goalNode.location[0][0]) ** 2) + ((child.location[0][1] - goalNode.location[0][1]) ** 2))\n            child.f = child.g + child.h\n            # Child is already in the open list: keep only the cheaper copy\n            skip_child = False\n            for item in frontier:\n                if child.location == item.location:\n                    if child.g >= item.g:\n                        skip_child = True\n                    else:\n                        frontier.remove(item)\n                        heapq.heapify(frontier)\n                    break\n            if skip_child:\n                continue\n            # Add the child to the open list\n            heapq.heappush(frontier, child)\n\n\ndef main():\n    if len(sys.argv) != 2:\n        print(\"Usage: python3 blockmaze.py mazeFile\")\n    else:\n        mazeFile = sys.argv[1]\n        maze_list = read_file(mazeFile)\n        find_path(maze_list)\n        path = Asearch(maze_list)\n        for a in path:\n            print (a)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"blockmaze.py","file_name":"blockmaze.py","file_ext":"py","file_size_in_byte":9892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"594864008","text":"import pybamm\n\n\ndef graphite_ocp_PeymanMPM(sto):\n    \"\"\"\n    Graphite Open Circuit Potential (OCP) as a function of the\n    stoichiometry. The fit is taken from Peyman MPM [1].\n\n    References\n    ----------\n    .. [1] Peyman Mohtat et al, MPM (to be submitted)\n    \"\"\"\n\n    u_eq = (\n        0.063\n        + 0.8 * pybamm.exp(-75 * (sto + 0.007))\n        - 0.0120 * pybamm.tanh((sto - 0.127) / 0.016)\n        - 0.0118 * pybamm.tanh((sto - 0.155) / 0.016)\n        - 0.0035 * pybamm.tanh((sto - 0.220) / 0.020)\n        - 0.0095 * pybamm.tanh((sto - 0.190) / 0.013)\n        - 0.0145 * pybamm.tanh((sto - 0.490) / 0.020)\n        - 0.0800 * pybamm.tanh((sto - 1.030) / 0.055)\n    )\n\n    return u_eq\n\n\n# if __name__ == \"__main__\":  # pragma: no cover\n#     import matplotlib.pyplot as plt\n#     import numpy as np\n\n#     x = np.linspace(0, 1)\n#     plt.plot(x, graphite_ocp_PeymanMPM(x))\n#     plt.show()\n","sub_path":"pybamm/input/parameters/lithium-ion/negative_electrodes/graphite_UMBL_Mohtat2020/graphite_ocp_PeymanMPM.py","file_name":"graphite_ocp_PeymanMPM.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"361927454","text":"class Solution(object):\n    def isMatch(self, s, p):\n        \"\"\"\n        :type s: str\n        :type p: str\n        :rtype: bool\n        \"\"\"\n        return self.recall(s, p, 0, 0)\n\n    def recall(self, s, p, sindex, pindex):\n        if sindex == len(s) and pindex == len(p):\n            return True\n        elif sindex < len(s) and pindex == len(p):\n            return False\n        else:\n            if pindex < len(p)-1 and p[pindex+1] == '*':\n                if sindex < len(s):\n                    if p[pindex] == s[sindex] or p[pindex] == '.':\n                        if self.recall(s, p, sindex+1, pindex):\n                            return True\n                return self.recall(s, p, sindex, pindex+2)\n            elif sindex < len(s) and (p[pindex] == s[sindex] or p[pindex] == '.'):\n                return self.recall(s, p, sindex+1, pindex+1)\n            else:\n                return False\n\nif __name__ == \"__main__\":\n    sol = Solution()\n    s = \"mississippi\"\n    p = \"mis*is*p\"\n    print(sol.isMatch(s, 
p))\n\n","sub_path":"hard/isMatch.py","file_name":"isMatch.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"506752767","text":"'''\nCreated on Feb 5, 2014\n\n@author: nonlinear\n'''\nfrom eu.leads.infext.python.ecom.classifier.virtualclassifier import VirtualClassifier\nfrom eu.leads.infext.python.lang.dicts import lang_dicts\nimport requests\nfrom lxml import html\nfrom eu.leads.infext.python.myprint import prettyprint\nfrom eu.leads.infext.python.ecom.test import shopslist\nfrom eu.leads.infext.python.ops import xpathops\nfrom eu.leads.infext.python.ecom.dictionaries.regexfunc import getwordregex, regexofdictentries\nimport re\nfrom eu.leads.infext.python.ecom.classifier.addtobagbuttonclassifier import AddToBasketButtonClassifier\n\nnamespaces = {\"re\": \"http://exslt.org/regular-expressions\"}\n\npoints = [3,1,1,1,1,1,1,1]\n\n\nclass BagLinkCandidate(object):\n __slots__= \"node\", \"nodepath\", \"points\",\n\n def items(self):\n \"dict style items\"\n return [\n (field_name, getattr(self, field_name))\n for field_name in self.__slots__]\n\n def __iter__(self):\n \"iterate over fields tuple/list style\"\n for field_name in self.__slots__:\n yield getattr(self, field_name)\n\n def __getitem__(self, index):\n \"tuple/list style getitem\"\n return getattr(self, self.__slots__[index])\n \n \n\nclass BagLinkClassifier(VirtualClassifier):\n '''\n classdocs\n '''\n\n\n def __init__(self):\n '''\n Constructor\n '''\n \n \n def find(self,params):\n \n tree = self.page_dict.get(\"tree\")\n lang = self.page_dict.get(\"lang\")\n \n # 1. Find all with attributes\n nodes = self.__nodes_containing_attrs(elem='*', lang=lang, dict_entries=[\"ECOM_CART\",\"ECOM_CHECKOUT\"]) \n #print(\"---\")\n \n baglink_candidates_list = []\n for node in nodes:\n cand = BagLinkCandidate()\n cand.node = node\n cand.nodepath = tree.getpath(node)\n cand.points = 0\n baglink_candidates_list.append(cand)\n \n #print(len(baglink_candidates_list))\n # 2a. Remove ones that have more than 3 levels of children\n baglink_candidates_list = [cand for cand in baglink_candidates_list if not tree.xpath(cand.nodepath+\"/*/*/*/*/*\")] \n #print(len(baglink_candidates_list)) \n # 2b. Remove ones that have more than 20 descendants\n baglink_candidates_list = [cand for cand in baglink_candidates_list if tree.xpath(\"count(\"+cand.nodepath+\"//*)\")<20] \n #print(len(baglink_candidates_list))\n # 2c. Remove the ones that look like add-to-cart buttons\n addclassif = AddToBasketButtonClassifier()\n baglink_candidates_list = [cand for cand in baglink_candidates_list if not addclassif.classifygivennode(self.page_dict, cand.nodepath)] \n # 2d. Remove the ones that have more than 20 words in text nodes inside\n baglink_candidates_list = [cand for cand in baglink_candidates_list if sum(len(text.split()) for text in tree.xpath(cand.nodepath+\"//text()\"))<10] \n #print(len(baglink_candidates_list)) \n # 2e. Remove ones that are children of others found\n baglink_candidates_list = [cand for cand in baglink_candidates_list if self.__isbagroot(cand,baglink_candidates_list)] \n #print(len(baglink_candidates_list)) \n #print(' '.join(x.node.tag for x in baglink_candidates_list))\n \n for cand in baglink_candidates_list:\n \n # 3. If element is anchor, add points\n if cand.node.tag == 'a':\n cand.points += points[0]\n # 4. 
If element has anchor descendants, add points\n xpath = cand.nodepath+\"//\"+\"a\"\n if tree.xpath(xpath,namespaces=namespaces):\n cand.points += points[1]\n # 5. If element is image descendants, add points\n xpath = cand.nodepath+\"//\"+\"img\"\n if tree.xpath(xpath,namespaces=namespaces):\n cand.points += points[2]\n # 6. If element has descendants with attrs, add points\n if self.__nodes_containing_attrs(root=cand.nodepath, elem=\"*\", lang=lang, dict_entries=[\"ECOM_CART\",\"ECOM_CHECKOUT\"]):\n cand.points += points[3]\n # 7. If element has anchor descendants with attrs, add points\n if self.__nodes_containing_attrs(root=cand.nodepath, elem=\"a\", lang=lang, dict_entries=[\"ECOM_CART\",\"ECOM_CHECKOUT\"]):\n cand.points += points[4]\n \n# if not attention_flag: \n# # 8. If element has descendants with text type 1, add points\n# xpath = self.__xpath_contains_attrs(root=cand.nodepath, elem='*', attr=\"text()\", \n# lang=lang, dict_entries=[\"ECOM_CART\",\"ECOM_CHECKOUT\"]) \n# if tree.xpath(xpath,namespaces=namespaces):\n# cand.points += points[5] \n# # 9. If element has descendants with text type 2, add points\n# xpath = self.__xpath_contains_attrs(root=cand.nodepath, elem='*', attr=\"text()\", \n# lang=lang, dict_entries=[\"ECOM_ITEM\"]) \n# if tree.xpath(xpath,namespaces=namespaces):\n# cand.points += points[6]\n# # 9. If element has descendants with text type 3, add points\n# xpath = self.__xpath_contains_attrs(root=cand.nodepath, elem='*', attr=\"text()\", \n# lang=lang, regexs=[\"\\d+\"]) \n# if tree.xpath(xpath,namespaces=namespaces):\n# cand.points += points[7]\n# else:\n text_nodes = tree.xpath(cand.nodepath+\"//text()\")\n text_nodes = [text.strip() for text in text_nodes if text.strip()]\n \n match1 = match2 = match3 = 0\n regex1 = regexofdictentries(entries=[\"ECOM_CART\",\"ECOM_CHECKOUT\"],lang=lang)\n regex2 = regexofdictentries(entries=[\"ECOM_ITEM\"],lang=lang)\n regex3 = \"\\d+\"\n for text in text_nodes:\n #text = str(text)\n # 8. If element has descendants with text type 1, add points\n if match1==0 and re.search(regex1,text):\n match1 = 1\n # 9. If element has descendants with text type 2, add points\n if match2==0 and re.search(regex2,text):\n match2 = 1\n # 10. 
If element has descendants with text type 3, add points\n if match3==0 and re.search(regex3,text):\n match3 = 1\n cand.points += match1 + match2 + match3\n \n if len(baglink_candidates_list) > 0:\n baglink_candidates_list = sorted(baglink_candidates_list, key=lambda x: -x.points)\n \n cand = baglink_candidates_list[0]\n # print(\"\\n--\\n\\n\")\n # print(i,cand.points,\":::\")\n # prettyprint.print_html(cand.node)\n \n self.features = [cand.points]\n self.nodepath = cand.nodepath if cand.points>=2 else None\n self.certainty = 1.0 if cand.points>5 else cand.points/5.0 \n \n else:\n self.features = [0] \n \n return self.nodepath\n \n \n def __xpath_contains_attrs(self,root=\"\",dict_entries=None,regexs=None,elem=\"\",attr=None,lang='en'):\n \n if not dict_entries and not regexs:\n return None\n \n if attr == None: attr = \"@*\"\n if elem == None: elem = \"*\"\n \n lang_dict = lang_dicts.get(lang)\n en_dict = lang_dicts.get('en')\n \n xpath = \"\"\n xpath += root\n xpath += \"//\"\n xpath += elem\n xpath += \"[\"\n \n if dict_entries:\n words_set = set()\n for entry in dict_entries:\n for word in lang_dict.get(entry):\n words_set.add(word)\n if lang != 'en':\n for word in en_dict.get(entry):\n words_set.add(word)\n \n for word in words_set:\n xpath += \"re:match(\"+attr+\",'\"+getwordregex(word)+\"')\"\n xpath += \" or \"\n \n if regexs:\n for regex in regexs:\n xpath += \"re:match(\"+attr+\",'\"+regex+\"','i')\"\n xpath += \" or \"\n \n xpath = xpath[:-4]\n \n xpath += \"]\"\n \n return xpath\n \n \n def __nodes_containing_attrs(self,root=\"\",dict_entries=None,elem=\"\",attr=None,lang='en'):\n \n if not dict_entries:\n return None\n \n tree = self.page_dict.get(\"tree\")\n \n if attr == None: attr = \"@*\"\n if elem == None: elem = \"*\"\n \n lang_dict = lang_dicts.get(lang)\n en_dict = lang_dicts.get('en')\n \n xpath = \"\"\n xpath += root\n xpath += \"//\"\n xpath += elem\n xpath += \"[\"\n \n words_set = set()\n for entry in dict_entries:\n for word in lang_dict.get(entry):\n words_set.add(word)\n if lang != 'en':\n for word in en_dict.get(entry):\n words_set.add(word)\n \n xpath += \"@*[\"\n \n for word in words_set:\n xpath += \"contains(.,'\"+word.lower()+\"')\"\n xpath += \" or \"\n xpath += \"contains(.,'\"+word.title()+\"')\"\n xpath += \" or \"\n \n xpath = xpath[:-4]\n xpath += \"]]\"\n \n nodes = tree.xpath(xpath,namespaces=namespaces)\n if nodes:\n eval_nodes = []\n regexdictentries = regexofdictentries(dict_entries, lang=lang)\n for node in nodes:\n attrvals = node.values()\n attrvals = [attr for attr in attrvals if re.search(regexdictentries,attr)]\n if attrvals:\n eval_nodes.append(node)\n return eval_nodes\n else:\n return nodes\n \n \n def __isbagroot(self,cand,baglink_candidates_list):\n for potancestor in baglink_candidates_list:\n if cand == potancestor:\n break\n if xpathops.isancestor(cand.nodepath, potancestor.nodepath):\n return False \n \n return True\n \n \n\n# classif = BagLinkClassifier() \n# for url in shopslist.page_per_shop_list+shopslist.page_per_shop_test_list:\n# print(url)\n# if classif.classify(requests.get(url).text, 'en'):\n# print(classif.getCertainty(),classif.getNodePath())\n# else:\n# print \"Nope\"\n# prettyprint.print_space()\n \n","sub_path":"nqe/system-plugins/leads-lucene-indexing/src/main/python/eu/leads/infext/python/ecom/classifier/baglinkclassifier.py","file_name":"baglinkclassifier.py","file_ext":"py","file_size_in_byte":10756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} 
+{"seq_id":"577108119","text":"from app.models.models import staticRecord\r\n\r\n#############################################################################################\r\nclass StandardizedCurrency:\r\n def __init__(self):\r\n self.sector = 'Suppress'\r\n self.ss_asset_class = 'Spot'\r\n self.instrument_type = 'FX'\r\n self.gics_sector_name = 'Suppress'\r\n self.gics_industry_name = 'Suppress'\r\n self.issuer = 'Suppress'\r\n self.bics_level_3_industry_name = 'Suppress'\r\n return\r\n\r\n#############################################################################################\r\nclass CurrencyDef(StandardizedCurrency):\r\n\r\n ###Currency name: Dollar, Pound, Ringit, Etc.##Country Desc Name: euro, brazilian, etc.\r\n def __init__(self, code = None, name = None, country = None, country_desc_name = None):\r\n self.code = code\r\n self.name = name\r\n self.country = country\r\n self.country_desc_name = country_desc_name\r\n return\r\n\r\n ### Standardized Properties Based on Currency Definition\r\n @property\r\n def rcg_id(self):\r\n return \"RCG\" + self.code.upper() + \"999999\"\r\n @property\r\n def search_name(self):\r\n return self.code.upper() + \" Curncy\"\r\n @property\r\n def security_name(self):\r\n return self.country_desc_name.title() + \" \" + self.name.title()\r\n\r\n @staticmethod\r\n def nameMeetsRequirementList(name, requirementList):\r\n for req in requirementList:\r\n if req not in name.lower():\r\n return False\r\n return True\r\n\r\n def fitsName(self, name):\r\n if 'forward' in name.lower() or 'fut' in name.lower():\r\n return False\r\n requirements = self.possibleRequirements\r\n for requirementSet in requirements:\r\n if CurrencyDef.nameMeetsRequirementList(name, requirementSet):\r\n return True\r\n return False\r\n\r\n def fitsID(self, rcg_id):\r\n if self.rcg_id == rcg_id or(self.code.upper() in rcg_id and '999' in rcg_id):\r\n return True\r\n return False\r\n\r\n @property\r\n def possibleRequirements(self):\r\n \r\n requirements = []### The appends with only one element in the list mean that the security name will have to almost equal### the string exactly.\r\n requirements.append([self.security_name.lower()])##['chilean peso']\r\n requirements.append([self.security_name.lower()])##['us dollar']\r\n \r\n requirements.append([self.code.lower()[: 2] + \" \" + self.name.lower()])#['usd dollar']\r\n requirements.append([self.code.lower() + \" cash\"])##['jpy cash']\r\n requirements.append([self.code.lower() + \" collateral\"])\r\n \r\n ### Multi element list appends mean the security name will have to contain all of the separate sub parts.\r\n requirements.append([self.code.lower(), \"cash\", \"collateral\"])\r\n requirements.append([self.code.lower(), \"broker\", \"cash\"])\r\n return requirements\r\n \r\n ### Restandardizes an already existing static model\r\n def standardizeHoldingModel(self, model):\r\n \r\n StandardizedCurrency.__init__(self)\r\n \r\n model.rcg_id = self.rcg_id\r\n model.sec_name = self.security_name.title()### Inherited\r\n model.ss_asset_class = self.ss_asset_class\r\n return model\r\n \r\n ### Restandardizes an already existing static model\r\n def standardizeStaticModel(self, model):\r\n \r\n StandardizedCurrency.__init__(self)\r\n \r\n model.rcg_id = self.rcg_id\r\n model.security_name = self.security_name.title()\r\n model.country_full_name = self.country.title()\r\n model.search_name = self.search_name\r\n \r\n ### Inherited\r\n model.ss_asset_class = self.ss_asset_class\r\n model.bics_level_3_industry_name = 
self.bics_level_3_industry_name\r\n        model.issuer = self.issuer\r\n        model.gics_industry_name = self.gics_industry_name\r\n        model.instrument_type = self.instrument_type\r\n        model.gics_sector_name = self.gics_sector_name\r\n        \r\n        return model\r\n\r\n    ### Generates a new static model that is standardized\r\n    def generateStaticModel(self):\r\n        \r\n        StandardizedCurrency.__init__(self)\r\n        model = staticRecord()\r\n        model = self.standardizeStaticModel(model)\r\n        \r\n        return model\r\n\r\n#############################################################################################\r\nclass CurrencyStandards:\r\n\r\n### Names that aren't programmatically determined to be associated with currencies, but are instead hardcoded in.\r\n\r\n    defaults = {\r\n        'pound sterling': 'gbp'\r\n    }\r\n    \r\n    currencyDefinitions = [\r\n        \r\n        CurrencyDef(code = 'aud', name = 'dollar', country = 'australia', country_desc_name = \"australian\"),\r\n        CurrencyDef(code = 'usd', name = 'dollar', country = 'united states', country_desc_name = \"united states\"),\r\n        CurrencyDef(code = 'cad', name = 'dollar', country = 'canada', country_desc_name = \"canadian\"),\r\n        \r\n        CurrencyDef(code = 'clp', name = 'peso', country = 'chile', country_desc_name = \"chilean\"),\r\n        CurrencyDef(code = 'dkk', name = 'krone', country = 'denmark', country_desc_name = \"danish\"),\r\n        CurrencyDef(code = 'hkd', name = 'dollar', country = 'hong kong', country_desc_name = \"hong kong\"),\r\n        CurrencyDef(code = 'eur', name = 'currency', country = 'europe', country_desc_name = \"euro\"),\r\n        \r\n        CurrencyDef(code = 'brl', name = 'real', country = 'brazil', country_desc_name = \"brazilian\"),\r\n        CurrencyDef(code = 'jpy', name = 'yen', country = 'japan', country_desc_name = \"japanese\"),\r\n        CurrencyDef(code = 'idr', name = 'rupiah', country = 'indonesia', country_desc_name = \"indonesian\"),\r\n        CurrencyDef(code = 'myr', name = 'ringgit', country = 'malaysia', country_desc_name = \"malaysian\"),\r\n        \r\n        CurrencyDef(code = 'nzd', name = 'dollar', country = 'new zealand', country_desc_name = \"new zealand\"),\r\n        CurrencyDef(code = 'nok', name = 'krone', country = 'norway', country_desc_name = \"norwegian\"),\r\n        CurrencyDef(code = 'gbp', name = 'pound', country = 'united kingdom', country_desc_name = \"british\"),\r\n        \r\n        CurrencyDef(code = 'sgd', name = 'dollar', country = 'singapore', country_desc_name = \"singapore\"),\r\n        CurrencyDef(code = 'krw', name = 'won', country = 'south korea', country_desc_name = \"south korean\"),\r\n        CurrencyDef(code = 'sek', name = 'krona', country = 'sweden', country_desc_name = \"swedish\"),\r\n        \r\n        CurrencyDef(code = 'chf', name = 'franc', country = 'switzerland', country_desc_name = \"swiss\"),\r\n        CurrencyDef(code = 'twd', name = 'dollar', country = 'taiwan', country_desc_name = \"taiwanese\"),\r\n        CurrencyDef(code = 'mxn', name = 'peso', country = 'mexico', country_desc_name = \"mexican\"),\r\n    ]\r\n\r\n    @staticmethod\r\n    def findDefinitionForCode(code):\r\n        for defn in CurrencyStandards.currencyDefinitions:\r\n            if defn.code == code.lower():\r\n                return defn\r\n        return None\r\n    \r\n    @staticmethod\r\n    def findDefinition(rcg_id, security_name):\r\n        \r\n        for defn in CurrencyStandards.currencyDefinitions:\r\n            if defn.fitsName(security_name) or defn.fitsID(rcg_id):\r\n                return defn\r\n        return None\r\n    \r\n    @staticmethod\r\n    def standardizeHoldingModel(holding_model):\r\n        \r\n        newHoldingModel = holding_model  ### Try default first\r\n        if holding_model.sec_name.lower() in CurrencyStandards.defaults.keys():\r\n            code = 
CurrencyStandards.defaults[holding_model.sec_name.lower()]\r\n defn = CurrencyStandards.findDefinitionForCode(code)\r\n newHoldingModel = defn.standardizeHoldingModel(newHoldingModel)\r\n return newHoldingModel\r\n \r\n if holding_model.rcg_id != None and holding_model.sec_name != None:\r\n defn = CurrencyStandards.findDefinition(holding_model.rcg_id, holding_model.sec_name)\r\n if defn != None:\r\n newHoldingModel = defn.standardizeHoldingModel(newHoldingModel)\r\n \r\n return newHoldingModel","sub_path":"app/models/security/standardization/currencyStandards.py","file_name":"currencyStandards.py","file_ext":"py","file_size_in_byte":8122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"450119141","text":"from django.test import TestCase\nfrom cyberattackapp.backend.commands import GetAttacksCommand, IPLookUpCommand\nfrom cyberattackapp.backend.models import RawCyberAttack\nfrom django.conf import settings\nimport time\nfrom datetime import datetime\n\n\nclass IPLookUpCommandIntegrationTestCase(TestCase):\n\n def test_execute(self):\n ip_lookup_command = IPLookUpCommand('141.114.157.28')\n\n return_value = ip_lookup_command.execute()\n\n self.assertDictEqual(\n return_value,\n {\n 'city': 'Sanford',\n 'state': 'ME',\n 'country': 'US',\n 'latitude': 43.4042,\n 'longitude': -70.7455\n }\n )\n\n def test_execute__local_address(self):\n ip_lookup_command = IPLookUpCommand('192.168.56.1')\n\n return_value = ip_lookup_command.execute()\n\n self.assertDictEqual(\n return_value,\n {\n 'city': '',\n 'state': '',\n 'country': 'unknown',\n 'latitude': 0,\n 'longitude': 0\n }\n )\n\n\nclass CommandIntegrationTestCase(TestCase):\n\n def setUp(self):\n try:\n self.raw_cyberattack_data = RawCyberAttack(\n id=1,\n source_ip='141.114.157.28',\n dest_ip=settings.HONEYPOTS[0]['ip'],\n source_port=42,\n dest_port=100,\n service='foo_service',\n time=time.time() - 58\n )\n self.raw_cyberattack_data.clean()\n self.raw_cyberattack_data.save()\n except Exception as e:\n self.failed_setup = True\n self.skipTest('Skipping CommandIntegrationTestCase: {0}'.format(e))\n else:\n self.failed_setup = False\n\n def test_successful_query(self):\n cyber_attacks = GetAttacksCommand(\n attacker__ip=self.raw_cyberattack_data.source_ip,\n target__location__state=settings.HONEYPOTS[0]['location']['state'],\n source_port=self.raw_cyberattack_data.source_port,\n destination_port=self.raw_cyberattack_data.dest_port,\n service=self.raw_cyberattack_data.service\n ).execute()\n\n self.assertEqual(len(cyber_attacks), 1)\n cyber_attack = cyber_attacks[0]\n\n self.assertEqual(cyber_attack.attacker.ip, self.raw_cyberattack_data.source_ip)\n self.assertEqual(cyber_attack.attacker.location.city, 'Sanford')\n self.assertEqual(cyber_attack.attacker.location.state, 'ME')\n self.assertEqual(cyber_attack.attacker.location.country, 'US')\n self.assertEqual(float(cyber_attack.attacker.location.latitude), 43.404200)\n self.assertEqual(float(cyber_attack.attacker.location.longitude), -70.745500)\n\n self.assertEqual(cyber_attack.target.ip, self.raw_cyberattack_data.dest_ip)\n self.assertEqual(cyber_attack.target.location.city, settings.HONEYPOTS[0]['location']['city'])\n self.assertEqual(cyber_attack.target.location.state, settings.HONEYPOTS[0]['location']['state'])\n self.assertEqual(cyber_attack.target.location.country, settings.HONEYPOTS[0]['location']['country'])\n self.assertEqual(float(cyber_attack.target.location.latitude), float(settings.HONEYPOTS[0]['location']['latitude']))\n 
self.assertEqual(float(cyber_attack.target.location.longitude), float(settings.HONEYPOTS[0]['location']['longitude']))\n\n self.assertEqual(int(cyber_attack.source_port), int(self.raw_cyberattack_data.source_port))\n self.assertEqual(int(cyber_attack.destination_port), int(self.raw_cyberattack_data.dest_port))\n self.assertEqual(cyber_attack.service, self.raw_cyberattack_data.service)\n\n timestamp = datetime.fromtimestamp(self.raw_cyberattack_data.time)\n self.assertAlmostEqual(int(cyber_attack.timestamp.hour), int(timestamp.hour), delta=1)\n self.assertAlmostEqual(int(cyber_attack.timestamp.minute), int(timestamp.minute), delta=1)\n self.assertAlmostEqual(int(cyber_attack.timestamp.second), int(timestamp.second), delta=1)\n\n def test_expired_attack_query(self):\n time.sleep(5)\n cyber_attacks = GetAttacksCommand().execute()\n\n self.assertEqual(len(cyber_attacks), 0)\n\n def test_empty_query(self):\n cyber_attacks = GetAttacksCommand(service='bar_service').execute()\n\n self.assertEqual(len(cyber_attacks), 0)\n\n def test_invalid_query(self):\n cyber_attacks = GetAttacksCommand(foobar='foobar').execute()\n\n self.assertAlmostEqual(len(cyber_attacks), 0)\n\n def tearDown(self):\n if not self.failed_setup:\n self.raw_cyberattack_data.delete()\n","sub_path":"cyberattackapp/backend/tests/test_integration/test_command_integration.py","file_name":"test_command_integration.py","file_ext":"py","file_size_in_byte":4704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"518788486","text":"# internal dependencies\nimport random\nfrom numpy import reshape as reshape\n\nclass Board(object):\n \"\"\"a mine sweeper board\"\"\"\n EMPTY_SPACE = \"0\"\n MINE_SPACE = \"*\"\n DEFAULT_ROWS = 9\n DEFAULT_COLS = 9\n DEFAULT_MINES = 10\n\n def __init__(self, rows = DEFAULT_ROWS, columns = DEFAULT_COLS, mines = DEFAULT_MINES, fclick = [4,4]):\n self.board = []\n self.mines = mines\n self.rows = rows\n self.columns = columns\n self.first_click = fclick\n self.generate_new_board()\n self.calc_hints()\n self.print_board()\n\n def generate_new_board(self):\n '''generates the board with mines and placeholders for numbers'''\n # create local variables for better readability\n rows = self.rows\n cols = self.columns\n mines = self.mines\n fclick = (self.first_click[0] * (cols)) + self.first_click[1]\n #seed list with appropriate numbers of mines and spaces\n for i in range(rows*cols-mines):\n self.board.append(self.EMPTY_SPACE)\n for j in range(mines):\n self.board.append(self.MINE_SPACE)\n random.shuffle(self.board)\n # clear the first-clicked space if necessary\n if(self.board[fclick] == self.MINE_SPACE):\n first_zero = self.board.index(self.EMPTY_SPACE)\n self.board[first_zero] = self.MINE_SPACE\n self.board[fclick] = self.EMPTY_SPACE\n #reshape the board into rows and columns\n self.board = reshape(self.board, (rows, cols))\n\n def print_board(self, zeroes = EMPTY_SPACE, mines = MINE_SPACE):\n '''pretty-prints the board'''\n new_arr = []\n for row in self.board:\n new_arr.append(' '.join(row).replace(self.EMPTY_SPACE, zeroes).replace(self.MINE_SPACE, mines))\n for row_str in new_arr:\n print(row_str)\n\n def calc_hints(self):\n '''generate numbers after a board is generated'''\n for i in range(self.rows):\n for j in range(self.columns):\n #when a mine is selected do the following\n if(self.board[i][j] == self.MINE_SPACE):\n # k and l loops to access surrounding array indices\n for k in range(-1, 2):\n for l in range(-1, 2):\n #test for edges, continue if 
inaccessible\n                            if(i+k < 0 or j+l < 0 or i+k > self.rows-1 or j+l > self.columns-1):\n                                continue\n                            if (self.board[i+k][j+l] != self.MINE_SPACE):\n                                self.board[i+k][j+l] = str(int(self.board[i+k][j+l]) + 1)\n                            else:\n                                continue","sub_path":"app/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"277730230","text":"from maketweettexts import TwitterJSONParser, twitter_entry\nimport pandas as pd\n\n#input a json file name to read in tweets\nfilename = input(\"JSON Filename : \")\nprint(\"Parsing data . . .\")\njson_stuff = TwitterJSONParser(filename+'.json')\nall = json_stuff.tweets\nprint(\"Data retrieved\")\n\n#once data is retrieved, ask what section of the tweets we want to train\nstartindex = int(input(\"Give a starting index : \"))\nnumoftweets = int(input(\"Number of tweets you want to label :\"))\n\n#begin labeling the tweets\nprint(\"Label each either as important or not\")\npdindex = []\npdcolumns = ['rawtext', 'label']\nt = []\nl = []\ndata = {'rawtext': [], 'label': []}\nfor i in range(numoftweets):\n    print('-')\n    lab = 'a'\n    while (lab != 'n' and lab != 'y'):\n        print(all[startindex+i].get_raw_text())\n        lab = str(input('y or n : '))\n        if(lab != 'n' and lab != 'y'):\n            print('Answer must be y or n, try again . . 
.')\n t.append(all[startindex+i].get_raw_text())\n l.append(lab)\n pdindex.append(startindex+i)\n\ndata['rawtext'] = t\ndata['label'] = l\n\n#push the data into a dataframe then export as a csv file into local directory\ndf1 = pd.DataFrame(data, index=pdindex)\noutputfilename = str(filename)+'-startindex'+str(startindex)+'-numtwts'+str(numoftweets)+'.csv'\ndf1.to_csv(outputfilename)\n","sub_path":"analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"446348322","text":"### FUNCTIONS ###\nfrom random import randint, uniform\nimport time\nfrom factors import Item\nimport helpMe\nclass Game:\n def __init__(self, user, foxes, chickens, items):\n self.eMagnitude = 0\n self.dayNr = 0\n self.nightNr = 0\n self.user = user\n self.foxes = foxes\n self.chickens = chickens\n self.items = items\n self.stash = []\n\n### CHECKERS ###\n ### EGGS AND CHICKS ###\n eggs = 0\n def newEggs(self):\n eggs = 0\n for i in range (len(self.chickens)):\n egg = randint(0,1)\n eggs += egg\n self.eggs += eggs\n\n def eggPay(self):\n payment = self.eggs*5*self.eMagnitude\n print('You receive {}$'.format(payment))\n self.user.currency += payment\n\n def newChicken(self):\n if self.eggs > 10 and self.eMagnitude > 2.2:\n self.chickens.append(Chicken(\"y\",1))\n else:\n print(\"\\nNo chicken born today\")\n\n ### ATTACK AND DEFENCE ###\n foxesAttackTotal=0\n def foxesAttack(self):\n foxesAttack = 0\n for i in self.foxes:\n foxesAttack += i.agility + i.wisdom + i.alertness\n self.foxesAttackTotal = foxesAttack\n\n totalDefence = 0\n def calcDefence(self, u):\n constant = self.user.defence\n protection = 0\n for i in self.stash:\n protection += i.defence\n self.totalDefence = protection + constant + (u.enraged*10)\n\n### OPTIONS ###\n ### HELP ###\n def help(self, where):\n print('\\nH O M E - H E L P')\n if where == 'home':\n helpMe.home()\n elif where == 'stats':\n pass\n elif where == 'market':\n pass\n\n\n\n ### STATS ###\n def stats(self, user, eggs, chickens, stash):\n print('\\nS T A T S\\n'+\n 'Currency: '+str(user.currency)+'$\\n'+\n 'Constant defence: '+str(user.defence)+'¤\\n'+\n 'Current defence: '+str(self.totalDefence)+'¤\\n'+\n 'Enraged bonus: '+str(user.enraged)+' >:(')\n print('Collected eggs: '+str(eggs)+\n '\\nAs of today: {0:.2f}$ per egg'.format(5*self.eMagnitude))\n print('\\nChicken roaster:')\n for i in chickens:\n print('> '+i.name)\n print('\\nYour stash:')\n for i in stash:\n print('> {} = {}¤ (special={})'.format(i.name,i.defence,i.special))\n\n ### MARKET ###\n def buy(self, item, u):\n if item.price <= u.currency:\n agreement = input('{} buys {}? 
(y/n)\\n'.format(u.nickname, item.name))\n            if agreement == \"y\":\n                print('Buying and adding '+item.name+' to your stash...')\n                time.sleep(1.5)\n                u.currency -= item.price\n                self.stash.append(item)\n                self.calcDefence(self.user)\n            else:\n                print(\"Cancelling transaction...\")\n        else:\n            print('You don\\'t have enough money...'+\n                  'Item: {}$\\nYou have: {}'.format(item.price, u.currency))\n\n    def market (self, u, i, e):\n        shopping = True\n        while shopping:\n            print('\\nM A R K E T')\n            time.sleep(0.5)\n            category = input(' buy | sell | help | leave\\n>>>')\n\n            if category == 'buy':\n                print('\\nM A R K E T - B U Y')\n                for x in i:\n                    print('> {} = {}$, {}¤'.format(x.name,x.price,x.defence))\n                itemChoice = input(\"You choose to buy: \")\n                if itemChoice == \"scarecrow\":\n                    self.buy(i[0], u)\n                elif itemChoice == \"shout\":\n                    self.buy(i[1], u)\n                elif itemChoice == \"stereo\":\n                    self.buy(i[2], u)\n                elif itemChoice == \"corn\":\n                    self.buy(i[3], u)\n                elif itemChoice == \"drugs\":\n                    self.buy(i[4], u)\n                elif itemChoice == \"gun\":\n                    self.buy(i[5], u)\n                else:\n                    print(itemChoice+' is not a valid option')\n\n            elif category == 'sell':\n                print('\\nM A R K E T - S E L L')\n                payment = round(e*5*self.eMagnitude,2)\n                answer = input('Sell your {} eggs ({}$)?'.format(e,payment))\n                if answer == 'y':\n                    self.eggPay()\n                else:\n                    print(\"You refused to sell your eggs\")\n\n            elif category == \"help\":\n                self.help('market')\n\n            elif category == 'leave':\n                print('Returning home...')\n                shopping = False\n    line = 0\n### STAGES ###\n    ### DAYTIME ###\n    def dayTime(self):\n        self.eMagnitude = round(uniform(0.5, 2.2),2) + self.line\n        self.dayNr += 1\n        self.foxesAttack()\n        self.newEggs()\n        self.newChicken()\n        self.calcDefence(self.user)\n        print('''\\n\\n----------> {}. Day <----------'''.format(self.dayNr))\n        awake = True\n        while awake:\n            print('\\nH O M E')\n            time.sleep(0.5)\n            option = input(' help | stats | market | sleep\\n>>> ')\n            if option == 'help':\n                self.help('home')\n            elif option == 'stats':\n                self.stats(self.user, self.eggs, self.chickens, self.stash)\n            elif option == 'market':\n                self.market(self.user, self.items, self.eggs)\n            elif option == 'sleep':\n                awake = False\n            else:\n                print('Thats not a valid option')\n\n    ### NIGHTTIME ###\n    def nightTime(self):\n        victims = []\n        self.nightNr += 1\n        self.calcDefence(self.user)\n        print('''\\n\\n----------> {}. Night <----------'''.format(self.nightNr))\n        for item in self.stash:\n            if item.special == 'yes':\n                if(item.name == \"corn\"):\n                    self.line += 1\n                elif(item.name == \"gun\"):\n                    for i in list(self.foxes):\n                        if i.attention < 20:\n                            print(\"You killed a fox that wasn't paying attention!\")\n                            self.foxes.remove(i)\n                        else:\n                            print(\"The foxes saw you and went into hiding...\")\n\n        trouble = True\n        while trouble:\n            if self.dayNr >= 15:\n                self.end()\n            killX = randint(1,len(self.chickens)-1)\n            print(self.chickens[killX].name+' is in danger!')\n            time.sleep(1.5)\n            if self.totalDefence < self.foxesAttackTotal:\n                lucky = randint(0,10)\n                if lucky < 9:\n                    victims.append(self.chickens[killX].name)\n                    self.chickens.pop(killX)\n                    self.foxesAttackTotal -= 100\n                else:\n                    print(self.chickens[killX].name+' got away!')\n            else:\n                print(\"Out of the dark, the sun rises. 
And the foxes retreat\")\n trouble = False\n self.stash = []\n print('\\nV I C T I M S')\n for i in victims:\n print(i)\n self.user.enraged += len(victims)\n\n ### ENDING ###\n def end(self):\n print('You survived!')\n","sub_path":"Experiments/chickenSurvivor/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":7228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"137037561","text":"from opentrons import labware, instruments, modules, robot\nimport math\n\nmetadata = {\n 'protocolName': 'Nextera DNA Flex NGS Library Prep: Post Tagmentation \\\nCleanup',\n 'author': 'Nick ',\n 'source': 'Custom Protocol Request'\n}\n\n# load labware and modules\nmagdeck = modules.load('magdeck', '1')\nmag_plate = labware.load(\n 'biorad_96_wellplate_200ul_pcr', '1', 'tagmentation rxn plate', share=True)\nres12 = labware.load(\n 'usascientific_12_reservoir_22ml', '3', 'reagent reservoir')\n\ntwb = [chan.bottom(5) for chan in res12.wells()[:2]]\nliquid_waste = [chan.top() for chan in res12.wells('A11', length=2)]\n\n\ndef run_custom_protocol(\n p300_type: 'StringSelection...' = 'single',\n p300_mount: 'StringSelection...' = 'right',\n number_of_samples_to_process: int = 24\n):\n # check:\n if number_of_samples_to_process > 96 or number_of_samples_to_process < 1:\n raise Exception('Invalid number of samples to process (must be between \\\n1 and 96).')\n\n num_cols = math.ceil(number_of_samples_to_process/8)\n num_300_racks = math.ceil((num_cols*6)/12)\n slots300 = [str(slot) for slot in range(5, 5+num_300_racks)]\n tips300 = [\n labware.load('opentrons_96_tiprack_300ul', slot) for slot in slots300]\n\n # pipettes\n if p300_type == 'multi':\n pip300 = instruments.P300_Multi(mount=p300_mount, tip_racks=tips300)\n samples300 = mag_plate.rows('A')[:num_cols]\n else:\n pip300 = instruments.P300_Single(mount=p300_mount, tip_racks=tips300)\n samples300 = mag_plate.wells()[:number_of_samples_to_process]\n pip300.set_flow_rate(aspirate=75, dispense=90)\n\n magdeck.engage(height=18)\n robot.comment('Incubating beads on magnet for 3 minutes.')\n pip300.delay(minutes=3)\n\n # remove and discard supernatant\n for s in samples300:\n pip300.pick_up_tip()\n pip300.transfer(65, s.bottom(1), liquid_waste[0], new_tip='never')\n pip300.blow_out()\n pip300.drop_tip()\n\n # TWB washes 3x\n count = 0\n total_twb = 96*3\n for wash in range(3):\n magdeck.disengage()\n\n # resuspend beads in TWB\n for i, s in enumerate(samples300):\n ind = (count*len(twb))//total_twb\n count += 1\n\n side = i % 2 if p300_type == 'multi' else math.floor(i/8) % 2\n angle = 0 if side == 0 else math.pi\n disp_loc = (s, s.from_center(r=0.85, h=-0.6, theta=angle))\n pip300.pick_up_tip()\n pip300.aspirate(100, twb[ind])\n pip300.move_to(s.bottom(5))\n pip300.dispense(100, disp_loc)\n pip300.mix(10, 80, disp_loc)\n pip300.drop_tip()\n\n magdeck.engage(height=18)\n\n if wash < 2:\n robot.comment('Incubating beads on magnet for 3 minutes')\n pip300.delay(minutes=3)\n # remove and discard supernatant\n for s in samples300:\n pip300.pick_up_tip()\n pip300.transfer(\n 120, s.bottom(1), liquid_waste[wash], new_tip='never')\n pip300.blow_out()\n pip300.drop_tip()\n\n robot.comment('Seal the plate, and keep on the magnetic module. 
The TWB \\\nremains in the wells to prevent overdrying of the beads')\n\n\nrun_custom_protocol(**{'p300_type': 'single', 'p300_mount': 'right', 'number_of_samples_to_process': 24})\n","sub_path":"Library Prep/Nextera DNA flex/nextera_flex_post_tag_cleanup.singlechannel.py","file_name":"nextera_flex_post_tag_cleanup.singlechannel.py","file_ext":"py","file_size_in_byte":3378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"588572395","text":"# Entrada\n\ncelsius = float(input(\"Digite o valor de uma temperatura em graus Celsius: \"))\n\n# Processamento\n\nconvert_f = (9 * celsius + 160) / 5\n\n# Saída\n\nprint(\"A temperatura equivalente em graus Fahrenheit é: \",convert_f)","sub_path":"Fabio01_Parte01/F1_Q20_c_to_f.py","file_name":"F1_Q20_c_to_f.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"792450","text":"import matplotlib.pyplot as plt\nimport pandas as pd\n\ndataset = pd.read_csv('50_Startups.csv')\nX = dataset.iloc[:,:-1].values\ny = dataset.iloc[:, 4:5].values\n\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabelencoder_X = LabelEncoder()\n\nX[:, 3] = labelencoder_X.fit_transform(X[:,3])\nonehotencoder = OneHotEncoder(categorical_features = [3])\nX = onehotencoder.fit_transform(X).toarray()\n\n# Spliting into a training and a test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size = .25, random_state = 0\n)\n\nfrom sklearn.linear_model import LinearRegression as LR\n\nregressor = LR()\nregressor.fit(X_train, y_train)\n\ny_pred = regressor.predict(X_test)\n\nregressor.score(X_test, y_test)\n\nplt.plot(X_test, y_test, color='g')\nplt.plot(X_test, y_pred, color='b')\n\nfrom sklearn.metrics import mean_squared_error\nimport math\nregression_model_mse = mean_squared_error(y_pred, y_test)\nregression_model_mse_sq = math.sqrt(regression_model_mse)","sub_path":"All Linear Regression/MultiRegression/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"178831022","text":"from .base import BaseCommand\n\nfrom pontomais.config import settings\n\n\nclass ProxyCommand(BaseCommand):\n name = \"proxy\"\n description = \"If you need to use a proxy, you can configure using this command\"\n\n def handle(self):\n self.line(\"\")\n if not settings.config_file_exists():\n self.line(\"Configuration file not found\")\n self.line(\"Please use the command: pontomais configure\")\n return\n\n http_proxy = self.ask(f\"{self.PREFIX}HTTP proxy:\", \"\")\n https_proxy = self.ask(f\"{self.PREFIX}HTTPS proxy:\", \"\")\n\n config = settings.get_configurations()\n config[\"proxy\"] = {\"http\": http_proxy, \"https\": https_proxy}\n settings.set_configurations(config)\n self.line(\"\")\n","sub_path":"pontomais/commands/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"539975881","text":"import bisect\n\n\nclass Solution:\n def findBestValue(self, arr: List[int], target: int) -> int:\n arr.sort()\n n = len(arr)\n prefix = [0]\n for num in arr:\n prefix.append(prefix[-1] + num)\n\n l, r, ans = 0, max(arr), -1\n while l <= r:\n mid = (l + r) // 2\n it = bisect.bisect_left(arr, mid)\n cur = prefix[it] + (n - it) * mid\n if cur 
<= target:\n                ans = mid\n                l = mid + 1\n            else:\n                r = mid - 1\n\n        def check(x):\n            return sum(x if num >= x else num for num in arr)\n\n        choose_small = check(ans)\n        choose_big = check(ans + 1)\n        return ans if abs(choose_small - target) <= abs(choose_big - target) else ans + 1\n\n\nif __name__ == \"__main__\":\n    arr = list(map(int, input().split(\",\")))\n    target = int(input())\n    solution = Solution()\n    result = solution.findBestValue(arr, target)\n    print(result)\n","sub_path":"Code/CodeRecords/2576/60730/295473.py","file_name":"295473.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"485342995","text":"#!/usr/bin/env python\nimport os\nfrom BaseHTTPServer import HTTPServer\nfrom SimpleHTTPServer import SimpleHTTPRequestHandler\n\nlight = True\n\nclass MyHandler(SimpleHTTPRequestHandler):\n\n    def do_POST(self):\n        global light\n        self.send_response(200)\n        self.send_header('Content-type','application/json')\n        self.end_headers()\n\n        if light:\n            light = False\n            self.wfile.write(\"{\\\"status\\\": true }\")\n        else:\n            light = True\n            self.wfile.write(\"{\\\"status\\\": false}\")\n\nif __name__ == '__main__':\n    HTTPServer(('0.0.0.0', 8080), MyHandler).serve_forever()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"144638635","text":"# coding:utf-8\nimport arcpy\nfrom arcpy import env\nfrom arcpy.sa import *\nimport sympy\n\nimport sys, string, os\nimport xlrd\nimport xlwt\n\n# Merge the station precipitation data from Jan 2011 through Dec 2017 into one xls file; some months contain duplicate rows\ndef f1():\n    path = 'F:/Test/Data/RGS/RGS83_201101_201712/'\n    filesPath=path+'month/'\n    files = os.listdir(filesPath)\n    inputFiles = []\n    for i in range(0, len(files)):\n        if os.path.splitext(files[i])[1] == '.xlsx':# \"collect the files\"\n            inputFiles.append(filesPath + files[i])\n\n    workbook = xlwt.Workbook(encoding='utf-8')\n    worksheet = workbook.add_sheet('20点_20点')\n    worksheet2 = workbook.add_sheet('8点_8点')\n\n\n    c=0 # column index\n    for i in range(0, len(inputFiles)):#len(inputFiles)\n        excel = xlrd.open_workbook(inputFiles[i])\n        table = excel.sheet_by_index(0)\n        rows = table.nrows\n        r=1 # row index; starts at 1 to leave a row for the month header\n        for j in range(1,rows):# note: skip the first header row\n\n            if(j==1 or table.cell(j, 1).value != table.cell(j-1, 1).value): # skip duplicate rows\n                temp=table.cell(j, 4).value\n                worksheet.write(r, c, temp)\n\n                temp = table.cell(j, 5).value\n                worksheet2.write(r, c, temp)\n\n                r=r+1\n            else:\n                continue\n\n        c=c+1\n\n    outFile = path+\"outFile.xls\"\n    workbook.save(outFile)\n#f1()\n\n# Using the station IDs of the 89 stations from July 2016, extract the station precipitation for those 89 stations.\ndef f2():\n    path = 'F:/Test/Data/RGS/RGS83_201101_201712/'\n    filesPath=path+'month/'\n    files = os.listdir(filesPath)\n    inputFiles = []\n    for i in range(0, len(files)):\n        if os.path.splitext(files[i])[1] == '.xlsx':# \"collect the files\"\n            inputFiles.append(filesPath + files[i])\n\n    workbook = xlwt.Workbook(encoding='utf-8')\n    worksheet = workbook.add_sheet('83RGS_20点_20点')\n\n\n\n    excelRGS = xlrd.open_workbook(path+\"RGSID.xlsx\")\n    tableRGS = excelRGS.sheet_by_index(0)\n    rowsRGS = tableRGS.nrows\n\n    c=0 # column index\n    for i in range(0, len(inputFiles)):#len(inputFiles)\n        excel = xlrd.open_workbook(inputFiles[i])\n        table = excel.sheet_by_index(0)\n        rows = table.nrows\n        # r=1 # row index; starts at 1 to leave a row for the month header\n        for j in range(1,rowsRGS):#\n            # if(r>=rowsRGS):# break out of the loop once all stations are found\n            #     break\n            temp = 999999\n            for k in range(1,rows):# note: skip the first header row\n                if(table.cell(k, 1).value == tableRGS.cell(j, 1).value): # duplicate rows are skipped automatically: after r+1, the duplicate no longer compares equal\n                    
temp=table.cell(k, 4).value\n\n # r=r+1\n worksheet.write(j, c, temp)\n c=c+1\n\n outFile = path+\"outfile2.xls\"\n workbook.save(outFile)\n#f2()\n\n\n\n","sub_path":"CodesRecords/processOfRGS.py","file_name":"processOfRGS.py","file_ext":"py","file_size_in_byte":2872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"15612689","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nimport os\nimport argparse\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport tensorflow as tf\n\nFLAGS = None\n\ndef weight_variable(shape):\n init = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(init)\n\n\ndef bias_variable(shape):\n init = tf.constant(0.1, shape=shape)\n return tf.Variable(init)\n\n\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding=\"SAME\")\n\n\ndef max_pool(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\"SAME\")\n\n\ndef main(_):\n\n tf_config = json.loads(os.environ.get('TF_CONFIG') or '{}')\n task_config = tf_config.get('task', {})\n task_type = task_config.get('type')\n task_index = task_config.get('index')\n\n FLAGS.job_name = task_type\n print('job_name:%s' %(task_type))\n FLAGS.task_index = task_index\n\n #ps_hosts = FLAGS.ps_hosts.split(\",\")\n #worker_hosts = FLAGS.worker_hosts.split(\",\")\n\n cluster_config = tf_config.get('cluster', {})\n ps_hosts = cluster_config.get('ps')\n worker_hosts = cluster_config.get('worker')\n \n ps_hosts_str = ','.join(ps_hosts)\n worker_hosts_str = ','.join(worker_hosts)\n \n FLAGS.ps_hosts = ps_hosts_str\n FLAGS.worker_hosts = worker_hosts_str\n \n # Construct the cluster and start the server\n ps_spec = FLAGS.ps_hosts.split(\",\")\n worker_spec = FLAGS.worker_hosts.split(\",\")\n \n # Get the number of workers.\n num_workers = len(worker_spec)\n\n\n # Create a cluster from the parameter server and worker hosts.\n #cluster = tf.train.ClusterSpec({\"ps\": ps_hosts, \"worker\": worker_hosts})\n cluster = tf.train.ClusterSpec({\"ps\": ps_spec, \"worker\": worker_spec})\n\n # Create and start a server for the local task.\n server = tf.train.Server(cluster,\n job_name=FLAGS.job_name,\n task_index=FLAGS.task_index,\n start=True)\n\n if FLAGS.job_name == \"ps\":\n server.join()\n elif FLAGS.job_name == \"worker\":\n # Import data\n mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)\n \n FLAGS.batch_size = 550\n step = mnist.train.num_examples / 500 - 1\n print(\"train examples: %d, step: %d\" % (mnist.train.num_examples, step) )\n \n \n\n with tf.device(tf.train.replica_device_setter(\n #worker_device=\"/job:worker/task:%d\" % FLAGS.task_index,\n worker_device=\"/job:worker/task:%d/gpu:%d\" % (FLAGS.task_index, 0),\n cluster=cluster)):\n\n x = tf.placeholder(tf.float32, [None, 784])\n y_actual = tf.placeholder(tf.float32, [None, 10])\n keep_prob = tf.placeholder(tf.float32)\n\n\n x_image = tf.reshape(x, [-1, 28, 28, 1])\n\n W_conv1 = weight_variable([5, 5, 1, 32])\n b_conv1 = bias_variable([32])\n h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1) \n h_pool1 = max_pool(h_conv1) \n\n W_conv2 = weight_variable([5, 5, 32, 64])\n b_conv2 = bias_variable([64])\n h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) \n h_pool2 = max_pool(h_conv2) \n\n W_fc1 = weight_variable([7 * 7 * 64, 1024])\n b_fc1 = bias_variable([1024])\n h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64]) # reshape\n h_fc1 = 
tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) \n\n # dropout\n h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) # dropout\n\n W_fc2 = weight_variable([1024, 10])\n b_fc2 = bias_variable([10])\n y_predict = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2) # softmax, [-1, 10]\n\n cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_actual * tf.log(y_predict), 1)) \n global_step = tf.train.get_or_create_global_step()\n #global_step = tf.train.get_global_step()\n optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)\n train_op = optimizer.minimize(cross_entropy, global_step=global_step)\n\n cross_prediction = tf.equal(tf.argmax(y_predict, 1), tf.argmax(y_actual, 1))\n accuracy = tf.reduce_mean(tf.cast(cross_prediction, tf.float32)) \n\n # tensorboard\n tf.summary.scalar('cost', cross_entropy)\n tf.summary.scalar(\"accuracy\", accuracy)\n summary_op = tf.summary.merge_all()\n\n # The StopAtStepHook handles stopping after running given steps.\n #hooks = [tf.train.StopAtStepHook(last_step=400)]\n hooks = [tf.train.StopAtStepHook(last_step=step)]\n\n config = tf.ConfigProto(\n allow_soft_placement=True,\n log_device_placement=False,\n #device_filters=[\"/job:ps\", \"/job:worker/task:%d\" % FLAGS.task_index]\n device_filters=[\"/job:ps\", \"/job:worker/task:%d/gpu:%d\" % (FLAGS.task_index, 0)]\n )\n\n # The MonitoredTrainingSession takes care of session initialization,\n # restoring from a checkpoint, saving to a checkpoint, and closing when done\n # or an error occurs.\n # master=\"grpc://\" + worker_hosts[FLAGS.task_index]\n \n with tf.train.MonitoredTrainingSession(master=server.target,\n config=config,\n is_chief=(FLAGS.task_index == 0),\n hooks=hooks,\n max_wait_secs = 120) as mon_sess:\n while not mon_sess.should_stop():\n \n # Run a training step asynchronously.\n # See `tf.train.SyncReplicasOptimizer` for additional details on how to\n # perform *synchronous* training.\n # mon_sess.run handles AbortedError in case of preempted PS.\n #batch_x, batch_y = mnist.train.next_batch(64)\n batch_x, batch_y = mnist.train.next_batch(FLAGS.batch_size)\n# step, _ = mon_sess.run([global_step, train_op], feed_dict={\n# x: batch_x,\n# y_actual: batch_y,\n# keep_prob: 0.8})\n \n\n #print(\"global_step: %f\" % step)\n #if step > 0 and step % 10 == 0:\n step, _, loss, acc = mon_sess.run([global_step, train_op, cross_entropy, accuracy], feed_dict={\n x: batch_x,\n y_actual: batch_y,\n keep_prob: 1.0})\n print(\"step: %d, loss: %f, acc: %f\" % (step, loss, acc))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.register(\"type\", \"bool\", lambda v: v.lower() == \"true\")\n # Flags for defining the tf.train.ClusterSpec\n\n parser.add_argument(\n \"--data_dir\",\n type=str,\n default=\"/MNIST_data/\",\n help=\"data directory\"\n )\n parser.add_argument(\n \"--batch_size\",\n type=int,\n default=600,\n help=\"batch size\"\n )\n parser.add_argument(\n \"--max_step\",\n type=int,\n default=100,\n help=\"step num\"\n )\n\n parser.add_argument(\n \"--ps_hosts\",\n type=str,\n default=\"\",\n help=\"Comma-separated list of hostname:port pairs\"\n )\n parser.add_argument(\n \"--worker_hosts\",\n type=str,\n default=\"\",\n help=\"Comma-separated list of hostname:port pairs\"\n )\n parser.add_argument(\n \"--job_name\",\n type=str,\n default=\"\",\n help=\"One of 'ps', 'worker'\"\n )\n # Flags for defining the tf.train.Server\n parser.add_argument(\n \"--task_index\",\n type=int,\n default=0,\n help=\"Index of task within the job\"\n )\n\n parser.add_argument(\n# 
\"--checkpoint_dir\",\n \"--model_save_path\",\n type=str,\n default=\"/wangdongmei/models/logs/log_tf_mnist_dist/\",\n help=\"path to a directory where to restore variables.\"\n )\n\n parser.add_argument(\n \"--learning_rate\",\n type=float,\n default=0.001,\n help=\"learning rate\"\n )\n\n FLAGS, _ = parser.parse_known_args()\n tf.app.run(main=main)\n","sub_path":"tensorflow/mnist/tf_mnist_dist.py","file_name":"tf_mnist_dist.py","file_ext":"py","file_size_in_byte":8458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"107357704","text":"import os\nimport cv2\nimport numpy as np\n\ndef cutter(src):\n\n cap = cv2.VideoCapture(src)\n\n frames = []\n if not cap.isOpened():\n cap.open(src)\n ret = True\n while(True and ret):\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n frames.append(frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n # When everything done, release the capture\n cap.release()\n\n croped_frames = []\n for cf in frames:\n rsz_f = cf[300:950, 0:1920] # cut unnecessary sky and car view\n croped_frames.append(rsz_f)\n\n return np.asarray(croped_frames)\n","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"227577088","text":"\n# func should take 3 arguments ap,bp,c.\n# If ap,bp are the computed optimized path values from 2 cells side-by-side,\n# and c is the number in tri above them,\n# then func(ap,bp,c) is the optimized path from c.\n# tri should be a list of lists where tri[0] is the tip row of the triangle.\n# This function then returns the optimized path from the tip of the triangle.\ndef optimizeTrianglePath(func,tri):\n\tpaths = tri[len(tri)-1]\n\tfor row in reversed(tri[0:len(tri)-1]):\n\t\tnewpaths = []\n\t\tfor i in range(0,len(row)):\n\t\t\tnewpaths.append( func(paths[i], paths[i+1], row[i]) )\n\t\tpaths = newpaths\n\treturn paths[0]","sub_path":"cleuder/optimize.py","file_name":"optimize.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"342286385","text":"from discord.ext import commands\nimport discord\n\nimport random\nimport asyncio\nimport time\n\nclass statusCog:\n def __init__(self, bot):\n self.bot = bot\n\n async def status_change():\n stati = [\n 'with little children', #CBP\n 'with ur mum XD', #CBP\n '( ͡° ͜ʖ ͡°)',\n 'with big, big balls', #CBP\n 'FORTNITE XD XD',\n 'yeeting babies', #CBP\n 'deepfrying the memes',\n 'bepis simulator',\n 'with myself', #CBP\n 'with my peepee', #CBP\n 'with mommy\\'s peepee', #CBP\n 'midget basketball', #CBP\n 'with my rocket', #CBP\n 'with the anthros', #CBP\n 'peek a boo ( ͡° ͜ʖ ͡°)', #CBP\n 'on e621', #CBP\n 'lewding lolis', #CBP\n 'dead',\n 'alone',\n 'Discord',\n 'with high voltage',\n 'on a shitty server', #CBP\n 'in the street', #CBP\n 'with lives of the innocent', #CBP\n 'rm -rf /',\n 'with 14 werewolves', #CBP\n 'in a back alley' #CBP\n ]\n\n helpStati = [\n 'type ?/help',\n 'try ?/help',\n 'use ?/help',\n 'say ?/help',\n '?/help'\n ]\n\n while True:\n await self.bot.change_presence(game=discord.Game(name=random.choice(stati)))\n await asyncio.sleep(20)\n await self.bot.change_presence(game=discord.Game(name=random.choice(helpStati) + ' in ' + str(len(self.bot.guilds) - 2) + ' servers'))\n await asyncio.sleep(10)\n\n self.bot.loop.create_task(status_change())\n\ndef setup(bot):\n 
bot.add_cog(statusCog(bot))\n","sub_path":"cogs/status.py","file_name":"status.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"455660059","text":"#!/usr/bin/env python3\n\n# initialize django\nfrom api.models import Category\nimport django\nimport os\nos.environ['DJANGO_SETTINGS_MODULE'] = 'arcticapi.settings'\ndjango.setup()\n\n# regular imports\n\n# main script\n\n\ndef main():\n for cat in Category.objects.all():\n print(cat.id, cat.title)\n\n\n# bootstrap\nif __name__ == '__main__':\n main()\n","sub_path":"run_script.py","file_name":"run_script.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"280454734","text":"import requests\nimport pandas as pd\n\n\nclass LiveLoosers(object):\n def __init__(self):\n self.url = \"https://www.moneycontrol.com/stocks/marketstats/nseloser/index.php\"\n\n def get_live_data(self):\n response = requests.get(self.url).content\n pd.set_option('display.max_columns', None)\n df_list = pd.read_html(response)\n df = df_list[0]\n live_data = {'company_names': [], 'High': [], 'Low': [], 'Loss': [], 'Last_Price': [], 'Prev_Close': []}\n\n company_name = df['Company Name'][0::7]\n for company in company_name:\n live_data['company_names'].append(\" \".join(company.split()[0:2]))\n\n for high in df['High'][0::7]:\n live_data['High'].append(high)\n\n for low in df['Low'][0::7]:\n live_data['Low'].append(low)\n\n for gain in df['% Loss'][0::7]:\n live_data['Loss'].append(gain)\n\n for last_price in df['Last Price'][0::7]:\n live_data['Last_Price'].append(last_price)\n\n for close in df['Prev Close'][0::7]:\n live_data['Prev_Close'].append(close)\n\n df = pd.DataFrame(live_data)\n return df\n\n\nif __name__ == \"__main__\":\n obj = LiveLoosers()\n obj.get_live_data()\n","sub_path":"Share_Market/live_top_loosers.py","file_name":"live_top_loosers.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"324138265","text":"# -*- coding: utf-8 -*-\n##############################################################################\n# Copyright (c) 2021-Present IjVine Corporation ()\n\n##############################################################################\nfrom odoo import api,fields,models\n\n\nclass ChannelTemplateMappings(models.Model):\n\t_name = 'channel.template.mappings'\n\t_inherit = 'channel.mappings'\n\t_description = 'Product Template Mapping'\n\n\tstore_product_id = fields.Char('Store Product ID',required=True)\n\ttemplate_name = fields.Many2one('product.template','Product Template')\n\todoo_template_id = fields.Char('Odoo Template ID',required=True)\n\tdefault_code = fields.Char('Default code/SKU')\n\tbarcode = fields.Char('Barcode/EAN/UPC or ISBN')\n\n\t# _sql_constraints = [\n\t# \t(\n\t# \t\t'channel_store_store_product_id_uniq',\n\t# \t\t'unique(channel_id,store_product_id)',\n\t# \t\t'Store Product ID must be unique for channel product mapping!'\n\t# \t),\n\t# \t(\n\t# \t\t'channel_odoo_odoo_template_id_uniq',\n\t# \t\t'unique(channel_id,odoo_template_id)',\n\t# \t\t'Odoo Template ID must be unique for channel template mapping!'\n\t# \t)\n\t# ]\n\n\n\tdef unlink(self):\n\t\tfor record in self:\n\t\t\tif record.store_product_id:\n\t\t\t\tmatch = record.channel_id.match_product_feeds(record.store_product_id)\n\t\t\t\tif match:\n\t\t\t\t\tmatch.unlink()\n\t\tchannel_ids 
= self.mapped('channel_id.id')\n\t\tproduct_ids = list(map(int, self.mapped('odoo_template_id')))\n\t\tmappings = self.env['channel.product.mappings'].search(\n\t\t\t[\n\t\t\t\t('channel_id','in',channel_ids),\n\t\t\t\t('odoo_template_id','in',product_ids)\n\t\t\t]\n\t\t)\n\t\tmappings.unlink()\n\t\treturn super(ChannelTemplateMappings,self).unlink()\n\n\t@api.onchange('template_name')\n\tdef change_odoo_id(self):\n\t\tself.odoo_template_id = self.template_name.id\n\n\tdef _compute_name(self):\n\t\tfor record in self:\n\t\t\trecord.name = record.template_name.name if record.template_name else 'Deleted/Undefined'\n","sub_path":"ijvine_ebay_base/models/mappings/product_template_mapping.py","file_name":"product_template_mapping.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"70245526","text":"import matplotlib.pyplot as plt, mpld3\nfrom matplotlib.pyplot import pie, axis, show\nimport io\n\nfrom wordcloud import STOPWORDS, WordCloud\n\n\ndef get_videoid(video_id):\n\n from plot import yt_connect\n pol_comments = yt_connect.generate(video_id)\n\n print(\"Video Id:\", video_id, \"and total comments:\", len(pol_comments))\n neutral = 0;\n positive = 0;\n negative = 0;\n\n for com in pol_comments[0]:\n # print(com[1])\n if (com[1] >= 0.01):\n positive = positive + 1;\n elif (com[1] <= -0.01):\n negative = negative + 1;\n else:\n neutral = neutral + 1;\n\n names = [\"Neutral\", \"Negative\", \"Positive\"]\n values = [neutral, negative, positive]\n\n # names = [\"Positive\", \"Negative\"]\n # values = [positive, negative]\n\n adj_list = pol_comments[1]\n a_list = []\n if(positive > negative):\n for adj in adj_list:\n if(adj[1] >= 0.01):\n a_list.append(adj)\n elif(positive < negative):\n for adj in adj_list:\n print(\"adj\",adj[0])\n if(adj[1] <= -0.01):\n a_list.append(adj)\n\n print(\"a_list\", len(a_list))\n adj_fig = plot_tags_word_cloud(a_list)\n\n fig, ax = plt.subplots()\n explode = (0, 0, 0.01)\n ax.pie(values, labels=names, autopct='%1.0f%%', explode=explode,\n shadow=False, startangle=0, labeldistance=1.05)\n ax.axis('equal')\n list = [mpld3.fig_to_html(fig), mpld3.fig_to_html(adj_fig)]\n return list\n\n\ndef plot_tags_word_cloud(adj_list):\n plt.axis('off')\n fig = plt.figure(figsize=(8, 8))\n stopwords = set(STOPWORDS)\n wordcloud = WordCloud( background_color= 'black', stopwords = stopwords, max_words = 200, max_font_size = 120, random_state = 42).generate(str(adj_list))\n plt.imshow(wordcloud)\n # plt.title('Word Cloud for Tags', fontsize = 20)\n return fig","sub_path":"plot/sentiment.py","file_name":"sentiment.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"490152613","text":"# Import data\nimport requests\nimport json\nimport csv\nimport asyncio\n\n# Base URL\nbase_url = 'https://esi.evetech.net/latest/'\n\n# API URLs\nsearch_url = base_url + 'search/'\nmarket_url = base_url + 'markets/'\nuniverse_url = base_url +'universe/'\n\n# What items to search for\nmarket_search_ids = []\n\ndef get_region_id(region_name):\n payload = {'categories': ['region'], 'search': region_name, 'strict': True}\n try:\n r = requests.get(search_url, params=payload)\n return r.json()['region'][0]\n except:\n print('Could not find region ID for: %s' %region_name)\n\ndef get_relevant_itemtypes(region_id, group_search=False, load_existing=False):\n if not(load_existing):\n print('Getting relevant item 
types:')\n        i = 1\n        rel_types = []\n        while(True):\n            r = requests.get(market_url + str(region_id) + '/types', params={'page': i})\n            if r.json() == []:\n                break\n            else:\n                rel_types += r.json()\n                i += 1\n        rel_types = list(set(rel_types))\n\n    if group_search:\n        market_group_item_ids = []\n        print('Getting item and market group data for: %s...' %\n              ', '.join(group_search))\n        \n        r = requests.post(universe_url + 'ids/', json=group_search).json()\n        group_data = r['inventory_types']\n        \n        for item in group_data:\n            market_group_item_ids.append(item['id'])\n        \n        return list(set(market_group_item_ids)&set(rel_types))\n    return rel_types\n\nasync def get_item_name(item_library, type_ids, session):\n    for type_id in type_ids:\n        if str(type_id) not in item_library.keys():\n            endpoint = 'types/' + str(type_id)\n            url = universe_url + endpoint\n            async with session.get(url) as r:\n                try:\n                    type_data = await r.json()\n                    item_library[type_id] = {\n                        'name': type_data['name'],\n                        'volume': type_data['packaged_volume'],\n                        'market_group_id': type_data['market_group_id']}\n                except:\n                    print('Error for type %i: failed to fetch information.' % type_id)\n        else:\n            print('Item %i already exists in library, skipping...' % type_id)\n    return item_library\n\nasync def get_marketgroup_info(ids, session):\n    type_list = []\n    for id in ids:\n        endpoint = 'groups/' + str(id)\n        url = market_url + endpoint\n        async with session.get(url) as r:\n            try:\n                market_json = await r.json()\n                type_list += market_json['types']\n            except:\n                print('Error for market group %i: data unavailable.' % id)\n    return type_list\n\nasync def get_item_prices(history_dict, type_ids, region_id, session):\n    endpoint = str(region_id) + '/history'\n    url = market_url + endpoint\n    for type_id in type_ids:\n        params = {'type_id': type_id}\n        async with session.get(url, params=params) as r:\n            try:\n                price_data = await r.json()\n                # Collect last value\n                price = price_data[len(price_data) - 1]\n                # Save to history_dict, keyed by the actual type id\n                history_dict[type_id] = price\n            except:\n                print('Error for type %i: market data unavailable.' 
% type_id)\n\n #return loop.run_until_completed(get_price_history(\n # quert_dict, relevant_item_types, region_id, session\n #))","sub_path":"swagme/import_data.py","file_name":"import_data.py","file_ext":"py","file_size_in_byte":3566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"594624120","text":"import pygame\n\nclass HUD:\n\n pygame.font.init()\n\n screen_w = 1920\n\n border = 5\n middle_gap = 10\n white = (255, 255, 255)\n black = (0, 0, 0)\n font = pygame.font.SysFont('arial', 16)\n b_font = pygame.font.SysFont('arial', 16, True)\n right_side_offset = 160\n\n def __init__(self, num_houses, num_stores, virus, settings):\n self.current_infections = 0\n self.total_infections = 0\n self.total_deaths = 0\n self.total_recoveries = 0\n self.people_alive = 0\n self.time = 0\n self.day = 0\n self.settings_dic = settings\n\n # Top left\n self.simulation_settings_render = HUD.b_font.render('Simulation Settings:', 1, HUD.black)\n self.num_houses_render = HUD.font.render('Number of Houses: ' + str(num_houses), 1, HUD.black)\n self.num_stores_render = HUD.font.render('Number of Stores: ' + str(num_stores), 1, HUD.black)\n self.total_people_render = HUD.font.render('Starting People: ' + str(num_houses * 4), 1, HUD.black)\n self.starting_infections_render = HUD.font.render('Starting Infections: ' + str(self.settings_dic.get('Starting Infections')), 1, HUD.black)\n self.average_iq_render = HUD.font.render('Average IQ: ' + str(self.settings_dic.get('Average Persons IQ')), 1, HUD.black)\n self.iq_range_render = HUD.font.render('IQ Range: ' + str(self.settings_dic.get('Persons IQ Range')), 1, HUD.black)\n\n # Middle\n self.live_stats_render = HUD.b_font.render('Live Simulation Stats:', 1, HUD.black)\n self.current_infections_render = HUD.font.render('Current Infections: ' + str(self.current_infections), 1, HUD.black)\n self.total_infections_render = HUD.font.render('Total Infections: ' + str(self.total_infections), 1, HUD.black)\n self.total_deaths_render = HUD.font.render('Total Deaths: ' + str(self.total_deaths), 1, HUD.black)\n self.total_recoveries_render = HUD.font.render('Total Recoveries: ' + str(self.total_recoveries), 1, HUD.black)\n self.people_alive_render = HUD.font.render('People Alive: ' + str(self.people_alive), 1, HUD.black)\n self.time_render = HUD.font.render('Time: ' + str(self.time), 1, HUD.black)\n self.day_render = HUD.font.render('Day: ' + str(self.day), 1, HUD.black)\n\n # Right\n self.virus_render = HUD.b_font.render('Virus Stats:', 1, HUD.black)\n self.virus_spread_chance = HUD.font.render('Spread Chance: ' + str(virus.spread_chance), 1, HUD.black)\n self.virus_lethality_chance = HUD.font.render('Mortality Chance: ' + str(virus.mortality), 1, HUD.black)\n self.virus_noticibility = HUD.font.render('Virus Noticibility: ' + str(virus.noticibilty), 1, HUD.black)\n\n def update(self, ci, ti, td, tr, pa, virus, time, day):\n self.current_infections = ci\n self.current_infections_render = HUD.font.render('Current Infections: ' + str(self.current_infections), 1, HUD.black)\n\n self.total_infections = ti\n self.total_infections_render = HUD.font.render('Total Infections: ' + str(self.total_infections), 1, HUD.black)\n\n self.total_deaths = td\n self.total_deaths_render = HUD.font.render('Total Deaths: ' + str(self.total_deaths), 1, HUD.black)\n\n self.total_recoveries = tr\n self.total_recoveries_render = HUD.font.render('Total Recoveries: ' + str(self.total_recoveries), 1, HUD.black)\n\n self.people_alive = pa\n 
self.people_alive_render = HUD.font.render('People Alive: ' + str(self.people_alive), 1, HUD.black)\n\n self.time = time\n\n if self.time == 25:\n self.time_render = HUD.font.render('Hour: 24', 1, HUD.black)\n else:\n self.time_render = HUD.font.render('Hour: ' + str(self.time), 1, HUD.black)\n\n self.day = day\n self.day_render = HUD.font.render('Day: ' + str(self.day), 1, HUD.black)\n\n if virus.noticed:\n self.virus_discovered_render = HUD.font.render('Virus Noticed: Yes', 1, HUD.black)\n else:\n self.virus_discovered_render = HUD.font.render('Virus Noticed: No', 1, HUD.black)\n\n def render(self, display):\n pygame.draw.rect(display, HUD.white, (0, 0, HUD.screen_w, 100))\n\n # left side of top hud will have the current simulation settings\n display.blit(self.simulation_settings_render, (HUD.border, HUD.border))\n display.blit(self.num_houses_render, (HUD.border, HUD.border + 17))\n display.blit(self.num_stores_render, (HUD.border, HUD.border + 17 * 2))\n display.blit(self.total_people_render, (HUD.border, HUD.border + 17 * 3))\n display.blit(self.starting_infections_render, (HUD.border, HUD.border + 17 * 4))\n display.blit(self.average_iq_render, (HUD.border + 190, HUD.border + 17))\n display.blit(self.iq_range_render, (HUD.border + 190, HUD.border + 17 * 2))\n\n # middle top of hud will have the current stats of the simulation\n display.blit(self.live_stats_render, (HUD.screen_w / 2 - self.live_stats_render.get_width() / 2, HUD.border))\n display.blit(self.time_render, (HUD.screen_w / 2 - self.time_render.get_width() - HUD.middle_gap, HUD.border + 17))\n display.blit(self.day_render, (HUD.screen_w / 2 + HUD.middle_gap, HUD.border + 17))\n display.blit(self.current_infections_render, (HUD.screen_w / 2 - self.current_infections_render.get_width() - HUD.middle_gap, HUD.border + 17 * 2))\n display.blit(self.total_infections_render, (HUD.screen_w / 2 + HUD.middle_gap, HUD.border + 17 * 2))\n display.blit(self.total_deaths_render, (HUD.screen_w / 2 - self.total_deaths_render.get_width() - HUD.middle_gap, HUD.border + 17 * 3))\n display.blit(self.total_recoveries_render, (HUD.screen_w / 2 + HUD.middle_gap, HUD.border + 17 * 3))\n display.blit(self.people_alive_render, (HUD.screen_w / 2 - self.people_alive_render.get_width() - HUD.middle_gap, HUD.border + 17 * 4))\n display.blit(self.virus_discovered_render, (HUD.screen_w / 2 + HUD.middle_gap, HUD.border + 17 * 4))\n\n # right side of top hud will have virus settings.\n display.blit(self.virus_render, (HUD.screen_w - HUD.border - HUD.right_side_offset, HUD.border))\n display.blit(self.virus_spread_chance, (HUD.screen_w - HUD.border - HUD.right_side_offset, HUD.border + self.virus_spread_chance.get_height()))\n display.blit(self.virus_lethality_chance, (HUD.screen_w - HUD.border - HUD.right_side_offset, HUD.border + self.virus_spread_chance.get_height() * 2))\n display.blit(self.virus_noticibility, (HUD.screen_w - HUD.border - HUD.right_side_offset, HUD.border + self.virus_spread_chance.get_height() * 3))\n \n\n ","sub_path":"EXE_IN_HERE/lib/hud_manager.py","file_name":"hud_manager.py","file_ext":"py","file_size_in_byte":6673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"87002061","text":"from aiohttp.test_utils import unittest_run_loop\n\nfrom aiohttp_rest_framework.fields import sa_ma_pg_field_mapping\nfrom aiohttp_rest_framework.serializers import ModelSerializer\nfrom aiohttp_rest_framework.utils import ClassLookupDict\nfrom tests.functional.sa.orm.base import BaseTestCase\nfrom 
tests.functional.sa.utils import get_fixtures_by_name\nfrom tests.test_app.sa.orm import models\n\n\nclass SASerializer(ModelSerializer):\n class Meta:\n model = models.SAField\n fields = \"__all__\"\n\n\nclass FieldsTestCase(BaseTestCase):\n @unittest_run_loop\n async def test_pg_sa_inferred_field_serialization(self) -> None:\n reversed_field_mapping = reversed(ClassLookupDict(sa_ma_pg_field_mapping))\n serializer = SASerializer(self.sa_instance)\n self.assertTrue(serializer.data)\n for field in serializer.fields.values():\n self.assertIn(field, reversed_field_mapping)\n\n @unittest_run_loop\n async def test_pg_sa_inferred_field_deserialization(self) -> None:\n sa_fields_data = get_fixtures_by_name(\"SAField\")[0]\n serializer = SASerializer(data=sa_fields_data)\n serializer.is_valid(raise_exception=True)\n await serializer.save()\n","sub_path":"tests/functional/sa/orm/test_fields.py","file_name":"test_fields.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"519835841","text":"\n# coding: utf-8\n\n# In[1]:\n\nimport pandas as pd\nframes = []\nfor i in range(1,17):\n frames.append(pd.read_csv(r'data\\dataset%d.csv'%(i), error_bad_lines=False,sep=';'))\nus_election = pd.concat(frames,ignore_index=True)\n\n\n# In[2]:\n\nus_election.head()\n\n\n# In[2]:\n\nimport random\nstate = set()\nfor i in range(3113):\n if (type(us_election.ST[i]) is not float):\n state.add(us_election.ST[i])\ndata = random.sample(range(50),10)\nselect_state =set()\nfor i in range(10):\n select_state.add(list(state)[data[i]])\n\nstates = list(select_state)\n\n\n# In[3]:\n\ndf3 = pd.DataFrame({'State':'','Year':0,'Party':'Republicans','Mean':0},range(3))\ndf4 = pd.DataFrame({'State':'','Year':0,'Party':'Democrats','Mean':0},range(3))\ndf5 = pd.DataFrame({'State':'','Year':0,'Party':'','Mean':0},range(0))\ndf5\n\n\n# In[4]:\n\nfrom numpy import mean\nfor i in range(10):\n republicans_16 = []\n republicans_12 = []\n republicans_08 = []\n democrats_16 = []\n democrats_12 = []\n democrats_08 = []\n for j in range(3113):\n if us_election.ST[j] == states[i]:\n republicans_16.append(us_election['Republicans 2016'][j])\n republicans_12.append(us_election['Republicans 2012'][j])\n republicans_08.append(us_election['Republicans 2008'][j])\n democrats_16.append(us_election['Democrats 2016'][j])\n democrats_12.append(us_election['Democrats 2012'][j])\n democrats_08.append(us_election['Democrats 2008'][j])\n r_mean_2016 = mean(republicans_16)\n r_mean_2012 = mean(republicans_12)\n r_mean_2008 = mean(republicans_08)\n d_mean_2016 = mean(democrats_16)\n d_mean_2012 = mean(democrats_12)\n d_mean_2008 = mean(democrats_08)\n \n df3.loc[0,'Mean'] = r_mean_2016\n df3.loc[1,'Mean'] = r_mean_2012\n df3.loc[2,'Mean'] = r_mean_2008\n df3.loc[0,'State'] = states[i]\n df3.loc[1,'State'] = states[i]\n df3.loc[2,'State'] = states[i]\n df3.loc[0,'Year'] = 2016\n df3.loc[1,'Year'] = 2012\n df3.loc[2,'Year'] = 2008\n df5 = pd.concat([df5,df3])\n df4.loc[0,'Mean'] = d_mean_2016\n df4.loc[1,'Mean'] = d_mean_2012\n df4.loc[2,'Mean'] = d_mean_2008\n df4.loc[0,'State'] = states[i]\n df4.loc[1,'State'] = states[i]\n df4.loc[2,'State'] = states[i]\n df4.loc[0,'Year'] = 2016\n df4.loc[1,'Year'] = 2012\n df4.loc[2,'Year'] = 2008\n df5 = pd.concat([df5,df4])\ndf5\n\n\n# In[5]:\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nget_ipython().magic('matplotlib inline')\ng = sns.factorplot(x=\"State\", y=\"Mean\", 
hue=\"Year\",col=\"Party\", data=df5)\nplt.savefig(\"Republicans_and_Democrats_2008_2012_2016.png\")\n\n\n# In[6]:\n\ndf5.to_csv('Analysis4_output.csv', sep='\\t',encoding='utf-8')\n\n","sub_path":"Final/Analysis4/Final_Analysis4_RepublicansVsDemocrats.py","file_name":"Final_Analysis4_RepublicansVsDemocrats.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"8068767","text":"\"\"\"\nDefines standalone webserver\n\"\"\"\n\n# HeadURL\t\t$HeadURL: file:///Z:/backup/svn/webstuff/tags/release20061229_v_1_0_0/webstuff/server/standalone_if.py $\n# Author:\t\t$Author: valdiic $\n# File version:\t$Revision: 270 $\n# Last changes:\t$Date: 2006-10-19 16:57:26 +0300 (Ce, 19 Okt 2006) $\n\n\nfrom common_if import *\nimport SocketServer\nfrom BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler\n\n# server instance\nserverInstance = None\n\nclass MyHandler(BaseHTTPRequestHandler):\n\tdef proc(self):\n\t\tstorage.add_thread()\n\t\ttry:\n\t\t\t# setup thread storage\n\t\t\tstorage[\"headers\"] = []\n\t\t\tstorage[\"response\"] = (200, \"OK\")\n\t\t\tstorage[\"content\"] = \"\"\n\n\t\t\t# get query string\n\t\t\tquery_string = \"\"\n\t\t\tpos = self.path.find('?')\n\t\t\tif pos != -1:\n\t\t\t\tquery_string = self.path[pos+1:]\n\t\t\t\n\t\t\t# store GET variables\n\t\t\tstorage[\"getvars\"] = cgi.parse_qs(query_string, True)\n\t\t\t\n\t\t\t# set up environment\n\t\t\tenv = {}\n\t\t\tenv[\"REQUEST_METHOD\"] = self.command\n\t\t\tenv[\"REQUEST_URI\"] = self.path\n\t\t\tenv[\"PATH_INFO\"] = self.path\n\t\t\tenv[\"SCRIPT_NAME\"] = ''\n\t\t\tenv[\"QUERY_STRING\"] = \"\"\n\t\t\tenv[\"CONTENT_TYPE\"] = self.headers.get(\"Content-Type\", \"\")\n\t\t\tenv[\"CONTENT_LENGTH\"] = self.headers.get(\"Content-Length\", \"\")\n\t\t\tenv[\"HTTP_COOKIE\"] = self.headers.get(\"Cookie\", \"\")\n\t\t\tenv[\"REMOTE_ADDR\"] = self.client_address[0]\n\t\t\tenv[\"SERVER_ADDR\"] = self.server.server_address[0]\n\t\t\tenv[\"SERVER_PORT\"] = str(self.server.server_address[1])\n\t\t\tenv[\"SERVER_PROTOCOL\"] = self.request_version\n\t\t\tenv[\"SERVER_SOFTWARE\"] = \"BaseHTTPServer\"\n\t\t\tenv[\"USER_AGENT\"] = self.headers.get(\"User-Agent\", \"\")\n\t\t\tenv[\"ACCEPT\"] = self.headers.get(\"Accept\", \"\")\n\t\t\tenv[\"ACCEPT_LANGUAGE\"] = self.headers.get(\"Accept-Language\", \"\")\n\t\t\tenv[\"ACCEPT_CHARSET\"] = self.headers.get(\"Accept-Charset\", \"\")\n\t\t\tenv[\"ACCEPT_ENCODING\"] = self.headers.get(\"Accept-Encoding\", \"\")\n\t\t\tenv[\"REFERER\"] = self.headers.get(\"Referer\", \"\")\n\n\t\t\taddr = env[\"SERVER_ADDR\"]\n\t\t\tport = str(env[\"SERVER_PORT\"])\n\t\t\thost_header = self.headers.get(\"Host\", \"\")\n\t\t\tif host_header != \"\":\n\t\t\t\tenv[\"SERVER_NAME\"] = host_header\n\t\t\telse:\n\t\t\t\tenv[\"SERVER_NAME\"] = addr + \":\" + port\n\n\t\t\t# store POST\n\t\t\tstorage[\"postvars\"] = cgi.parse(self.rfile, env, 1)\n\n\t\t\t# store cookies\n\t\t\tcookie_obj = Cookie.SimpleCookie()\n\t\t\tcookie_obj.load(env[\"HTTP_COOKIE\"])\n\t\t\tcookies = {}\n\t\t\tfor key, morsel in cookie_obj.iteritems():\n\t\t\t\tcookies[key] = morsel.value\n\t\t\tstorage[\"cookies\"] = cookies\n\n\t\t\t# store environment\n\t\t\tenv[\"QUERY_STRING\"] = query_string\n\t\t\tstorage[\"env\"] = env\n\t\t\t\n\t\t\t# execute user function\n\t\t\tself.server.user_func()\n\t\t\t\n\t\t\t# write response, headers, a blank line and the output\n\t\t\tcode, descr = storage[\"response\"]\n\t\t\tself.send_response(code, 
descr)\n\n\t\t\theader(\"Content-Length\", len(storage[\"content\"]))\n\t\t\tfor key, value in storage[\"headers\"]:\n\t\t\t\tself.send_header(key, value)\n\t\t\tself.wfile.write(\"\\r\\n\")\n\n\t\t\tself.wfile.write(storage[\"content\"])\n\t\tfinally:\n\t\t\t# clean thread storage\n\t\t\tstorage.remove_thread()\n\t\n\tdef do_GET(self):\n\t\tself.proc()\n\n\tdef do_POST(self):\n\t\tself.proc()\n\nclass MyServer(SocketServer.ThreadingMixIn, HTTPServer):\n\tdef __init__(self, addr, user_func):\n\t\tHTTPServer.__init__(self, addr, MyHandler)\n\t\tself.user_func = user_func\n\n#\n# Below are functions callable by user\n#\n\ndef init(user_func, ip = \"127.0.0.1\", port = 8000):\n\t\"\"\"Initialize standalone HTTP server.\"\"\"\n\t\n\taddr = (ip, port)\n\tglobal serverInstance\n\tserverInstance = MyServer(addr, user_func)\n\t\ndef run():\n\t\"\"\"Listen to requests\"\"\"\n\treturn serverInstance.serve_forever()\n","sub_path":"bsdradius/webstuff/server/standalone_if.py","file_name":"standalone_if.py","file_ext":"py","file_size_in_byte":3457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"312434571","text":"# Copyright 2016 Raytheon BBN Technologies\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n\nfrom __future__ import print_function, division\nimport time\nimport logging\nimport sys\nlogging.basicConfig(format='%(levelname)s:\\t%(message)s', level=logging.INFO)\n\nimport numpy as np\nimport scipy as sp\nimport pandas as pd\n\nfrom instruments.stanford import SR830\nfrom sweep import Sweep\nfrom procedure import FloatParameter, Quantity, Procedure\n\n\nclass FieldTest(Procedure):\n frequency = FloatParameter(name=\"Lockin Frequency\", unit=\"Hz\")\n noise = Quantity(name=\"Noise\", unit=\"V/Hz^1/2\")\n\n lock = SR830(\"GPIB1::9::INSTR\")\n\n def instruments_init(self):\n self.tc_delay = 9*self.lock.tc\n self.averages = 5\n \n self.lock.channel_1_type = 'X Noise'\n\n def lockin_measure():\n time.sleep(self.tc_delay)\n return np.mean( [self.lock.ch1 for i in range(self.averages)] )\n\n self.frequency.set_method(self.lock.set_frequency)\n self.noise.set_method(lockin_measure)\n\n def instruments_shutdown(self):\n self.lock.channel_1_type = 'X' \n\nif __name__ == '__main__':\n\n proc = FieldTest()\n\n # Define a sweep over prarameters\n sw = Sweep(proc)\n values = np.append( np.append(np.arange(0.1,105,5), np.arange(200,1100,50)), np.arange(2000,7000,500)).tolist()\n sw.add_parameter_hack(proc.frequency, values)\n\n # Define a writer\n sw.add_writer('SweepFrequency.h5', 'NoiseVsFreq-30ms', proc.noise)\n\n proc.instruments_init()\n for i in sw:\n logging.info(\"Freq, Noise: %f, %g\" % (proc.frequency.value, proc.noise.value) )\n proc.instruments_shutdown()\n","sub_path":"scripts/SR830-NoiseSweep.py","file_name":"SR830-NoiseSweep.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"217230595","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.11-x86_64/egg/reviewbotext/resources.py\n# Compiled at: 2018-07-31 04:09:55\nfrom __future__ import unicode_literals\nimport json\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom 
djblets.webapi.decorators import webapi_login_required, webapi_request_fields, webapi_response_errors\nfrom djblets.webapi.errors import DOES_NOT_EXIST, INVALID_FORM_DATA, NOT_LOGGED_IN, PERMISSION_DENIED\nfrom reviewboard.diffviewer.models import FileDiff\nfrom reviewboard.reviews.models import BaseComment, Review\nfrom reviewboard.webapi.decorators import webapi_check_local_site\nfrom reviewboard.webapi.resources import resources, WebAPIResource\nfrom reviewbotext.models import Tool\n\nclass ToolResource(WebAPIResource):\n \"\"\"Resource for workers to update the installed tools list.\n\n This API endpoint isn't actually RESTful, and just provides a place\n for workers to \"dump\" their entire list of installed tools as a single\n POST. A GET request will not actually return a list of tools.\n \"\"\"\n name = b'tool'\n allowed_methods = ('GET', 'POST')\n model_object_key = b'id'\n uri_object_key = b'tool_id'\n\n @webapi_login_required\n @webapi_check_local_site\n @webapi_response_errors(DOES_NOT_EXIST, INVALID_FORM_DATA, NOT_LOGGED_IN, PERMISSION_DENIED)\n @webapi_request_fields(required={b'hostname': {b'type': str, \n b'description': b'The hostname of the POSTing worker.'}, \n b'tools': {b'type': str, \n b'description': b'A JSON payload containing tool information.'}})\n def create(self, request, hostname, tools, *args, **kwargs):\n \"\"\"Add to the list of installed tools.\n\n The hostname field should contain the hostname the celery\n worker is using (This should be unique to that worker under\n proper configuration).\n\n The tools field should contain a JSON payload describing the\n list of tools installed at the worker. This payload should\n correspond to a list of dictionaries, with each dictionary\n corresponding to a tool. The dictionary should contain the\n following information:\n - 'name': The descriptive name of the tool.\n - 'entry_point': The entry point corresponding to the tool.\n - 'version': The tool version.\n - 'description': Longer description of the tool.\n - 'tool_options': A JSON payload describing the custom\n options the tool provides (see reviewbotext.models.Tool\n for a description of this payload).\n\n Here is an example tools payload:\n [\n {\n \"name\": \"Example Tool 1\",\n \"entry_point\": \"example1\",\n \"version\": \"1.0.1\",\n \"description\": \"An example tool.\",\n \"tool_options\": \"[]\"\n },\n {\n \"name\": \"Example Tool 2\",\n \"entry_point\": \"example2\",\n \"version\": \"1.2.1\",\n \"description\": \"The second example tool.\",\n \"tool_options\": \"[]\"\n },\n ]\n\n TODO: Use the hostname.\n \"\"\"\n from reviewbotext.extension import ReviewBotExtension\n extension = ReviewBotExtension.instance\n if request.user.id != extension.settings[b'user']:\n return PERMISSION_DENIED\n try:\n tools = json.loads(tools)\n except:\n return (\n INVALID_FORM_DATA,\n {b'fields': {b'dtools': b'Malformed JSON.'}})\n\n for tool in tools:\n obj, created = Tool.objects.get_or_create(entry_point=tool[b'entry_point'], version=tool[b'version'], defaults={b'name': tool[b'name'], \n b'description': tool[b'description'], \n b'tool_options': tool[b'tool_options'], \n b'in_last_update': True, \n b'timeout': tool[b'timeout'], \n b'working_directory_required': tool[b'working_directory_required']})\n if not created and not obj.in_last_update:\n obj.in_last_update = True\n obj.save()\n\n return (201, {})\n\n\ntool_resource = ToolResource()\n\nclass ReviewBotReviewResource(WebAPIResource):\n \"\"\"Resource for creating reviews with a single request.\n\n This resource allows 
Review Bot to create a full review using a single\n POST request. Using the traditional API would result in a high volume\n of requests from Review Bot, creating stress on the server.\n\n Each user may only have one review draft per request at a time. This\n resource allows concurrent review of a single request by creating\n the review and publishing it in a single transaction.\n \"\"\"\n name = b'review_bot_review'\n allowed_methods = ('GET', 'POST')\n\n @webapi_login_required\n @webapi_check_local_site\n @webapi_response_errors(DOES_NOT_EXIST, INVALID_FORM_DATA, NOT_LOGGED_IN, PERMISSION_DENIED)\n @webapi_request_fields(required={b'review_request_id': {b'type': int, \n b'description': b'The ID of the review request.'}}, optional={b'ship_it': {b'type': bool, \n b'description': b'Whether or not to mark the review \"Ship It!\"'}, \n b'body_top': {b'type': str, \n b'description': b'The review content above the comments.'}, \n b'body_top_rich_text': {b'type': bool, \n b'description': b'Whether the body-top should be formatted using Markdown.'}, \n b'body_bottom': {b'type': str, \n b'description': b'The review content below the comments.'}, \n b'body_bottom_rich_text': {b'type': bool, \n b'description': b'Whether the body-bottom should be formatted using Markdown.'}, \n b'diff_comments': {b'type': str, \n b'description': b'A JSON payload containing the diff comments.'}})\n def create(self, request, review_request_id, ship_it=False, body_top=b'', body_top_rich_text=False, body_bottom=b'', body_bottom_rich_text=False, diff_comments=None, *args, **kwargs):\n \"\"\"Creates a new review and publishes it.\"\"\"\n try:\n review_request = resources.review_request.get_object(request, review_request_id=review_request_id, *args, **kwargs)\n except ObjectDoesNotExist:\n return DOES_NOT_EXIST\n\n if not body_top:\n body_top = b''\n if not body_bottom:\n body_bottom = b''\n new_review = Review.objects.create(review_request=review_request, user=request.user, body_top=body_top, body_top_rich_text=body_top_rich_text, body_bottom=body_bottom, body_bottom_rich_text=body_bottom_rich_text, ship_it=ship_it)\n if diff_comments:\n try:\n diff_comments = json.loads(diff_comments)\n for comment in diff_comments:\n filediff = FileDiff.objects.get(pk=comment[b'filediff_id'], diffset__history__review_request=review_request)\n if comment[b'issue_opened']:\n issue = True\n issue_status = BaseComment.OPEN\n else:\n issue = False\n issue_status = None\n new_review.comments.create(filediff=filediff, interfilediff=None, text=comment[b'text'], first_line=comment[b'first_line'], num_lines=comment[b'num_lines'], issue_opened=issue, issue_status=issue_status, rich_text=comment[b'rich_text'])\n\n except KeyError:\n return (\n INVALID_FORM_DATA,\n {b'fields': {b'diff_comments': b'Diff comments were malformed'}})\n except ObjectDoesNotExist:\n return (INVALID_FORM_DATA,\n {b'fields': {b'diff_comments': b'Invalid filediff_id'}})\n\n new_review.publish(user=request.user)\n return (\n 201,\n {self.item_result_key: new_review})\n\n\nreview_bot_review_resource = ReviewBotReviewResource()","sub_path":"pycfiles/reviewbot_extension-1.0.1-py2.7/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":8107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"622433102","text":"# coding=utf-8\r\n'''\r\n@author: huitingn@qti.qualcomm.com\r\n@version: 0.1\r\n@since: 4/20/2016\r\n@summary: Delete all alarms.\r\n'''\r\n\r\nimport fs_wrapper\r\nimport settings.common as 
SC\r\nfrom case_utility import *\r\nfrom logging_wrapper import log_test_case, save_fail_log, print_report_line\r\nfrom test_case_base import TestCaseBase\r\nfrom qrd_shared.case import *\r\nfrom test_suit_cmcc_devci_clock import *\r\nfrom urlparse import clear_cache\r\n\r\nclass test_suit_cmcc_devci_clock_case31(TestCaseBase):\r\n '''\r\n\r\n @see: L{TestCaseBase }\r\n '''\r\n \r\n \r\n def test_case_main(self, case_results):\r\n global case_flag , TAG\r\n case_flag = False\r\n TAG = 'Dev-ci cases: Clock '\r\n log_test_case(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], self.name +' : case Start')\r\n log_test_framework(TAG, self.name + \" -Start\")\r\n\r\n #clear current app\r\n wakeUpDevice()\r\n settings.kill_allpid()\r\n #launch clock\r\n start_activity(\"com.android.deskclock\", \"com.android.deskclock.DeskClock\")\r\n click_imageview_by_desc('Alarm', waitForView=1)\r\n \r\n while search_text('Delete alarm', searchFlag=TEXT_MATCHES):\r\n click_imageview_by_desc('Delete alarm', waitForView=1)\r\n #search_text('Alarm deleted', searchFlag=TEXT_MATCHES)\r\n while search_view_by_desc('Expand alarm'):\r\n click_imageview_by_desc('Expand alarm', waitForView=1)\r\n click_imageview_by_desc('Delete alarm', waitForView=1)\r\n #search_text('Alarm deleted', searchFlag=TEXT_MATCHES)\r\n \r\n #check\r\n click_imageview_by_desc('Alarm', waitForView=1) \r\n if search_view_by_id('alarms_empty_view') is True: \r\n goback()\r\n goback()\r\n case_flag = True\r\n \r\n \r\n \r\n if case_flag:\r\n qsst_log_case_status(STATUS_SUCCESS, \"\" , SEVERITY_HIGH)\r\n else:\r\n qsst_log_case_status(STATUS_FAILED, \"\", SEVERITY_HIGH)\r\n \r\n case_results.append((self.case_config_map[fs_wrapper.CASE_NAME_ATTR], case_flag))\r\n \r\n \r\n \r\n \r\n def search_fail_reason(self):\r\n if search_text(\"Unfortunately\"):\r\n if search_text(\"OK\"):\r\n click_button_by_text(\"OK\")\r\n take_screenshot()\r\n log_test_case(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], \"Occurs Crash\")\r\n \r\n elif search_text(\"isn't responding\"):\r\n if search_text(\"OK\"):\r\n click_button_by_text(\"OK\")\r\n take_screenshot()\r\n log_test_case(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], \"Occurs ANR\")\r\n \r\n else:\r\n take_screenshot()\r\n log_test_case(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], \"Unknown reason\")\r\n \r\n \r\n \r\n \r\n def test_case_end(self):\r\n '''\r\n record the case result\r\n '''\r\n '''\r\n @attention: modify by min.sheng\r\n '''\r\n log_test_case(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], TAG + ' : end')\r\n if can_continue() and case_flag == True:\r\n # shutdown()\r\n log_test_case(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], TAG + ': case pass')\r\n print_report_line(self.case_config_map[fs_wrapper.CASE_NAME_ATTR] + TAG + ' : \\tpass')\r\n else:\r\n log_test_case(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], TAG + ' : case fail')\r\n print_report_line(self.case_config_map[fs_wrapper.CASE_NAME_ATTR] + TAG + ' : \\tfail')\r\n save_fail_log()\r\n self.search_fail_reason() #add by huitingn\r\n '''\r\n log_test_case(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], ' ui_phone_case1 : end')\r\n if can_continue() and case_flag == True:\r\n # shutdown()\r\n log_test_case(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], ' ui_phone_case1: case pass')\r\n print_report_line(self.case_config_map[fs_wrapper.CASE_NAME_ATTR] + ' ui_phone_case1 : \\tpass')\r\n else:\r\n log_test_case(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], ' ui_phone_case1: case fail')\r\n 
print_report_line(self.case_config_map[fs_wrapper.CASE_NAME_ATTR] + ' ui_phone_case1 : \\tfail')\r\n save_fail_log()\r\n '''\r\n\r\n ","sub_path":"Source/QSST/Config/data/M/test_env/test_suit_cmcc_devci_clock/test_suit_cmcc_devci_clock_case71.py","file_name":"test_suit_cmcc_devci_clock_case71.py","file_ext":"py","file_size_in_byte":4361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"87964477","text":"import concurrent.futures as cf\nimport os \nimport time\nimport sys,getopt\nimport threading\nR = threading.Lock()\ndef walkFile(src_path,dst_path,dir,quantit):\n R.acquire()\n if os.path.exists(dst_path) == False:\n os.makedirs(dst_path)\n if os.path.exists(dst_path+\"/\"+dir) == False:\n os.mkdir(dst_path+\"/\"+dir)\n print(dir)\n R.release()\n for root, dirs, files in os.walk(src_path+\"/\"+dir):\n\n # root 表示当前正在访问的文件夹路径\n # dirs 表示该文件夹下的子目录名list\n # files 表示该文件夹下的文件list\n\n # # 遍历文件\n # for f in files:\n # print(os.path.join(root, f))\n\n # 遍历所有的文件夹\n for file in files:\n src = src_path + \"/\"+dir+\"/\"+file\n dst = dst_path + \"/\"+dir+\"/\"+file[:-4]+\".j2k\"\n if os.path.exists(dst) == True:\n continue\n os.system(\"/home/yangxv/kakadu/kdu_compress -o \"+ dst+\" Qfactor=\"+str(quantit)+\" -i \"+src )\n os.system(\"/home/yangxv/kakadu/kdu_expand -o \"+ dst_path + \"/\"+dir+\"/\"+file[:-4]+\".ppm\"+\" -i \"+dst)\n\n\n\ndef main(argv):\n #walkFile(\"/mnt/imagenet_data/PNG/val\", ,\"n01440764\",1)\n inputfile = ''\n outputfile = ''\n quant = 0\n try:\n opts, args = getopt.getopt(argv,\"q:i:o:\",[\"quant\",\"ifile=\",\"ofile=\"])\n except getopt.GetoptError:\n print ('test.py -i -o ')\n sys.exit(2)\n for opt, arg in opts:\n if opt in (\"-i\", \"--ifile\"):\n inputfile = arg\n elif opt in (\"-o\", \"--ofile\"):\n outputfile = arg\n elif opt in (\"-q\",\"--quant\"):\n quant = int(arg)\n tp = cf.ThreadPoolExecutor(16) # 设置线程数16\n futures = []\n startTime = time.time()\n print(inputfile)\n for root, dirs, files in os.walk(inputfile):\n print(len(dirs))\n for dir in dirs:\n future = tp.submit(walkFile,inputfile,outputfile, dir,quant)\n futures.append(future)\n count = 0\n for future in cf.as_completed(futures):\n count += 1\n endTime = time.time()\n runTime = endTime-startTime\n print(runTime)\n tp.shutdown()\n os.system('pause')\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"mulit_thread_comj2k.py","file_name":"mulit_thread_comj2k.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"443652838","text":"from config import Config\nfrom tools import files\nimport os\n\n\n\nclass Automate():\n\n def __init__(self, conf=None, schemas=False ):\n self.conf = conf\n self.schemas_dir = conf._all['schemas']\n if schemas and conf:\n self.schemas_list = files.scan_dir(self.schemas_dir)\n\n\n def _join_paths(self, left, right):\n return \"{}/{}\".format(left, right)\n\n def load_schema(self, name):\n self.schema = file._open_yaml(self._join_paths(self.schema_dir, name))\n\n\n def run(self):\n print(self.get_schema(self.schemas_list, \"tucuman.yml\"))\n\nif __name__ == '__main__':\n\n path_conf = os.environ['JUS_CONF']\n conf = Config(path_conf)\n app = Automate(conf, schemas=True)\n app.run()\n\n\n\n\n\n\n\n\n\n","sub_path":"src/automate.py","file_name":"automate.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"511217717","text":"# 
cook your dish here\r\nn=int(input())\r\nweapons=list(map(int,input().split()))\r\neven=0\r\nodd=0\r\nfor i in weapons:\r\n if i%2==0:\r\n even+=1\r\n else:\r\n odd+=1\r\nif even>odd:\r\n print('READY FOR BATTLE')\r\nelse:\r\n print('NOT READY')","sub_path":"Beginner/Mahasena.py","file_name":"Mahasena.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"271050475","text":"#!/usr/bin/python\n#-*- coding:UTF-8 -*-\nimport sys\nimport math\nimport random\n\n\n# la cantidad de cazadores que controlas\nbusters_per_player = int(input())\n\n# la cantidad de fantasmas en el mapa\nghost_count = int(input())\n\n# si esto es 0, su base está en la parte superior izquierda del mapa\n# si es 1, en la parte inferior derecha\nmy_team_id = int(input())\n\n# fantasmas en el juego\nfantasmas = {}\n\n# fantasmas atrapados\nfantasmas_atrapados = []\n\n# cazadores por equipo\ncazadores = [{},{}]\n\n# tamaño de cuadrado a separar el mapa\ncuadrado = 1700\n\n# clase que genera una posición (x,y)\nclass Posicion:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n# funcion que calcula la distancia entre dos posiciones\ndef Distancia(posicion_uno, posicion_dos):\n dx = posicion_uno.x-posicion_dos.x\n dy = posicion_uno.y-posicion_dos.y\n return math.sqrt(dx*dx+dy*dy)\n\n# bases de cada equipo según su ID\nbases = [Posicion(0,0),Posicion(16000,9000)]\n\n# función para mover al cazador\ndef Mover(x,y, mensaje):\n return \"MOVE %s %s %s\" % (x,y,mensaje)\n\n# funcion para cazar un fantasma\ndef Cazar(id_fantasma):\n return \"BUST %s Cazado\" % id_fantasma\n\n# funcion para soltar un fantasma\ndef Soltar():\n return \"RELEASE Atrapado\"\n\n# clase que maneja los parametros básicos de cada jugador (cazador/fantasma)\nclass Entity(Posicion):\n def __init__(self, entity_id, x, y):\n Posicion.__init__(self, x, y)\n self.entity_id = entity_id\n self.tarea = Funcion(self)\n self.fuera_alcance = False\n\n def actualizar(self, x, y):\n self.x = x\n self.y = y\n self.fuera_alcance = False\n \n def accion(self):\n self.fuera_alcance = True\n return(self.tarea.accion())\n\n# clase Cazador\nclass Cazador(Entity):\n def __init__(self, entity_id, x, y, id_team, state, id_fantasma):\n Entity.__init__(self, entity_id, x, y)\n self.id_team = id_team\n self.ocupado = state == 1\n self.id_fantasma = id_fantasma\n self.id_cazado = None\n cazadores[self.id_team][self.entity_id] = self\n\n def actualizar(self, x, y, state, id_fantasma):\n Entity.actualizar(self, x, y)\n self.ocupado = state == 1\n self.id_fantasma = id_fantasma\n\n# clase Fantasma\nclass Fantasma(Entity):\n def __init__(self, entity_id, x, y, num_cazadores, state):\n Entity.__init__(self, entity_id, x, y)\n self.num_cazadores = num_cazadores\n self.vida = state\n fantasmas[self.entity_id] = self\n \n def actualizar(self, x, y, num_cazadores):\n Entity.actualizar(self, x, y)\n self.num_cazadores = num_cazadores\n self.vida = self.vida - self.num_cazadores\n\n\n# clase para asignar las funciones\nclass Funcion:\n def __init__(self, entity, funcion_default=None):\n self.entity = entity\n\n if funcion_default == None:\n funcion_default = self\n\n self.funcion_default = funcion_default\n\n def accion(self):\n return Mover(8000,4500,'ALV')\n\n\n# clase explorador\nclass Explorador(Funcion):\n def __init__(self, entity, funcion_default=None):\n # establecemos la funcion y posicion\n Funcion.__init__(self, entity, funcion_default)\n self.x = entity.x\n self.y = entity.y\n\n def 
accion(self):\n # si llegó a la posicion que fue enviado\n # calculamos un siguiente movimiento\n if self.entity.x == self.x and self.entity.y == self.y:\n self.x, self.y = self.movimiento()\n\n # lo movemos a la siguiente posicion\n return Mover(self.x, self.y,'Explorar')\n\n # calcular un movimiento de forma aleatoria\n def movimiento(self):\n distancia = None\n x = random.randint(500,15500)\n y = random.randint(500,8500)\n\n for j in range(math.ceil(9000/cuadrado)):\n for i in range(math.ceil(16000/cuadrado)):\n posicion = Posicion(min(16000,i*cuadrado+(cuadrado/2)),min(9000,j*cuadrado+(cuadrado/2)))\n costo = 2100*random.randint(10000,16000)-Distancia(posicion,self.entity)\n if distancia == None or distancia < costo:\n distancia = costo\n x = posicion.x\n y = posicion.y\n return (int(x),int(y))\n\n\n# clase capturador\nclass Capturador(Explorador):\n def accion(self):\n # verificar si tiene un fantasma atrapado\n if self.entity.ocupado:\n # determinamos la base correspondiente\n base = bases[self.entity.id_team]\n\n # verificamos si la distancia es inferior a 1600 unidades\n if Distancia(self.entity, base) < 1600:\n # soltamos el fantasma\n del fantasmas[self.entity.id_fantasma]\n return Soltar()\n else:\n # de lo contrario nos movemos a la base\n return Mover(base.x, base.y,'Prisionero')\n\n # variables de distancias\n distancia_minima = 16000*9000\n fantasma_cerca = 0\n\n # guardar los fantasmas que pueden ser cazados\n cazar = []\n\n # recorremos los fantasmas disponibles\n for id_fantasma, fantasma in fantasmas.items():\n # verificamos si el fantasma está disponible para atraparlo\n # y si no esta fuera del alcance\n if id_fantasma not in fantasmas_atrapados and not fantasma.fuera_alcance:\n # calculamos la distancia entre el fantasma y el cazador\n distancia = Distancia(self.entity, fantasma)\n\n # si está entre 900 y 1760 \n if 900 < distancia < 1760:\n # el fantasma puede ser atrapado\n # lo guardamos en el array de cazar\n cazar.append(fantasma)\n\n elif distancia >= 1760:\n # de lo contrario establecemos las distancias minimas\n # hacia el fantasma\n if distancia_minima > distancia:\n distancia_minima = distancia\n fantasma_cerca = fantasma\n\n # si el array cazar no está vacío\n # cazamos al fantasmas\n if cazar != []:\n if self.entity.id_cazado and fantasmas[self.entity.id_cazado] in cazar:\n return Cazar(self.entity.id_cazado)\n else:\n return Cazar(cazar[0].entity_id)\n\n # de lo contrario nos movemos hacia el fantasma\n if fantasma_cerca == 0:\n return self.funcion_default.accion()\n\n return Mover(fantasma_cerca.x, fantasma_cerca.y,'No escaparas')\n\n\n# funcion para definir las tareas\ndef Tareas(buster):\n # funciones que desarrolla un cazador\n # E: explorador, C: capturador\n funciones = ['E','C']\n\n # tarea por default es ninguna\n tarea = None\n\n # recorremos las funciones\n for funcion in funciones:\n if funcion == \"C\":\n tarea = Capturador(buster, tarea)\n elif funcion == \"E\":\n tarea = Explorador(buster, tarea)\n\n # y la asignamos\n buster.tarea = tarea\n\n\n# actualizar jugadores (cazadores y fantasmas)\ndef Jugadores(entity_id, x, y, entity_type, state, value):\n # verificar si es un fantasma\n if entity_type == -1:\n # verificar si ya está guardado en el array\n if entity_id in fantasmas:\n # si esta guardado actualizamos sus parametros\n fantasmas[entity_id].actualizar(x, y, value)\n else:\n # de lo contrario lo guardamos en el array\n Fantasma(entity_id, x, y, value, state)\n else:\n # si no es un fantasma, es un cazador\n # verificamos si está 
guardado\n if entity_id in cazadores[entity_type]:\n # de estar guardado, actualizamos sus parametros\n cazadores[entity_type][entity_id].actualizar(x, y, state, value)\n else:\n # de no estar guardado lo guardamos\n cazador = Cazador(entity_id, x, y, entity_type, state, value)\n\n # si el cazador es de mi equipo, le asignamos una tarea\n if entity_type == my_team_id:\n Tareas(cazador)\n\n\n# loop del juego\nwhile True:\n # la cantidad de cazadores y fantasmas visibles para ti\n entities = int(input())\n\n for i in range(entities):\n # entity_id: ID del cazador o ID del fantasma\n # y: posición del cazador / fantasma\n # entity_type: ID del equipo si es un cazador, -1 si es un fantasma.\n # state: Para cazadores: 0=inactivo, 1=llevando un fantasma\n # value: Para cazadores: ID del fantasma siendo llevado. Para fantasmas: número de cazadores que intentan atraparlo.\n entity_id, x, y, entity_type, state, value = [int(j) for j in input().split()]\n Jugadores(entity_id, x, y, entity_type, state, value)\n\n # fantasmas atrapados\n fantasmas_atrapados = [fantasma for fantasma in fantasmas.values() if fantasma.vida <= 0]\n\n for entity_id in sorted(cazadores[my_team_id].keys()):\n buster = cazadores[my_team_id][entity_id]\n print(str(buster.accion()))\n\n # definir a los fantasmas atrapados como fuera de alcance\n # para que no se repitan los cazadores en la misma direccion\n for id_fantasma in sorted(fantasmas.keys()):\n fantasma = fantasmas[id_fantasma]\n fantasma.accion()","sub_path":"primera_liga.py","file_name":"primera_liga.py","file_ext":"py","file_size_in_byte":9287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"514634181","text":"from __future__ import print_function\nimport lucene\nimport codecs\nfrom datetime import datetime\nfrom java.nio.file import Paths\nfrom org.apache.lucene.store import SimpleFSDirectory\nfrom org.apache.lucene.index import IndexReader, DirectoryReader\nfrom org.apache.lucene.analysis.standard import StandardAnalyzer\nfrom org.apache.lucene.analysis.en import EnglishAnalyzer\nfrom org.apache.lucene.queryparser.classic import QueryParser\nfrom org.apache.lucene.util import BytesRef\nfrom org.apache.lucene.index import MultiFields, PostingsEnum, Term\nfrom org.apache.lucene.search import IndexSearcher, TermQuery, BooleanQuery, BooleanClause\nfrom org.apache.lucene.search.similarities import LMDirichletSimilarity\nfrom bs4 import BeautifulSoup\n\nINDEX_BASE_DIR = '/home/singh/indexes/'\nINDEX_DIR = 'lucene-index.robust04-full.pos.dv'\n\n# Read Relation Data\ndef read_relation(filename, verbose=True):\n data = []\n for line in open(filename):\n line = line.strip().split()\n data.append( (int(line[0]), line[1], line[2]) )\n if verbose:\n print('[%s]\\n\\tInstance size: %s' % (filename, len(data)), end='\\n')\n return data\n\n# Read Data Dict\ndef read_data(filename, word_dict = None):\n data = {}\n with open(filename) as f:\n for index, line in enumerate(f):\n if index >= 250:\n break\n\n line = line.strip().split()\n tid = line[0]\n if word_dict == None:\n data[tid] = list(map(int, line[2:]))\n else:\n data[tid] = []\n for w in line[2:]:\n wid = int(w)\n if wid in word_dict:\n #word_dict[w] = len(word_dict)\n data[tid].append(word_dict[wid])\n #print(data[tid])\n print('[%s]\\n\\tData size: %s' % (filename, len(data)), end='\\n')\n return data, word_dict\n\n\n# Read Word Dict and Inverse Word Dict\ndef read_word_dict(filename):\n word_dict = {}\n iword_dict = {}\n for line in open(filename):\n line = 
line.strip().split()\n word_dict[int(line[1])] = line[0]\n iword_dict[line[0]] = int(line[1])\n print('[%s]\\n\\tWord dict size: %d' % (filename, len(word_dict)), end='\\n')\n return word_dict, iword_dict\n\n\ndef get_lm_matched_docs(query, searcher, qparser):\n #did_dict = {}\n dids = []\n scores = []\n query = qparser.parse(query)\n searcher.setSimilarity(LMDirichletSimilarity())\n scoreDocs = searcher.search(query, 2000).scoreDocs\n print(\"Found %d document(s) that matched query '%s':\" % (len(scoreDocs), query))\n\n for scoreDoc in scoreDocs:\n doc = searcher.doc(scoreDoc.doc)\n did = doc.get(\"id\")\n #text = doc.get(\"raw\")\n #did_dict[did] = {}\n #did_dict[did]['text'] = text\n #did_dict[did]['score'] = scoreDoc.score\n dids.append(did)\n scores.append(scoreDoc.score)\n\n return dids, scores\n\n\ndef store_corpus_docs(data, qrel_docs, searcher, qparser):\n lm_docs = {}\n\n for qid in data:\n query = ' '.join(data[qid])\n print(\"qid:%s; query: %s\" % (qid, query))\n doc_dict = get_lm_matched_docs(query, searcher, qparser)\n for did in doc_dict:\n if did not in qrel_docs:\n parsed_doc = BeautifulSoup(doc_dict[did], \"html5lib\")\n text = parsed_doc.get_text().replace('\\n', ' ')\n text = ' '.join(text.split())\n lm_docs[did] = text\n\n print(\"lm docs not in qrels: %s\" % (len(lm_docs)))\n f = codecs.open('/home/fernando/MatchZoo/data/robust04/corpus_n_stem2.txt', 'w', encoding='utf8')\n for did in lm_docs:\n f.write(\"%s %s\\n\" % (did, lm_docs[did]))\n f.close()\n\n\nif __name__ == \"__main__\":\n lucene.initVM()\n index = DirectoryReader.open(SimpleFSDirectory(Paths.get(INDEX_BASE_DIR + INDEX_DIR)))\n searcher = IndexSearcher(index)\n analyzer = EnglishAnalyzer()\n qparser = QueryParser(\"contents\", analyzer)\n\n qid_doc_list = {}\n qrel_dict = {}\n qrel_docs = set()\n\n rel_file = '/home/fernando/MatchZoo/data/robust04/cv_splits/test.5.txt'\n rel = read_relation(filename=rel_file)\n #rel.extend(read_relation(filename='/home/fernando/MatchZoo/data/robust04/relation_train.txt'))\n #rel.extend(read_relation(filename='/home/fernando/MatchZoo/data/robust04/relation_valid.txt'))\n print('Instance size: %s' % (len(rel)), end='\\n')\n word_dict, _ = read_word_dict(\"/home/fernando/MatchZoo/data/robust04/word_dict_new_n_stem_filtered_rob04_embed.txt\")\n\n for label, d1, d2 in rel:\n qrel_dict[(d1, d2)] = label\n qrel_docs.add(d2)\n\n print('corpus doc size in test rel file: %s' % (len(qrel_docs)), end=\"\\n\")\n\n datapath = '/home/fernando/MatchZoo/data/robust04/corpus_preprocessed_q_n_stem.txt'\n data, _ = read_data(datapath, word_dict)\n qrel_stats = {}\n baseline_f = open('/home/fernando/MatchZoo/data/robust04/cv_splits/predict.test.5.ql.txt','w')\n for label, d1, d2 in rel:\n if d1 not in qid_doc_list:\n qid_doc_list[d1] = []\n query = ' '.join(data[d1])\n print(d1 + \" \" + query)\n rel, non_rel = 0, 0\n doc_list, scores = get_lm_matched_docs(query, searcher, qparser)\n\n for id, doc in enumerate(doc_list):\n\n if (d1, doc) not in qrel_dict:\n qid_doc_list[d1].append((0, doc))\n baseline_f.write(\"%s\\t%s\\t%s\\t%d\\t%f\\t%s\\t%d\\n\" % (d1, \"Q0\", doc, id, scores[id], \"ql_baseline\", 0))\n non_rel += 1\n else:\n qid_doc_list[d1].append((qrel_dict[(d1, doc)], doc))\n baseline_f.write(\"%s\\t%s\\t%s\\t%d\\t%f\\t%s\\t%d\\n\" % (d1, \"Q0\", doc, id, scores[id], \"ql_baseline\", qrel_dict[(d1, doc)]))\n rel += 1\n print(\"rel:%d non-rel:%d\" % (rel, non_rel))\n\n if (label, d2) not in qid_doc_list[d1]:\n # qid_doc_list[d1].append((label, d2))\n if d1 not in qrel_stats:\n 
qrel_stats[d1] = {}\n qrel_stats[d1]['high_rel'] = 0\n qrel_stats[d1]['rel'] = 0\n qrel_stats[d1]['non_rel'] = 0\n\n if label == 2:\n qrel_stats[d1]['high_rel'] += 1\n elif label == 1:\n qrel_stats[d1]['rel'] += 1\n else:\n qrel_stats[d1]['non_rel'] += 1\n\n baseline_f.close()\n\n for d1 in qrel_stats:\n print(\"qid:%s high-rel:%d rel:%d non-rel:%d\" % (d1, qrel_stats[d1]['high_rel'], qrel_stats[d1]['rel'], qrel_stats[d1]['non_rel']))\n\n f = open('/home/fernando/MatchZoo/data/robust04/cv_splits/test.5.ql.txt','w')\n for qid in qid_doc_list:\n print(\"length of docs for qid(%s): %d\" % (qid, len(qid_doc_list[qid])))\n for (label, docid) in qid_doc_list[qid]:\n f.write(str(label) + \" \" + qid + \" \" + docid + \"\\n\")\n f.close()\n","sub_path":"data/common_utils/IndexSearcher.py","file_name":"IndexSearcher.py","file_ext":"py","file_size_in_byte":6885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"74117969","text":"\"\"\"\n\nHTTPX 必须为每个请求建立一个新连接(连接不会被重用)。随着对主机的请求数量增加,这很快就会变得低效。\n\n另一方面,Client实例使用HTTP 连接池。这意味着当您向同一主机发出多个请求时,Client将重用底层 TCP 连接,而不是为每个请求重新创建一个。\n\n与使用顶级 API 相比,这可以带来显着的性能改进,包括:\n\n减少跨请求的延迟(无握手)。\n减少 CPU 使用率和往返次数。\n减少网络拥塞。\n\"\"\"\nimport httpx\n\n\"\"\"\n表达式\nClient是作为上下文管理器。这将确保在离开with块时正确清理连接\nwith httpx.Client() as client:\n\n\"\"\"\n\n\ndef mm1():\n\n with httpx.Client() as client:\n r = client.get('https://example.com')\n print(r.text)\n\n\ndef mm12():\n \"\"\"自定义标头的请求\"\"\"\n url = 'http://httpbin.org/headers'\n headers = {'user-agent': 'my-app/0.0.1'}\n with httpx.Client(headers=headers) as client:\n r = client.get(url)\n print(r.text)\n print(r.json()['headers'])\n\n\nclient = httpx.AsyncClient()\n\n\n\nimport asyncio\nimport httpx\nimport threading\nimport time\n\n\ndef sync_main(url, sign):\n \"\"\"同步 http 请求的耗时表现\"\"\"\n response = httpx.get(url).status_code\n print(f'sync_main: {threading.current_thread()}: {sign}: {response}')\n\n\n# sync_start = time.time()\n# [sync_main(url='https://www.example.com/', sign=i) for i in range(50)]\n# sync_end = time.time()\n# print(sync_end - sync_start)\n\n\nasync def async_main2(url, sign):\n \"\"\"\"异步\" http 请求\"\"\"\n response = await client.get(url)\n status_code = response.status_code\n print(f'async_main: {threading.current_thread()}: {sign}:{status_code}')\n\n\n# loop = asyncio.get_event_loop()\n# tasks = [async_main2(url='https://www.example.com/', sign=i) for i in range(50)]\n# async_start = time.time()\n# loop.run_until_complete(asyncio.wait(tasks))\n# async_end = time.time()\n# loop.close()\n# print(async_end - async_start)","sub_path":"httpx_demo/api_2.py","file_name":"api_2.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"134228023","text":"N = int(input())\nA = list(map(int, input().split()))\nMOD = 10**9 + 7\n\ndef ncr(n, r):\n if r > n:\n return 1\n ret = 1\n for i in range(r):\n ret *= (n - i)\n for i in range(r):\n ret //= (r - i)\n return ret\n\nans = 1\ncnt = 0\nprev = 0\nfor a in A:\n if a == -1:\n cnt += 1\n else:\n m = a - prev\n ans *= ncr(m + cnt, cnt)\n cnt = 0\n prev = a\n ans %= MOD\nprint(ans)\n","sub_path":"AtCoder/arc/023c.py","file_name":"023c.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"357038238","text":"from flask import abort, Blueprint, current_app, render_template, flash, redirect, request\nfrom flask_login import current_user, 
login_required\nfrom webapp.db import db\nfrom webapp.news.forms import CommentForm\nfrom webapp.utils import get_redirect_target\nfrom webapp.weather import weather_by_city\nfrom webapp.news.models import News, Comments\n\nblueprint = Blueprint('news', __name__)\n\n\n@blueprint.route('/')\ndef index():\n page_title = 'Новости'\n weather = weather_by_city(current_app.config['WEATHER_DEFAULT_CITY'])\n news_list = News.query.filter(News.text.isnot(None)).order_by(News.published.desc()).all()\n return render_template('news/index.html', page_title=page_title, weather_text=weather, news_list=news_list)\n\n\n@blueprint.route('/news/')\ndef single_news(news_id):\n weather = weather_by_city(current_app.config['WEATHER_DEFAULT_CITY'])\n my_news = News.query.filter(News.id == news_id).first()\n\n if not my_news:\n abort(404)\n\n comment_form = CommentForm(news_id=my_news.id)\n return render_template('news/single_news.html', page_title=my_news.title,\n news=my_news, weather_text=weather, comment_form=comment_form)\n\n\n@blueprint.route('/news/comment', methods=['POST'])\n@login_required\ndef add_comment():\n form = CommentForm()\n if form.validate_on_submit():\n comment = Comments(text=form.comment_text.data, news_id=form.news_id.data, user_id=current_user.id)\n db.session.add(comment)\n db.session.commit()\n flash('Комментарий добавлен!')\n else:\n for field, errors in form.errors.items():\n for error in errors:\n flash(f'Ошибка в заполнении поля {getattr(form, field).label.text} - {error}')\n return redirect(get_redirect_target())\n","sub_path":"webapp/news/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"509972537","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun May 10 16:56:47 2020\r\n\r\n@author: 92801\r\n\"\"\"\r\nimport re\r\nxfile=open('mito_gene.fa','r')\r\ngeneinformation=[]\r\ngenesequence=[]\r\nseq=str()\r\ncount=0\r\n\r\nfor line in xfile:\r\n if line.startswith('>'):\r\n geneinformation.append(line)\r\n seq=''\r\n count+=1\r\n else:\r\n line=line.rstrip()\r\n seq=str(line)\r\n seq=re.sub('A', 't', seq)\r\n seq=re.sub('T', 'a', seq)\r\n seq=re.sub('G', 'c', seq)\r\n seq=re.sub('C', 'g', seq)\r\n seq=seq.swapcase()\r\n seq=''.join(reversed(seq))\r\n genesequence.append(seq)\r\n\r\nyfile=open('Reverse_complementary_sequences_of_mitochondria_genes.fa','w')\r\nfor i in range(count):\r\n line1=geneinformation[i]\r\n line2=genesequence[i] +'\\n'\r\n yfile.write(line1)\r\n yfile.write(line2)\r\nyfile.close()\r\n \r\n\r\n\r\n\r\n \r\n \r\n \r\n \r\n \r\n ","sub_path":"Practical 8/Mito_RC.py","file_name":"Mito_RC.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"167959912","text":"\"\"\" Uma modificação do sutil.copyfileobj() que nem sempre funciona para mim. \n Por questões de desempenhos em arquivos grandes, o arquivo de origem\n é lido aos poucos e seu conteúdo é guardado no buffer e então escrito\n no arquivo de destino.\n\"\"\"\ndef copiaArquivo(origem, destino):\n # Buffer de 16kb. Talvez seja melhor aumentá-lo.\n tamanhoBuffer = 16 * 1024\n with open(origem, 'rb') as arqOrigem:\n with open(destino, 'wb') as arqDestino:\n while True:\n buf = arqOrigem.read(tamanhoBuffer)\n if not buf:\n break\n destino.write(buf)\n\n\ndef copiaDadosParaArquivo(dados, destino):\n # Buffer de 16kb. Talvez seja melhor aumentá-lo. 
Exemplo: 64kb (2**16 ou 65536).\n tamanhoBuffer = 16 * 1024\n copiado = 0\n with open(destino, 'wb') as arqDestino:\n while True:\n # Size\n buf = dados.read(tamanhoBuffer)\n if not buf:\n break\n destino.write(buf)\n copiado = copiado + len(buf) # Quantidade copiada\n print(copiado)\n\n\ndef copiaUrlParaArquivo(url, destino):\n # Fazendo a requisição\n import urllib3\n http = urllib3.PoolManager()\n r = http.request('GET', url, preload_content=False)\n\n # Buffer de 16kb. Talvez seja melhor aumentá-lo. Exemplo: 64kb (2**16 ou 65536).\n tamanhoBuffer = 16 * 1024\n copiado = 0\n with open(destino, 'wb') as arqDestino:\n while True:\n buf = r.read(tamanhoBuffer)\n if not buf:\n break\n arqDestino.write(buf)\n copiado = copiado + len(buf) # Quantidade copiada\n \n r.release_conn()\n quantidadeCopiada = \"{}kb\".format(copiado/1024)\n return quantidadeCopiada\n","sub_path":"mycopyutil.py","file_name":"mycopyutil.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"57197627","text":"from django.http.response import HttpResponse\nfrom django.shortcuts import redirect, render\nfrom django.conf import settings\n\nfrom public_chat.models import PublicChatRoom\n\nDEBUG = False\n\ndef public_chat_view(request, room_id):\n\n context = {}\n\n try:\n room = PublicChatRoom.objects.get(id=room_id)\n context['room_id'] = room_id\n context['room'] = room\n except PublicChatRoom.DoesNotExist:\n return HttpResponse(\"Room doesn't exist.\")\n\n context['debug_mode'] = settings.DEBUG\n context['debug'] = DEBUG\n\n return render(request, f\"public_chat/public_chat_rooms.html\", context)\n\ndef public_chat_create(request):\n \n user = request.user\n\n context = {}\n\n if user.is_authenticated:\n if request.POST:\n title = request.POST.get('title')\n description = request.POST.get('description')\n\n try:\n room = PublicChatRoom.objects.get(title=title)\n\n if room:\n context['error'] = 'Room with this title already exist. 
Please choose another title.'\n\n except PublicChatRoom.DoesNotExist:\n\n PublicChatRoom.objects.create(author=user, title=title, description=description)\n return redirect('home')\n else:\n return redirect('login')\n\n return render(request, \"public_chat/create_public_chat.html\", context)\n\ndef my_chat_rooms(request):\n user = request.user\n \n context = {}\n\n if user.is_authenticated:\n try:\n rooms = PublicChatRoom.objects.filter(author=user)\n context['rooms'] = rooms\n context['user'] = user\n except PublicChatRoom.DoesNotExist:\n return HttpResponse(\"You don't own any public chat rooms yet.\")\n else:\n return redirect('login')\n\n return render(request, \"public_chat/my_public_chat_rooms.html\", context)\n\ndef public_chat_delete(request, room_id):\n \n user = request.user\n\n if user.is_authenticated:\n room = PublicChatRoom.objects.get(id=room_id)\n\n if user.id == room.author.id:\n PublicChatRoom.objects.get(id=room_id).delete()\n else:\n return HttpResponse(\"You can only delete your own room.\")\n else:\n return redirect('login')\n\n return redirect('home')\n","sub_path":"public_chat/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"132353888","text":"#-----------------------------------------------------------------------------------\n#- plot_face.py\n#- DESCRIPTION: Plot the face of a 3D grid\n#-----------------------------------------------------------------------------------\nimport numpy as np\nimport sys\nimport netCDF4\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors as c\nfrom matplotlib import ticker\nfrom matplotlib import cm\nfrom mpl_toolkits.mplot3d import axes3d\n\ndef read_prob_size(prob_size_file):\n #- Figure out dimensions from ProbSize file\n i = 0\n f = open(prob_size_file, 'r')\n for line in f:\n line = line.rstrip()\n if (i == 0):\n IMax=int(line)\n elif (i==1):\n JMax=int(line)\n elif (i==2):\n KMax=int(line)\n i = i + 1\n f.close()\n return IMax,JMax,KMax\n\n#- Read Grid from ascii file, return x,y,z vectors\ndef read_grid_ascii(grid_file,prob_size_file):\n #- Figure out dimensions from ProbSize file\n i = 0\n IMax,JMax,KMax = read_prob_size(prob_size_file)\n\n #- Read in grid data\n f = open(grid_file, 'r')\n x,y,z = [],[],[]\n for line in f:\n line = line.strip()\n cols = line.split(',')\n x.append(float(cols[0]))\n y.append(float(cols[1]))\n z.append(float(cols[2]))\n f.close()\n\n #- Reshape to 3-D array\n X = np.reshape(x,(IMax,JMax,KMax),order='F')\n Y = np.reshape(y,(IMax,JMax,KMax),order='F')\n Z = np.reshape(z,(IMax,JMax,KMax),order='F')\n return X,Y,Z,IMax,JMax,KMax\n#-------------------------------------------------------------------\n\n\n#---PLOT_BATHY------------------------------------------------------\n#- INPUT: 3-D variables X,Y,Z\n#-\n#-------------------------------------------------------------------\ndef plot_bathy(X,Y,Z,level=None,fname=None):\n if(level == None):\n level = 0\n im_width = 5\n im_height = 5\n [nk,nj,ni] = X.shape #- (10, 23, 98)\n zmin = np.min(Z)\n zmax = np.max(Z)\n\n #- Set up figure\n fig = plt.figure(figsize=(im_width, im_height),dpi=72)\n ax = fig.add_subplot(111)\n\n ##- Plot wireframe\n #ax.plot_wireframe(X[0,:,:], Y[0,:,:],Z[0,:,:] )\n ##- Plot surface\n print(zmin)\n print(zmax)\n heatmap = plt.pcolor(X[:,:,level], Y[:,:,level],Z[:,:,level],cmap=cm.jet, vmin=zmin,vmax=-3.0)\n #- colorbar legend\n cbar = plt.colorbar(heatmap)\n 
cbar.ax.set_ylabel('Depth (m)')\n\n ##- Label axes\n plt.xlabel('X')\n plt.ylabel('Y')\n plt.title('San Diego Bay Curvilinear Mesh: Bathymetry K-Level '+str(level))\n if (fname == None):\n plt.show() \n else:\n plt.savefig(fname,dpi=96)\n print(\" Plotting: \"+fname)\n \n\n#---PLOT_3D------------------------------------------------------\n#- INPUT: 3-D variables X,Y,Z\n#-\n#-------------------------------------------------------------------\ndef plot_3d(X,Y,Z,level=None):\n if(level == None):\n level = 0\n [ni,nj,nk] = X.shape #- (10, 23, 98)\n zmin = np.min(Z[:,:,level])\n zmax = np.max(Z[:,:,level])\n\n #- Set up figure\n #fig = plt.figure()\n fig = plt.figure(figsize=plt.figaspect(0.5)*1.5)\n ax = fig.add_subplot(111, projection='3d')\n ax.plot_surface(X[:,:,level], Y[:,:,level],Z[:,:,level],cmap=cm.jet, \\\n vmin=zmin,vmax=zmax, rstride=1,cstride=1)\n ax.plot_surface(X[:,:,nk-1], Y[:,:,nk-1],Z[:,:,nk-1],cmap=cm.jet, \\\n vmin=zmin,vmax=zmax, rstride=1,cstride=1)\n #ax.scatter(X[1,1,1], Y[1,1,1],Z[1,1,1],c='y',s=100)\n ##- Label axes\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n ax.set_title('3D Curvilinear Mesh: Bathymetry')\n\n plt.show()\n\n\n\n#---PLOT_FACE------------------------------------------------------\n#- INPUT: 3-D variables X,Y,Z\n#-\n#-------------------------------------------------------------------\ndef plot_face(X,Y,Z,level=None):\n if(level == None):\n level = 0\n [ni,nj,nk] = X.shape #- (10, 23, 98)\n zmin = np.min(Z[:,:,0])\n zmax = np.max(Z[:,:,nk-1])\n\n #- Set up figure\n #fig = plt.figure()\n fig = plt.figure(figsize=plt.figaspect(0.5)*1.5)\n ax = fig.add_subplot(111, projection='3d')\n ax.plot_surface(X[level,:,:], Y[level,:,:],Z[level,:,:],cmap=cm.jet, \\\n vmin=zmin,vmax=zmax, rstride=1,cstride=1)\n #ax.plot_surface(X[:,:,nk-1], Y[:,:,nk-1],Z[:,:,nk-1],cmap=cm.jet, \\\n # vmin=zmin,vmax=zmax, rstride=1,cstride=1)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n ax.set_title('Monterey Curvilinear Mesh: J-K Plane (I='+str(level)+')')\n #ax.set_title('Seamount Test Case: Bathymetry')\n\n plt.show()\n\n\n\nsrc_dir = './output/'\nprob_size_file = src_dir+'ProbSize_mont.dat'\ngrid_file = src_dir+'Grid_mont.dat'\nX,Y,Z,imax,jmax,kmax = read_grid_ascii(grid_file,prob_size_file)\nlevel = round(2)\nplot_face(X,Y,Z,level)\n","sub_path":"jupyter-notebooks/scripts/plot_face.py","file_name":"plot_face.py","file_ext":"py","file_size_in_byte":4858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"82431376","text":"\"\"\"\nProgram: pickle_lesson3.py\nAuthor: Myles Wilkerson\nDate: 2/10/2021\nThe Hidden Genius Project\nCohort: OAK8\n\"\"\"\n\ninstructor_pod = {}\n\njacore_leader = {}\nandrew_leader = {}\nrichard_leader = {}\naris_leader = {}\ngabriel_leader = {}\n\njacore_members = {}\nandrew_members = {}\nrichard_members = {}\naris_members = {}\ngabriel_members = {}\n\n#4 Create an empty dictionary for the other 3 PODs; Aris, Gabriel and Richard\n\n#5 Add the names and telephone numbers of each member POD\njacore_members['Moussa Ndiaye'] = '(123) 456-7890'\njacore_members['Morris Jones'] = '(925) 286-5922'\njacore_members['Prince Fields'] = '(510) 472-0804'\njacore_members['Akari Johnson'] = '(510) 500-2206'\n\nandrew_members['Mallick Abdullah'] = '(510) 409-8755'\nandrew_members['Ronin Youngjones'] = '(415) 910-3415'\nandrew_members['Glenn Ivory'] = '(510) 328-8290'\n\nrichard_members['Prince Fields'] = '(510) 472-0804'\nrichard_members['Mattew Dudley'] = '(510) 
816-2411'\nrichard_members['Kymari Rhodes'] = '(510) 575-1982'\nrichard_members['Josiah Johnson'] = '(510) 860-5112'\n\naris_members['Milan Kral'] = '(510) 816-3232'\naris_members['Maurice Richardson'] = '(510) 424-7789'\naris_members['Zyion Williams'] = '(510) 480-5785'\naris_members['Hyab Isayas'] = '(510) 612-3737'\n\ngabriel_members['David Brickley'] = '(510) 631-6288'\ngabriel_members['Myles Wilkerson'] = '(510) 500-7266'\ngabriel_members['Emmanuel Torbor'] = '(510) 934-4133'\n\njacore_leader['Jacore'] = jacore_members\nandrew_leader['Andrew'] = andrew_members\naris_leader['Aris'] = aris_members\nrichard_leader['Richard'] = richard_members\ngabriel_leader['Gabriel'] = gabriel_members\n\n \n#6 Add all the PODS to the all_pod_members dictionary\ninstructor_pod['Baba'] = jacore_leader\ninstructor_pod['Hodari'] = andrew_leader\ninstructor_pod['David'] = richard_leader\ninstructor_pod['Paris'] = aris_leader\ninstructor_pod['Akeem'] = gabriel_leader\n\n#9 Print all the Pod leaders and POD membership\n\n\n\nfor instructor_pod, pod_leader in instructor_pod.items():\n print(\"This is the pod's instructor: \", instructor_pod)\n \n for pod_leader, pod_member in pod_leader.items():\n print(\"This is the pod's leader: \", pod_leader);\n for pod_member, phone_number in pod_member.items():\n print(pod_member,phone_number)\n print(\"\\n\")\n","sub_path":"assignment_23/pickle_lesson3.py","file_name":"pickle_lesson3.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"328558421","text":"# -*- coding: utf-8 -*-\nimport hashlib\nfrom django.core.urlresolvers import reverse\nfrom django.http import Http404, HttpResponseRedirect\nfrom django.template import Context\nfrom django.views.generic import CreateView, TemplateView\nfrom fine_pw import settings\nfrom pages.views import CommonContextMixin, PageView\nfrom .models import Email, Subscribe\nfrom subscribe.forms import EmailForm, EmptyForm\nfrom django.core.mail import send_mail, EmailMessage, EmailMultiAlternatives\nfrom django.template.loader import render_to_string, get_template\nfrom django.shortcuts import render\nfrom django.utils.translation import ugettext as _, get_language\n\n\nclass EmailCreateVIew(CommonContextMixin,\n CreateView):\n \"\"\"\n Contact page.\n \"\"\"\n model = Email\n # fields = ('email_address',)\n template_name = 'subscribe_b.html'\n form_class = EmailForm\n # success_url = reverse('subscribe_b')\n\n def get_context_data(self, **kwargs):\n subscribe_obj = Subscribe.objects.filter(online=True).first()\n if not (subscribe_obj and subscribe_obj.page and subscribe_obj.page.is_visible):\n raise Http404()\n\n context = super(EmailCreateVIew, self).get_context_data(**kwargs)\n context.update(PageView().get_context_data(page_obj=subscribe_obj))\n\n context['subscribe_button_name'] = subscribe_obj.button_name\n context['subscription_success_message'] = subscribe_obj.subscription_success_message\n if 'subscription_success' in self.kwargs:\n context['subscription_success'] = self.kwargs['subscription_success']\n\n return context\n\n def form_valid(self, form):\n \"\"\"\n If the form is valid, save the associated model.\n \"\"\"\n self.object = form.save(commit=False)\n # create a hash key for each user for security\n hash_object = hashlib.sha1('{}{}'.format(self.object.email_address,\n settings.SECRET_KEY))\n hex_dig = hash_object.hexdigest()\n self.object.hash_key = hex_dig\n\n # send email\n to = [self.object.email_address]\n from_email = 
settings.EMAIL_HOST_USER\n website_url = settings.ALLOWED_HOSTS[0]\n unsubscribe_url = website_url + reverse('unsubscribe_b', args=(hex_dig,))\n subscribe_obj = Subscribe.objects.filter(online=True).first()\n subject = subscribe_obj.email_subject\n\n # prepare the email context\n html_template = get_template('subscription_email.html')\n text_template = get_template('subscription_email.txt')\n c = Context({\n 'website_url': website_url,\n 'unsubscribe_url': unsubscribe_url,\n })\n html = html_template.render(c)\n text = text_template.render(c)\n\n # send email\n email = EmailMultiAlternatives(subject, text, from_email, to)\n email.attach_alternative(html, \"text/html\")\n email.send()\n\n self.object.save()\n\n c = self.get_context_data()\n c['subscription_success'] = True # to display success message!\n c['form'] = form\n return render(self.request,\n self.template_name,\n c)\n\n\ndef unsubscribe(request, hash_key):\n \"\"\"This view function is used to remove an email by user.\n\n :param request: Http request parameter to display html page.\n :type request: request.\n :param hash_key: Hash key of the email which will be deleted. It is used in unsubscribe url.\n :type hash_key: string.\n :returns: HttpResponse -- Returns http response to display rendered page.\n \"\"\"\n hw_object = PageView()\n context = hw_object.get_context_data()\n\n if Email.objects.filter(hash_key=hash_key):\n email_to_delete = Email.objects.filter(hash_key=hash_key)[0]\n context['email_address_to_delete'] = email_to_delete.email_address\n if Subscribe.objects.all():\n subscribe_obj = Subscribe.objects.filter(online=True).first()\n context['subscription_cancellation_confirm_message'] = subscribe_obj.subscription_cancellation_confirm_message\n else:\n context['subscription_cancellation_confirm_message'] = _(\"Do you confirm cancellation?\")\n else:\n context['subscription_cancellation_confirm_message'] = _(\"Your subscription was already cancelled.\")\n context['was_subscription_already_cancelled'] = True\n context['is_unsubscription'] = True\n\n if request.method == 'POST': # If the form has been submitted\n form = EmptyForm(request.POST) # A form bound to the POST data\n if form.is_valid(): # All validation rules pass\n # if subscription is confirmed by the user\n try:\n email_to_delete.delete() # delete the email\n except UnboundLocalError as e:\n pass\n context['was_subscription_already_cancelled'] = True\n context['subscription_cancellation_confirm_message'] = _(\"Your subscription is cancelled!\")\n return render(request, 'unsubscrine_b.html', context)\n else:\n form = EmptyForm() # An unbound form for unsubsciption\n context['form'] = form\n context['nav_home'] = False # to link brand name to home\n return render(request, 'unsubscrine_b.html', context)","sub_path":"subscribe/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"480422670","text":"class Solution(object):\n def combine(self, n, k):\n \"\"\"\n :type n: int\n :type k: int\n :rtype: List[List[int]]\n \"\"\"\n Solution.res = []\n self.curse([], 1, n, k)\n return Solution.res\n \n def curse(self, rest, start_idx, n, k):\n if k == 0:\n Solution.res.append(rest)\n return\n for i in range(start_idx, n-k+2):\n self.curse(rest + [i], i+1, n, 
k-1)","sub_path":"Combinations.py","file_name":"Combinations.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"605542169","text":"#!./venv/bin/python\n\nfrom electrum import bitcoin, util\n\nimport sys\n\nlines = [x.strip() for x in sys.stdin.readlines()]\n\nif len(lines) !=1:\n print(\"wrong input\")\n sys.exit(1)\n\n_xtype, _depth, _fp, _cn, _c, K = bitcoin.deserialize_xpub(lines[0])\ntype = \"p2pkh\"\nif len(sys.argv) == 2:\n type = sys.argv[1]\npubkey = bitcoin.pubkey_to_address(type, util.bh2u(K))\n\nprint(pubkey)\n","sub_path":"xpub2btc.py","file_name":"xpub2btc.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"481333255","text":"# !/usr/bin/env python\n# -*- coding:utf-8 -*-\n# author:Ocean-yyl\n# datetime:2020-04-12 15:13\n# software: PyCharm\n\"\"\"简单文本分类器\"\"\"\nimport os\n\nimport jieba\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.naive_bayes import MultinomialNB\n\n\n# 预处理文本\ndef preprocess(path):\n\ttext_with_space = \"\"\n\twith open(path, encoding=\"utf-8\", errors=\"ignore\") as f:\n\t\ttextfile = f.read()\n\t\ttextcuts = jieba.cut(textfile)\n\t\tfor word in textcuts:\n\t\t\ttext_with_space += word + \" \"\n\treturn text_with_space\n\n\n# 分类加载文本路径与分类标签信息\ndef load_train_set(path, classtag):\n\tallfiles = os.listdir(path)\n\tprocessed_textset = []\n\tclass_tags = []\n\tfor file in allfiles:\n\t\tpathname = os.path.join(path, file)\n\t\tprocessed_textset.append(preprocess(pathname))\n\t\tclass_tags.append(classtag)\n\n\treturn processed_textset, class_tags\n\n\ndef run():\n\tprocessed_textset1, class_tags1 = load_train_set(\"./car\", \"汽车\")\n\tprocessed_textset2, class_tags2 = load_train_set(\"./sports\", \"运动\")\n\ttrain_data = processed_textset1 + processed_textset2\n\tclasstags_list = class_tags1 + class_tags2\n\n\tcount_vector = CountVectorizer()\n\tvect_matrix = count_vector.fit_transform(train_data) # 构建向量矩阵,此处若使用transform,会报错sklearn.exceptions.NotFittedError: Vocabulary not fitted or provided\n\t\"\"\"\n\tCountVectorizer.fit_transform\n\tLearn the vocabulary dictionary and return term-document matrix.\n\tThis is equivalent to fit followed by transform, but more efficiently implemented.\n\t\n\tCountVectorizer.transform\n\tTransform documents to document-term matrix.\n\tExtract token counts out of raw text documents using the vocabulary\n\tfitted with fit or the one provided to the constructor.\n\t\"\"\"\n\n\t# tfidf 提取特征\n\ttrain_tfidf = TfidfTransformer(use_idf=False).fit_transform(vect_matrix) # 构建模型\n\tclf = MultinomialNB()\n\tclf.fit(train_tfidf, classtags_list) # 训练模型\n\n\n\ttest_count_vector = count_vector.transform([preprocess(\"./test/text_news\")]) # 加载待预测新文件为向量矩阵\n\ttest_tfidf = TfidfTransformer(use_idf=False).fit_transform(test_count_vector) # 构建新的tfidf模型\n\tpredicted_result = clf.predict(test_tfidf) # 使用之前训练的模型来预测\n\tprint(predicted_result) # 打印预测结果\n\n\nif __name__ == '__main__':\n\trun()\n","sub_path":"main/classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"429708176","text":"\"\"\"\nExample\n\"\"\"\n\nclass Point():\n\n def __init__(self, x, y, z):\n self.x = x\n self.y = y\n self.z = z\n \n def __str__(self):\n return \"Point: 
{}x{}x{}\".format(self.x, self.y, self.z)\n\nif __name__ == \"__main__\":\n    \n    p1 = Point(0, 0, 0)\n\n    print(p1)\n","sub_path":"fa_5/1_Classes/C_point.py","file_name":"C_point.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"313316272","text":"import numpy as np\n\n\ndef one_hot(X):\n    u = np.unique(X)\n    z = np.zeros([X.shape[0], u.shape[0]])\n    \n    for j in range(X.shape[0]):\n        for k in range(len(u)):\n            if X[j] == u[k]:\n                z[j,k] = 1\n    \n    return z\n\n\nindex = np.genfromtxt('storage/feature_selection.csv', delimiter = ',', usecols = 1)\nh, x_2008, y_2008, x_test_2008 = np.load('storage/raw_data_2008.npy')\n_, _, _, x_test_2012 = np.load('storage/raw_data_2012.npy')\n\nX = np.concatenate([x_2008, x_test_2008, x_test_2012], axis = 0)\nnew_X = np.zeros([X.shape[0], 1])\nfor i in range(len(index)): \n    if index[i] == 1:\n        print('One-hot encoding '+ h[i] +' to ' + str(len(np.unique(X[:, i]))) + ' dimensions')\n        z = one_hot(X[:, i])\n        new_X = np.concatenate([new_X, z], axis = 1)\n        \n        \n    elif index[i] == 2:\n        \"\"\"\n        print('Normalizing ' + h[i] + ' [' + str(np.min(X[:, i])) + ',' + str(np.max(X[:, i])) + '] to [0,1]')\n        z2 = (x_2008[:, i] - np.min(x_2008[:, i])) / (np.max(x_2008[:, i]) - np.min(x_2008[:, i]))\n        z4 = (x_test[:, i] - np.min(x_test[:, i])) / (np.max(x_test[:, i]) - np.min(x_test[:, i]))\n\n        z = np.concatenate([z2, z4], axis = 0)\n        z = np.reshape(z, (z.shape[0], 1))\n        new_X = np.concatenate([new_X, z], axis = 1)\n        \"\"\"\n        print('Adding ' + h[i] + ' [' + str(np.min(X[:, i])) + ',' + str(np.max(X[:, i])) + ']')\n        z = np.reshape(X[:, i], (X[:, i].shape[0], 1))\n        new_X = np.concatenate([new_X, z], axis = 1)\n\n\nX = new_X\nx_2008 = X[0:64667, :]\nx_test_2008 = X[64667:80667, :]\nx_test_2012 = X[80667::, :]\nx_2012 = []\ny_2012 = []\n\nnp.save('storage/one-hot_2008.npy', [h, x_2008, y_2008, x_test_2008])\n\nnp.save('storage/one-hot_2012.npy', [h, x_2012, y_2012, x_test_2012])\n","sub_path":"experimental/scripts/one_hot_encoding.py","file_name":"one_hot_encoding.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"82210919","text":"import qiskit\n\nfrom qiskit import IBMQ\nIBMQ.save_account ('key')\n\nqiskit.__qiskit_version__\n\n\n\n# qiskit\n# python \n# sha256\n# WeGo_Delta\n\nimport hashlib\n\ny = '23dc4da786eff8147c4e72b9807785afee48bb'\npassword = [\"1004\"]\n#binarizes\ndef main():\n    thash = \"1004\"\n    thashd = thash.encode()\n    hash = hashlib.sha256( thashd )\n    hexa = hash.hexdigest()\n    return hexa \n\t\nmainHs = main()\n' '.join(format(ord(x), 'b') for x in mainHs)\n\n\nbinary = ' '.join(format(ord(x), 'b') for x in mainHs)\n\nsplit_threads_string = binary.split(' ')\n\nadata = split_threads_string[0]\nxdata = adata.split('000')\nprint(xdata[-1])\n\n# Grover Algo.\n# def solveIt():\n#     for x in xdata:\n#         if x == y:\n#             return 1\n\n# print(solveIt())\n\n\n\n# import numpy as np\n# import matplotlib.pyplot as plt\n# %matplotlib inline\n\n# # importing Qiskit\n# from qiskit import BasicAer, IBMQ\n# from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute, transpile #compile\n# from qiskit.tools.visualization import plot_histogram","sub_path":"qiskit_hackathon_test.py","file_name":"qiskit_hackathon_test.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"577350799","text":"from . import _dbg\nfrom . import sh, gs, gsq\nfrom .margo_common import TokenCounter, OutputLogger, Chan\nfrom .margo_state import State, make_props, actions\nimport os\nimport sublime\nimport subprocess\nimport threading\nimport time\n\nipc_codec = 'msgpack'\n\nif ipc_codec == 'msgpack':\n\tfrom .vendor import umsgpack\n\tipc_dec = umsgpack.load\n\tipc_enc = umsgpack.dump\n\tipc_ignore_exceptions = (umsgpack.InsufficientDataException, BrokenPipeError)\nelif ipc_codec == 'cbor':\n\tfrom .vendor.cbor_py import cbor\n\tipc_dec = cbor.load\n\tipc_enc = cbor.dump\n\tipc_ignore_exceptions = (BrokenPipeError)\nelse:\n\traise Exception('impossibru')\n\nclass MargoAgent(threading.Thread):\n\tdef __init__(self, mg):\n\t\tthreading.Thread.__init__(self)\n\t\tself.daemon = True\n\n\t\tself.mg = mg\n\t\t_, self.domain = mg.agent_tokens.next()\n\t\tself.cookies = TokenCounter('%s,request' % self.domain)\n\t\tself.proc = None\n\t\tself.lock = threading.Lock()\n\t\tself.out = OutputLogger(self.domain, parent=mg.out)\n\t\tself.global_handlers = {}\n\t\tself.req_handlers = {}\n\t\tself.req_chan = Chan()\n\t\tself.starting = threading.Event()\n\t\tself.starting.set()\n\t\tself.started = threading.Event()\n\t\tself.stopped = threading.Event()\n\t\tself.ready = threading.Event()\n\t\tgopaths = [\n\t\t\tos.path.join(sublime.packages_path(), 'User', 'margo'),\n\t\t\tmg.package_dir,\n\t\t]\n\t\tpsep = os.pathsep\n\t\tself._env = {\n\t\t\t'GOPATH': psep.join(gopaths),\n\t\t\t'PATH': psep.join([os.path.join(p, 'bin') for p in gopaths]) + psep + os.environ.get('PATH'),\n\t\t}\n\n\t\tself._mod_ev = threading.Event()\n\t\tself._mod_view = None\n\t\tself._pos_view = None\n\n\tdef __del__(self):\n\t\tself.stop()\n\n\tdef stop(self):\n\t\tif self.stopped.is_set():\n\t\t\treturn\n\n\t\tself.starting.clear()\n\t\tself.stopped.set()\n\t\tself.req_chan.close()\n\t\tself._stop_proc()\n\t\tself._release_handlers()\n\t\tself.mg.agent_stopped(self)\n\n\tdef ok(self):\n\t\treturn self.proc and self.proc.poll() is None\n\n\tdef _release_handlers(self):\n\t\twith self.lock:\n\t\t\thdls, self.req_handlers = self.req_handlers, {}\n\n\t\trs = AgentRes(error='agent stopping. request aborted', agent=self)\n\t\tfor rq in hdls.values():\n\t\t\trq.done(rs)\n\n\tdef run(self):\n\t\tself._start_proc()\n\n\tdef _start_proc(self):\n\t\tself.mg.agent_starting(self)\n\t\tself.out.println('starting')\n\n\t\tgs_gopath = sh.psep.join((gs.user_path(), gs.dist_path()))\n\t\tgs_gobin = gs.dist_path('bin')\n\t\tinstall_cmd = ['go', 'install', '-v', 'disposa.blue/margo/cmd/margo']\n\t\tcmd = sh.Command(install_cmd)\n\t\tcmd.env = {\n\t\t\t'GOPATH': gs_gopath,\n\t\t\t'GOBIN': gs_gobin,\n\t\t}\n\t\tcr = cmd.run()\n\t\tfor v in (cr.out, cr.err, cr.exc):\n\t\t\tif v:\n\t\t\t\tself.out.println('%s:\\n%s' % (install_cmd, v))\n\n\t\tmg_cmd = [\n\t\t\tsh.which('margo', m={'PATH': gs_gobin}) or 'margo',\n\t\t\t'sublime', '-codec', ipc_codec,\n\t\t]\n\t\tself.out.println(mg_cmd)\n\t\tcmd = sh.Command(mg_cmd)\n\t\tcmd.env = {\n\t\t\t'GOPATH': gs_gopath,\n\t\t\t'PATH': gs_gobin,\n\t\t}\n\t\tpr = cmd.proc()\n\t\tif not pr.ok:\n\t\t\tself.stop()\n\t\t\tself.out.println('Cannot start margo: %s' % pr.exc)\n\t\t\treturn\n\n\t\tself.proc = pr.p\n\t\tgsq.launch(self.domain, self._handle_send)\n\t\tgsq.launch(self.domain, self._handle_send_mod)\n\t\tgsq.launch(self.domain, self._handle_recv)\n\t\tgsq.launch(self.domain, self._handle_log)\n\t\tself.started.set()\n\t\tself.starting.clear()\n\t\tself.proc.wait()\n\n\tdef _stop_proc(self):\n\t\tself.out.println('stopping')\n\t\tp = self.proc\n\t\tif not p:\n\t\t\treturn\n\n\t\tfor f in (p.stdin, p.stdout, p.stderr):\n\t\t\ttry:\n\t\t\t\tf.close()\n\t\t\texcept Exception as exc:\n\t\t\t\tself.out.println(exc)\n\n\tdef _handle_send_ipc(self, rq):\n\t\twith self.lock:\n\t\t\tself.req_handlers[rq.cookie] = rq\n\n\t\ttry:\n\t\t\tipc_enc(rq.data(), self.proc.stdin)\n\t\t\texc = None\n\t\texcept ipc_ignore_exceptions as e:\n\t\t\texc = e\n\t\texcept Exception as e:\n\t\t\texc = e\n\t\t\tif not self.stopped.is_set():\n\t\t\t\tgs.error_traceback(self.domain)\n\n\t\tif exc:\n\t\t\twith self.lock:\n\t\t\t\tself.req_handlers.pop(rq.cookie, None)\n\n\t\t\trq.done(AgentRes(error='Exception: %s' % exc, rq=rq, agent=self))\n\n\tdef send(self, action={}, cb=None, view=None):\n\t\trq = AgentReq(self, action, cb=cb, view=view)\n\t\ttimeout = 0.200\n\t\tif not self.started.wait(timeout):\n\t\t\trq.done(AgentRes(error='margo has not started after %0.3fs' % (timeout), timedout=timeout, rq=rq, agent=self))\n\t\t\treturn rq\n\n\t\tif not self.req_chan.put(rq):\n\t\t\trq.done(AgentRes(error='chan closed', rq=rq, agent=self))\n\n\t\treturn rq\n\n\tdef view_modified(self, view):\n\t\tself._mod_view = view\n\t\tself._mod_ev.set()\n\n\tdef view_pos_changed(self, view):\n\t\tself._pos_view = view\n\t\tself._mod_ev.set()\n\n\tdef _send_mod(self):\n\t\tmod_v, self._mod_view = self._mod_view, None\n\t\tpos_v, self._pos_view = self._pos_view, None\n\t\tif mod_v is None and pos_v is None:\n\t\t\treturn\n\n\t\tview = pos_v\n\t\taction = actions.ViewPosChanged\n\t\tif mod_v is not None:\n\t\t\taction = actions.ViewModified\n\t\t\tview = mod_v\n\n\t\tself.send(action=action, view=view).wait()\n\n\tdef _handle_send_mod(self):\n\t\tdelay = 0.500\n\t\twhile not self.stopped.is_set():\n\t\t\tself._mod_ev.wait(delay)\n\t\t\tif self._mod_ev.is_set():\n\t\t\t\tself._mod_ev.clear()\n\t\t\t\ttime.sleep(delay * 1.5)\n\t\t\t\tself._send_mod()\n\n\tdef _handle_send(self):\n\t\tfor rq in self.req_chan:\n\t\t\tself._handle_send_ipc(rq)\n\n\tdef _nop_handler(self, rs):\n\t\tpass\n\n\tdef _handler(self, rs):\n\t\tif not rs.cookie:\n\t\t\treturn self._nop_handler\n\n\t\twith self.lock:\n\t\t\trq = self.req_handlers.pop(rs.cookie, None)\n\t\t\tif rq:\n\t\t\t\trs.set_rq(rq)\n\t\t\t\treturn rq.done\n\n\t\tif rs.cookie in self.global_handlers:\n\t\t\treturn self.global_handlers[rs.cookie]\n\n\t\treturn lambda rs: self.out.println('unexpected response: %s' % rs)\n\n\tdef _notify_ready(self):\n\t\tif self.ready.is_set():\n\t\t\treturn\n\n\t\tself.ready.set()\n\t\tself.mg.agent_ready(self)\n\n\tdef _handle_recv_ipc(self, v):\n\t\tself._notify_ready()\n\t\trs = AgentRes(v=v, agent=self)\n\t\t# call the handler first. it might be on a timeout (like fmt)\n\t\tfor handle in [self._handler(rs), self.mg.render]:\n\t\t\ttry:\n\t\t\t\thandle(rs)\n\t\t\texcept Exception:\n\t\t\t\tgs.error_traceback(self.domain)\n\n\tdef _handle_recv(self):\n\t\ttry:\n\t\t\tv = None\n\t\t\twhile not self.stopped.is_set():\n\t\t\t\tv = ipc_dec(self.proc.stdout) or {}\n\t\t\t\tif v:\n\t\t\t\t\tself._handle_recv_ipc(v)\n\t\texcept ipc_ignore_exceptions:\n\t\t\tpass\n\t\texcept Exception as e:\n\t\t\tself.out.println('ipc: recv: %s: %s' % (e, v))\n\t\tfinally:\n\t\t\tself.stop()\n\n\tdef _handle_log(self):\n\t\ttry:\n\t\t\tfor ln in self.proc.stderr:\n\t\t\t\ttry:\n\t\t\t\t\tself.out.println('log: %s' % self._decode_ln(ln))\n\t\t\t\texcept (ValueError, OSError):\n\t\t\t\t\tpass\n\t\t\t\texcept Exception:\n\t\t\t\t\tgs.error_traceback(self.domain)\n\t\texcept (ValueError, OSError):\n\t\t\tpass\n\t\texcept Exception:\n\t\t\tgs.error_traceback(self.domain)\n\n\tdef _decode_ln(self, ln):\n\t\tif isinstance(ln, bytes):\n\t\t\tln = ln.decode('utf-8', 'replace')\n\n\t\treturn ln.rstrip('\\r\\n')\n\nclass AgentRes(object):\n\tdef __init__(self, v={}, error='', timedout=0, rq=None, agent=None):\n\t\tself.data = v\n\t\tself.cookie = v.get('Cookie')\n\t\tself.state = State(v=v.get('State') or {})\n\t\tself.error = v.get('Error') or error\n\t\tself.timedout = timedout\n\t\tself.agent = agent\n\t\tself.set_rq(rq)\n\n\tdef set_rq(self, rq):\n\t\tif self.error and rq:\n\t\t\tact = rq.action\n\t\t\tif act and act.get('Name'):\n\t\t\t\tself.error = 'action: %s, error: %s' % (act.get('Name'), self.error)\n\t\t\telse:\n\t\t\t\tself.error = 'error: %s' % self.error\n\n\tdef get(self, k, default=None):\n\t\treturn self.state.get(k, default)\n\nclass AgentReq(object):\n\tdef __init__(self, agent, action, cb=None, view=None):\n\t\tself.start_time = time.time()\n\t\t_, cookie = agent.cookies.next()\n\t\tself.cookie = 'action:%s(%s)' % (action['Name'], cookie)\n\t\tself.domain = self.cookie\n\t\tself.action = action\n\t\tself.cb = cb\n\t\tself.props = make_props(view=view)\n\t\tself.rs = DEFAULT_RESPONSE\n\t\tself.lock = threading.Lock()\n\t\tself.ev = threading.Event()\n\t\tself.view = view\n\n\tdef done(self, rs):\n\t\twith self.lock:\n\t\t\tif self.ev.is_set():\n\t\t\t\treturn\n\n\t\t\tself.rs = rs\n\t\t\tself.ev.set()\n\n\t\tif self.cb:\n\t\t\ttry:\n\t\t\t\tself.cb(self.rs)\n\t\t\texcept Exception:\n\t\t\t\tgs.error_traceback(self.domain)\n\n\tdef wait(self, timeout=None):\n\t\tif self.ev.wait(timeout):\n\t\t\treturn self.rs\n\n\t\treturn None\n\n\tdef data(self):\n\t\treturn {\n\t\t\t'Cookie': self.cookie,\n\t\t\t'Props': self.props,\n\t\t\t'Action': self.action,\n\t\t}\n\nDEFAULT_RESPONSE = AgentRes(error='default agent response')\n","sub_path":"gosubl/margo_agent.py","file_name":"margo_agent.py","file_ext":"py","file_size_in_byte":7999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"31677155","text":"#! 
/usr/bin/python3\n# -*- coding: utf-8 -*-\n#\n#\thantei.py\n#\n#\t\t\t\t\t\tJul/31/2014\n#\nimport\tsys\nimport\tstring\n#\nsys.path.append ('/var/www/data_base/common/python_common')\nfrom text_manipulate import text_read_proc\nfrom text_manipulate import dict_display_proc\n\n# --------------------------------------------------------------------\ndef hantei_proc (dict_aa,dict_bb,dict_cc,dict_dd):\n\thantei = True\n\tlen_aa = len (dict_aa)\n\tlen_bb = len (dict_bb)\n\tlen_cc = len (dict_cc)\n\tif (len_aa < 9):\n\t\thantei = False\n\t\tprint (\"*** error *** len_aa = %d\" % len_aa)\n\telif (len_aa != len_bb):\n\t\thantei = False\n\t\tprint (\"*** error *** len_aa != len_bb\")\n\telif (dict_aa == dict_bb):\n\t\thantei = False\n\t\tprint (\"*** error *** dict_aa == dict_bb\")\n\telif ((len_bb - len_cc) != 1):\n\t\thantei = False\n\t\tprint (\"*** error *** len_cc = %d\" % len_cc)\n\telif (dict_aa != dict_dd):\n\t\thantei = False\n\t\tprint (\"*** error *** dict_aa != dict_dd\")\n#\n\treturn\thantei\n#\n# --------------------------------------------------------------------\n#sys.stderr.write (\"*** 開始 ***\\n\")\ndbase = sys.argv[1]\nlang = sys.argv[2]\nfile_aa = sys.argv[3]\nfile_bb = sys.argv[4]\nfile_cc = sys.argv[5]\nfile_dd = sys.argv[6]\n#\ndict_aa = text_read_proc (file_aa)\ndict_bb = text_read_proc (file_bb)\ndict_cc = text_read_proc (file_cc)\ndict_dd = text_read_proc (file_dd)\n#\nhantei = hantei_proc (dict_aa,dict_bb,dict_cc,dict_dd)\n#\nprint (\"%s\\t%s\\t%s\" % (dbase,lang,hantei))\nif (hantei == False):\n\tprint (dict_aa.keys ())\n\tprint (dict_bb.keys ())\n\tprint (dict_cc.keys ())\n\tprint (dict_dd.keys ())\n\tprint (\"*** error *** \")\n#\n#sys.stderr.write (\"*** 終了 ***\\n\")\n# --------------------------------------------------------------------\n\n","sub_path":"tools/checker/hantei/hantei.py","file_name":"hantei.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"229996603","text":"from flask import Flask, request\nimport json\n\napp = Flask(__name__)\n\n@app.route(\"/greetings/\")\ndef hello():\n return \"Hello Worldo!\"\n\n@app.route(\"/calc/sum/\", methods = ['GET'])\ndef calc():\n a = int(request.args.get('a'))\n b = int(request.args.get('b'))\n return json.dumps(a+b)\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=int(\"5000\"), debug=True)","sub_path":"8_actively_updating_microservices/flaskapp/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"95657467","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 7 15:37:36 2020\n\n@author: csevern\n\"\"\"\n#%% Load Data\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport pickle\nfrom time import time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn import metrics\nfrom sklearn.cluster import KMeans\nfrom sklearn.datasets import load_digits\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import scale\n\n\ndf = pd.read_csv(\"\", encoding='utf-8')\n\n# Assign colum names to the dataset\n#%% Select Data\n\n# Read dataset to pandas dataframe\n\nprint(df.head())\ndf = df.sample(frac=1).reset_index(drop=True)\nX = df.iloc[:10000, 1:-1].values\ny = df['CCs'].tolist()[:10000]\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, 
test_size=0.20)\n\nfrom sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()\nscaler.fit(X_train)\n\n#%%Train\n\nfrom sklearn.neighbors import KNeighborsClassifier, RadiusNeighborsClassifier, KNeighborsRegressor, NearestNeighbors\nscore = 0\nfor i in range(1,100,3):\n p=i+2\n sc2=0\n classifier = KNeighborsClassifier(n_neighbors=i, weights='distance')\n classifier.fit(X_train, y_train)\n \n classifier1 = RadiusNeighborsClassifier(radius=p,n_neighbors=i, weights='distance')\n classifier1.fit(X_train, y_train)\n\n y_pred = classifier.predict(X_test)\n y_pred1 = classifier1.predict(X_test)\n \n sc1 =classifier.score(X_train,y_train)\n sc2 =classifier1.score(X_train,y_train)\n print(i,p,sc1,sc2)\n if sc1 > sc2:\n ml = classifier\n mltype = \"KNN\"\n scr = sc1\n pred = y_pred\n \n else:\n ml = classifier1\n mltype = \"Radius\"\n scr = sc2\n pred = y_pred1\n #print(\"K nearest =\", i, \"Radius=\", i, \"Type=\", mltype, scr) \n if scr > score:\n print(\"K nearest =\", i, \"Radius=\", i, \"Type=\", mltype, scr)\n score = scr\n saveml = ml\n savepred = pred\n \n \n\n\nsave_class = open(\"KNN_CCs.pickle\", \"wb\")\nprint(\"saving file\")\npickle.dump(classifier, save_class)\nsave_class.close()\nfrom sklearn.metrics import classification_report\nif mltype == \"KNN\":\n\n print(\"=\"*20, \"KNN\", \"=\"*20)\n #print(confusion_matrix(y_test, y_pred))\n print(classification_report(y_test, savepred))\n #print(classifier.score(X_train,y_train))\n #print(classifier.score(X_train,y_train))\nelse:\n print(\"=\"*20, \"RNN\", \"=\"*20)\n #print(confusion_matrix(y_test, y_pred1))\n print(classification_report(y_test, savepred))\n ## print(classifier1.score(X_train,y_train))\n #print(classifier1.score(X_train,y_train))\n","sub_path":"Scikit-Learn/KNN.py","file_name":"KNN.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"112496715","text":"#myCards\nfrom sikuli import *\nimport util\nreload(util)\n\n\n\ndef getMyCards(myImg=\"Alice.png\"):\n myChair = find(myImg).nearby() #Get my chair\n fullCard =Pattern(\"fullCard.png\").similar(0.51)\n fCR = myChair.find(fullCard) #Get the region for card on the right (full card)\n hCR = fCR.left(20) #Get the region for card on the left (half hidden card)\n\n #Draw rectan\n util.drawHoverRect(fCR)\n util.drawHoverRect(hCR)\n\n return [util.getCardID(fCR),util.getCardID(hCR)]\n\n#print getMyCards()","sub_path":"my_cards.sikuli/my_cards.py","file_name":"my_cards.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"277173430","text":"\n\n#calss header\nclass _TRAVESTY():\n\tdef __init__(self,): \n\t\tself.name = \"TRAVESTY\"\n\t\tself.definitions = [u'something that fails to represent the values and qualities that it is intended to represent, in a way that is shocking or offensive: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_travesty.py","file_name":"_travesty.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"457289532","text":"import gspread, json, requests, time, sendgrid\r\nfrom sendgrid.helpers.mail import *\r\nimport pandas as pd\r\nfrom oauth2client.service_account import 
ServiceAccountCredentials\r\n\r\n# from google sheet url.\r\nlist_id = '12eS-bMLHY5aNoiQiw2fN2rD3glKEKg3WpUBJKZSPJ2k'\r\n\r\nscope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']\r\ncredentials = ServiceAccountCredentials.from_json_keyfile_name('sp47test.json', scope)\r\ngc = gspread.authorize(credentials)\r\nrequests_wks = gc.open_by_key(list_id).sheet1\r\ndf = pd.DataFrame(requests_wks.get_all_records())\r\n\r\nexample = [{'name':'Haowei Liu','email':'sophialiuhw@gmail.com'}]\r\nmentor = pd.DataFrame(example)\r\n\r\ncount = 0\r\nif df.iloc[0]['Processed']=='N':\r\n target = df['Name'][count]\r\n target_email = mentor[mentor['name']==target]['email'][0]\r\n df.iloc[count]['Processed']='Y'\r\nto_email = Email(df.iloc[count]['Email'])\r\n\r\nkey=\"\"\r\nsg = sendgrid.SendGridAPIClient(apikey=key)\r\nfrom_email = Email('hwliu47@gmail.com')\r\nsubject = \"Testing with Sendgrid\"\r\ncontent = Content(\"text/plain\", \"the first content\")\r\nmail = Mail(from_email, subject,to_email, content)\r\n# response = sg.client.mail.send.post(request_body=mail.get())\r\n\r\n\r\nprint(df.head())\r\n\r\n","sub_path":"gsapi.py","file_name":"gsapi.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"556953352","text":"# -*- coding: utf-8 -*-\n# author: huihui\n# date: 2020/11/23 2:47 下午\n\n'''爬取数据,获取网页url'''\n\ndef f1():\n content = open('保险-腾讯网.html.mhtml').read()\n content = content.replace('\\n', '')\n with open('result.txt', 'w') as f:\n f.write(content)\n\n\ndef f2():\n content = open('result.txt').read()\n content = content.replace('https://new.qq.com/omn/', '\\nhttps://new.qq.com/omn/')\n with open('result.txt', 'w') as f:\n f.write(content)\n\n\ndef f3():\n lines = open('result.txt').readlines()\n with open('result.txt', 'w') as f:\n for line in lines:\n line = line.strip().split('\"')[0]\n f.write(line + '\\n')\n\n\n# f1()\n# f2()\n# f3()\n\ndef f4():\n lines = open('result.txt').readlines()\n urls = []\n for line in lines:\n line = line.strip()\n if line.endswith('.html') and line not in urls:\n print(line)\n","sub_path":"qq_insurance/x2.py","file_name":"x2.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"4134170","text":"import urllib.request\nimport urllib\n\n#1,网站url\nurl = 'http://www.baidu.com'\n\n#2.创建request请求对象\nrequest = urllib.request.Request(url)\n\n#3.发送请求获取结果\nresponse = urllib.request.urlopen(request)\nhtmldata = response.read()\n\n#4.设置编码方式\nhtmldata = htmldata.decode('utf-8')\n\n#打印结果\nprint(htmldata)\n\n#打印爬去网页的各类信息\nprint(\"response的类型:\",type(response))\nprint(\"请求的url:\",response.geturl())\nprint(\"响应的信息:\",response.info())\nprint(\"状态码:\",response.getcode())\n\n#7.爬取数据保存到文件\nfileOb = open('baidu.html','w',encoding='utf-8')\nfileOb.write(htmldata)\nfileOb.close()","sub_path":"spider_baidu.py","file_name":"spider_baidu.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"176651834","text":"# Crie uma lista com todas as letras do alfabeto\n\n# Remova as vogais dessa lista e crie uma tupla com elas\n\n# Crie uma coleção com as letras do seu nome (utilizando a lista e a tupla, sem remover itens).\n# depois, adicione sua idade e o nome do seu livro favorito\n\nalfabeto = ['a', 'b', 'c', 'd', 'e', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 
't', 'u', 'v', 'w', 'x', 'y', 'z']\nvogais = []\n\nfor letra in alfabeto:\n if letra == 'a' or letra == 'e' or letra == 'i' or letra == 'o' or letra == 'u':\n vogais.append(alfabeto.pop(alfabeto.index(letra)))\n\nvogais = tuple(vogais)\n\nprint(alfabeto)\nprint(vogais)\n\neu = []\nletras = [alfabeto, vogais]\nfor l in letras:\n for letra in l:\n if letra == 'e' or letra == 'd' or letra == 'u' or letra == 'a' or letra == 'r':\n eu.append(letra)\n\neu.append(18)\neu.append('livro')\neu = set(eu)\n\nprint(eu)\n","sub_path":"Exercicios/Aulas02/Aula02/exercicio_um.py","file_name":"exercicio_um.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"610997362","text":"# -*- coding: utf-8 -*-\nlesson(grade=6, subject='English', title='ALPHABETICAL ORDER', week=13,\n browser_title='Class 6 English Alphabetical Order',\n lesson_title='ALPHABETICAL ORDER',\n summary=u'शब्दहरूलाई Alphabetical order मा राख्न सिकाउने क्रियाकलाप')\n\ncss('global')\ncss('lesson.css')\n\nfor f in [\n 'ui.core',\n 'ui.draggable',\n 'ui.droppable',\n 'jquery.clickable'\n ]:\n java_script(f)\n\nfor index in range(0,6):\n image('ele' + str(index) + '.png', 'ele' + str(index))\n image('house' + str(index) + '.png', 'house' + str(index))\n image('ship' + str(index) + '.png', 'ship' + str(index))\n\nimage('correct.png', 'correct')\nimage('incorrect.png', 'incorrect')\nimage('background.jpg')\n\nfooter_configuration(link_next=True, link_previous=True, scoreboard=True, link_check_answer=True)\n","sub_path":"lessons/alphabetical-order/description.py","file_name":"description.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"647735377","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\nThis script imports an individual RMG-Java themo library from a local directory and saves the output\nthermo library py file into a path of the user's choosing. 
This library will be automatically\nsaved to libraryname.py in the input/thermo/libraries directory and can \nbe used directly as an RMG-Py thermo library.\n\nusage:\nimportJavaThermoLibrary.py [-h] INPUT LIBRARYNAME\n\npositional arguments:\nINPUT the input path of the RMG-Java thermo library directory\nLIBRARYNAME the libraryname for the RMG-Py format thermo library\n\n\"\"\"\n\nimport argparse\nimport os\nfrom rmgpy.data.thermo import ThermoLibrary\nfrom rmgpy import settings\n \nif __name__ == '__main__':\n \n parser = argparse.ArgumentParser()\n parser.add_argument('inputPath', metavar='INPUT', type=str, nargs=1,\n help='the input path of the RMG-Java thermo library directory')\n parser.add_argument('libraryName', metavar='OUTPUT', type=str, nargs=1,\n help='the libraryName for the RMG-Py format thermo library') \n \n args = parser.parse_args()\n inputPath = args.inputPath[0]\n libraryName = args.libraryName[0]\n \n library = ThermoLibrary()\n library.loadOld(\n dictstr = os.path.join(inputPath, 'Dictionary.txt'),\n treestr = '',\n libstr = os.path.join(inputPath, 'Library.txt'),\n numParameters = 12,\n numLabels = 1,\n pattern = False,\n )\n library.name = libraryName\n\n # Save in Py format \n library.save(os.path.join(settings['database.directory'], 'thermo', 'libraries', libraryName+'.py'))\n","sub_path":"scripts/importJavaThermoLibrary.py","file_name":"importJavaThermoLibrary.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"401336679","text":"# Copyright 2020 Eugene Molotov \n# License MIT (https://opensource.org/licenses/MIT).\nfrom odoo import exceptions\nfrom odoo.tests import common\n\n\n@common.tagged(\"post_install\", \"-at_install\")\nclass TestExcludedUsers(common.TransactionCase):\n def test_create(self):\n admin_user = self.env.ref(\"base.user_admin\")\n Users = self.env[\"res.users\"].with_user(admin_user)\n login = \"test_excluded_user\"\n rule_record = self.env.ref(\"access_limit_max_users.max_users_limit\")\n rule_record.max_records = Users.search_count(\n [(\"is_excluded_from_limiting\", \"=\", False)]\n )\n\n vals = {\"name\": login, \"login\": login, \"is_excluded_from_limiting\": True}\n\n with self.assertRaises(exceptions.ValidationError):\n Users.create(vals)\n\n Users.sudo().create(vals)\n\n def test_write(self):\n admin_user = self.env.ref(\"base.user_admin\")\n demo_user = self.env.ref(\"base.user_demo\").with_user(admin_user)\n rule_record = self.env.ref(\"access_limit_max_users.max_users_limit\")\n rule_record.max_records = self.env[\"res.users\"].search_count(\n [(\"is_excluded_from_limiting\", \"=\", False)]\n )\n\n vals = {\"is_excluded_from_limiting\": True}\n\n with self.assertRaises(exceptions.ValidationError):\n demo_user.write(vals)\n\n demo_user.sudo().write(vals)\n","sub_path":"access_limit_max_users/tests/test_excluded_users.py","file_name":"test_excluded_users.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"257465166","text":"# Author: Guðjón Ingi Valdimarsson\n# Date: 20.03.2020\n\nclass ItemExistsException(Exception):\n pass\n\nclass NotFoundException(Exception):\n pass\n\nclass Bucket():\n class _Node():\n def __init__(self, key=None, data=None, next=None):\n self.key = key\n self.data = data\n self.next = next\n\n def __init__(self):\n self.size = 0\n self.head = self._Node()\n \n def insert(self, key, data):\n if self.contains(key):\n 
raise ItemExistsException()\n self.head.next = self._Node(key, data, self.head.next)\n self.size += 1\n \n def update(self, key, data):\n node = self._find(key)\n node.data = data\n\n def _find(self, key, removeBool=False):\n walker = self.head.next\n prevNode = self.head # Used for remove function to have a reference to the previous node\n while walker != None:\n if walker.key == key:\n if removeBool:\n return (walker, prevNode)\n return walker\n walker = walker.next\n prevNode = prevNode.next\n raise NotFoundException()\n\n def find(self, key):\n return self._find(key).data\n\n def contains(self, key):\n try:\n self._find(key)\n return True\n except NotFoundException:\n return False\n\n def remove(self, key):\n node, prevNode = self._find(key, True)\n prevNode.next = node.next\n self.size -= 1\n\n def __setitem__(self, key, data):\n try:\n self.update(key, data)\n except NotFoundException:\n self.insert(key, data)\n\n def __getitem__(self, key):\n return self.find(key)\n\n def __len__(self):\n return self.size\n\n def __iter__(self):\n walker = self.head.next\n while walker != None:\n yield (walker.key, walker.data)\n walker = walker.next\n\n def __str__(self):\n ret_str = \"\"\n for item in self:\n ret_str += \"({}: {}), \".format(item[0], item[1])\n return ret_str.strip(\", \")\n\nif __name__ == \"__main__\":\n buc = Bucket()\n for x in range(10):\n buc.insert(x, \"value\")\n print (buc)\n print (len(buc))\n buc.update(5, \"not value\")\n print (buc)\n buc[5] = \"Val\"\n buc[10] = \"value\"\n print (buc)\n buc.remove(10)\n print (buc)\n print (buc.find(4))\n print (buc[4])\n for item in buc:\n print (item)\n","sub_path":"Assignment_5/Bucket.py","file_name":"Bucket.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"278905262","text":"import argparse\nimport sys\nimport codecs\nfrom operator import itemgetter\n\nimport torch\n\nfrom data_loader import DataLoader\nimport data_loader\n\nfrom simple_nmt.seq2seq import Seq2Seq\nfrom simple_nmt.encoder import Encoder\nfrom simple_nmt.decoder import Decoder\nfrom simple_nmt.decoder import Generator\nfrom simple_nmt.attention import Attention\n\nfrom hyperparams import HyperParams_translate\n\n\ndef read_text():\n # This method gets sentences from standard input and tokenize those.\n lines = []\n\n sys.stdin = codecs.getreader(\"utf-8\")(sys.stdin.detach())\n\n for line in sys.stdin:\n if line.strip() != '':\n lines += [line.strip().split(' ')]\n\n return lines\n\n\ndef to_text(indice, vocab):\n # This method converts index to word to show the translation result.\n lines = []\n\n for i in range(len(indice)):\n line = []\n for j in range(len(indice[i])):\n index = indice[i][j]\n\n if index == data_loader.EOS:\n # line += ['']\n break\n else:\n line += [vocab.itos[index]]\n\n line = ' '.join(line)\n lines += [line]\n\n return lines\n\n\nif __name__ == '__main__':\n sys.stdout = codecs.getwriter(\"utf-8\")(sys.stdout.detach())\n config = HyperParams_translate()\n\n # Load saved model.\n saved_data = torch.load(config.model, map_location='cpu')\n\n # Load configuration setting in training.\n train_config = saved_data['config']\n\n if train_config.dsl:\n assert config.lang is not None\n\n if config.lang == train_config.lang:\n is_reverse = False\n else:\n is_reverse = True\n\n if not is_reverse:\n # Load vocabularies from the model.\n src_vocab = saved_data['src_vocab']\n tgt_vocab = saved_data['tgt_vocab']\n else:\n src_vocab = 
saved_data['tgt_vocab']\n tgt_vocab = saved_data['src_vocab']\n else:\n # Load vocabularies from the model.\n src_vocab = saved_data['src_vocab']\n tgt_vocab = saved_data['tgt_vocab']\n\n # Initialize dataloader, but we don't need to read training & test corpus.\n # What we need is just load vocabularies from the previously trained model.\n loader = DataLoader()\n\n loader.load_vocab(src_vocab, tgt_vocab)\n input_size = len(loader.src.vocab)\n output_size = len(loader.tgt.vocab)\n\n # Declare sequence-to-sequence model.\n model = Seq2Seq(input_size,\n train_config.word_vec_size,\n train_config.hidden_size,\n output_size,\n n_layers=train_config.n_layers,\n dropout_p=train_config.dropout\n )\n \n if train_config.dsl:\n if not is_reverse:\n model.load_state_dict(saved_data['model'][0])\n else:\n model.load_state_dict(saved_data['model'][1])\n else:\n model.load_state_dict(saved_data['model']) # Load weight parameters from the trained model.\n model.eval() # We need to turn-on the evaluation mode, which turns off all drop-outs.\n\n # We don't need to draw a computation graph, because we will have only inferences.\n torch.set_grad_enabled(False)\n\n # Put models to device if it is necessary.\n # if config.gpu_id >= 0:\n # model.cuda(config.gpu_id)\n\n # Get sentences from standard input.\n lines = read_text()\n \n with torch.no_grad(): # Also, declare again to prevent to get gradients.\n while len(lines) > 0:\n # Since packed_sequence must be sorted by decreasing order of length,\n # sorting by length in mini-batch should be restored by original order.\n # Therefore, we need to memorize the original index of the sentence.\n sorted_lines = lines[:config.batch_size]\n lines = lines[config.batch_size:]\n \n lengths = [len(_) for _ in sorted_lines]\n orders = [i for i in range(len(sorted_lines))]\n\n sorted_tuples = sorted(zip(sorted_lines, lengths, orders), \n key=itemgetter(1),\n reverse=True\n )\n sorted_lines = [sorted_tuples[i][0] for i in range(len(sorted_tuples))]\n lengths = [sorted_tuples[i][1] for i in range(len(sorted_tuples))]\n orders = [sorted_tuples[i][2] for i in range(len(sorted_tuples))]\n\n # Converts string to list of index.\n x = loader.src.numericalize(loader.src.pad(sorted_lines),\n device='cuda:%d' % config.gpu_id if config.gpu_id >= 0 else 'cpu'\n )\n \n if config.beam_size == 1:\n # Take inference for non-parallel beam-search.\n y_hat, indice = model.search(x)\n output = to_text(indice, loader.tgt.vocab)\n\n sorted_tuples = sorted(zip(output, orders), key=itemgetter(1))\n output = [sorted_tuples[i][0] for i in range(len(sorted_tuples))]\n\n sys.stdout.write('\\n'.join(output) + '\\n')\n else:\n # Take mini-batch parallelized beam search.\n batch_indice, _ = model.batch_beam_search(x,\n beam_size=config.beam_size,\n max_length=config.max_length,\n n_best=config.n_best,\n length_penalty=config.length_penalty,\n )\n\n # Restore the original orders.\n output = []\n for i in range(len(batch_indice)):\n output += [to_text(batch_indice[i], loader.tgt.vocab)]\n sorted_tuples = sorted(zip(output, orders), key=itemgetter(1))\n output = [sorted_tuples[i][0] for i in range(len(sorted_tuples))]\n\n for i in range(len(output)):\n sys.stdout.write('\\n'.join(output[i]) + '\\n')\n\n","sub_path":"translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":6161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"632481577","text":"\"\"\"\n test_extensions\n ~~~~~~~~~~~~~~\n \n :copyright: (c) 2017 by 0xE8551CCB.\n 
:license: MIT, see LICENSE for more details.\n\"\"\"\n\nfrom xcrawler.extensions.default_useragent import DefaultUserAgentExtension\nfrom xcrawler.extensions.default_filter import DefaultRequestFilterExtension\nfrom xcrawler.extensions.retry import RetryRequestExtension\n\n\ndef test_default_useragent_extension():\n class Crawler(object):\n settings = {\n 'DEFAULT_USER_AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) '\n 'AppleWebKit/603.3.8 (KHTML, like Gecko) Version'\n '/10.1.2 Safari/603.3.8'\n }\n\n ext = DefaultUserAgentExtension()\n crawler = Crawler()\n\n ext.on_crawler_started(crawler)\n assert ext.user_agent == crawler.settings['DEFAULT_USER_AGENT']\n\n assert ext.process_request(None, None) is None\n\n class Request(object):\n def __init__(self):\n self.headers = {}\n\n assert \"User-Agent\" in ext.process_request(Request(), None).headers\n ext.user_agent = None\n assert 'User-Agent' not in ext.process_request(Request(), None).headers\n\n req = Request()\n req.headers['User-Agent'] = 'User-Agent'\n assert ext.process_request(\n req, None).headers['User-Agent'] == 'User-Agent'\n\n\ndef test_default_filter_extension():\n ext = DefaultRequestFilterExtension()\n\n from xcrawler.http import Request\n\n class FakeSpider:\n name = 'fake_spider'\n\n def parse(self):\n pass\n\n # GET request\n req = Request('http://foo.example/1?query=bar1', FakeSpider())\n assert ext.process_request(req, req.spider) is req\n assert ext.process_request(req, req.spider) is None\n\n req2 = Request('http://foo.example/1?query=bar2', FakeSpider())\n assert ext.process_request(req2, req2.spider) is req2\n\n # POST request\n req3 = Request('http://foo.example/login', FakeSpider(), method='POST')\n assert ext.process_request(req3, req3.spider) is req3\n assert ext.process_request(req3, req3.spider) is req3\n assert ext.process_request(req3, req3.spider) is req3\n\n\ndef test_retry_request_extension():\n class Crawler(object):\n settings = {\n 'RETRY_ENABLED': True,\n 'RETRY_ON_TIMEOUT': True,\n 'RETRY_ON_CONNECTION_ERROR': True,\n 'RETRY_ON_STATUS_CODE': {400, 403, 404, 503},\n 'MAX_TRIES': 1\n }\n\n ext = RetryRequestExtension()\n\n crawler = Crawler()\n ext.on_crawler_started(crawler)\n\n # check settings are ok\n assert ext.retry_enabled is True\n assert ext.retry_on_timeout is True\n assert ext.retry_on_connection_error is True\n assert ext.retry_on_status_code == {400, 403, 404, 503}\n assert ext.max_tries == 1\n\n from xcrawler.errors import (HTTPTimeoutError,\n HTTPConnectionError,\n HTTPStatusError)\n from xcrawler.http import Request, Response\n\n url = 'http://foo.example.com'\n\n class FakeSpider:\n name = 'fake_spider'\n\n def parse(self): pass\n\n request = lambda: Request(url, FakeSpider())\n\n # retry on timeout\n req = request()\n\n assert ext.process_http_error(HTTPTimeoutError(), None,\n req, req.spider) is req\n assert req.retry_count == 1\n\n # max retry count exceeds, cannot retry that again.\n assert ext.process_http_error(HTTPTimeoutError(), None,\n req, req.spider) is None\n\n # retry on connection error\n req1 = request()\n assert ext.process_http_error(HTTPConnectionError(), None,\n req1, req1.spider) is req1\n assert req1.retry_count == 1\n\n # retry on http status error\n req2 = request()\n resp1 = Response(req2.url, 400, b'content', 'utf-8',\n req2, reason='Bad request')\n assert ext.process_http_error(HTTPStatusError(), resp1,\n resp1.request, None) is req2\n assert req2.retry_count == 
1\n","sub_path":"tests/test_extensions.py","file_name":"test_extensions.py","file_ext":"py","file_size_in_byte":4042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"381241746","text":"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"\nTesting BertTokenizer op in DE\n\"\"\"\nimport numpy as np\nimport mindspore.dataset as ds\nfrom mindspore import log as logger\nimport mindspore.dataset.text as nlp\n\nBERT_TOKENIZER_FILE = \"../data/dataset/testTokenizerData/bert_tokenizer.txt\"\n\nvocab_bert = [\n \"床\", \"前\", \"明\", \"月\", \"光\", \"疑\", \"是\", \"地\", \"上\", \"霜\", \"举\", \"头\", \"望\", \"低\", \"思\", \"故\", \"乡\",\n \"繁\", \"體\", \"字\", \"嘿\", \"哈\", \"大\", \"笑\", \"嘻\",\n \"i\", \"am\", \"mak\", \"make\", \"small\", \"mistake\", \"##s\", \"during\", \"work\", \"##ing\", \"hour\",\n \"😀\", \"😃\", \"😄\", \"😁\", \"+\", \"/\", \"-\", \"=\", \"12\", \"28\", \"40\", \"16\", \" \", \"I\",\n \"[CLS]\", \"[SEP]\", \"[UNK]\", \"[PAD]\", \"[MASK]\"\n]\npad = ''\ntest_paras = [\n # test chinese text\n dict(\n first=1,\n last=4,\n expect_str=[[['床'], ['前'], ['明'], ['月'], ['光']],\n [['疑'], ['是'], ['地'], ['上'], ['霜']],\n [['举'], ['头'], ['望'], ['明'], ['月']],\n [['低'], ['头'], ['思'], ['故'], ['乡']]],\n vocab_list=vocab_bert\n ),\n # test english text\n dict(\n first=5,\n last=5,\n expect_str=[[['i', pad],\n [\"am\", pad],\n ['mak', '##ing'],\n ['small', pad],\n ['mistake', '##s'],\n ['during', pad],\n ['work', '##ing'],\n ['hour', '##s']]],\n lower_case=True,\n vocab_list=vocab_bert\n ),\n dict(\n first=5,\n last=5,\n expect_str=[[['I', pad],\n [\"am\", pad],\n ['mak', '##ing'],\n ['small', pad],\n ['mistake', '##s'],\n ['during', pad],\n ['work', '##ing'],\n ['hour', '##s']]],\n lower_case=False,\n vocab_list=vocab_bert\n ),\n # test emoji tokens\n dict(\n first=6,\n last=7,\n expect_str=[\n [['😀'], ['嘿'], ['嘿'], ['😃'], ['哈'], ['哈'], ['😄'], ['大'], ['笑'], ['😁'], ['嘻'], ['嘻']],\n [['繁'], ['體'], ['字']]],\n normalization_form=nlp.utils.NormalizeForm.NFKC,\n vocab_list=vocab_bert\n ),\n # test preserved tokens\n dict(\n first=8,\n last=12,\n expect_str=[\n [['[UNK]'], ['[CLS]']],\n [['[UNK]'], ['[SEP]']],\n [['[UNK]'], ['[UNK]']],\n [['[UNK]'], ['[PAD]']],\n [['[UNK]'], ['[MASK]']],\n ],\n lower_case=False,\n vocab_list=vocab_bert,\n preserve_unused_token=True,\n ),\n # test special symbol\n dict(\n first=13,\n last=13,\n expect_str=[[['12'], ['+'], ['/'], ['-'], ['28'], ['='], ['40'], ['/'], ['-'], ['16']]],\n preserve_unused_token=True,\n vocab_list=vocab_bert\n ),\n # test non-default parms\n dict(\n first=8,\n last=8,\n expect_str=[\n [['[UNK]'], [' '], ['[CLS]']],\n ],\n lower_case=False,\n vocab_list=vocab_bert,\n preserve_unused_token=True,\n keep_whitespace=True\n ),\n dict(\n first=8,\n last=8,\n expect_str=[\n [['unused'], [' '], ['[CLS]']],\n ],\n lower_case=False,\n vocab_list=vocab_bert,\n 
preserve_unused_token=True,\n keep_whitespace=True,\n unknown_token=''\n ),\n dict(\n first=8,\n last=8,\n expect_str=[\n [['unused'], [' '], ['['], ['CLS'], [']']],\n ],\n lower_case=False,\n vocab_list=vocab_bert,\n preserve_unused_token=False,\n keep_whitespace=True,\n unknown_token=''\n ),\n]\n\n\ndef check_bert_tokenizer(first, last, expect_str,\n vocab_list,\n suffix_indicator='##',\n max_bytes_per_token=100, unknown_token='[UNK]',\n lower_case=False, keep_whitespace=False,\n normalization_form=nlp.utils.NormalizeForm.NONE,\n preserve_unused_token=False):\n dataset = ds.TextFileDataset(BERT_TOKENIZER_FILE, shuffle=False)\n if first > 1:\n dataset = dataset.skip(first - 1)\n if last >= first:\n dataset = dataset.take(last - first + 1)\n vocab = nlp.Vocab.from_list(vocab_list)\n tokenizer_op = nlp.BertTokenizer(\n vocab=vocab, suffix_indicator=suffix_indicator,\n max_bytes_per_token=max_bytes_per_token, unknown_token=unknown_token,\n lower_case=lower_case, keep_whitespace=keep_whitespace,\n normalization_form=normalization_form,\n preserve_unused_token=preserve_unused_token)\n dataset = dataset.map(operations=tokenizer_op)\n count = 0\n for i in dataset.create_dict_iterator():\n text = nlp.to_str(i['text'])\n logger.info(\"Out:\", text)\n logger.info(\"Exp:\", expect_str[count])\n np.testing.assert_array_equal(text, expect_str[count])\n count = count + 1\n\n\ndef test_bert_tokenizer():\n \"\"\"\n Test WordpieceTokenizer\n \"\"\"\n for paras in test_paras:\n check_bert_tokenizer(**paras)\n\n\nif __name__ == '__main__':\n test_bert_tokenizer()\n","sub_path":"tests/ut/python/dataset/test_bert_tokenizer.py","file_name":"test_bert_tokenizer.py","file_ext":"py","file_size_in_byte":5894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"466306696","text":"import requests \r\nfrom bs4 import BeautifulSoup\r\n\r\ndef define(word):\r\n\turl = \"http://www.dictionary.reference.com/browse/{todefine}?s=t\".format(todefine=word)\r\n\tr = requests.get(url)\r\n\tsoup = BeautifulSoup(r.content)\r\n\r\n\tdefine = soup.find(\"div\", {\"class\": \"def-content\"})\r\n\treturn define.text.encode('utf-8')\r\n","sub_path":"mysite/words/getdef.py","file_name":"getdef.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"413063156","text":"import dbus\nimport dbus.service\nimport random\nimport time\nimport os\n\nfrom mkbackup_btrfs_config import Config, MountInfo, connect, Myos, __version__\n\nconfig = Config()\n\nclass MkBackup:\n def __init__(self, bus_name, base_path):\n# self._bus = dbus.SystemBus()\n# self.notification = Notification()\n Intervals(bus_name, base_path)\n\n for intv in config.ListIntervals():\n Properties(bus_name, os.path.join(base_path, intv), intv)\n\nclass Intervals(dbus.service.Object):\n def __init__(self, bus_name, bus_path):\n super().__init__(bus_name, bus_path)\n\n @dbus.service.method(dbus_interface='at.xundeenergie.mkbackup.Intervals', \n in_signature='', out_signature='v')\n def Names(self):\n return config.ListIntervals()\n \nclass Properties(dbus.service.Object):\n def __init__(self, bus_name, bus_path, interval):\n super().__init__(bus_name, bus_path)\n self.interface = \"at.xundeenergie.mkbackup.Status\"\n self.interval = interval\n self.STATI = ['reset', 'stop', 'running', 'finished']\n self.properties = dict()\n self.properties[self.interface] = dict()\n self.properties[self.interface]['progress'] = 0 # 0-100 \n 
self.properties[self.interface]['status'] = 'stop' # stop, running, finished, reset\n self.properties[self.interface]['transfer'] = config.getTransfer(interval)\n self.properties[self.interface]['lastrun'] = 0 # datetime\n self.properties[self.interface]['finished'] = True # Boolean\n self.properties[self.interface]['name'] = interval # Boolean\n self.properties['function'] = dict()\n self.properties['function']['progress'] = self.update_progress\n self.properties['function']['status'] = self.update_status\n# self.properties['function']['lastrun'] = self.update_lastrun\n from dbus import Interface\n \n def update_progress(self, interface, incr):\n print(\"Update progress: %i / %i, %s\" % (float(incr),\n self.properties[interface]['progress'],self.properties[interface]['status']))\n if self.properties[interface]['status'] == 'running':\n if 0 < self.properties[interface]['progress'] + float(incr) < 100:\n #self.properties[interface]['progress'] = self.properties[interface]['progress'] + float(incr)\n self.properties[interface]['progress'] += float(incr)\n elif self.properties[interface]['progress'] + float(incr) >= 100:\n self.properties[interface]['progress'] = 99\n else:\n print('B', incr, type(incr))\n return self.properties[interface]['progress']\n\n def update_status(self, interface, status):\n if status in self.STATI:\n print(\"Update status: %s\" % status)\n print(\"Status: \", self.properties[interface]['status'])\n if status == 'finished':\n self.properties[interface]['status'] = status\n self.properties[interface]['progress'] = 100\n elif status == 'stop':\n self.properties[interface]['status'] = status\n self.properties[interface]['progres'] = 0\n elif status == 'reset':\n self.properties[interface]['status'] = 'running'\n self.properties[interface]['progress'] = 0\n print(\"status: \", self.properties[interface]['status'])\n return self.properties[interface]['status']\n\n @dbus.service.method(dbus.PROPERTIES_IFACE,\n in_signature='ss', out_signature='v')\n def Get(self, interface_name, property_name):\n return self.GetAll(interface_name)[property_name]\n\n @dbus.service.method(dbus.PROPERTIES_IFACE,\n in_signature='s', out_signature='a{sv}')\n def GetAll(self, interface_name):\n if interface_name == self.interface:\n return self.properties[interface_name]\n else:\n raise dbus.exceptions.DBusException(\n 'at.xundeenergie.mkbackup.UnknownInterface',\n 'The Foo object does not implement the %s interface'\n % interface_name)\n\n @dbus.service.method(dbus.PROPERTIES_IFACE,\n in_signature='ssv')\n def Set(self, interface_name, property_name, new_value):\n # validate the property name and value, update internal state…\n \"\"\"https://recalll.co/ask/v/topic/D-Bus-D-Feet-Send-Dictionary-of-String%2CVariants-in-Python-Syntax/5565e1372bd273d7108b7b82\n __import__('gi.repository.GLib', globals(), locals(), ['Variant']).Variant(\"s\", \"value\")\"\"\"\n if interface_name in self.properties:\n if property_name in self.properties[interface_name]:\n func = self.properties['function'].get(property_name)\n new_value = func(interface_name, new_value)\n #self.properties[str(interface_name)][str(property_name)] = new_value\n self.PropertiesChanged(interface_name,\n { property_name: new_value, 'interval': self.interval}, [])\n else:\n raise dbus.exceptions.DBusException(\n 'at.xundeenergie.mkbackup.UnknownInterface',\n 'The Foo object does not implement the %s interface'\n % interface_name)\n\n @dbus.service.signal(dbus.PROPERTIES_IFACE,\n signature='sa{sv}as')\n def PropertiesChanged(self, 
interface_name, changed_properties,\n invalidated_properties):\n pass\n","sub_path":"mkbackup-btrfs/usr/lib/python3/dist-packages/mkbackup-dbus/services/mkbackup.py","file_name":"mkbackup.py","file_ext":"py","file_size_in_byte":5541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"637860028","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom Qt import QtWidgets, QtCore\n\nfrom NodeGraphQt import (NodeGraph,\n PropertiesBinWidget,\n NodeTreeWidget,\n update_nodes_by_down,\n setup_context_menu)\nfrom example_nodes import Nodes\nfrom os.path import join\n\nif __name__ == '__main__':\n QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)\n app = QtWidgets.QApplication([])\n\n # create node graph.\n graph = NodeGraph()\n\n # set up default menu and commands.\n setup_context_menu(graph)\n\n # widget used for the node graph.\n graph_widget = graph.widget\n graph_widget.resize(1100, 800)\n graph_widget.show()\n\n # show the properties bin when a node is \"double clicked\" in the graph.\n properties_bin = PropertiesBinWidget(node_graph=graph)\n properties_bin.setWindowFlags(QtCore.Qt.Tool)\n\n def show_prop_bin(node):\n if not properties_bin.isVisible():\n properties_bin.show()\n graph.node_double_clicked.connect(show_prop_bin)\n\n # show the nodes list when a node is \"double clicked\" in the graph.\n node_tree = NodeTreeWidget(node_graph=graph)\n\n def show_nodes_list(node):\n if not node_tree.isVisible():\n node_tree.update()\n node_tree.show()\n graph.node_double_clicked.connect(show_nodes_list)\n\n # registered nodes.\n [graph.register_node(n) for n in Nodes]\n\n # load preset session\n graph.load_session(join('example_nodes', 'networks', 'example.nodes'))\n\n # update nodes\n update_nodes_by_down(graph.all_nodes())\n\n app.exec_()\n","sub_path":"example_math_nodes.py","file_name":"example_math_nodes.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"565429249","text":"from urlparse import urljoin, urlparse\n\nfrom scrapy.http import FormRequest, Request\nfrom scrapy.spider import Spider\n\nfrom documents_download.items import DocumentsDownloadItem\n\n\nclass OsceSpider(Spider):\n name = 'osce_spider'\n start_urls = [\"http://www.osce.org/resources/documents\"]\n\n base_url = \"http://www.osce.org\" # 'To append with relative download url.\n page_to_crawl = 1\n\n def parse(self, response):\n data = {\"filters_1\": '15',\n \"date_from\": '1994',\n \"document_type\": '462',\n 'language': 'en',\n 'op': 'Apply Filters',\n 'solrsort_field': 'score',\n 'solrsort_order': 'desc',\n 'rows': '10',\n 'filters_2': '0',\n 'filters_3': '0',\n 'filters_4': '0',\n 'date_to': '',\n 'form_build_id': 'form-b8269dd21d4683ff29b6cb35f1ada469',\n 'form_id': 'osce_search_form_resources'\n }\n url = urljoin(self.base_url, \"/resources/documents\")\n yield FormRequest(url=url,\n formdata=data,\n callback=self.parse_listing)\n\n def parse_listing(self, response):\n doc_download_links = response.xpath(\"//ul[@class='links']//a[contains(text(),'English')]//@href\").extract()\n for link in doc_download_links:\n full_link = urljoin(self.base_url, link)\n item = DocumentsDownloadItem()\n item['file_url'] = full_link\n item['file_name'] = self.get_title(full_link) # item ID will be the file name\n item['page'] = self.page_to_crawl\n yield item\n\n next_page_url = 
response.xpath(\"//li[contains(@class,'pager-current')]/following-sibling::li[1]//a//@href\").extract()\n if next_page_url:\n next_page_full_url = urljoin(self.base_url, next_page_url[0])\n yield Request(url=next_page_full_url,\n callback=self.parse_listing)\n self.page_to_crawl += 1\n\n def get_title(self, url):\n query_path = urlparse(url).path\n if query_path:\n parts = query_path.split('/') # example /pc/12323\n if len(parts) == 3:\n doc_id = parts[2]\n else:\n doc_id = parts[1]\n return doc_id\n\n\n\n\n","sub_path":"documents_download/documents_download/spiders/OsceSpider.py","file_name":"OsceSpider.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"148338043","text":"def get_one_cycle_schedule_fn(lr_min, lr_max, n_epochs):\n\n x0 = 0\n x1 = (45*n_epochs)//100\n x2 = (90*n_epochs)//100\n x3 = n_epochs\n\n def schedule(x):\n if x <= x1:\n return (lr_min * (x1-x) + lr_max * (x - x0))/(x1 - x0)\n\n if x1 < x and x < x2:\n return (lr_max * (x2 - x) + lr_min * (x - x1))/(x2 - x1)\n\n if x2 <= x:\n return (lr_min * (x3 - x) + lr_min/10 * (x - x2))/(x3 - x2)\n\n return schedule\n\n\n","sub_path":"end-to-end/callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"361453755","text":"import cv2\r\nimport numpy as np \r\n\r\ndef loadImage(img_file):\r\n img = cv2.imread(img_file) # RGB order\r\n if img.shape[0] == 2: img = img[0]\r\n if len(img.shape) == 2 : img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)\r\n if img.shape[2] == 4: img = img[:,:,:3]\r\n img = np.array(img)\r\n\r\n return img\r\n\r\ndef tlwh_2_maxmin(bboxes):\r\n new_bboxes = []\r\n for bbox in bboxes:\r\n xmin, ymin, xmax, ymax = bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]\r\n new_bboxes.append([xmin, ymin, xmax, ymax])\r\n new_bboxes = np.array(new_bboxes)\r\n return new_bboxes\r\n\r\nbboxes = [[[221.23238, 42.006588],\r\n [418.39377, 29.14823 ],\r\n [426.98108, 160.82007 ],\r\n [229.8197, 173.67842 ]],\r\n\r\n [[ 77.333336, 57.333332],\r\n [209.33333, 57.333332],\r\n [209.33333, 193.33333 ],\r\n [ 77.333336, 193.33333 ]],\r\n\r\n [[ 96.11382, 197.67732 ],\r\n [440.59128, 192.57397 ],\r\n [442.72766, 336.7794 ],\r\n [ 98.250206, 341.88272 ]]]\r\n\r\n\r\ndef merge_bb(bboxes, dist = 20):\r\n merge_bb = []\r\n for i in bboxes:\r\n if (len(i) > 1):\r\n merge_bb.append([i[0][0], i[-1][1], i[-1][2], i[0][3]])\r\n return merge_bb\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\ndef order_points(pts):\r\n\t# initialzie a list of coordinates that will be ordered\r\n\t# such that the first entry in the list is the top-left,\r\n\t# the second entry is the top-right, the third is the\r\n\t# bottom-right, and the fourth is the bottom-left\r\n\trect = np.zeros((4, 2), dtype = \"float32\")\r\n\t# the top-left point will have the smallest sum, whereas\r\n\t# the bottom-right point will have the largest sum\r\n\ts = pts.sum(axis = 1)\r\n\trect[0] = pts[np.argmin(s)]\r\n\trect[2] = pts[np.argmax(s)]\r\n\t# now, compute the difference between the points, the\r\n\t# top-right point will have the smallest difference,\r\n\t# whereas the bottom-left will have the largest difference\r\n\tdiff = np.diff(pts, axis = 1)\r\n\trect[1] = pts[np.argmin(diff)]\r\n\trect[3] = pts[np.argmax(diff)]\r\n\t# return the ordered coordinates\r\n\treturn rect\r\n\r\ndef four_point_transform(image, pts):\r\n\t# obtain a consistent order of 
the points and unpack them\r\n\t# individually\r\n\trect = order_points(pts)\r\n\t(tl, tr, br, bl) = rect\r\n\t# compute the width of the new image, which will be the\r\n\t# maximum distance between bottom-right and bottom-left\r\n\t# x-coordiates or the top-right and top-left x-coordinates\r\n\twidthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\r\n\twidthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\r\n\tmaxWidth = max(int(widthA), int(widthB))\r\n\t# compute the height of the new image, which will be the\r\n\t# maximum distance between the top-right and bottom-right\r\n\t# y-coordinates or the top-left and bottom-left y-coordinates\r\n\theightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\r\n\theightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\r\n\tmaxHeight = max(int(heightA), int(heightB))\r\n\t# now that we have the dimensions of the new image, construct\r\n\t# the set of destination points to obtain a \"birds eye view\",\r\n\t# (i.e. top-down view) of the image, again specifying points\r\n\t# in the top-left, top-right, bottom-right, and bottom-left\r\n\t# order\r\n\tdst = np.array([\r\n\t\t[0, 0],\r\n\t\t[maxWidth - 1, 0],\r\n\t\t[maxWidth - 1, maxHeight - 1],\r\n\t\t[0, maxHeight - 1]], dtype = \"float32\")\r\n\t# compute the perspective transform matrix and then apply it\r\n\tM = cv2.getPerspectiveTransform(rect, dst)\r\n\twarped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))\r\n\t# return the warped image\r\n\treturn warped\r\n","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"601804953","text":"# !-*-coding:utf-8-*-\r\nimport sys\r\n\r\nimport pandas as pd\r\nfrom QTableModel import pandasModel # Импортируем для отображения dataframe\r\nfrom Final_tample3 import Ui_MainWindow # Наш шаблон\r\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog\r\nfrom PyQt5 import QtWidgets\r\nfrom PyQt5 import QtCore\r\n# from Mpv_dataframe_graphic import drow_graphic\r\nfrom graphics import MyMpvCanvas, NavigationToolbar\r\nimport matplotlib\r\n\r\n\r\nclass MainWindow(QMainWindow, Ui_MainWindow):\r\n def __init__(self): # ---шаблон\r\n\r\n QMainWindow.__init__(self) # Констукторы от родительких классов -- - шаблон\r\n self.setupUi(self) # Передаем родительскому конструктору настройки -- -- шаблон\r\n\r\n \"\"\"======================Пример внедрения графика в интерфейс===================================================================\"\"\"\r\n # Для отрисовки графика\r\n # Создаем размещение кладем в него виджет\r\n self.companovka_for_mpv = QtWidgets.QVBoxLayout(self.graphics_widget)\r\n # Создаем наш объект графика\r\n self.mycanvas_object = MyMpvCanvas(self, width=5, height=4, dpi=100)\r\n\r\n # Создаем dataframe\r\n df2 = pd.read_excel('file2.xlsx') # Читаем файл №2\r\n df2.plot(ax=self.mycanvas_object.axes)\r\n\r\n # Передаем аргументы\r\n # кладем график объект в размещение\r\n self.companovka_for_mpv.addWidget(self.mycanvas_object)\r\n # создаем объект панель инструментов для работы с графиком\r\n # В качесве аргумента передаем объект график\r\n self.canvastoolbar = NavigationToolbar(self.mycanvas_object, self)\r\n # Кладем понель инструментов с графиком в размещение\r\n self.companovka_for_mpv.addWidget(self.canvastoolbar)\r\n # Отобразить график\r\n self.graphics_widget.show()\r\n 
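`four_point_transform` above accepts the four corners in any order because `order_points` sorts them first. A quick usage sketch, with an invented image path and corner coordinates:

```python
# Usage sketch for the perspective-warp helper above; the file name and
# corner coordinates are illustrative placeholders.
import cv2
import numpy as np

image = cv2.imread('document.jpg')
pts = np.array([(73, 239), (356, 117), (475, 265), (187, 441)], dtype='float32')
warped = four_point_transform(image, pts)   # top-down view of the region
cv2.imwrite('warped.jpg', warped)
```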
\"\"\"===============================================================================================\"\"\"\r\n\r\n \"\"\"Инициализируем пустые датафреймы для дальнейшей работы\"\"\"\r\n self.new_df1 = pd.DataFrame()\r\n self.new_df2 = pd.DataFrame()\r\n\r\n self.new_df_for_saving = pd.DataFrame()\r\n\r\n \"\"\" кнопки выбора каталогов\r\n Привязка кнопок берем с макета подключить(self.наш метод)\"\"\"\r\n self.pushButton_cat1.clicked.connect(self.insert_dataframe1)\r\n self.pushButton_cat2.clicked.connect(self.insert_dataframe2)\r\n\r\n # Кнопка Импорт --> в представление\r\n self.pushButton.clicked.connect(self.comparing_button_clicked)\r\n\r\n \"\"\"Привязка кнопок для переключения между страницами stacke_dwidget\"\"\"\r\n self.import_data_button.clicked.connect(self.import_data_button_clicked)\r\n self.comparing_button.clicked.connect(self.comparing_button_clicked)\r\n self.result_button.clicked.connect(self.result_button_clicked)\r\n self.graphics_button.clicked.connect(self.graphics_button_clicked)\r\n\r\n \"\"\"Привязка кнопки сравнить в comparing_cats\"\"\"\r\n self.compare_cats.clicked.connect(self.compare_dataframe)\r\n\r\n \"\"\"Привязка кнопки(тригера) в меню бар \"\"\"\r\n self.action_save_as.triggered.connect(self.saveResult_to_excel)\r\n\r\n # ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////\r\n # Подключаем двойное нажатие на колонку в TableView для того чтобы поменять название\r\n # При двойном нажатии будет обработчик self.renameColumn\r\n self.tableView_1.horizontalHeader().sectionDoubleClicked.connect(self.renameColumn_tableview_1)\r\n self.tableView_2.horizontalHeader().sectionDoubleClicked.connect(self.renameColumn_tableview_2)\r\n self.result_tableView.horizontalHeader().sectionDoubleClicked.connect(self.renameColumn_result_tableView)\r\n\r\n \"\"\"Оброботка кнопки выбора каталога 1\"\"\"\r\n\r\n def insert_dataframe1(self):\r\n \"\"\"Метод который отображает данные в tableView\"\"\"\r\n \"\"\"Открывается filedialog и берет путь\"\"\"\r\n file_path = QFileDialog.getOpenFileName(self, \"Выбрать каталог excel \", \"excel\")[0]\r\n if len(file_path) > 0:\r\n\r\n if 'xlsx' in file_path:\r\n self.new_df1 = pd.read_excel(file_path) # Читаем файл №1 по пути который получили\r\n # self.new_df1 = df1 # Записываем в mew_df1 для дальнейшей работы с ним\r\n\r\n \"\"\"Создаем объект для первой таблицы из pandasModel\"\"\"\r\n self.dataframe1model = pandasModel(self.new_df1)\r\n self.plainTextEdit_cat1.setPlainText(file_path) # путь в текстовое поле 1\r\n self.tableView_cat1.setModel(self.dataframe1model) # В view import\r\n self.tableView_1.setModel(self.dataframe1model) # Кладем даные в tableview_1\r\n\r\n\r\n else:\r\n self.msgBox_another_file = QtWidgets.QMessageBox()\r\n self.msgBox_another_file.setWindowTitle('Ошибка')\r\n self.msgBox_another_file.setText(\"Вы выбрали не верный формат файла\")\r\n self.msgBox_another_file.setStandardButtons(QtWidgets.QMessageBox.Ok)\r\n self.msgBox_another_file.exec_()\r\n else:\r\n\r\n pass # Заглушка\r\n\r\n \"\"\"Оброботка кнопки выбора каталога 1\"\"\"\r\n\r\n def insert_dataframe2(self):\r\n \"\"\"Получаем путь к файлу\"\"\"\r\n file_path = QFileDialog.getOpenFileName(self, \"Выбрать каталог excel \")[0]\r\n\r\n \"\"\"Проверка на провильность выбронного пути \"\"\"\r\n if len(file_path) > 0: # Если выбрано что-то\r\n\r\n if 'xlsx' in file_path: # Проверяем есть ли путь к excel\r\n self.new_df2 = pd.read_excel(file_path) # Читаем файл №2 по пути который получили\r\n # 
self.new_df2 = df2 # Записываем в new_df2 для дальнейшей работы с ним\r\n\r\n \"\"\"Создаем модель для представления с помощью класса pandasModel\"\"\"\r\n self.dataframe2model = pandasModel(self.new_df2)\r\n self.plainTextEdit_cat2.setPlainText(file_path) # путь в текстовое поле 2\r\n\r\n self.tableView_cat2.setModel(self.dataframe2model) # В view import\r\n self.tableView_2.setModel(self.dataframe2model) # Кладем даные в tableview\r\n\r\n\r\n\r\n else:\r\n self.msgBox_another_file = QtWidgets.QMessageBox()\r\n self.msgBox_another_file.setWindowTitle('Ошиб��а')\r\n self.msgBox_another_file.setText(\"Вы выбрали не верный формат файла\")\r\n self.msgBox_another_file.setStandardButtons(QtWidgets.QMessageBox.Ok)\r\n self.msgBox_another_file.exec_()\r\n\r\n else:\r\n pass # Заглушка если нажата отмена или закрыть окно\r\n\r\n \"\"\"Метод который будет сравнивать каталоги \"\"\"\r\n\r\n def compare_dataframe(self):\r\n \"\"\"При нажатии на кнопку он будет проверять выбраны ли данные \"\"\"\r\n if self.new_df1.empty or self.new_df2.empty: # Проверка на имеющиеяся данные\r\n self.msgBox_compare_error = QtWidgets.QMessageBox()\r\n self.msgBox_compare_error.setWindowTitle(\"Ошибка\")\r\n self.msgBox_compare_error.setText(\"Вы должны выбрать 2 каталога для сравнения!\")\r\n self.msgBox_compare_error.setStandardButtons(QtWidgets.QMessageBox.Ok)\r\n self.msgBox_compare_error.exec_()\r\n else:\r\n\r\n \"\"\"Если данные выбраны пытаемся их сравнить\"\"\"\r\n try:\r\n \"\"\"Произовдим слияние таблиц по столбцам\"\"\"\r\n \"\"\"Сливаются только те значения кторые есть в обоих таблицах\"\"\"\r\n self.new_df_for_saving = pd.merge(self.new_df1, self.new_df2,\r\n on=[\"year\", \"month\", \"day\", \"hour\", \"min\"],\r\n how=\"inner\")\r\n self.new_df_for_saving['mpv_difference'] = self.new_df_for_saving['mpv_x'] - self.new_df_for_saving[\r\n 'mpv_y']\r\n\r\n # self.new_df_for_saving = result_df # Результат сохраняем в атрибут new_df_for_saving\r\n\r\n \"\"\"Создаем объект из второй таблицы с помощью класса pandasModel\r\n для того чтобы их вывести на result_tableView\"\"\"\r\n self.dataframe_resultmodel = pandasModel(self.new_df_for_saving)\r\n # self.result_tableView = QTableView() # Представление для показа результата\r\n self.result_tableView.setWindowTitle(\"Результат сравнения\")\r\n self.result_tableView.setModel(self.dataframe_resultmodel) # Кладем даные в Qtableview\r\n self.result_tableView.show()\r\n\r\n self.stackedWidget.setCurrentIndex(2)\r\n # Если успешно переход QStackedWidget на resultWidget\r\n\r\n\r\n except KeyError: # Если значения столбцов не совпадают\r\n self.msgBox_compare_error = QtWidgets.QMessageBox()\r\n self.msgBox_compare_error.setWindowTitle(\"Ошибка сравнения\")\r\n self.msgBox_compare_error.setText(\"Название столбцов в 2-х каталогах должны быть одинаковы!\")\r\n self.msgBox_compare_error.setStandardButtons(QtWidgets.QMessageBox.Ok)\r\n self.msgBox_compare_error.exec_()\r\n\r\n \"\"\"Для сохранения результата\"\"\"\r\n\r\n def saveResult_to_excel(self):\r\n \"Запускаем файл диалог который возвращяет путь\"\r\n file_path = QFileDialog.getSaveFileName(self, \"Сохранить в excel\", \"table.xlsx\",\r\n \"Excel(*.xlsx);;\"\r\n \"All Files (*)\")[0]\r\n\r\n if len(file_path) > 0: # Если есть путь\r\n writer = pd.ExcelWriter(file_path, engine='xlsxwriter') # Объявляем путь для сохранения excel\r\n\r\n # Кладем new_df_for_saving dataframe в лист1\r\n self.new_df_for_saving.to_excel(writer, 'Sheet1')\r\n\r\n # Диалоговое окно об учаешном сохранении результата\r\n writer.save()\r\n 
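The `compare_dataframe` method above relies on `pd.merge` adding `_x`/`_y` suffixes to the overlapping `mpv` column. A minimal reproduction on toy data; the column names mirror the app's, the values are invented:

```python
# Toy reproduction of the inner merge and difference column above.
import pandas as pd

df1 = pd.DataFrame({'year': [2020, 2020], 'month': [1, 1], 'day': [1, 2],
                    'hour': [0, 0], 'min': [0, 0], 'mpv': [10.0, 12.0]})
df2 = pd.DataFrame({'year': [2020], 'month': [1], 'day': [1],
                    'hour': [0], 'min': [0], 'mpv': [9.5]})

merged = pd.merge(df1, df2, on=['year', 'month', 'day', 'hour', 'min'], how='inner')
merged['mpv_difference'] = merged['mpv_x'] - merged['mpv_y']  # suffixes from merge
print(merged)   # one row: the 2020-01-01 00:00 reading, difference 0.5
```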
self.msgBoxSave = QtWidgets.QMessageBox()\r\n self.msgBoxSave.setWindowTitle(\"Сохранение файла\")\r\n self.msgBoxSave.setText(\"Файл успешно сохранен!\")\r\n self.msgBoxSave.setStandardButtons(QtWidgets.QMessageBox.Ok)\r\n self.msgBoxSave.exec()\r\n else:\r\n pass\r\n\r\n \"\"\" Обработка кнопок для переключения между страницами stacke_dwidget\"\"\"\r\n\r\n # self.import_data_button.clicked.connect(self.import_data_button_clicked)\r\n # self.comparing_button.clicked.connect(self.comparing_button_clicked)\r\n # self.result_button.clicked.connect(self.result_button_clicked)\r\n # self.graphics_button.clicked.connect(self.graphics_button_clicked)\r\n\r\n def import_data_button_clicked(self):\r\n self.stackedWidget.setCurrentIndex(0)\r\n\r\n def comparing_button_clicked(self):\r\n self.stackedWidget.setCurrentIndex(1)\r\n\r\n def result_button_clicked(self):\r\n self.stackedWidget.setCurrentIndex(2)\r\n\r\n def graphics_button_clicked(self):\r\n self.stackedWidget.setCurrentIndex(3)\r\n\r\n '''Обработчик двойного клика столбцов в представлении tableview_1'''\r\n\r\n def renameColumn_tableview_1(self, index):\r\n oldTitle = self.dataframe1model.headerData(\r\n index, QtCore.Qt.Horizontal, QtCore.Qt.DisplayRole)\r\n\r\n newTitle, accepted = QtWidgets.QInputDialog.getText(\r\n self, 'Изменение названия колонки', oldTitle)\r\n\r\n if accepted and oldTitle != newTitle:\r\n self.dataframe1model.setHeaderData(\r\n index, QtCore.Qt.Horizontal, newTitle, QtCore.Qt.DisplayRole)\r\n\r\n # Перезапись названий колонок new_df1\r\n old_name = self.new_df1.columns[index] # Получаем старое значение\r\n self.new_df1 = self.new_df1.copy()\r\n self.new_df1.rename(columns={old_name: newTitle}, inplace=True) # Заменяем название столбца\r\n\r\n '''Обработчик двойного клика столбцов в представлении tableview_2'''\r\n\r\n def renameColumn_tableview_2(self, index):\r\n oldTitle = self.dataframe2model.headerData(\r\n index, QtCore.Qt.Horizontal, QtCore.Qt.DisplayRole)\r\n\r\n newTitle, accepted = QtWidgets.QInputDialog.getText(\r\n self, 'Изменение названия колонки', oldTitle)\r\n\r\n if accepted and oldTitle != newTitle:\r\n self.dataframe2model.setHeaderData(\r\n index, QtCore.Qt.Horizontal, newTitle, QtCore.Qt.DisplayRole)\r\n\r\n # Перезапись названий колонок new_df2\r\n old_name = self.new_df2.columns[index] # Получаем старое значение\r\n self.new_df2 = self.new_df2.copy() # копируем dataframe чтобы внести изменение\r\n self.new_df2.rename(columns={old_name: newTitle}, inplace=True) # Заменяем название\r\n\r\n '''Обработчик двойного клика столбцов в представлении renameColumn_result_tableView'''\r\n\r\n def renameColumn_result_tableView(self, index):\r\n oldTitle = self.dataframe_resultmodel.headerData(\r\n index, QtCore.Qt.Horizontal, QtCore.Qt.DisplayRole)\r\n\r\n newTitle, accepted = QtWidgets.QInputDialog.getText(\r\n self, 'Изменение названия колонки ', oldTitle)\r\n\r\n if accepted and oldTitle != newTitle:\r\n self.dataframe_resultmodel.setHeaderData(\r\n index, QtCore.Qt.Horizontal, newTitle, QtCore.Qt.DisplayRole)\r\n\r\n # Перезапись названий колонок self.new_df_for_saving\r\n old_name = self.new_df_for_saving.columns[index] # Получаем старое значение\r\n self.new_df_for_saving = self.new_df_for_saving.copy() # копируем dataframe чтобы внести изменение\r\n\r\n self.new_df_for_saving.rename(columns={old_name: newTitle}, inplace=True) # Заменяем название\r\n\r\n # self.widget = QtWidgets.QWidget()\r\n # self.widget.setLayout(layout)\r\n # self.widget.show()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app = 
QApplication(sys.argv)\r\n win = MainWindow()\r\n\r\n win.show()\r\n sys.exit(app.exec_())\r\n","sub_path":"Diploma17.02.20 UsingStackedWidget/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":16442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"435042447","text":"#!/usr/bin/env python\nimport sys\nsys.path.append('/home/kshitij/PS/scripts/')\nfrom scripts import rf01\nfrom scripts import xgb01\nfrom scripts import adaB_svm\nfrom scripts import adaB_DT\n\nprint(\"Combining Method : Default\\n\")\n\nprint(\"Data Used is data/australian.csv\\n\")\n\nprint(\"AdaBoost + Decision Trees : 0\")\nprint(\"AdaBoost + SVM : 1\")\nprint(\"XgBoost : 2\")\nprint(\"Random_Forest : 3 \\n\")\nans = int(input(\"ENTER_YOUR_SELECTION : \\n\"))\n\nif ans == 3:\n print(\"\\nRandom_Forest : 3\")\n cc = rf01.run()\nelif ans == 2:\n print(\"\\nXgBoost : 2\")\n xgb01.run()\nelif ans == 1:\n print(\"\\nAdaBoost + SVM : 1\")\n adaB_svm.run()\nelif ans == 0:\n print(\"\\nAdaBoost + Decision Trees : 0\")\n adaB_DT.run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"262548935","text":"#!/usr/bin/python3\n\ndef main():\n '''\n Main function\n '''\n\n # read the number of integers\n n = int(input())\n\n t = tuple([int(i) for i in input().split()])\n\n print(hash(t))\n\n# if file is run\nif __name__=='__main__':\n\n # run main function\n main()\n","sub_path":"python/data_types/tuples.py","file_name":"tuples.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"270235233","text":"from flask_restful import Resource\nfrom flask import request\nfrom services.random_sample import sample\n# import _pickle as pkl\n\nimport json\n\n\nclass FetchTopReviews(Resource):\n def __init__(self):\n # self.file = open(\"sample_review.p\", 'rb')\n # self.reviews = pkl.load(self.file)\n pass\n\n def post(self):\n queries = json.loads(request.data.decode('utf-8'))['data']\n\n query_length = len(queries)\n\n k = int(request.args.get(\"k\", 20))\n sample_reviews = sample.get_sample()\n # sample_reviews = self.reviews\n top_k = []\n queries_set = set()\n for word in queries:\n queries_set.add(word.lower())\n for index, review in enumerate(sample_reviews):\n review_set = review['review/text1']\n review_score = review['review/score']\n\n diff_set = queries_set - review_set\n score = query_length - len(diff_set)\n\n top_k = self.insert_ele(top_k, k, (index, score/query_length, review_score))\n\n print(top_k)\n output = dict()\n data = []\n for ki in top_k:\n out_doc = dict()\n out_doc['_id'] = sample_reviews[ki[0]]['_id']\n out_doc['product/productId'] = sample_reviews[ki[0]]['product/productId']\n out_doc['review/userId'] = sample_reviews[ki[0]]['review/userId']\n out_doc['review/profileName'] = sample_reviews[ki[0]]['review/profileName']\n out_doc['review/helpfulness'] = sample_reviews[ki[0]]['review/helpfulness']\n out_doc['review/score'] = sample_reviews[ki[0]]['review/score']\n out_doc['review/time'] = sample_reviews[ki[0]]['review/time']\n out_doc['review/summary'] = sample_reviews[ki[0]]['review/summary']\n out_doc['review/text'] = sample_reviews[ki[0]]['review/text']\n\n data.append(out_doc)\n output['data'] = data\n output['count'] = len(data)\n\n return output\n\n def insert_ele(self, sorted_listed, k, n):\n # Searching for the 
position\n i = 0\n while i < len(sorted_listed):\n if sorted_listed[i][1] <= n[1]:\n break\n i += 1\n\n while i < len(sorted_listed) and sorted_listed[i][1] == n[1]:\n if sorted_listed[i][2] <= n[2]:\n break\n i += 1\n\n # Inserting n in the list\n sorted_listed = sorted_listed[:i] + [n] + sorted_listed[i:]\n\n if len(sorted_listed) > k:\n return sorted_listed[:k]\n return sorted_listed\n","sub_path":"src/resources/fetchtopreviews.py","file_name":"fetchtopreviews.py","file_ext":"py","file_size_in_byte":2557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"319111872","text":"import re\nimport json\nimport os\nfrom flask import *\napp = Flask(__name__)\n\nimport logging\nlog = logging.getLogger('werkzeug')\nlog.setLevel(logging.ERROR)\n\n@app.template_filter()\ndef msgSafe(msg):\n\tmsg = re.sub(r\"[^a-zA-Z]\", \"\", msg)\n\treturn msg\n\n@app.route('/favicon.ico') \ndef favicon(): \n return redirect(url_for('static', filename='favicon.ico'), code=302)\n\n@app.route('/append_anime', methods=['POST']) # Per aggiungere un anime\ndef append_anime():\n\tres = request.form\n\tdata = {\n\t\t\"title\": request.form['title'],\n\t\t\"season\": request.form['season'],\n\t\t\"link\": request.form['link']\n\t}\n\tappendAnime(data)\n\treturn redirect(url_for('index'))\n\n@app.route('/delete_anime', methods=['POST']) # Per aggiungere un anime\ndef delete_anime():\n\tres = request.form\n\t# print(res, flush=True)\n\tdeleteAnime(res['delete_anime'])\n\n\treturn redirect(url_for('index'))\n\n@app.route('/index')\n@app.route('/')\ndef index():\n\n\tanime = readData()\n\tenv = getmyenv()\n\treturn render_template('index.html', infos=anime, env=env)\n\n\n\n####### DATA\n\ndef readData():\n\twith open('json/table.json' , 'r') as f:\n\t\treturn json.loads(f.read())\n\ndef writeData(table):\n\tf = open(\"json/table.json\", 'w')\n\tf.write(json.dumps(table, indent=4))\n\tf.close()\n\treturn table\n\ndef deleteAnime(title):\n\n\ttable = readData()\n\n\tfor anime in table:\n\t\tif anime[\"title\"] == title:\n\t\t\ttable.remove(anime)\n\t\t\tbreak\n\n\twriteData(table)\n\ndef appendAnime(data):\n\tdef myOrder(serieInfo):\n\t\treturn serieInfo[\"title\"]\n\n\ttable = readData()\n\n\tfor anime in table:\n\t\tif data[\"title\"] == anime[\"title\"]: # Se esiste già l'anime nella tabella\n\n\t\t\tif data[\"season\"] in anime[\"seasons\"]: # Se esiste già la stagione\n\t\t\t\tanime[\"seasons\"][data[\"season\"]].append(data[\"link\"]) # aggiunge un'altro link\n\t\t\t\t# print(f\"\\n-> È stata aggiunto un altro link per la stagione {season} della serie {SonarrTitle}.\")\n\t\t\telse:\n\t\t\t\tanime[\"seasons\"][data[\"season\"]] = [data[\"link\"]] # inizializza una nuova stagione\n\t\t\t\t# print(f\"\\n-> È stata aggiunta la stagione {season} per la serie {SonarrTitle}.\")\n\n\t\t\tbreak\n\telse: # se non è stato trovato nessun anime\n\t\ttable.append({\n\t\t\t\"title\": data[\"title\"],\n\t\t\t\"seasons\": {data[\"season\"]: [data[\"link\"]]}\n\t\t})\n\t\t# print(f\"\\n-> È stata aggiunta la serie {SonarrTitle}.\")\n\n\ttable.sort(key=myOrder)\n\twriteData(table)\n\n\n### getenv\n\ndef getmyenv():\n\tenv = {}\n\n\tenv[\"ANIME_PATH\"] = os.getenv('ANIME_PATH') # cartella dove si trovano gli anime\n\tenv[\"SONARR_URL\"] = os.getenv('SONARR_URL') # Indirizzo ip + porta di sonarr\n\tenv[\"API_KEY\"] = os.getenv('API_KEY') # Chiave api di sonarr\n\tenv[\"CHAT_ID\"] = os.getenv('CHAT_ID') # telegramm\n\tenv[\"BOT_TOKEN\"] = os.getenv('BOT_TOKEN') # telegramm\n\n\treturn 
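The `insert_ele` helper above maintains a bounded list sorted by score and then by review score. The standard library can do the same in one call; a hedged equivalent using the same `(index, score, review_score)` tuple layout:

```python
# heapq-based alternative to the hand-rolled insert_ele above.
import heapq

def top_k_reviews(scored, k):
    """scored: iterable of (index, score, review_score) tuples."""
    return heapq.nlargest(k, scored, key=lambda t: (t[1], t[2]))
```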
env","sub_path":"config/app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"451068887","text":"# encoding: utf-8\r\n\r\n\"\"\"\r\n@author: ����\r\n@license: Apache Licence\r\n@contact: 744516468@qq.com\r\n@file: xc.py\r\n@time: 2019/5/9 17:08\r\n\"\"\"\r\n\r\nimport json\r\nimport requests\r\nimport csv\r\n\r\n\r\ndef getColumn():\r\n columnss = {}\r\n #系统列表展示字段信息的url\r\n url = 'http://172.28.3.157:8080/xchzz/cenrep/queryByIdGridPanel.action?id=22'\r\n result = json.loads(requests.post(url).content.decode())\r\n columnModel = result['columnModel'].replace('\\n', '')\r\n columnModel = columnModel.split('{')\r\n for c in columnModel:\r\n c = c.replace('}', '')\r\n values = c.split(',')\r\n name = ''\r\n header = ''\r\n for value in values:\r\n value = value.split(':')\r\n if len(value) == 2:\r\n key = value[0].strip()\r\n value = value[1].replace('\\'', '').strip()\r\n if key == 'name':\r\n name = value\r\n elif key == 'header':\r\n header = value\r\n if name != '' and header != '':\r\n columnss[name] = header\r\n print(columnss)\r\n return columnss\r\n\r\n#解析字典url\r\ndef getDict(url):\r\n dict = {}\r\n result = json.loads(requests.post(url).content.decode())\r\n result = result['root']\r\n for r in result:\r\n dict[r['code']] = r['remark']\r\n return dict\r\n\r\ndef getData():\r\n columns = getColumn()\r\n values = list(columns.values())\r\n with open('data\\\\刑满释放_数据中心.csv', 'w', newline='', encoding='utf-8') as csvfile:\r\n writer = csv.DictWriter(csvfile, fieldnames=values)\r\n writer.writeheader()\r\n start = 0\r\n limit = 1000\r\n #列表详情字段信息的url,包含了列表展示数据的总数\r\n url = 'http://172.28.3.157:8080/xchzz/gx/GX_WT_CulpritManage/queryByCondFormWTCulpritManage.action'\r\n #获取Headers中Form Data的内容\r\n data = {'start': start, 'limit': limit, 'streetNo': '', 'streetCommNo': '', 'personName': '', 'idCard': '', 'isExact': 0}\r\n result = json.loads(requests.post(url, data).content.decode())\r\n count = result['totalProperty']\r\n infos = result['root']\r\n if len(infos) > 0:\r\n #街道编号\r\n streetNo = getDict('http://172.28.3.157:8080/xchzz/cenrep/common/loadGeneralCodeCodeLoader.action?codeTableName=GX_DIC_STREETNO&_dc=1560479355857')\r\n #社区编号\r\n commNo = getDict('http://172.28.3.157:8080/xchzz/cenrep/common/loadGeneralCodeCodeLoader.action?codeTableName=GX_DIC_COMMNO&_dc=1560479355859')\r\n #性别\r\n sex = getDict('http://172.28.3.157:8080/xchzz/cenrep/common/loadGeneralCodeCodeLoader.action?codeTableName=STD_GB_SEX&_dc=1560479356719')\r\n #是否重点管理\r\n isStressManage = getDict('http://172.28.3.157:8080/xchzz/cenrep/common/loadGeneralCodeCodeLoader.action?codeTableName=GX_DIC_ISMLPOBJECT&_dc=1560480281641')\r\n #当前状况\r\n nowStatus = getDict('http://172.28.3.157:8080/xchzz/cenrep/common/loadGeneralCodeCodeLoader.action?codeTableName=GX_DIC_NOWSTATUS&_dc=1560480281644')\r\n #表现情况\r\n acquitCircs = getDict('http://172.28.3.157:8080/xchzz/cenrep/common/loadGeneralCodeCodeLoader.action?codeTableName=GX_DIC_ACQUITCIRCS&_dc=1560480281646')\r\n #活动地区类型\r\n areaType = getDict('http://172.28.3.157:8080/xchzz/cenrep/common/loadGeneralCodeCodeLoader.action?codeTableName=GX_DIC_AREATYPE&_dc=1560479366771')\r\n\r\n for info in infos:\r\n row = {}\r\n for item in info:\r\n if item in columns:\r\n if item == 'streetNo' and info[item] in streetNo:\r\n info[item] = streetNo[info[item]]\r\n if item == 'commNo' and info[item] in commNo:\r\n info[item] = commNo[info[item]]\r\n if item == 
'sex' and info[item] in sex:\r\n info[item] = sex[info[item]]\r\n if item == 'isStressManage' and info[item] in isStressManage:\r\n info[item] = isStressManage[info[item]]\r\n if item == 'nowStatus' and info[item] in nowStatus:\r\n info[item] = nowStatus[info[item]]\r\n if item == 'acquitCircs' and info[item] in acquitCircs:\r\n info[item] = acquitCircs[info[item]]\r\n if item == 'areaType' and info[item] in areaType:\r\n info[item] = areaType[info[item]]\r\n row[columns[item]] = info[item]\r\n writer.writerow(row)\r\n pageCount = (count - 1) // limit + 1\r\n if pageCount > 1:\r\n for i in range(1, pageCount + 1):\r\n start = i * limit\r\n data = {'start': start, 'limit': limit, 'streetNo': '', 'streetCommNo': '', 'personName': '',\r\n 'idCard': '', 'isExact': 0}\r\n result = json.loads(requests.post(url, data).content.decode())\r\n infos = result['root']\r\n if len(infos) > 0:\r\n for info in infos:\r\n row = {}\r\n for item in info:\r\n if item in columns:\r\n if item == 'streetNo' and info[item] in streetNo:\r\n info[item] = streetNo[info[item]]\r\n if item == 'commNo' and info[item] in commNo:\r\n info[item] = commNo[info[item]]\r\n if item == 'sex' and info[item] in sex:\r\n info[item] = sex[info[item]]\r\n if item == 'isStressManage' and info[item] in isStressManage:\r\n info[item] = isStressManage[info[item]]\r\n if item == 'nowStatus' and info[item] in nowStatus:\r\n info[item] = nowStatus[info[item]]\r\n if item == 'acquitCircs' and info[item] in acquitCircs:\r\n info[item] = acquitCircs[info[item]]\r\n if item == 'areaType' and info[item] in areaType:\r\n info[item] = areaType[info[item]]\r\n row[columns[item]] = info[item]\r\n writer.writerow(row)\r\n \r\ngetData()\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n","sub_path":"other/xc/xmsf_datasource.py","file_name":"xmsf_datasource.py","file_ext":"py","file_size_in_byte":6719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"436690532","text":"from datetime import datetime\nfrom typing import List\nfrom fastapi import APIRouter\nfrom httpx import AsyncClient\n\nfrom test_fastapi.apis.schemas.person import PersonRequest, Person\n\nrouter = APIRouter()\n\nENDPOINT = \"http://localhost:9518/first_names\"\nPARAMS = (\n (dict(count=100, waiting_time=100), ) * 5 +\n (dict(count=50, waiting_time=80), ) * 5 +\n (dict(count=70, waiting_time=50), ) * 5\n)\n\n@router.get(\"/first_names\")\nasync def first_names() -> List[Person]:\n \"\"\"Ping an external server for first_name collection.\"\"\"\n users = []\n async with AsyncClient() as client:\n for params in PARAMS:\n request = await client.get(ENDPOINT, params=params)\n assert r.status_code == 200\n parsed = PersonResponse.parse_obj(request.json())\n users.extend(parsed.items)\n\n return users\n","sub_path":"test_fastapi/test_fastapi/apis/first_names.py","file_name":"first_names.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"283299775","text":"# @Author : GentleCP\n# @Email : 574881148@qq.com\n# @File : ucashelper.py\n# @Item : PyCharm\n# @Time : 2019/11/28/028 13:58\n# @WebSite : https://www.gentlecp.com\n\nimport sys\nimport click\nimport logging\n\nfrom core import ui\nfrom core.wifi import AccHacker\nfrom core.assess import Assesser\nfrom core.grade import GradeObserver\nfrom core.download import Downloader\nfrom core.wifi import WifiLoginer\n\nimport settings\n\n\n@click.group()\ndef start():\n \"\"\"UCASHelper is a useful tool for UCASer, 
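The `first_names` endpoint above references `r` and `PersonResponse`, neither of which appears in its imports, so it would fail as written. A self-contained sketch of the intended loop; the `PersonResponse` schema here is an assumption, since the original imports only `PersonRequest` and `Person`:

```python
# Hedged, self-contained sketch of the async fetch loop above; the
# PersonResponse schema is assumed, not taken from the original module.
from typing import List
from httpx import AsyncClient
from pydantic import BaseModel

class Person(BaseModel):
    first_name: str

class PersonResponse(BaseModel):
    items: List[Person]

async def fetch_first_names(endpoint: str, params_list) -> List[Person]:
    users: List[Person] = []
    async with AsyncClient() as client:
        for params in params_list:
            response = await client.get(endpoint, params=params)
            response.raise_for_status()  # replaces the bare assert on status
            users.extend(PersonResponse.parse_obj(response.json()).items)
    return users
```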
following are the arguments that you could choose\"\"\"\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s - %(name)s - %(levelname)s - %(filename)s - %(lineno)d:[%(message)s]')\n\n@click.command(name='ui',help='Get UI interface of UCASHelper')\ndef UI():\n ui.main()\n\n\n@click.command(name='down',help='Download resources from sep website')\ndef download_source():\n downloader = Downloader(user_info=settings.USER_INFO,\n urls=settings.URLS,\n source_dir=settings.SOURCE_DIR,\n filter_list=settings.FILTER_LIST)\n downloader.run()\n\n\n@click.command(name='assess',help='Auto assess courses and teachers')\ndef auto_assess():\n assesser = Assesser(user_info=settings.USER_INFO,\n urls=settings.URLS,\n assess_msgs=settings.ASSESS_MSG)\n assesser.run()\n\n\n@click.command(name='grade',help='Query your grades')\ndef query_grades():\n gradeObserver = GradeObserver(user_info=settings.USER_INFO,\n urls=settings.URLS)\n gradeObserver.run()\n\n\n@click.command(name='hack',help='Hack wifi accounts')\ndef hack_accounts():\n hacker = AccHacker(data_path='data/data.txt', password_path='data/password.txt')\n hacker.run()\n\n@click.command(name='login',help='Login campus network')\ndef login_wifi():\n wifiLoginer = WifiLoginer(accounts_path=settings.ACCOUNTS_PATH)\n wifiLoginer.login()\n\n\n@click.command(name='logout',help='Logout campus network')\ndef logout_wifi():\n wifiLoginer = WifiLoginer(accounts_path=settings.ACCOUNTS_PATH)\n wifiLoginer.logout()\n\n\nif __name__ == '__main__':\n commands = [UI,auto_assess,download_source,query_grades,hack_accounts,login_wifi,logout_wifi]\n for command in commands:\n start.add_command(command)\n start()","sub_path":"ucashelper.py","file_name":"ucashelper.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"536593244","text":"#coding:utf-8\nimport Image\nimport pytesseract\nimport ImageEnhance\n\nimg = Image.open('11.png')\nimg = img.convert('RGBA')\nimg = img.convert('L')\nimg.save('end_9.png')\n\nsharpness = ImageEnhance.Sharpness(img)# Sharpened\nimg = sharpness.enhance(7.0)\nimg.save('end_0.png')\n#img = ImageEnhance.Color(img) # Black and white\n#img = img.enhance(0)\nimg.save('end_1.png')\n#img = ImageEnhance.Brightness(img) # Increase brightness\n#img = img.enhance(3)\nimg.save('end_2.png')\nimg = ImageEnhance.Contrast(img) # High contrast\nimg = img.enhance(8)\nimg.save('end_3.png')\n#img = ImageEnhance.Color(img)\n#img = img.convert('L')\n","sub_path":"2.verification_code/IMG_denoising/denoising.py","file_name":"denoising.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"206281638","text":"#!/usr/bin/env python3\n\n\ndef int_to_str(n, base):\n \"\"\"\n Converts an integer to a string in a base between binary and hexadecimal.\n\n Parameters\n ----------\n n : int\n Integer to convert to a string.\n base : int\n Integer, between 2 and 16, that specifies the base of the string\n representation.\n\n Returns\n -------\n str\n The representation of `n` in the specified base.\n\n Raises\n ------\n ValueError\n The provided `base` does not fall between 2 and 16.\n \"\"\"\n if base < 2 or base > 16:\n raise ValueError((\"base needs to be between \"\n \"binary and hexadecimal, [2, 16]\"))\n\n integer_chars = \"0123456789ABCDEF\"\n\n if n < base:\n return integer_chars[n]\n else:\n quotient, remainder = divmod(n, base)\n return int_to_str(quotient, base) + 
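The denoising record above mixes live and commented-out enhancement steps and uses the legacy top-level `Image`/`ImageEnhance` imports. A cleaned-up sketch of the active chain using the modern `PIL` namespace; the file names are placeholders:

```python
# Hedged Pillow sketch of the active enhancement steps above.
from PIL import Image, ImageEnhance

img = Image.open('11.png').convert('L')            # grayscale
img = ImageEnhance.Sharpness(img).enhance(7.0)     # sharpen
img = ImageEnhance.Contrast(img).enhance(8)        # boost contrast
img.save('end.png')
```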
integer_chars[remainder]\n\n","sub_path":"Chapter_4/recursive_integer_to_string.py","file_name":"recursive_integer_to_string.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"349971738","text":"import os\nimport sys\nimport json\nimport re\nimport subprocess\n\n\ndef _execute(cmd, **kwds):\n try:\n return subprocess.check_output(cmd, shell=True, **kwds)\n except OSError as exc:\n sys.stderr.write(str(exc) + '\\n')\n return None\n\n\ndef extract_tornado_apps(params):\n try:\n return params['tornado_apps'][0]['apps']\n except LookupError:\n sys.stderr.write(\"Invalid tornado_apps structure, could not find inside params\")\n return None\n\n\ndef get_img_path(python_exec):\n res = re.search(\"\\d\\.?\\d?$\", tornado_app['python_exec'])\n py_version = 2 if res is None else int(res.group())\n\n if py_version < 3:\n py_img = 'python-27-rhel7'\n else:\n py_img = 'python-36-rhel7'\n\n return os.path.join('registry.access.redhat.com/rhscl', py_img)\n\n\ndef create_requirements_file(virtual_env_path, project_path):\n activate_path = os.path.join(virtual_env_path, 'bin/activate')\n req_path = os.path.join(project_path, 'requirements.txt')\n return _execute('source {} && pip freeze > {}'.format(activate_path, req_path), executable='/bin/bash')\n\n\ndef build_img(project_path, base_img):\n # build img requires s2i from OpenShift\n # https://docs.openshift.com/enterprise/3.0/creating_images/s2i.html\n out_img = base_img + '-tornado-app'\n res =_execute(\n 's2i build -c {path} {img} {out_img}'.format(\n path=project_path,\n img=base_img,\n out_img=out_img\n )\n )\n return out_img if res is not None else None\n\n\ndef run_container(img, entry_point):\n # run tornado container and expose random port\n return _execute(\n 'docker run -e APP_FILE={entry} -p 8080 -itd {img}'.format(\n entry=entry_point,\n img=img\n )\n )\n\n\nif __name__ == '__main__':\n params = {} if os.isatty(sys.stdin.fileno()) else json.load(sys.stdin)\n tornado_apps = extract_tornado_apps(params)\n if tornado_apps is None:\n sys.exit(1)\n\n for tornado_app in tornado_apps:\n py_img = get_img_path(tornado_app['python_exec'])\n\n # if there is virtual env then prepare requirements.txt file (if exists - replace)\n if tornado_app['virtual_env_path'] is not None:\n if create_requirements_file(tornado_app['virtual_env_path'], tornado_app['project_path']) is None:\n continue\n\n # build docker image with tornado application\n img = build_img(tornado_app['project_path'], py_img)\n if img is None:\n continue\n\n # start docker container with tornado application\n run_container(img, tornado_app['main_file'])\n","sub_path":"src/actors/containerization/services/tornado/tornado_generator/tornado_generator.py","file_name":"tornado_generator.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"341935250","text":"from django.conf \\\n import settings\nfrom django.conf.urls \\\n\timport include\nfrom django.conf.urls.static \\\n import static\nfrom django.contrib \\\n\timport admin\nfrom django.urls \\\n\timport path\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', include('frontend.urls')),\n path('antifraude/', include('antifraude.urls')),\n]\n\n#MEDIA ROOT\nurlpatterns += static(\n settings.MEDIA_URL, \n document_root= settings.MEDIA_ROOT\n)\n\n#STATIC ROOT\nurlpatterns += static(\n settings.STATIC_URL, \n document_root= 
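Two quick checks for the recursive `int_to_str` above, traced by hand against its divmod recursion:

```python
# Usage checks for int_to_str, assuming the function above is in scope.
assert int_to_str(254, 16) == 'FE'   # divmod(254, 16) == (15, 14)
assert int_to_str(5, 2) == '101'
print(int_to_str(254, 16))           # -> FE
```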
settings.STATIC_ROOT\n)\n","sub_path":"Security/security/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"602844446","text":"#!/usr/bin/env python3\n#Updated 10/12/2019\n\n###################################################################\n# Wiring Info #\n# Relay Signal GPIO pins 5,6 - Orange And Green solid wires #\n# Relay Ground - Orange / white and green / white wires #\n# Sensor Grounds - Green and Green / White wires #\n# Sensor =5v - Orange and Orange / White wires #\n# Sensor Data - Resister =5v and Blue and Blue / White wires #\n###################################################################\nimport RPi.GPIO as control\nimport datetime\nimport time\nimport os\nimport json\nimport urllib2\nfrom os import path\nfrom time import localtime, strftime\n\nimport subprocess\n\n#Global Variables\non_time = 11\noff_time = 24\nlamp1 = False #water\nlamp2 = False #bed\n\ncontrol.setmode(control.BCM)\n\n#Display GPIO setup\nRST = None\nL_pin = 27\nR_pin = 23\nC_pin = 4\nU_pin = 17\nD_pin = 22\n\nA_pin = 5\nB_pin = 6\n\n#Relay GPIO setup\nl1_relay = 6 #relay for lamp1\nl2_relay = 5 #relay for lamp2\ncontrol.setup(l1_relay, control.OUT)\ncontrol.setup(l2_relay, control.OUT)\n\ndef read_outside():\n url = 'https://api.ambientweather.net/v1/devices?applicationKey=c103229ca2234ebba2ead05db7bd8c163ad2f77758f7497e8f27a9afc74aa3e1&apiKey=f0f5077b6d104b178399c603501b415bf18cb8236315407481d329b9fbf82531'\n response = urllib2.urlopen(url)\n data = response.read()\n outside = json.loads(data)[0][\"lastData\"][\"feelsLike\"]\n return outside\n\ndef lamps_off():\n control.output(l1_relay, False)\n control.output(l2_relay, False)\n lamp1 = False\n lamp2 = False\n\ndef lamp1_only():\n control.output(l1_relay, True)\n control.output(l2_relay, False)\n lamp1 = True\n lamp2 = False\n\ndef lamp2_only():\n control.output(l1_relay, False)\n control.output(l2_relay, True)\n lamp1 = False\n lamp2 = True\n\ndef both_lamps():\n control.output(l1_relay, True)\n control.output(l2_relay, True)\n lamp1 = True\n lamp2 = True\n\ndef curtime():\n time = strftime(\"%H\", localtime())\n return time\n\ndef day():\n day = datetime.datetime.today().weekday()\n return day\n\ndef main():\n#Monday=0\n#Tuesday=1\n#Wednesday=2\n#Thursday=3\n#FRiday=4\n#Saturday=5\n#Sunday=6\n lamp2_only()\n time.sleep(900)\n\nwhile True:\n main()\n","sub_path":"lamp2.py","file_name":"lamp2.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"460141424","text":"\n# coding=utf-8\n\n# https://movie.douban.com/j/new_search_subjects?\n# sort=T&range=0,10&tags=%E5%89%A7%E6%83%85&start=0\n# 分析:\n# 请求需要带去的参数\n# sort:T 排序\n# range:0,10 评分范围\n# tags:电影,剧情 标签(电影类型)\n# start:0 数据的起始位置 从0 开始 每页20条数据\nimport json\nimport os\nimport requests\n\n\ndef load_page():\n \"\"\"爬虫程序\"\"\"\n base_url = 'https://movie.douban.com/j/new_search_subjects'\n\n for i in range(101):\n print(i)\n # 构造关键字 请求需要带上的参数\n key_words = {\n \"sort\": \"T\", # 排序的方式\n \"range\": \"0,10\", # 电影评分的范围\n \"tags\": \"电影,科幻\", # 检索的标签\n # \"playable\": \"1\", # 是否可以播放\n \"start\": str(i*20), # 检索的开始位置 (这里可以去改变的 从0 开始 一个电影代表的是一条数据)\n # \"genres\": \"喜剧\", # 类型\n # \"countries\": \"中国大陆\", # 国家地区\n }\n \"\"\"发送请求加载页面\"\"\"\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36'}\n # 
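The lamp controller above loops forever without ever releasing its GPIO pins. A hedged shutdown wrapper that resets pin state when the script stops, reusing the record's `control` alias for `RPi.GPIO` and its `main` function:

```python
# Hedged shutdown wrapper for the relay loop above; assumes `control`
# (RPi.GPIO) and `main` from the script are in scope.
try:
    while True:
        main()
except KeyboardInterrupt:
    pass
finally:
    control.cleanup()   # release the relay pins on exit
```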
构造请求\n try:\n # 发送请求\n response = requests.get(base_url, params=key_words, headers=headers)\n if response.status_code == 200:\n json_data = response.json()\n print(json_data)\n if not os.path.exists('./data'):\n os.mkdir('./data')\n file_path = os.path.join('./data', 'douban.json')\n\n with open(file_path, 'a', encoding='utf-8') as fp:\n fp.write(json.dumps(json_data)+'\\n')\n # json.dump(json_data+'\\n', fp)\n else:\n print('请求出错')\n except Exception as err:\n print(err)\n\n\nif __name__ == \"__main__\":\n load_page()\n\n\n\n","sub_path":"pySpider/douban movie.py","file_name":"douban movie.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"180333133","text":"# find the elements that multiplies and gives result\ndef findElements(result, array):\n visited = set()\n for i in array:\n r = result / i\n if r in visited:\n return \"{} and {}\".format(r, i)\n visited.add(i)\n return \"no match\"\n\n\nprint(findElements(20, [1, 2, 3, 4, 5]))\n","sub_path":"src/main/python/algorithms/elucidate_algo/multiplication_of_two_elements.py","file_name":"multiplication_of_two_elements.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"495254703","text":"import warnings\r\nwarnings.filterwarnings('ignore')\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom influxdb import *\r\nimport Config as cg\r\nfrom influxdb import DataFrameClient\r\nimport datetime\r\n\r\n\r\n\r\nclass TotalLoss():\r\n \r\n def __init__(self):\r\n self.DFDBClient = DataFrameClient(host=cg.INFLUX_DB_IP, port=cg.INFLUX_DB_PORT, database=cg.INFLUX_DB)\r\n\r\n def __call__(self):\r\n self.output()\r\n \r\n def read_Data(self):\r\n con_obj = InfluxDBClient(host=cg.INFLUX_DB_IP, port=cg.INFLUX_DB_PORT, database=cg.INFLUX_DB)\r\n query = 'select * from ' + cg.WRITE_MEASUREMENT + ' where time > now() - 1d '\r\n df = pd.DataFrame(con_obj.query(query, chunked=True, chunk_size=10000).get_points())\r\n df['time'] = df['time'].astype('datetime64[ns]')\r\n df['time'] = df['time'] + datetime.timedelta(hours=5, minutes=30)\r\n return df\r\n \r\n def total_sum(self,df):\r\n y = pd.DataFrame(df.groupby('DeviceID')['kvah_loss_total'].sum()).astype(np.float64)\r\n y.columns = ['Sum']\r\n y = y.reset_index()\r\n return y \r\n \r\n def time(self,df):\r\n df['Time_max'] = df['time'].dt.time\r\n a=df.loc[list(df.groupby('DeviceID')['kvah_loss_total'].idxmax())][['Time_max','DeviceID']]\r\n b=df.loc[list(df.groupby('DeviceID')['kvah_loss_total'].idxmin())][['Time_max','DeviceID']]\r\n a.index=a['DeviceID']\r\n a=a.drop('DeviceID',axis=1)\r\n a['Time_min']=list(b['Time_max'])\r\n return a\r\n \r\n def time_as_index(self,df):\r\n t = pd.DataFrame(df.groupby(['DeviceID'])['time'].max())\r\n t.reset_index(inplace = True)\r\n return t\r\n\r\n def output(self):\r\n df = self.read_Data()\r\n x = df.groupby('DeviceID')['kvah_loss_total'].describe()\r\n x.columns = ['Total_Count', 'Average', 'SD', 'Minimum', '25th_percentile', 'Median', '75th_percentile', 'Maximum']\r\n y = self.total_sum(df)\r\n x = x.merge(y, on = 'DeviceID', how = \"outer\")\r\n t = self.time_as_index(df)\r\n x=x.merge(t,on='DeviceID', how = \"outer\")\r\n a=self.time(df)\r\n x=x.merge(a,on='DeviceID', how = \"outer\")\r\n x = x.fillna(0)\r\n x.set_index('time', inplace = True)\r\n print(self.DFDBClient.write_points(x, cg.TOTAL_LOSS))\r\n return x\r\n \r\n\r\nif __name__ == '__main__':\r\n cat = 
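Note that `findElements` above uses true division, so the value it reports can be a float; that still matches set members because `4.0 == 4` in Python. Two quick traces:

```python
# Quick checks for findElements; traced by hand against the loop above.
print(findElements(20, [1, 2, 3, 4, 5]))  # -> "4.0 and 5" (20 / 5 hits the seen 4)
print(findElements(7,  [1, 2, 3]))        # -> "no match"
```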
TotalLoss()\r\n t = cat.output()\r\n print(t)\r\n print(t.columns)\r\n\r\n","sub_path":"Loss_Total.py","file_name":"Loss_Total.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"618150097","text":"import random\n\nclass FlashCard(object):\n def __init__(self, word, definition, box_num):\n self.word = word\n self.definition = definition\n self.box = box_num\n\nclass LeitnerSpacing():\n def __init__(self, flash_cards):\n \"\"\"\n @param flash_cards: (word, def) tuples or a dictionary of word def pairs\n @param num_boxes: number of groups\n \"\"\"\n self.boxes = [[] for _ in range(5)]\n self.box_probs = [.5, .2, .15, .1, .05]\n for word, definition in self.flash_cards:\n fc = FlashCard(word, definition, 0)\n self.boxes[0].append(fc)\n self.results = 0\n self.cards_given = 0.0\n\n def _move_word(self, flash_card, new_box):\n box_num = flash_card.box\n if box_num == new_box:\n return\n for idx, fc in enumerate(self.boxes[box_num]):\n if fc is flash_card:\n self.boxes[box_num].pop(idx)\n break\n flash_card.box = new_box\n self.boxes[new_box].append(flash_card)\n\n def get_card(self):\n self.cards_given += 1\n rand_index = random.rand()\n running_sum = 0\n for idx in range(5):\n if rand_index <= running_sum + self.box_probs[idx]:\n box = idx\n break\n num_cards = len(self.boxes[box])\n card_idx = int(num_cards*random.rand())\n return self.boxes[box][card_idx]\n\n def report_result(self, flash_card, result):\n \"\"\"\n @param word: the word to report results for\n @param result: the true or false value indicating whether\n correct or incorrect.\n \"\"\"\n self.results += int(result)\n if result:\n word_box = flash_card.box\n if word_box != 0:\n self._move_word(flash_card, word_box-1)\n else:\n self._move_word(flash_card, 0)\n\n def __str__(self):\n return [[fc.word for fc in self.boxes[idx]] for idx in range(5)]\n\n def __repr__(self):\n print((\"Current stats:\\nPercent Correct:\" \n \" {:.2%}\\nSounds played: {:d}\").format(self.results/self.cards_given,\n int(self.cards_given)))\n return [[fc.word for fc in self.boxes[idx]] for idx in range(5)]\n","sub_path":"schemes/LeitnerSpacing.py","file_name":"LeitnerSpacing.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"424388390","text":"from ..funcs.singleton import Singleton\nfrom .sql_alchemy_connector import SqLiteDatabase\nfrom ..funcs.constants import SQLITE\nfrom ..funcs.event import Event\nfrom ..funcs.log import create_logger\n\nlogger = create_logger('EventHandler')\n\n\nclass EventHandler(metaclass=Singleton):\n\n def __init__(self):\n self.__sqlAlchemyConnector = SqLiteDatabase(SQLITE, dbname='eventDatabase.sqlite')\n self.__sqlAlchemyConnector.create_chat_event_table()\n self.__sqlAlchemyConnector.create_kotlin_table()\n\n def add_event(self, event_as_cbor):\n event = Event.from_cbor(event_as_cbor)\n seq_no = event.meta.seq_no\n feed_id = event.meta.feed_id\n content = event.content.content\n\n cont_ident = content[0].split('/')\n application = cont_ident[0]\n\n if application == 'chat':\n chatMsg = content[1]['messagekey']\n chat_id = content[1]['chat_id']\n timestamp = content[1]['timestampkey']\n\n self.__sqlAlchemyConnector.insert_event(feed_id=feed_id, seq_no=seq_no, application=application,\n chat_id=chat_id,\n timestamp=timestamp, data=chatMsg)\n\n elif application == 'KotlinUI':\n username = content[1]['username']\n timestamp = 
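The `LeitnerSpacing` class above has three problems as written: `__init__` iterates `self.flash_cards` before assigning it, `get_card` calls the non-existent `random.rand()` (that name is NumPy's; the stdlib offers `random.random()`), and `running_sum` is never incremented, so the cumulative box choice is wrong. A hedged sketch of a corrected draw:

```python
# Hedged corrected sketch of the box/card selection in get_card above.
import random

def pick_box(box_probs):
    r = random.random()            # stdlib equivalent of numpy's rand()
    running = 0.0
    for idx, p in enumerate(box_probs):
        running += p               # the original never accumulated this
        if r <= running:
            return idx
    return len(box_probs) - 1      # guard against floating-point shortfall

def pick_card(boxes, box_probs):
    # Assumes at least one box is non-empty.
    box = pick_box(box_probs)
    if not boxes[box]:             # fall back to the first non-empty box
        box = next(i for i, b in enumerate(boxes) if b)
    return boxes[box][random.randrange(len(boxes[box]))]
```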
content[1]['timestamp']\n text = content[1]['text']\n self.__sqlAlchemyConnector.insert_kotlin_event(feed_id=feed_id, seq_no=seq_no, application=application,\n username=username,\n timestamp=timestamp, text=text)\n\n elif application == 'MASTER':\n self.master_handler(seq_no, feed_id, content, cont_ident)\n\n else:\n raise InvalidApplicationError('Invalid application called %s' % application)\n\n def get_event_since(self, application, timestamp, chat_id):\n return self.__sqlAlchemyConnector.get_all_events_since(application, timestamp, chat_id)\n\n def get_all_events(self, application, chat_id):\n return self.__sqlAlchemyConnector.get_all_event_with_chat_id(application, chat_id)\n\n def get_Kotlin_usernames(self):\n return self.__sqlAlchemyConnector.get_all_usernames()\n\n def get_all_kotlin_events(self, feed_id):\n return self.__sqlAlchemyConnector.get_all_kotlin_events(feed_id=feed_id)\n\n def get_all_entries_by_feed_id(self, feed_id):\n return self.__sqlAlchemyConnector.get_all_entries_by_feed_id(feed_id)\n\n def get_last_kotlin_event(self):\n return self.__sqlAlchemyConnector.get_last_kotlin_event()\n\n def master_handler(self, seq_no, feed_id, content, cont_ident):\n event = cont_ident[1]\n if event == 'MASTER':\n self.__sqlAlchemyConnector.insert_master_event(True, feed_id, None, None, seq_no, None, None, None, 0)\n elif event == 'Trust':\n self.__sqlAlchemyConnector.insert_master_event(False, feed_id, None, content[1]['feed_id'], seq_no, True,\n False, None, None)\n elif event == 'Untrust':\n self.__sqlAlchemyConnector.insert_master_event(False, feed_id, None, content[1]['feed_id'], seq_no, False,\n True, None, None)\n elif event == 'Name':\n self.__sqlAlchemyConnector.insert_master_event(False, feed_id, None, None, seq_no, None, None,\n content[1]['name'], None)\n elif event == 'NewFeed':\n self.__sqlAlchemyConnector.insert_master_event(False, feed_id, content[1]['feed_id'], None, seq_no, True,\n False, None, None)\n elif event == 'Block':\n self.__sqlAlchemyConnector.insert_master_event(False, feed_id, None, content[1]['feed_id'], seq_no, False,\n True, None, None)\n elif event == 'Unblock':\n self.__sqlAlchemyConnector.insert_master_event(False, feed_id, None, content[1]['feed_id'], seq_no, True,\n False, None, None)\n elif event == 'Radius':\n self.__sqlAlchemyConnector.insert_master_event(False, feed_id, None, None, seq_no, None,\n None, None, content[1]['radius'])\n else:\n raise InvalidApplicationError('Invalid action called %s' % event)\n\n\nclass InvalidApplicationError(Exception):\n def __init__(self, message):\n super(InvalidApplicationError, self).__init__(message)\n","sub_path":"groups/07-logStore/src/logStore/database/event_handler.py","file_name":"event_handler.py","file_ext":"py","file_size_in_byte":4739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"443948374","text":"# Created by Aashish Adhikari at 3:55 PM 6/8/2020\n\nclass Solution(object):\n def containsNearbyDuplicate(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: bool\n \"\"\"\n\n dic = {}\n\n for idx in range(len(nums)):\n\n if nums[idx] in dic:\n existing = dic[nums[idx]]\n for item in existing:\n if idx - item <= k:\n return True\n\n dic[nums[idx]].append(idx)\n else:\n dic[nums[idx]] = [idx]\n\n return False\n\n\n\nsol = 
Solution()\nprint(sol.containsNearbyDuplicate([1,0,1,1],1))","sub_path":"LeetCode_Problems/Easy_Questions/Contains_Duplicate_II.py","file_name":"Contains_Duplicate_II.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"147494746","text":"import socket\r\nimport threading\r\nimport rsa\r\nfrom rsa.bigfile import *\r\nimport pickle\r\n\r\n\r\nMSGLEN = 2048\r\n\r\n\r\nclass MySocket:\r\n def __init__(self, sock=None):\r\n if not sock:\r\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n else:\r\n self.sock = sock\r\n (pubkey, privkey) = rsa.newkeys(512)\r\n self.pubkey = pubkey\r\n self.privkey = privkey\r\n\r\n def send(self, msg):\r\n totalsent = 0\r\n while totalsent < len(msg):\r\n # print(dir(self.sock))\r\n sent = self.sock.send(msg[totalsent:])\r\n if sent == 0:\r\n raise RuntimeError(\"Socket connection broken\")\r\n totalsent += sent\r\n\r\n def sendFile(self, fname):\r\n with open(fname, 'rb') as f:\r\n l = f.read(MSGLEN)\r\n while l:\r\n print(\"Sending...\")\r\n print(l)\r\n self.send(l)\r\n l = f.read(MSGLEN)\r\n\r\n def receive(self):\r\n chunks = []\r\n bytes_recd = 0\r\n while bytes_recd < MSGLEN:\r\n # print(dir(self.sock))\r\n chunk = self.sock.recv(min(MSGLEN - bytes_recd, 2048))\r\n if chunk == b'':\r\n break\r\n # raise RuntimeError(\"Socket connection broken\")\r\n chunks.append(chunk)\r\n bytes_recd += len(chunk)\r\n return b''.join(chunks)\r\n\r\n def receiveFile(self, fname):\r\n with open(fname, 'wb') as f:\r\n l = self.receive()\r\n while l:\r\n print(l)\r\n f.write(l)\r\n l = self.receive()\r\n\r\n\r\nclass ServerSocket(MySocket):\r\n def __init__(self):\r\n MySocket.__init__(self)\r\n self.sock.bind((socket.gethostname(), 27015))\r\n self.sock.listen(5)\r\n self.clients = []\r\n\r\n def accept(self):\r\n (c, _) = self.sock.accept()\r\n client = MySocket(c)\r\n self.clients.append(client)\r\n return client\r\n\r\n\r\nclass ClientSocket(MySocket):\r\n def __init__(self):\r\n MySocket.__init__(self)\r\n\r\n def connect(self, host, port):\r\n self.sock.connect((host, port))\r\n\r\n\r\nclass ClientThread(threading.Thread):\r\n def __init__(self, server, sock):\r\n threading.Thread.__init__(self)\r\n self.sock = sock\r\n\r\n def run(self):\r\n self.sock.send(pickle.dumps(server.pubkey))\r\n self.sock.sock.shutdown(socket.SHUT_WR)\r\n print(\"Sent Key\")\r\n fname = 'server_data/some_file.enc'\r\n self.sock.receiveFile(fname)\r\n decryptFile(fname, server.privkey)\r\n print(\"Received Data\")\r\n\r\n\r\ndef decryptFile(fname, privkey):\r\n rfile = 'server_data/some_file.txt'\r\n with open(fname, 'rb') as infile, open(rfile, 'wb') as outfile:\r\n decrypt_bigfile(infile, outfile, privkey)\r\n return rfile\r\n\r\n\r\ndef encryptFile(fname, pubkey):\r\n rfile = 'client_data/some_file.enc'\r\n with open(fname, 'rb') as infile, open(rfile, 'wb') as outfile:\r\n encrypt_bigfile(infile, outfile, pubkey)\r\n return rfile\r\n\r\n\r\nchoice = input(\"(s)erver or (c)lient: \")\r\nif choice == 's':\r\n server = ServerSocket()\r\n while True:\r\n client = server.accept()\r\n ClientThread(server, client).start()\r\nelse:\r\n client = ClientSocket()\r\n client.connect(socket.gethostname(), 27015)\r\n pubkey = pickle.loads(client.receive())\r\n print(type(pubkey).__name__)\r\n print(pubkey)\r\n fname = encryptFile('client_data/some_file.txt', pubkey)\r\n 
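The `containsNearbyDuplicate` solution above re-scans every earlier index of a value, which is O(n·k) in the worst case. A sliding window of the last k elements gives O(n) time and O(k) space; the asserts below were checked by hand:

```python
# O(n) alternative to containsNearbyDuplicate above, keeping a window
# of the k most recent values.
def contains_nearby_duplicate(nums, k):
    window = set()
    for i, n in enumerate(nums):
        if n in window:
            return True
        window.add(n)
        if len(window) > k:
            window.remove(nums[i - k])  # drop the element that fell out of range
    return False

assert contains_nearby_duplicate([1, 0, 1, 1], 1) is True
assert contains_nearby_duplicate([1, 2, 3, 1], 3) is True
assert contains_nearby_duplicate([1, 2, 3, 1, 2, 3], 2) is False
```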
client.sendFile(fname)\r\n","sub_path":"bachelors/year4/semestre1/python_ruby/HomeWorks/python/18/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"541689656","text":"import gc\nimport logging\nimport os\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras.applications import MobileNetV2, ResNet50, InceptionV3\nfrom tensorflow.keras.applications import mobilenet_v2, resnet50, inception_v3\nfrom tensorflow.keras.layers import (\n AveragePooling2D,\n Dense,\n Dropout,\n Flatten,\n)\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.preprocessing.image import (\n ImageDataGenerator,\n)\n\ntry:\n from rich.logging import RichHandler\nexcept ModuleNotFoundError:\n os.system(\"pip install rich\")\n from rich.logging import RichHandler\n\nhandler = RichHandler()\nformatter = logging.Formatter(\"%(message)s\")\nlogger = logging.getLogger(\"main\")\n\nhandler.setFormatter(formatter)\nif len(logger.handlers) == 0:\n logger.addHandler(handler)\n\n\n\"\"\"Script for training Face Mask Detector\n\nThis is the script used to train the Face Mask Detector.\n\"\"\"\n\n\ndef build_model_mnet():\n \"\"\"\n Building the transfer learning model with MobileNet_V2\n Returns\n -------\n tensorflow.keras.Model\n The model for training.\n \"\"\"\n baseModel = MobileNetV2(\n weights=\"imagenet\", include_top=False, input_shape=(224, 224, 3)\n )\n headModel = baseModel.output\n headModel = AveragePooling2D(pool_size=(7, 7))(headModel)\n headModel = Flatten(name=\"flatten\")(headModel)\n headModel = Dense(128, activation=\"relu\")(headModel)\n headModel = Dropout(0.5)(headModel)\n headModel = Dense(3, activation=\"softmax\")(headModel)\n\n model = Model(inputs=baseModel.input, outputs=headModel)\n for layer in baseModel.layers:\n layer.trainable = False\n\n return model\n\n\ndef build_model_resnet():\n \"\"\"\n Building the transfer learning model with ResNet50\n Returns\n -------\n tensorflow.keras.Model\n The model for training.\n \"\"\"\n baseModel = ResNet50(\n weights=\"imagenet\", include_top=False, input_shape=(224, 224, 3)\n )\n headModel = baseModel.output\n headModel = AveragePooling2D(pool_size=(7, 7))(headModel)\n headModel = Flatten(name=\"flatten\")(headModel)\n headModel = Dense(128, activation=\"relu\")(headModel)\n headModel = Dropout(0.5)(headModel)\n headModel = Dense(3, activation=\"softmax\")(headModel)\n\n model = Model(inputs=baseModel.input, outputs=headModel)\n for layer in baseModel.layers:\n layer.trainable = False\n\n return model\n\n\ndef build_model_inception():\n \"\"\"\n Building the transfer learning model with Inception_V3\n Returns\n -------\n tensorflow.keras.Model\n The model for training.\n \"\"\"\n baseModel = InceptionV3(\n weights=\"imagenet\", include_top=False, input_shape=(224, 224, 3)\n )\n headModel = baseModel.output\n headModel = AveragePooling2D(pool_size=(5, 5))(headModel)\n headModel = Flatten(name=\"flatten\")(headModel)\n headModel = Dense(128, activation=\"relu\")(headModel)\n headModel = Dropout(0.5)(headModel)\n headModel = Dense(3, activation=\"softmax\")(headModel)\n\n model = Model(inputs=baseModel.input, outputs=headModel)\n for layer in baseModel.layers:\n layer.trainable = False\n\n return model\n\n\ndef main():\n \"\"\"\n Main function.\n \"\"\"\n EPOCHS = 8\n BATCH_SIZE = 32\n TRAIN_TEST_SPLIT = 0.8 # Training: 0.8, 
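`MySocket.receive` above stops early when the peer closes mid-read, and `receiveFile` relies on that to detect end-of-stream. When an exact byte count is needed instead, the conventional loop looks like this; a generic sketch, not part of the original module:

```python
# Hedged helper: read exactly n bytes, or None if the peer closed first.
def recv_exact(sock, n):
    buf = bytearray()
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:              # connection closed by the peer
            return None
        buf.extend(chunk)
    return bytes(buf)
```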
Validation: 0.2\n\n DATASET_PATH = \"../input/facemaskdetection/dataset\"\n TOTAL_DATA_COUNT = len(list(Path(DATASET_PATH).rglob(\"**/*.jpg\")))\n TRAIN_DATA_COUNT = np.ceil(TOTAL_DATA_COUNT * TRAIN_TEST_SPLIT).astype(\"int32\")\n VAL_DATA_COUNT = TOTAL_DATA_COUNT - TRAIN_DATA_COUNT\n TRAIN_STEPS_PER_EPOCH = np.ceil(TRAIN_DATA_COUNT / BATCH_SIZE).astype(\"int32\")\n VAL_STEPS_PER_EPOCH = np.ceil(VAL_DATA_COUNT / BATCH_SIZE).astype(\"int32\")\n\n logger.info(f\"Epochs: {EPOCHS}\\nBatch size: {BATCH_SIZE}\")\n logger.info(\n f\"Total data size: {TOTAL_DATA_COUNT}. Split {TRAIN_TEST_SPLIT * 100}% as training.\"\n )\n logger.info(\n f\"Train data size: {TRAIN_DATA_COUNT}\\tValidation data size: {VAL_DATA_COUNT}\"\n )\n logger.info(\n f\"Steps per epoch:\\nTraining: {TRAIN_STEPS_PER_EPOCH}\\tValidation: {VAL_STEPS_PER_EPOCH}\"\n )\n\n logger.info(\"Building models...\")\n models = [\n {\n \"name\": \"MobileNetV2\",\n \"model\": build_model_mnet(),\n \"preprocessor\": mobilenet_v2.preprocess_input,\n },\n {\n \"name\": \"ResNet50\",\n \"model\": build_model_resnet(),\n \"preprocessor\": resnet50.preprocess_input,\n },\n {\n \"name\": \"InceptionV3\",\n \"model\": build_model_inception(),\n \"preprocessor\": inception_v3.preprocess_input,\n },\n ]\n\n for model_dict in models:\n model_name = model_dict[\"name\"]\n model = model_dict[\"model\"]\n preprocessor = model_dict[\"preprocessor\"]\n\n logger.info(f\"Training {model_name}...\")\n optimizer = optimizers.Adam(learning_rate=1e-4, decay=1e-4 / 20)\n model.compile(\n loss=\"categorical_crossentropy\", optimizer=optimizer, metrics=[\"accuracy\"]\n )\n\n aug = ImageDataGenerator(\n preprocessing_function=preprocessor,\n rotation_range=20,\n zoom_range=0.15,\n shear_range=0.15,\n width_shift_range=0.2,\n height_shift_range=0.2,\n horizontal_flip=True,\n fill_mode=\"nearest\",\n validation_split=0.2,\n )\n\n train_generator = aug.flow_from_directory(\n DATASET_PATH,\n target_size=(224, 224),\n color_mode=\"rgb\",\n classes=[\"WMFD\", \"CMFD\", \"IMFD\"],\n class_mode=\"categorical\",\n batch_size=BATCH_SIZE,\n subset=\"training\",\n )\n\n val_generator = aug.flow_from_directory(\n DATASET_PATH,\n target_size=(224, 224),\n color_mode=\"rgb\",\n classes=[\"WMFD\", \"CMFD\", \"IMFD\"],\n class_mode=\"categorical\",\n batch_size=BATCH_SIZE,\n subset=\"validation\",\n )\n\n logger.info(f\"Class indices: {train_generator.class_indices}\")\n\n H = model.fit(\n train_generator,\n validation_data=val_generator,\n epochs=EPOCHS,\n steps_per_epoch=TRAIN_STEPS_PER_EPOCH,\n validation_steps=VAL_STEPS_PER_EPOCH,\n verbose=1,\n )\n\n plt.style.use(\"seaborn-darkgrid\")\n plt.figure()\n plt.plot(H.history[\"loss\"], label=\"Train Loss\")\n plt.plot(H.history[\"val_loss\"], label=\"Validation Loss\")\n plt.plot(H.history[\"accuracy\"], label=\"Train Accuracy\")\n plt.plot(H.history[\"val_accuracy\"], label=\"Validation Accuracy\")\n plt.title(f\"Loss and Accuracy Graph for {model_name}\")\n plt.xlabel(\"Epoch #\")\n plt.ylabel(\"Loss / Accuracy\")\n plt.legend(loc=\"lower left\")\n plt.show()\n model.save(f\"{model_name}_MaskDetectorFull.h5\")\n print(f\"Collected: {gc.collect()}\")\n","sub_path":"facemaskdetection/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"67268318","text":"# implementacja algorytmu fast MIS\n\nfrom network.message import Message\nfrom random import randint as rint\n\nclass FastMIS(object):\n \"\"\" Klasa implementujaca algorytm fast MIS.\n \n Klasa zlicza ile rund dzialal algorytm.\n \"\"\"\n \n def __init__(self, network):\n \"\"\" init.\n \n Argumenty:\n network - siec na jakiej ma dzialac algorytm\n \"\"\"\n \n self.network = network\n self.roundCounter = 0\n \n def initiate(self):\n \"\"\" Metoda inicjujaca kazdy wezel przed rozpoczeciem dzialania algorytmu.\n \n Stan kazdego wezla jest ustawiany na 'unmarked', a lista sasiadow jest kopiowana do pamieci.\n \"\"\"\n \n for node in self.network.ndList:\n node.memory['state'] = 'unmarked'\n node.memory['neighbors_cp'] = [neighIndex for neighIndex in node.neighbors]\n \n def checkIfUndecidedExists(self):\n \"\"\" Metoda sprawdza czy w sieci istnieje wezel, ktory nie zadecydowal czy jest MIS czy nie.\n \n Zwraca True jesli taki wezel istnieje, False w p.p.\n \"\"\"\n \n for node in self.network.ndList:\n if node.memory['state'] == 'unmarked' or node.memory['state'] == 'marked':\n return True\n \n return False\n \n def checkMsgsFromNeighbors(self, node, msgs):\n \"\"\" Metoda ocenia na podstawie wiadomosci od sasiadow czy dany wezel moze przystapic do MIS.\n \n Argumenty:\n node - rozpatrywany wezel\n msgs - lista wiadomosci\n \n Zwraca True, jesli wezel moze dolaczyc do MIS, False w p.p.\n \"\"\"\n \n # metoda zwraca False jesli wezel nie moze przystapic do MIS\n # True w p.p.\n \n # sprawdzanie wiadomosci\n for msg in msgs:\n # jesli sasiad ma wyzszy stopien niz wezel\n if msg.infoDict['degree'] > len(node.memory['neighbors_cp']):\n if msg.infoDict['is_marked'] == 'marked':\n return False\n # jesli sasiad ma tyle samo sasiadow co wezel\n elif msg.infoDict['degree'] == len(node.memory['neighbors_cp']):\n # jesli sasiad ma wyzsze ID, to ma wyzszy priorytet niz wezel\n if msg.iSourceID > node.ID:\n return False\n return True\n \n def roundOfFastMIS(self):\n \"\"\" Jedna runda algorytmu.\n \"\"\"\n \n # czyszczenie inboxow i outboxow\n for node in self.network.ndList:\n node.inbox = []\n node.outbox = []\n \n # jesli wezel juz zdecydowal, ze jest w MIS (albo ze nie jest), to nic nie robi\n for node in self.network.ndList:\n if node.memory['state'] == 'in_MIS' or node.memory['state'] == 'not_in_MIS':\n continue\n \n # wezel zaznacza sie z pr 1/2d(v)\n maxRand = 2*len(node.memory['neighbors_cp'])\n \n # jesli wezel juz nie ma zadnych sasiadow, to zaznacza sie z pr 1\n if maxRand == 0:\n maxRand = 1\n \n if rint(1, maxRand) == 1:\n node.memory['state'] = 'marked'\n \n # jesli wezel sie zaznaczyl, to pyta sasiadow czy moze przystapic do MIS\n for neigh in node.memory['neighbors_cp']:\n msg = Message(node.ID, neigh, {'marked_and_degree':''})\n node.outbox.append(msg)\n \n else:\n node.memory['state'] = 'unmarked'\n \n self.network.processAllNodesOutboxes()\n \n # kazdy wezel odpowiada swoim stanem zaznaczenia i stopniem\n # o ile nie zostal 'usuniety' z sieci (tzn jesli nie zdecydowal jasno, ze jest w MIS lub poza MIS)\n for node in self.network.ndList:\n if node.memory['state'] == 'in_MIS' or node.memory['state'] == 'not_in_MIS':\n node.inbox = []\n node.outbox = []\n continue\n \n while node.inbox:\n msg = node.inbox.pop()\n answer = Message(node.ID, msg.iSourceID, { 'is_marked':node.memory['state'], 'degree':len(node.memory['neighbors_cp']) })\n node.outbox.append(answer)\n \n self.network.processAllNodesOutboxes()\n \n # przetwarzanie wiadomosci\n for node in self.network.ndList:\n # jesli wezel juz zdecydowal, ze jest albo nie jest w MIS, to nic nie robi\n if node.memory['state'] == 'in_MIS' or node.memory['state'] == 'not_in_MIS':\n continue\n \n # jesli wezel sie nie zaznaczyl, to wezel nic nie robi\n if node.memory['state'] == 'unmarked':\n continue\n\n result = self.checkMsgsFromNeighbors(node, node.inbox)\n \n node.inbox = []\n \n if result == False:\n node.memory['state'] = 'unmarked'\n else:\n node.memory['state'] = 'in_MIS'\n \n for neigh in node.memory['neighbors_cp']:\n msg = Message(node.ID, neigh, {'request_not_in_MIS':''})\n node.outbox.append(msg)\n \n self.network.processAllNodesOutboxes()\n \n # jesli wezel dostal teraz jakies wiadomosci, to nie moze byc w MIS\n for node in self.network.ndList:\n # nie burzymy juz stworzonej struktury\n if node.memory['state'] == 'in_MIS' or node.memory['state'] == 'not_in_MIS':\n continue\n \n for msg in node.inbox:\n if 'request_not_in_MIS' in msg.infoDict:\n node.memory['state'] = 'not_in_MIS'\n \n # informowanie wszystkich sasiadow ze wezel zostaje usuniety z sieci\n for neigh in node.memory['neighbors_cp']:\n msg = Message(node.ID, neigh, {'request_delete':''})\n node.outbox.append(msg)\n \n break\n \n # czyszczenie inboxow\n for node in self.network.ndList:\n node.inbox = []\n \n self.network.processAllNodesOutboxes()\n \n # usuwanie sasiadow od ktorych wezly dostaly wiadomosci\n for node in self.network.ndList:\n if node.memory['state'] == 'in_MIS' or node.memory['state'] == 'not_in_MIS':\n continue\n \n senders = [msg.iSourceID for msg in node.inbox]\n newNeighbors = [neigh for neigh in node.memory['neighbors_cp'] if neigh not in senders]\n node.memory['neighbors_cp'] = newNeighbors\n \n \n def execute(self):\n \"\"\" Metoda wykonuje rundy tak dlugo, az kazdy wezel nie zadecyduje o przynaleznosci (badz nie) do MIS.\n \"\"\"\n \n self.initiate()\n \n while self.checkIfUndecidedExists():\n self.roundOfFastMIS()\n self.roundCounter += 1\n ","sub_path":"algorithm/fast_MIS.py","file_name":"fast_MIS.py","file_ext":"py","file_size_in_byte":7092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"425455660","text":"#TSP GA Implementation\r\n\r\nimport numpy, random, operator\r\n\r\nnoCities = 10 #including starting city\r\npopSize = 10 #no of solutions/individuals\r\neliteNo = 5 #elitism, the number of individuals who don't go through the random selection process\r\nmutationRate = 0.01 #mutation rate, chance of mutated member of population every gen\r\ngenerations = 500 #500 iterations then terminate\r\npopLeft = popSize - eliteNo #non-elite population\r\n\r\nif popLeft % 2 != 0:\r\n popLeft = popLeft - 1\r\n\r\n#class City to hold the properties of each city\r\nclass City:\r\n \r\n #creates city properties when object is created\r\n def __init__(self, x, y):\r\n self.x = x\r\n self.y = y\r\n \r\n #getDistance() method, to be called through cityName.getDistance(otherCity)\r\n def getDistance(self, other):\r\n #use Pythagorean theorem to calculate distance\r\n distance = float(numpy.sqrt((self.x-other.x)**2 + (self.y-other.y)**2))\r\n return distance\r\n \r\n #use __repr__ method to format when City object is called with print()\r\n def __repr__(self):\r\n return \"(\" + str(self.x) + \", \" + str(self.y) + \")\"\r\n \r\n#class route to hold the properties of each route#\r\nclass Route:\r\n \r\n #creates route properties when object is created\r\n def __init__(self, route):\r\n self.route = route\r\n self.fitness = 0.0\r\n self.distance = 0\r\n \r\n #function to get total distance of a route\r\n def fitnessFunction(self):\r\n self.distance = 0 #reset so repeated calls don't accumulate\r\n for x in range(0, len(self.route)):\r\n if x + 1 < len(self.route):\r\n self.distance += self.route[x].getDistance(self.route[x+1])\r\n else:\r\n self.distance += self.route[x].getDistance(self.route[0]) #add the closing leg back to the start city\r\n return self.distance\r\n \r\n #function to invert distance, and record that as fitness, smaller distance = larger fitness\r\n def getFitness(self):\r\n self.fitness = 1/float(self.distance)\r\n return self.fitness\r\n \r\n #use __repr__ method to format when Route object is called with print()\r\n def __repr__(self):\r\n return \"Path: \" + str(self.route) + \" Distance: \" + str(self.distance) + \" Fitness: \" + str(self.fitness)\r\n\r\n#function to create cityList, can be changed later to accommodate user input for city\r\ndef genCityList(noCities):\r\n cityList = []\r\n for x in range(0, noCities):\r\n cityList.append(City(x=int(random.random()*200), y=int(random.random()*200)))\r\n return cityList\r\n\r\n#function to generate an individual/route\r\ndef genRoute(cityList):\r\n route = random.sample(cityList, len(cityList))\r\n return route\r\n\r\n#function to generate initial population\r\ndef genPop(popSize, cityList):\r\n population = []\r\n for x in range(0, popSize):\r\n population.append(Route(genRoute(cityList)))\r\n return population\r\n\r\n#function to sort the routes\r\ndef sortRoutes(population):\r\n fitnessScore = {}\r\n for x in range(0, len(population)):\r\n population[x].fitnessFunction()\r\n fitnessScore[x] = population[x].getFitness()\r\n #returns fitnessScore as list of tuples sorted in descending order by the 1st element\r\n return sorted(fitnessScore.items(), key = operator.itemgetter(1), reverse = True)\r\n\r\n#selection function, don't randomly select until the eliteNo'th element\r\ndef selection(routeSorted, eliteNo, popLeft):\r\n results = []\r\n addElement = []\r\n #loop to create elitism\r\n for x in range(0, eliteNo):\r\n results.append(routeSorted[x])\r\n del routeSorted[:eliteNo]\r\n addElement = selectionRandom(routeSorted, popLeft)\r\n #adds the randomly selected routes from addElement to the results list\r\n results.extend(addElement)\r\n #returns list of results\r\n return results\r\n\r\n#weigh non-elite population and select who goes through based on random weighted chance\r\ndef selectionRandom(routeSorted, popLeft):\r\n weight = []\r\n choice = []\r\n #when val() called it will return the 1st element of the tuple\r\n val = operator.itemgetter(1)\r\n total = 0\r\n #use loop to get total\r\n for x in range(0, len(routeSorted)):\r\n total = total + val(routeSorted[x])\r\n #use loop to get weight from total (true division, or every weight floors to zero)\r\n for x in range(0, len(routeSorted)):\r\n weight.append(val(routeSorted[x])/total)\r\n #only half of routeSorted will be selected and returned, // is int division operator\r\n choice = random.choices(population = routeSorted, weights = weight, k = popLeft//2)\r\n return choice\r\n\r\n#create mating pool \r\ndef matingPool(results, population):\r\n matPool = []\r\n #when index() called it will return the 0th element of the tuple\r\n index = operator.itemgetter(0)\r\n for x in range(0, len(population)):\r\n #get the 0th element from the tuple and use it to call population to get route path\r\n a = index(population[x])\r\n matPool.append(results[a].route)\r\n #return the list matPool, containing the path of all the selected routes\r\n return matPool\r\n\r\n#create offspring\r\ndef breed(parent1, parent2):\r\n childGenes = [] #offspring's route path\r\n childP1 = [] #part 1 of child's genes/path\r\n childP2 = [] #part 2 of child's genes/path\r\n #note: may need to change parent1 to parent1.route\r\n #find two points in the path sequence to splice\r\n point1 = 
int(random.random()*len(parent1))\r\n point2 = int(random.random()*len(parent1))\r\n #find whether point1, or point2 is smaller, make that the start gene\r\n startGene = min(point1, point2)\r\n #find whether point1, or point2 is larger, make that the end gene\r\n endGene = max(point1, point2)\r\n #create for loop, between start and end, which will create part 1 of the child's genes\r\n for x in range(startGene, endGene):\r\n #append xth element of parent1 to childP1\r\n childP1.append(parent1[x])\r\n #childP2 is an array of items, that uses the order of cities from parent2 if the city is not in childP1\r\n childP2 = [item for item in parent2 if item not in childP1]\r\n #splice the genes together to make offspring\r\n childGenes = childP1 + childP2\r\n return childGenes\r\n\r\n#algorithm to match mates\r\ndef breedPopulation(matPool, eliteNo, popSize):\r\n children = []\r\n #how many offspring can be made\r\n length = popSize - len(matPool)\r\n #pool is a list of random routes from matPool\r\n pool = random.sample(matPool, len(matPool))\r\n #elitism - add on the elite solutions from earlier to new population\r\n for x in range(0, eliteNo):\r\n children.append(matPool[x])\r\n #create offspring\r\n for x in range(0, length):\r\n child = breed(pool[x], pool[len(matPool) - x - 1])\r\n children.append(child)\r\n return children\r\n\r\n#mutate function to take a route, and randomly choose whether or not to mutate it\r\ndef mutate(individual, mutationRate):\r\n #loop len(individual) times\r\n for x in range(len(individual)):\r\n #gen random num, if less than mutationRate mutate route\r\n if(random.random() < mutationRate):\r\n #swapWith picks random index in route\r\n swapWith = int(random.random()*len(individual))\r\n city1 = individual[x]\r\n city2 = individual[swapWith]\r\n #the two selected cities swap place\r\n individual[x] = city2\r\n individual[swapWith] = city1\r\n #return route\r\n return individual\r\n\r\n#mutatePopulation() to create new pop\r\ndef mutatePopulation(pop, mutationRate):\r\n mutatedPop = []\r\n #loop for no. 
elements in population\r\n for x in range(0, len(pop)):\r\n #get routes and assign them to mutatedPop\r\n mutatedInd = mutate(pop[x], mutationRate)\r\n mutatedPop.append(Route(mutatedInd))\r\n #return list mutatedPop\r\n return mutatedPop\r\n\r\n#next generation to repeat every generation after first\r\ndef nextGeneration(currentGen, eliteSize, mutationRate, popSize, popLeft):\r\n #get popRanked through calling sortRoutes with currentGen\r\n popRanked = sortRoutes(currentGen)\r\n #get selectionResults through calling selection() with popRanked\r\n selectionResults = selection(popRanked, eliteNo, popLeft)\r\n #get matPool by calling matingPool\r\n matPool = matingPool(currentGen, selectionResults)\r\n #get children through calling breedPopulation()\r\n children = breedPopulation(matPool, eliteNo, popSize)\r\n #get nextGeneration through calling mutatePopulation\r\n nextGeneration = mutatePopulation(children, mutationRate)\r\n return nextGeneration\r\n\r\ndef geneticAlgorithm(cityList, popSize, eliteNo, mutationRate, generations, popLeft):\r\n val = operator.itemgetter(1)\r\n index = operator.itemgetter(0)\r\n pop = genPop(popSize, cityList)\r\n print(\"initial distance: \" + str(1/val(sortRoutes(pop)[0])))\r\n for x in range(0, generations):\r\n pop = nextGeneration(pop, eliteNo, mutationRate, popSize, popLeft)\r\n print(\"final distance: \" + str(1/val(sortRoutes(pop)[0])))\r\n bestRouteIndex = index(sortRoutes(pop)[0])\r\n bestRoute = pop[bestRouteIndex]\r\n return bestRoute\r\n\r\n#run program\r\ncityList = genCityList(noCities)\r\ngeneticAlgorithm(cityList, popSize, eliteNo, mutationRate, generations, popLeft)\r\n\r\n\"\"\"\r\nTest to see if mutate() and mutatePopulation() work:\r\n a = genCityList(noCities)\r\n b = genPop(popSize, a)\r\n c = sortRoutes(b)\r\n d = selection(c, eliteNo, popLeft)\r\n e = matingPool(d, b)\r\n f = breedPopulation(e, eliteNo, popSize)\r\n g = mutatePopulation(f, mutationRate)\r\n print(g)\r\n\r\n I think it works.\r\n \r\nTest to see if breed() and breedPopulation() work:\r\n a = genCityList(noCities)\r\n b = genPop(popSize, a)\r\n c = sortRoutes(b)\r\n d = selection(c, eliteNo, popLeft)\r\n e = matingPool(d, b)\r\n f = breedPopulation(e, eliteNo, popSize)\r\n print(f)\r\n \r\n I think it workds, may need later testing\r\n \r\nTest to see if matingPool() outputs path of all selected routes:\r\n a = genCityList(noCities)\r\n b = genPop(popSize, a)\r\n c = sortRoutes(b)\r\n d = selection(c, eliteNo, popLeft)\r\n e = matingPool(d, b)\r\n print(e)\r\n\r\n It works. I think. 
May need later testing\r\n\r\nTest to see if selection and selectionRandom work:\r\n a = genCityList(noCities)\r\n b = genPop(popSize, a)\r\n c = sortRoutes(b)\r\n d = selection(c, eliteNo, popLeft)\r\n print(len(d))\r\n \r\n It works.\r\n\r\nTest to see if initial population can be generated and then sorted based on fitness\r\n a = genCityList(noCities)\r\n b = genPop(popSize, a)\r\n c = sortRoutes(b)\r\n print(c)\r\n\r\n It works.\r\n\r\nTest to check if initial population can be generated:\r\n a = genCityList(noCities)\r\n b = genPop(popSize, a)\r\n print(b)\r\n\r\n It works.\r\n\r\nTest to check if Route can be generated from cityList:\r\n a = genCityList(noCities)\r\n b = genRoute(a)\r\n print(b)\r\n \r\n It works.\r\n\r\nTest for genCityList():\r\n a = genCityList(noCities)\r\n print(a)\r\n\r\n It works.\r\n\r\n\"\"\"\r\n","sub_path":"TSP GA Complete.py","file_name":"TSP GA Complete.py","file_ext":"py","file_size_in_byte":10829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"136854480","text":"def for_J():\r\n \"\"\"We are creating user defined function for alphabetical pattern of capital J with \"*\" symbol\"\"\"\r\n row=7\r\n col=5\r\n for i in range(row):\r\n for j in range(col):\r\n if i==0 or (j==2 and i<6)or i-j==5:\r\n print(\"*\",end=\" \")\r\n else:\r\n print(\" \",end=\" \")\r\n print()\r\ndef while_J():\r\n i=0\r\n while i<7:\r\n j=0\r\n while j<5:\r\n if i==0 or (j==2 and i<6)or i-j==5 :\r\n print(\"*\",end=\" \")\r\n else:\r\n print(\" \",end=\" \")\r\n j+=1\r\n i+=1\r\n print() \r\n","sub_path":"Alphabets/Capital_Alphabets/J.py","file_name":"J.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"585272525","text":"\"\"\"\nДомашнее задание №1\n\nИспользование библиотек: ephem\n\n* Установите модуль ephem\n* Добавьте в бота команду /planet, которая будет принимать на вход \n название планеты на английском, например /planet Mars\n* В функции-обработчике команды из update.message.text получите \n название планеты (подсказка: используйте .split())\n* При помощи условного оператора if и ephem.constellation научите \n бота отвечать, в каком созвездии сегодня находится планета.\n\n\"\"\"\nimport logging\nimport ephem\nimport settings\nimport datetime\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters\n\nlogging.basicConfig(format='%(name)s - %(levelname)s - %(message)s',\n level=logging.INFO,\n filename='bot.log'\n)\n\n\nPROXY = {\n 'proxy_url': 'socks5://t2.learn.python.ru:1080',\n 'urllib3_proxy_kwargs': {\n 'username': 'learn', \n 'password': 'python'\n }\n}\n\n\ndef greet_user(update, bot): #Функция, отвечающая на /start\n text = 'Напишите /planet '\n print(text)\n update.message.reply_text(text)\n\n\ndef talk_to_me(update, bot): #Функция повторюшка\n user_text = update.message.text \n print(user_text.split())\n update.message.reply_text(user_text)\n\n\ndef planet(update, bot): #Функция возвращает созвездие\n planet_name = update['message']['text'].split(' ')[1] # Вычленяю название планеты из update\n if planet_name == 'Mars': \n date = str(datetime.datetime.today()) # Сегодняшняя дата + время\n date = date.split(' ')[0].replace('-','/') # Убираю время и привожу к нужному виду\n planet_loc = ephem.Mars(date) # Локация планеты в сегодняшний день\n planet_const = ephem.constellation(planet_loc) # Название созввездия исходя из локации\n update.message.reply_text(f'{planet_name} сегодня в 
{planet_const}') # Отвечаю в телеграм \n else:\n update.message.reply_text('Попробуйте спросить про Марс')\n \n\ndef main():\n mybot = Updater(settings.API_KEY, use_context=True, request_kwargs=PROXY)\n \n dp = mybot.dispatcher\n dp.add_handler(CommandHandler(\"start\", greet_user))\n dp.add_handler(CommandHandler(\"planet\", planet))\n dp.add_handler(MessageHandler(Filters.text, talk_to_me))\n \n mybot.start_polling()\n mybot.idle()\n \n\nif __name__ == \"__main__\":\n main()\n","sub_path":"8_ephem_bot.py","file_name":"8_ephem_bot.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"231245282","text":"from django.forms import ModelForm\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.forms import UserCreationForm as BaseUserCreationForm\nfrom django.core.exceptions import ValidationError\nfrom django import forms\nfrom bootstrap_datepicker_plus import DatePickerInput\nfrom bootstrap_modal_forms.forms import BSModalForm\nfrom .models import Member, Service, Group, SERVICE_GROUP\nfrom .utils import ActivationMailFormMixin\n\n\nclass MemberForm(BSModalForm):\n class Meta:\n model = Member\n fields = ['name','english_name', 'gender','email', 'group']\n\n\nclass GroupForm(BSModalForm):\n class Meta:\n model = Group\n fields = ['name']\n\n\nclass ServiceForm(ModelForm):\n\n def __init__(self, *args, **kwargs):\n super(ServiceForm, self).__init__(*args, **kwargs)\n self.fields['service_category'] = forms.ChoiceField(choices=SERVICE_GROUP)\n\n class Meta:\n model = Service\n exclude = ('slug', 'coordinator')\n # fields = ('service_date', 'service_category', 'edit')\n attrs = {'class': 'table table-sm'}\n # widgets = {\n # 'service_date': DatePickerInput(), # default date-format %m/%d/%Y will be used\n # # 'service_category': widgets.Select(attrs={'class': 'select'}),\n # }\n\n\nclass ServiceUpdateForm(ModelForm):\n # service_category = forms.ChoiceField(choices=set([(s.service_category, s.service_category) for s in Service.objects.all()]))\n def __init__(self, *args, **kwargs):\n super(ServiceUpdateForm, self).__init__(*args, **kwargs)\n self.fields['service_category'].widget.attrs['readonly'] = True\n\n class Meta:\n model = Service\n exclude = ('slug', 'coordinator')\n # fields = ('service_date', 'service_category', 'edit')\n attrs = {'class': 'table table-sm'}\n widgets = {\n 'service_date': DatePickerInput(), # default date-format %m/%d/%Y will be used\n # 'service_category': widgets.Select(attrs={'class': 'select'}),\n # 'service_category': widget.fields\n }\n\n\nclass ResendActivationEmailForm(\n ActivationMailFormMixin, forms.Form):\n\n email = forms.EmailField()\n\n mail_validation_error = (\n 'Could not re-send activation email. '\n 'Please try again later. (Sorry!)')\n\n def save(self, **kwargs):\n User = get_user_model()\n try:\n user = User.objects.get(\n email=self.cleaned_data['email'])\n except:\n # logger.warning(\n # 'Resend Activation: No user with '\n # 'email: {} .'.format(\n # self.cleaned_data['email']))\n return None\n self.send_mail(user=user, **kwargs)\n return user\n\n\nclass UserCreationForm(\n ActivationMailFormMixin,\n BaseUserCreationForm):\n\n username = forms.CharField(\n max_length=255,\n help_text=(\n \"The name displayed on your \"\n \"public profile.\"))\n\n email = forms.EmailField(\n max_length=255,\n help_text=(\n \"The email displayed on your \"\n \"public profile.\"))\n\n mail_validation_error = (\n 'User created. 
Could not send activation '\n 'email. Please try again later. (Sorry!)')\n\n class Meta(BaseUserCreationForm.Meta):\n model = get_user_model()\n fields = ('username', 'email')\n\n def clean_username(self):\n username = self.cleaned_data['username']\n disallowed = (\n 'activate',\n 'create',\n 'disable',\n 'login',\n 'logout',\n 'password',\n 'profile',\n )\n if username in disallowed:\n raise ValidationError(\n \"A user with that username\"\n \" already exists.\")\n return username\n\n def save(self, **kwargs):\n user = super().save(commit=False)\n # valid_member = Member.objects.filter(email=user.email).count()\n\n # if not valid_member:\n # # raise ValidationError(\"You are not a LLF member. Please contact the LLF admin to add you to the group first.\")\n #\n if not user.pk:\n user.is_active = False\n send_mail = True\n else:\n send_mail = False\n user.save()\n self.save_m2m()\n # Member.objects.update_or_create(\n # username=user,\n # defaults={\n # 'username': self.cleaned_data['username'],\n # 'slug': slugify(f\"{self.cleaned_data['username']}-{self.cleaned_data['email'].split('@')[0]}\"),\n # })\n if send_mail:\n self.send_mail(user=user, **kwargs)\n return user","sub_path":"catalog/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"517360968","text":"import json\n\n__author__ = 'tea'\n\n\nclass Proposal:\n descr = None\n price = None\n stuff = None\n ref = None\n name = None\n def __init__(self, name, descr, price, stuff, ref):\n self.descr = descr\n self.price = price\n self.stuff = stuff\n self.ref = ref\n self.name = name\n\n def to_json(self):\n return json.dumps(self, default=lambda o: o.__dict__)\n","sub_path":"LittleNuGa/model/proposal.py","file_name":"proposal.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"538278957","text":"import os\n\nif not os.environ.get('SECRET_KEY'):\n try:\n with open('.domain_secret_key', 'rb') as secret:\n key = secret.read()\n except (OSError, IOError):\n key = None\n\n if not key:\n key = os.urandom(128)\n try:\n with open('.domain_secret_key', 'wb') as secret:\n secret.write(key)\n secret.flush()\n except (OSError, IOError):\n pass\n\n\nclass Config(object):\n\n SECRET_KEY = os.environ.get('SECRET_KEY') or key\n\n SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or 'sqlite:///{}/domain.db'.format(os.path.dirname(os.path.abspath(__file__)))\n\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n\n SESSION_TYPE = \"filesystem\"\n\n SESSION_FILE_DIR = \"/tmp/flask_session\"\n\n SESSION_COOKIE_HTTPONLY = True\n\n PERMANENT_SESSION_LIFETIME = 604800 # 7 days in seconds\n\n MAILFROM_ADDR = \"noreply@spanda.io\"\n\n LOG_FOLDER = os.environ.get('LOG_FOLDER') or os.path.join(os.path.dirname(os.path.abspath(__file__)), 'logs')\n\n CACHE_REDIS_URL = os.environ.get('REDIS_URL')\n if CACHE_REDIS_URL:\n CACHE_TYPE = 'redis'\n else:\n CACHE_TYPE = 'simple'\n\n ACCESSKEYID = os.environ.get('ACCESSKEYID') or \"Iet1ahxien8c\"\n ACCESSKEYSECRET = os.environ.get(\"ACCESSKEYSECRET\") or \"Vaz1laePh9Xaivai2aXie8yaopei6EekiekuupepaQuahch7ieghaiHoo0aexido\"\n\n DOMAIN = os.environ.get(\"DOMAIN\") or \"install.dog\"\n\n DINGTOKEN = os.environ.get('DINGTOKEN') or 
'https://oapi.dingtalk.com/robot/send?access_token=2d3e7d42f8e15e90665fc08f7954619f040b46f2ac77dea364db304b815f6bee'\n","sub_path":"GRDomain/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"250263031","text":"# coding:cp932\r\n# python 3\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\ndef rms(a,b):\r\n return np.sqrt(((a-b)**2).mean()*2)\r\n# ファイル読み込み\r\ndata = np.loadtxt(\"data.csv\", delimiter=\" \")\r\ndata2 = np.loadtxt(\"data2.csv\", delimiter=\" \")\r\n# x の列を抽出\r\nsamplex = data[:,0]\r\n# y の列を抽出\r\nsampley = data[:,1]\r\n# 点をプロット\r\nplt.plot(samplex, sampley, \"bo\", label=\"train\")\r\nplt.plot(data2[:,0], data2[:,1], \"ro\", label=\"test\")\r\nx = np.array([i/100 for i in range(101)])\r\n#plt.plot(x, np.sin(x*np.pi*2), \"r--\", label=\"正解\")\r\n\r\nfor M in [0,1,3,10]:\r\n # グラフフィッティング\r\n fit = np.polyfit(samplex,sampley,M) # 3次式の係数が返される\r\n func = np.poly1d(fit) # 係数を投げて関数を作る。\r\n # グラフを作成\r\n y = func(x)\r\n plt.plot(x,y,label=\"M=%d\"%M)\r\n #print(\"M={0}, rms={1}, testrms={2}\".format(M, rms(sampley,func(samplex)), rms(data2[:,1],func(data2[:,0]))))\r\n print(M)\r\n for i in fit:\r\n print(\"{:.2f}\".format(i))\r\n \r\n\r\nplt.legend()\r\n#plt.show()\r\n#plt.savefig(\"fig.png\")\r\n","sub_path":"01/src/fit.py","file_name":"fit.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"614541764","text":"import sys\nprint(sys.path)\n\nfrom bodyposition import sdf_unet256, sdf_unet256_tensorboard\nimport time\n\nsdf_type='lungs'\n\nstart_time = time.time()\n\nsdf_unet256_tensorboard.train(\n imshape=256,\n sdf_type=sdf_type,\n skip_h5=False,\n batch_size=16,\n epochs=200,\n filename_prefix='',\n validation_ids = [19, 39],\n test_ids = [20, 40],\n n_data = 40,\n)\n\nprint(f\"{sdf_type}: Training time: {time.time() - start_time} seconds\")","sub_path":"devel/metacentrum/metalungs.py","file_name":"metalungs.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"70468045","text":"from __future__ import annotations\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import PolyCollection\nfrom matplotlib.colors import Normalize, LogNorm\nimport matplotlib.animation as animation\nimport matplotlib.cm as cm\nmatplotlib.use(\"TkAgg\")\n\nfrom pyJJAsim.static_problem import StaticConfiguration\nfrom pyJJAsim.time_evolution import TimeEvolutionResult\n\n__all__ = [\"Plot\", \"CircuitPlot\", \"CircuitMovie\"]\n\n\nclass Plot:\n\n \"\"\"Plots a circuit configuration.\n\n Allows one to show node quantities, junction quantities, face quantities and vortices.\n - Node quantities are shown as colors on the nodes\n - junction quantities are displayed by arrows whose length is proportional to the quantity value\n - face quantities are shown as colors on the faces\n - vortices are displayed by symbols (concentric rings, vorticity equals nr of rings, color shows sign).\\\n\n Base class used by both CircuitPlot and CircuitMovie.\\\n\n Node quantity options:\n - \"phi\": gauge depent phases\n - \"U\": node voltage or potential\n\n Junction quantity options:\n - \"theta\": gauge_invariant_phase_difference\n - \"V\": voltage\n - \"I\": current\n - \"I_sup\": supercurrent\n - \"I_s\": current sources\n - \"EJ\": 
josephson energy\n - \"EM\": magnetic_energy\n - \"EC\": capacitive energy\n - \"Etot\": total_energy\n\n Face quantity options:\n - \"Phi: magnetic_flux\n - \"n\": vortex configuration\n - \"J\": face_current\n\n\n \"\"\"\n def __init__(self, config: StaticConfiguration | TimeEvolutionResult, time_point=0, show_vortices=True,\n vortex_diameter=0.25, vortex_color=(0, 0, 0),\n anti_vortex_color=(0.8, 0.1, 0.2), vortex_alpha=1, show_grid=True, grid_width=1,\n grid_color=(0.3, 0.5, 0.9), grid_alpha=0.5, show_colorbar=True, show_arrows=True,\n arrow_quantity=\"I\", arrow_width=0.005, arrow_scale=1, arrow_headwidth=3, arrow_headlength=5,\n arrow_headaxislength=4.5, arrow_minshaft=1, arrow_minlength=1, arrow_color=(0.2, 0.4, 0.7),\n arrow_alpha=1, show_nodes=True, node_diameter=0.2,\n node_face_color=(1,1,1), node_edge_color=(0, 0, 0), node_alpha=1, show_node_quantity=False,\n node_quantity=\"phase\", node_quantity_cmap=None, node_quantity_clim=(0, 1), node_quantity_alpha=1,\n node_quantity_logarithmic_colors=False, show_face_quantity=False, face_quantity=\"n\",\n face_quantity_cmap=None, face_quantity_clim=(0, 1), face_quantity_alpha=1,\n face_quantity_logarithmic_colors=False,\n figsize=None, title=\"\"):\n\n \"\"\"\n Constructor for Plot and handling plot options.\n \"\"\"\n self.config = config\n self.time_point = time_point\n\n if not isinstance(config, (StaticConfiguration, TimeEvolutionResult)):\n raise ValueError(\"config must be a StaticConfiguration or DynamicConfiguration object\")\n\n self.show_vortices = show_vortices\n self.vortex_diameter = vortex_diameter\n self.vortex_color = vortex_color\n self.anti_vortex_color = anti_vortex_color\n self.vortex_alpha = vortex_alpha\n self.show_grid = show_grid\n self.grid_width = grid_width\n self.grid_color = grid_color\n self.grid_alpha = grid_alpha\n self.show_colorbar = show_colorbar\n self.show_arrows = show_arrows\n self.arrow_quantity = arrow_quantity\n\n self.arrow_width = arrow_width\n self.arrow_scale = arrow_scale\n self.arrow_headwidth = arrow_headwidth\n self.arrow_headlength = arrow_headlength\n self.arrow_headaxislength = arrow_headaxislength\n self.arrow_minshaft = arrow_minshaft\n self.arrow_minlength = arrow_minlength\n\n self.arrow_color = arrow_color\n self.arrow_alpha = arrow_alpha\n self.show_nodes = show_nodes\n self.node_diameter = node_diameter\n self.node_face_color = node_face_color\n self.node_edge_color = node_edge_color\n\n self.node_alpha = node_alpha\n self.show_node_quantity = show_node_quantity\n self.node_quantity = node_quantity\n self.node_quantity_cmap = node_quantity_cmap\n self.node_quantity_clim = node_quantity_clim\n self.node_quantity_alpha = node_quantity_alpha\n self.node_quantity_logarithmic_colors = node_quantity_logarithmic_colors\n\n self.show_face_quantity = show_face_quantity\n self.face_quantity = face_quantity\n self.face_quantity_cmap = face_quantity_cmap\n self.face_quantity_clim = face_quantity_clim\n self.face_quantity_alpha = face_quantity_alpha\n self.face_quantity_logarithmic_colors = face_quantity_logarithmic_colors\n\n self.figsize = figsize if figsize is not None else [6.4, 4.8]\n self.colorbar = None\n self.title = title\n\n self.fig = None\n self.ax = None\n\n _node_quantities = {\n \"phi\": 0, \"phase\": 0, \"phases\": 0, \"U\": 1, \"potential\": 1,\n }\n\n _junction_quantities = {\n \"th\": 0, \"theta\": 0, \"phase_difference\": 0, \"gauge_invariant_phase_difference\": 0,\n \"V\": 1, \"voltage\": 1,\n \"I\": 3, \"current\": 3,\n \"Isup\": 4, \"I_sup\": 4, \"Isuper\": 4, 
\"I_super\": 4, \"supercurrent\": 4, \"super_current\": 4,\n \"I_s\": 5, \"Is\": 5, \"current_sources\": 5,\n \"EJ\": 6, \"josephson_energy\": 6, \"EM\": 7, \"magnetic_energy\": 7, \"EC\": 8, \"capacitive_energy\": 8,\n \"capacitance_energy\": 8, \"Etot\": 9, \"E_tot\": 9, \"ETot\": 9, \"total_energy\": 9, \"energy\": 9,\n }\n\n _face_quantities = {\n \"Phi\": 0, \"flux\": 0, \"magnetic_flux\": 0,\n \"n\": 2, \"vortices\": 2, \"vortex_configuration\": 2, \"face_current\": 3, \"J\": 3,\n }\n\n def _get_lims(self):\n x, y = self.config.get_circuit().get_node_coordinates()\n xmin, xmax, ymin, ymax = np.min(x), np.max(x), np.min(y), np.max(y)\n dx, dy = xmax - xmin, ymax - ymin\n D = self.node_diameter * 0.5\n return xmin - 0.05 * dx - D, xmax + 0.05 * dx + D, ymin - 0.05 * dy - D, ymax + 0.05 * dy + D\n\n def _set_axes(self, ):\n xmin, xmax, ymin, ymax = self._get_lims()\n self.ax.set_xlim(xmin, xmax)\n self.ax.set_ylim(ymin, ymax)\n self.time_label = self.ax.annotate(\"\", (xmin + 0.99 * (xmax - xmin), ymin + 0.98 * (ymax - ymin)), color=[1, 0.5, 0.2], ha='right', va='center')\n x0, y0, width, height = self.ax.get_position().bounds\n a_width = width * self.figsize[0]\n a_height = height * self.figsize[1]\n if a_width / (xmax - xmin) > a_height / (ymax - ymin):\n new_width = a_height * (xmax - xmin) / (ymax - ymin) / self.figsize[0]\n x0 = x0 + (width - new_width) / 2\n width = new_width\n else:\n new_height = a_width * (ymax - ymin) / (xmax - xmin) / self.figsize[1]\n y0 = y0 + (height - new_height) / 2\n height = new_height\n self.ax.set_position([x0, y0, width, height])\n\n def _is_static(self):\n return isinstance(self.config, StaticConfiguration)\n\n def _get_node_quantity(self):\n \"\"\"\n Get node quantity (either \"phi\": gauge dependent phases or \"U\": potential)\n \"\"\"\n if isinstance(self.node_quantity, np.ndarray):\n return self.node_quantity.flatten()\n quantity = self._node_quantities[self.node_quantity]\n if quantity == 0: # phi\n out = self.config.get_phi() if self._is_static() else self.config.get_phi(self.time_point)\n out = out.copy()\n out -= np.round(out / (np.pi * 2.0)).astype(out.dtype) * np.pi * 2.0\n return out\n if quantity == 1: # U\n return self.config.get_U(self.time_point)\n\n def _get_junction_quantity(self):\n if isinstance(self.arrow_quantity, np.ndarray):\n return self.arrow_quantity.flatten()\n quantity = self._junction_quantities[self.arrow_quantity]\n if quantity == 0: # theta\n out = self.config.get_theta() if self._is_static() else self.config.get_theta(self.time_point)\n out = out.copy()\n out -= np.round(out / (np.pi * 2.0)).astype(out.dtype) * np.pi * 2.0\n return out\n if quantity == 1: # V\n return self.config.get_V(self.time_point)\n if quantity == 3: # I\n return self.config.get_I() if self._is_static() else self.config.get_I(self.time_point)\n if quantity == 4: # I\n return self.config.get_I() if self._is_static() else self.config.get_Isup(self.time_point)\n if quantity == 5: # I_ext_J\n return self.config.problem._Is() if self._is_static() else self.config.problem._Is(self.time_point)\n if quantity == 6: # EJ\n return self.config.get_EJ() if self._is_static() else self.config.get_EJ(self.time_point)\n if quantity == 7: # EM\n return self.config.get_EM() if self._is_static() else self.config.get_EM(self.time_point)\n if quantity == 8: # EC\n return self.config.get_EC(self.time_point)\n if quantity == 9: # Etot\n return self.config.get_Etot() if self._is_static() else self.config.get_Etot(self.time_point)\n\n def _get_face_quantity(self):\n if 
isinstance(self.face_quantity, np.ndarray):\n return self.face_quantity.flatten()\n quantity = self._face_quantities[self.face_quantity]\n if quantity == 0: # Phi\n return self.config.get_flux() if self._is_static() else self.config.get_flux(self.time_point)\n if quantity == 2: # n\n return self.config.get_n() if self._is_static() else self.config.get_n(self.time_point)\n if quantity == 3: # J\n return self.config.get_J() if self._is_static() else self.config.get_J(self.time_point)\n\n def _marker_scale_factor(self):\n xlim = self.ax.get_xlim()\n x0, y0, width, height = self.ax.get_position().bounds\n return (width * self.figsize[0]) / (xlim[1] - xlim[0]) *72\n\n\n def _plot_grid(self):\n x1, y1, x2, y2 = self.config.get_circuit().get_juncion_coordinates()\n self.ax.plot(np.stack((x1, x2)), np.stack((y1, y2)), color=self.grid_color,\n alpha=self.grid_alpha, linewidth=self.grid_width, zorder=0)\n\n def _plot_nodes(self, node_quantity):\n x, y = self.config.get_circuit().get_node_coordinates()\n marker_size = self.node_diameter * self._marker_scale_factor()\n if not self.show_node_quantity:\n nodes_handle = self.ax.plot([x], [y], markeredgecolor=self.node_edge_color, markerfacecolor=self.node_face_color,\n markersize=marker_size, marker=\"o\", alpha=self.node_alpha, zorder=2)\n nodes_handle = nodes_handle[0]\n else:\n cnorm = Normalize(*self.node_quantity_clim) if not self.node_quantity_logarithmic_colors else LogNorm(*self.node_quantity_clim)\n nodes_handle = self.ax.scatter(x.flatten(), y.flatten(), s=marker_size**2, c=node_quantity, cmap=self.node_quantity_cmap,\n edgecolors=self.node_edge_color, alpha=self.node_quantity_alpha, norm=cnorm)\n if self.show_colorbar:\n label = self.node_quantity if isinstance(self.node_quantity, str) else \"\"\n self.colorbar = plt.colorbar(cm.ScalarMappable(norm=cnorm, cmap=self.node_quantity_cmap), ax=self.ax, label=label)\n return nodes_handle\n\n def _plot_arrows(self, arrow_quantity):\n I = arrow_quantity * self.arrow_scale\n x1, y1, x2, y2 = self.config.get_circuit().get_juncion_coordinates()\n xq = x1 + 0.5 * (1 - I) * (x2 - x1)\n yq = y1 + 0.5 * (1 - I) * (y2 - y1)\n dxq, dyq = I * (x2 - x1), I * (y2 - y1)\n return self.ax.quiver(xq, yq, dxq, dyq, edgecolor=self.arrow_color, facecolor=self.arrow_color,\n angles='xy', scale=1, scale_units='xy', width=self.arrow_width,\n headwidth=self.arrow_headwidth, headlength=self.arrow_headlength,\n headaxislength=self.arrow_headaxislength, minshaft=self.arrow_minshaft,\n minlength=self.arrow_minlength, alpha=self.arrow_alpha, zorder=3)\n\n def _plot_faces(self, face_quantity):\n nodes = self.config.get_circuit().get_faces()\n x, y = self.config.get_circuit().get_node_coordinates()\n verts = [np.stack((x[n], y[n]), axis=-1) for n in nodes]\n cnorm = Normalize(*self.face_quantity_clim) if not self.face_quantity_logarithmic_colors else LogNorm(*self.face_quantity_clim)\n coll = PolyCollection(verts, array=face_quantity, edgecolors='none', cmap=self.face_quantity_cmap,\n norm=cnorm, alpha=self.face_quantity_alpha, zorder=-1)\n faces_handle = self.ax.add_collection(coll)\n if self.show_colorbar and not (self.show_nodes and self.show_node_quantity):\n label = self.face_quantity if isinstance(self.face_quantity, str) else \"\"\n self.colorbar = plt.colorbar(cm.ScalarMappable(norm=cnorm, cmap=self.face_quantity_cmap), ax=self.ax, label=label)\n return faces_handle\n\n def _plot_vortices(self, n):\n marker_size = self.vortex_diameter * self._marker_scale_factor()\n xc, yc = self.config.get_circuit().get_face_centroids()\n 
ns = np.unique(n)\n vort_handles = []\n for ni in ns:\n if ni != 0:\n color = self.vortex_color if ni > 0 else self.anti_vortex_color\n na = np.abs(ni)\n for k in reversed(range(na)):\n frac = (2 * k + 1) / (2 * na - 1)\n p = self.ax.plot(xc[n == ni], yc[n == ni], markerfacecolor=color,\n markeredgecolor=color, marker=\"o\", linestyle=\"\",\n markersize=frac * marker_size, alpha=self.vortex_alpha, zorder=4)\n vort_handles += p\n if k > 0:\n frac = (2 * k) / (2 * na - 1)\n p = self.ax.plot(xc[n == ni], yc[n == ni], markerfacecolor=[1, 1, 1],\n markeredgecolor=[1, 1, 1], marker=\"o\", linestyle=\"\",\n markersize=frac * marker_size, alpha=self.vortex_alpha, zorder=4)\n vort_handles += p\n return vort_handles\n\n def test_method(self):\n pass\n\nclass CircuitPlot(Plot):\n\n def __init__(self, config: StaticConfiguration | TimeEvolutionResult, time_point=0, show_vortices=True,\n vortex_diameter=0.25, vortex_color=(0, 0, 0), anti_vortex_color=(0.8, 0.1, 0.2), vortex_alpha=1,\n show_grid=True, grid_width=1, grid_color=(0.3, 0.5, 0.9), grid_alpha=0.5, show_colorbar=True,\n show_arrows=True, arrow_quantity=\"I\", arrow_width=0.005, arrow_scale=1, arrow_headwidth=3,\n arrow_headlength=5, arrow_headaxislength=4.5, arrow_minshaft=1, arrow_minlength=1,\n arrow_color=(0.2, 0.4, 0.7), arrow_alpha=1, show_nodes=True, node_diameter=0.2,\n node_face_color=(1, 1, 1), node_edge_color=(0, 0, 0), node_alpha=1, show_node_quantity=False,\n node_quantity=\"phase\", node_quantity_cmap=None, node_quantity_clim=(0, 1), node_quantity_alpha=1,\n node_quantity_logarithmic_colors=False, show_face_quantity=False, face_quantity=\"n\",\n face_quantity_cmap=None, face_quantity_clim=(0, 1), face_quantity_alpha=1,\n face_quantity_logarithmic_colors=False, figsize=None, title=\"\"):\n\n super().__init__(config, time_point, show_vortices, vortex_diameter, vortex_color, anti_vortex_color,\n vortex_alpha, show_grid, grid_width, grid_color, grid_alpha, show_colorbar, show_arrows,\n arrow_quantity, arrow_width, arrow_scale, arrow_headwidth, arrow_headlength,\n arrow_headaxislength, arrow_minshaft, arrow_minlength, arrow_color, arrow_alpha, show_nodes,\n node_diameter, node_face_color, node_edge_color, node_alpha, show_node_quantity, node_quantity,\n node_quantity_cmap, node_quantity_clim, node_quantity_alpha, node_quantity_logarithmic_colors,\n show_face_quantity, face_quantity, face_quantity_cmap, face_quantity_clim, face_quantity_alpha,\n face_quantity_logarithmic_colors, figsize, title)\n\n self.time_point = time_point\n\n if isinstance(config, TimeEvolutionResult):\n if not config.problem.store_time_steps[self.time_point]:\n raise ValueError(\"The requested timepoint from config to plot has not been stored during \"\n \"simulation (set with the config.store_time_steps property)\")\n\n @staticmethod\n def _assert_single_configuration(data):\n if data.ndim >= 2:\n raise ValueError(\"must select single configuration\")\n\n def make(self):\n self.fig, self.ax = plt.subplots(figsize=self.figsize)\n plt.title(self.title)\n self._set_axes()\n\n # get data\n n = self.config.get_n() if self._is_static() else self.config.get_n(self.time_point)[..., 0]\n self._assert_single_configuration(n)\n node_quantity, face_quantity, arrow_quantity = None, None, None\n if self.show_nodes and self.show_node_quantity:\n node_quantity = self._get_node_quantity()\n self._assert_single_configuration(node_quantity)\n if self.show_face_quantity:\n face_quantity = self._get_face_quantity()\n self._assert_single_configuration(face_quantity)\n if 
self.show_arrows:\n arrow_quantity = self._get_junction_quantity()\n self._assert_single_configuration(arrow_quantity)\n\n # plot data\n if self.show_grid:\n self._plot_grid()\n if self.show_face_quantity:\n self._plot_faces(face_quantity)\n if self.show_nodes:\n self._plot_nodes(node_quantity)\n if self.show_arrows:\n self._plot_arrows(arrow_quantity)\n if self.show_vortices:\n self._plot_vortices(n)\n\n # return handles\n if self.colorbar is not None:\n return self.fig, self.ax, self.colorbar\n else:\n return self.fig, self.ax\n\n\nclass CircuitMovie(Plot):\n\n def __init__(self, config: TimeEvolutionResult, problem_nr=0, time_points=None, show_vortices=True, vortex_diameter=0.25,\n vortex_color=(0, 0, 0), anti_vortex_color=(0.8, 0.1, 0.2), vortex_alpha=1, show_grid=True,\n grid_width=1, grid_color=(0.3, 0.5, 0.9), grid_alpha=0.5, show_colorbar=True, show_arrows=True,\n arrow_quantity=\"I\", arrow_width=0.005, arrow_scale=1, arrow_headwidth=3, arrow_headlength=5,\n arrow_headaxislength=4.5, arrow_minshaft=1, arrow_minlength=1, arrow_color=(0.2, 0.4, 0.7),\n arrow_alpha=1, show_nodes=True, node_diameter=0.2, node_face_color=(1, 1, 1),\n node_edge_color=(0, 0, 0), node_alpha=1, show_node_quantity=False, node_quantity=\"phase\",\n node_quantity_cmap=None, node_quantity_clim=(0, 1), node_quantity_alpha=1,\n node_quantity_logarithmic_colors=False, show_face_quantity=False, face_quantity=\"n\",\n face_quantity_cmap=None, face_quantity_clim=(0, 1), face_quantity_alpha=1,\n face_quantity_logarithmic_colors=False, figsize=None, animate_interval=5, title=\"\"):\n\n super().__init__(config, time_points, show_vortices, vortex_diameter, vortex_color, anti_vortex_color, vortex_alpha,\n show_grid, grid_width, grid_color, grid_alpha, show_colorbar, show_arrows, arrow_quantity,\n arrow_width, arrow_scale, arrow_headwidth, arrow_headlength, arrow_headaxislength,\n arrow_minshaft, arrow_minlength, arrow_color, arrow_alpha, show_nodes, node_diameter,\n node_face_color, node_edge_color, node_alpha, show_node_quantity, node_quantity,\n node_quantity_cmap, node_quantity_clim, node_quantity_alpha, node_quantity_logarithmic_colors,\n show_face_quantity, face_quantity, face_quantity_cmap, face_quantity_clim, face_quantity_alpha,\n face_quantity_logarithmic_colors, figsize, title)\n\n if self.time_point is None:\n self.time_point = np.ones(self.config.problem._Nt(), dtype=bool)\n if not (self.time_point.dtype in (bool, np.bool_)):\n try:\n self.time_point = np.zeros(self.config.problem._Nt(), dtype=bool)\n self.time_point[time_points] = True\n except:\n raise ValueError(\"Invalid time_points; must be None, mask, slice or index array\")\n self.problem_nr = problem_nr\n self.time_point &= self.config.problem.store_time_steps\n self.time_points_nz = np.flatnonzero(self.time_point)\n self.animate_interval = animate_interval\n self.faces_handle, self.nodes_handle, self.arrows_handle, self.vortex_handles = None, None, None, []\n self.item = None\n self.node_quantity_data = None\n self.face_quantity_data = None\n self.arrow_quantity_data = None\n self.n_data = None\n\n def show(self):\n self.fig, self.ax = plt.subplots(figsize=self.figsize)\n self._set_axes()\n plt.title(self.title)\n\n self.n_data = self.config.get_n()[:, self.problem_nr, self.config._time_point_index(self.time_point)]\n if self.show_nodes and self.show_node_quantity:\n self.node_quantity_data = self._get_node_quantity()[:, self.problem_nr, :]\n if self.show_face_quantity:\n self.face_quantity_data = self._get_face_quantity()[:, self.problem_nr, :]\n if 
self.show_arrows:\n self.arrow_quantity_data = self._get_junction_quantity()[:, self.problem_nr, :]\n if self.show_grid:\n self._plot_grid()\n time_point_list = np.arange(self.n_data.shape[-1], dtype=int)\n self.ani = animation.FuncAnimation(self.fig, self._animate, time_point_list,\n init_func=self._init, interval=self.animate_interval, blit=True)\n if self.colorbar is not None:\n return self.ani, self.fig, self.ax, self.colorbar\n else:\n return self.ani, self.fig, self.ax\n\n def _get_time_point_mask(self, time_point):\n mask = np.zeros(self.config.problem._Nt(), dtype=bool)\n mask[time_point] = True\n return mask\n\n def _animate(self, i):\n if self.show_face_quantity:\n self.faces_handle = self._update_faces(i)\n if self.show_nodes:\n self.nodes_handle = self._update_nodes(i)\n if self.show_arrows:\n self.arrows_handle = self._update_arrows(i)\n if self.show_vortices:\n self.vortex_handles =self._plot_vortices(self.n_data[:, i])\n self.time_label.set_text(str(self.time_points_nz[i]))\n handles = [self.nodes_handle, self.arrows_handle, self.faces_handle, self.time_label] + self.vortex_handles\n return [h for h in handles if h is not None]\n\n def _init(self):\n if self.show_face_quantity:\n self.faces_handle = self._plot_faces(self.face_quantity_data[:, 0])\n if self.show_nodes and self.show_node_quantity:\n self.nodes_handle = self._plot_nodes(self.node_quantity_data[:, 0])\n if self.show_arrows:\n self.arrows_handle = self._plot_arrows(self.arrow_quantity_data[:, 0])\n if self.show_vortices:\n self.vortex_handles = self._plot_vortices(self.n_data[:, 0])\n handles = [self.faces_handle, self.nodes_handle, self.arrows_handle, self.time_label] + self.vortex_handles\n return [h for h in handles if h is not None]\n\n def _update_faces(self, i):\n face_quantity = self.face_quantity_data[:, i]\n self.faces_handle.set_array(face_quantity)\n return self.faces_handle\n\n def _update_nodes(self, i):\n if self.show_node_quantity:\n if self.node_quantity_data.ndim >= 2:\n node_quantity = self.node_quantity_data[:, i]\n else:\n node_quantity = self.node_quantity_data\n self.nodes_handle.set_array(node_quantity)\n return self.nodes_handle\n\n def _update_arrows(self, i):\n I = self.arrow_quantity_data[:, i] * self.arrow_scale\n x1, y1, x2, y2 = self.config.get_circuit().get_juncion_coordinates()\n xq, yq = x1 + 0.5 * (1 - I) * (x2 - x1), y1 + 0.5 * (1 - I) * (y2 - y1)\n U, V = I * (x2 - x1), I * (y2 - y1)\n self.arrows_handle.set_UVC(U, V)\n self.arrows_handle.X = xq\n self.arrows_handle.Y = yq\n self.arrows_handle.XY = np.concatenate((xq[:, None], yq[:, None]), axis=1)\n self.arrows_handle._offsets = self.arrows_handle.XY\n return self.arrows_handle\n\n","sub_path":"circuit_visualize.py","file_name":"circuit_visualize.py","file_ext":"py","file_size_in_byte":25164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"457636375","text":"class Song(object):\n def __init__(self, lyrics):\n self.lyrics = lyrics\n\n def sing_me_a_song(self):\n for line in self.lyrics:\n print(line)\n\nhappy_bday = Song([\"Happy birthday to you\",\n \"I don't want to get sued\",\n \"So I'll stop right there\"])\n\nbulls_on_parade = Song([\"They rally around tha family\",\n \"With pockets full of shells\"])\n\ncool_to_hate = Song(['I hate the jocks and I hate the geeks',\n 'I hate the trendies but I also hate the freaks'])\n\nhappy_bday.sing_me_a_song()\n\nbulls_on_parade.sing_me_a_song()\n\ncool_to_hate.sing_me_a_song()\n","sub_path":"ex40.py","file_name":"ex40.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"367620814","text":"#!/usr/bin/env python3\n\nimport sys\nimport os\nimport json\nimport re\nfrom pathlib import Path\n\nassert(len(sys.argv) == 3)\n\nfilepath = sys.argv[1]\noutputpath = sys.argv[2]\n\nassert(Path(filepath).exists())\n\nscripts = {}\ndirname = os.path.basename(filepath) or filepath.split('/')[-2]\npattern = re.compile(f'{dirname}-(\\w+)\\\\.js')\nfor file in os.listdir(filepath):\n match = pattern.match(file)\n if match:\n print(f\"handle script {file}\")\n with open(filepath + '/' + file, 'r') as fin:\n scripts[match.group(1)] = fin.read()\n\nobj = {}\nprint(f'handle template {dirname}.json')\nwith open(filepath + f'/{dirname}.json', 'r') as fin:\n obj = json.load(fin)\n\nobj[\"codes\"] = scripts\n\nwith open(outputpath, 'w') as fout:\n fout.write(json.dumps(obj, indent = 4, ensure_ascii=False))\n","sub_path":"data/pack_loader.py","file_name":"pack_loader.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"529139133","text":"# -*- coding=utf-8 -*-\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nimport keras\nfrom keras.models import Model\nfrom keras.layers import *\nfrom model import BasicModel\nfrom utils.utility import *\nfrom layers.Match import *\nfrom keras.utils.vis_utils import plot_model\n\nclass ConvWeakCollaboration(BasicModel):\n def __init__(self, config):\n super(ConvWeakCollaboration, self).__init__(config)\n self._name = 'WeakCollaboration'\n self.check_list = ['number_q_lstm_units', 'number_d_lstm_units', 'q_lstm_dropout', 'd_lstm_dropout', 'embed',\n 'embed_size', 'vocab_size', 'num_layers', 'hidden_sizes']\n self.embed_trainable = config['train_embed']\n self.setup(config)\n self.initializer_fc = keras.initializers.RandomUniform(minval=-0.1, maxval=0.1, seed=11)\n self.initializer_gate = keras.initializers.RandomUniform(minval=-0.01, maxval=0.01, seed=11)\n if not self.check():\n raise TypeError('[WeakCollaboration] parameter check wrong')\n print('[WeakCollaboration] init done', end='\\n')\n\n def setup(self, config):\n if not isinstance(config, dict):\n raise TypeError('parameter config should be dict:', config)\n\n self.set_default('dropout_rate', 0.)\n self.set_default('q_lstm_dropout', 0.)\n self.set_default('d_lstm_dropout', 0.)\n self.set_default('mask_zero', False)\n self.config.update(config)\n\n def build(self):\n query = Input(name=\"query\", batch_shape=[None, None], dtype='int32')\n show_layer_info('Input', query)\n doc = Input(name=\"doc\", batch_shape=[None, None], dtype='int32')\n show_layer_info('Input', doc)\n\n input_embed = self.config['vocab_size'] if self.config['mask_zero'] else self.config['vocab_size']\n embedding = Embedding(input_embed, self.config['embed_size'], weights=[self.config['embed']],\n trainable=self.embed_trainable, name=\"embeddings\",\n mask_zero=self.config['mask_zero'])\n q_embed = embedding(query)\n show_layer_info('Embedding', q_embed)\n d_embed = embedding(doc)\n show_layer_info('Embedding', d_embed)\n q_lstm_layer = Bidirectional(LSTM(self.config[\"number_q_lstm_units\"],\n dropout=self.config[\"q_lstm_dropout\"],\n recurrent_dropout=self.config[\"q_lstm_dropout\"],\n return_sequences=True),\n name=\"q_lstm\")\n d_lstm_layer = Bidirectional(LSTM(self.config[\"number_d_lstm_units\"],\n dropout=self.config[\"d_lstm_dropout\"],\n recurrent_dropout=self.config[\"d_lstm_dropout\"],\n return_sequences=True),\n name=\"d_lstm\")\n q_mat = q_lstm_layer(q_embed)\n show_layer_info('Bibirectional-LSTM', q_mat)\n d_mat = d_lstm_layer(d_embed)\n show_layer_info('Bibirectional-LSTM', d_mat)\n input_mat = Match(normalize=True)([q_mat, d_mat]) # the result is cosine similarity matrix\n show_layer_info('Match', input_mat)\n #input_mat = BatchNormalization()(input_mat)\n #input_mat = Dropout(self.config[\"dropout_rate\"])(input_mat)\n input_mat = Reshape((self.config[\"text1_maxlen\"], self.config[\"text2_maxlen\"]))(input_mat)\n show_layer_info('Match', input_mat)\n merged = Conv1D(self.config['filters'], self.config['kernel_size'],\n activation=self.config['conv_activation'], name=\"conv1\", padding='same')(input_mat)\n merged = BatchNormalization()(merged)\n merged = Dropout(self.config[\"conv_dropout\"])(merged)\n show_layer_info('Conv1D', merged)\n merged = MaxPooling1D(pool_size=self.config['pool_size'], name=\"maxPool1\")(merged)\n show_layer_info('MaxPooling1D', merged)\n merged = Conv1D(self.config['filters'], self.config['kernel_size'],\n activation=self.config['conv_activation'], name=\"conv2\", padding='same')(input_mat)\n show_layer_info('Conv1D', merged)\n merged = BatchNormalization()(merged)\n merged = Dropout(self.config[\"conv_dropout\"])(merged)\n merged = MaxPooling1D(pool_size=self.config['pool_size'], name=\"maxPool2\")(merged)\n show_layer_info('MaxPooling1D', merged)\n \"\"\"\n merged = Conv1D(self.config['filters'], self.config['kernel_size'],\n activation=self.config['conv_activation'], name=\"conv3\", padding='same')(input_mat)\n show_layer_info('Conv1D', merged)\n merged = BatchNormalization()(merged)\n merged = Dropout(self.config[\"conv_dropout\"])(merged)\n merged = MaxPooling1D(pool_size=self.config['pool_size'], name=\"maxPool3\")(merged)\n \"\"\"\n show_layer_info('MaxPooling1D', merged)\n merged = Flatten()(merged)\n\n dense = Dense(self.config[\"hidden_sizes\"][0], activation=self.config['hidden_activation'],\n name=\"MLP_combine_0\")(merged)\n show_layer_info('Dense', dense)\n for i in range(self.config[\"num_layers\"] - 1):\n dense = BatchNormalization()(dense)\n dense = Dropout(self.config[\"dropout_rate\"])(dense)\n dense = Dense(self.config[\"hidden_sizes\"][i + 1], activation=self.config['hidden_activation'],\n name=\"MLP_combine_\" + str(i + 1))(dense)\n show_layer_info('Dense', dense)\n dense = BatchNormalization()(dense)\n dense = Dropout(self.config[\"dropout_rate\"])(dense)\n # out_ = Dense(1, activation=self.config['output_activation'], name=\"MLP_out\")(dense)\n if self.config['target_mode'] == 'classification':\n out_ = Dense(2, activation=self.config['output_activation'], name=\"MLP_out\")(dense)\n elif self.config['target_mode'] in ['regression', 'ranking']:\n out_ = Dense(1, activation=self.config['output_activation'], name=\"MLP_out\")(dense)\n show_layer_info('Output', out_)\n\n model = Model(inputs=[query, doc], outputs=[out_])\n plot_model(model, to_file='../conv_wc_model_plot.png', show_shapes=True, show_layer_names=True)\n return model","sub_path":"MatchZoo/matchzoo/models/conv_weak_collaboration.py","file_name":"conv_weak_collaboration.py","file_ext":"py","file_size_in_byte":6370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"117708357","text":"import csv\r\n\r\n\r\n#keep the file open and place it in a variable\r\nwith open(\"odlr_users_csv.csv\", mode='r') as test_file:\r\n reader = csv.reader(test_file, delimiter=',')\r\n line_count = 0\r\n for row in reader:\r\n if line_count == 0:\r\n print(f'{\",\".join(row)}')\r\n line_count += 1\r\n print(f'{row[0]} {row[1]} {row[2]}')\r\n line_count += 1\r\n print(f'There are {line_count} entries')\r\n","sub_path":"CSV_Scripts/cvsReadingScript.py","file_name":"cvsReadingScript.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"112267034","text":"# -*- coding: utf-8 -*-\r\n# @Time : 2019/12/10 16:19\r\n# @Author : Ruiqi Wang\r\n\r\nimport os\r\nimport time\r\nimport pprint\r\nimport torch\r\nimport argparse\r\n\r\n\r\ndef str2bool(v):\r\n if isinstance(v, bool):\r\n return v\r\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\r\n return True\r\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\r\n return False\r\n else:\r\n raise argparse.ArgumentTypeError('Boolean value expected.')\r\n\r\n\r\ndef set_gpu(x):\r\n os.environ['CUDA_VISIBLE_DEVICES'] = x\r\n print('using gpu:', x)\r\n\r\n\r\ndef check_dir(path):\r\n '''\r\n Create directory if it does not exist.\r\n path: Path of directory.\r\n '''\r\n if not os.path.exists(path):\r\n os.mkdir(path)\r\n\r\n\r\ndef count_accuracy(logits, label):\r\n pred = torch.argmax(logits, dim=1).view(-1)\r\n label = label.view(-1)\r\n bacc = pred.eq(label).float()\r\n accuracy = 100 * pred.eq(label).float().mean()\r\n return accuracy, bacc\r\n\r\n\r\nclass Timer():\r\n def __init__(self):\r\n self.o = time.time()\r\n\r\n def measure(self, p=1):\r\n x = (time.time() - self.o) / float(p)\r\n x = int(x)\r\n if x >= 3600:\r\n return '{:.1f}h'.format(x / 3600)\r\n if x >= 60:\r\n return '{}m'.format(round(x / 60))\r\n return '{}s'.format(x)\r\n\r\n\r\ndef log(log_file_path, string):\r\n '''\r\n Write one line of log into screen and file.\r\n log_file_path: Path of log file.\r\n string: String to write in log file.\r\n '''\r\n with open(log_file_path, 'a+') as f:\r\n f.write(string + '\\n')\r\n f.flush()\r\n print(string)\r\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"138460698","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# import bibliotek\nimport sys, string\nfrom wikipedia import *\n\n# ustawienia podstawowe\nmylang = 'pl'\nfamily = 'wikipedia'\nplwiki = getSite(mylang, family)\n\n# sciaganie listy stron linkujacych do Szablon:Prowincje Włoch\npage = Page(plwiki, u'Szablon:Prowincje_Włoch')\nprowincjeWlochReferences = list(page.getReferences())\n\nfor reference in prowincjeWlochReferences:\n\tif reference.title().find(':') == -1:\n\t\tpagetext = reference.get()\n\t\tpagetext = pagetext.replace(u'Prowincje Włoch}}', u'Włochy}}')\n\t\treference.put(pagetext, u'zmieniono navibox z \"Prowincje Włoch\" na \"Włochy\"')\n","sub_path":"old/prowincjeWloch2Wlochy.py","file_name":"prowincjeWloch2Wlochy.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"16637348","text":"import heapq\nimport sys\ninput=sys.stdin.readline\n\n\n\ndef find(x):\n if parent[x] != x:\n parent[x] = find(parent[x])\n return parent[x]\n\ndef union(r_u, r_v):\n if rank[r_v] > rank[r_u]:\n parent[r_u] = r_v\n else:\n parent[r_v] = r_u\n if rank[r_v] == rank[r_u]:\n rank[r_v] += 1\n\ndef solution(n, m, h, a):\n for i in range(n):\n parent[i] = i\n rank[i] = 0\n x, value = 0, 0\n while h:\n w, u, v = heapq.heappop(h)\n ru, rv = find(u), find(v)\n if ru != rv:\n union(ru, rv)\n x, value = x+1, value+w\n if x == n-1:\n return a-value\n\n\nwhile True:\n n, m = map(int, input().split())\n if (n,m) == (0,0):\n break\n parent = dict()\n rank = dict()\n h = []\n all = 0\n for _ in range(m):\n x, y, z = map(int, input().split())\n all += z\n heapq.heappush(h, [z, x, y])\n print(solution(n, m, h, all))\n","sub_path":"MST/[6497]전력난/[6497]전력난.py","file_name":"[6497]전력난.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"108789132","text":"class Employee:\n\n num_of_emps = 0\n raise_amt = 1.04\n\n def __init__(self, first, last, pay):\n self.first = first\n self.last = last\n self.email = first + '.' + last + '@email.com' # aafu lee ni add garna milxa; sap arguments ma matra aako pardainaa\n self.pay = pay\n\n Employee.num_of_emps += 1 # Constructor ma class variable pani add garna milxa\n\n def fullname(self): # full-name nikalyo\n return f'{self.firstname} {self.lastname}'\n\n def apply_raise(self): # Apply raise garyo \n self.pay = int(self.pay * self.raise_amt) # Most important => self.... tara sabbai badauna ko lagi Employee.raise amount garako vayee huni\n\n \n\n @classmethod\n def increasPayPercent(cls, amount): # class method => (cls as the first amount) \n cls.raise_amt = amount\n\n @classmethod # Second/ Alternative constructor pani vaninxa, \n def changeString(cls, emp_str):\n first, last, pay = emp_str.split('-')\n return cls (first, last, pay ) # ALways needs to be returned ; Alternative constructor \n\n @staticmethod \n def is_workday(day):\n if day.weekday() == 5 or day.weekday() == 6:\n return False\n return True\n\n\n# emp_1 = Employee('Corey', 'Schafer', 50000) # no need for new\n\n# emp_2 = Employee('Test', 'Employee', 60000)\n\nemp_str_1 = 'John-Doe-70000'\nemp_str_2 = 'Steve-Smith-30000'\nemp_str_3 = 'Jane-Doe-90000'\n\nStrEmp1 = Employee.changeString(emp_str_1)\nprint(StrEmp1.email)\n\nprint(StrEmp1.__dict__)\n\n\n\n# print(StrEmp1.fullname())\n\n\n\n\n# Employee.increasPayPercent(1.05)\n\n# Employee.increasPayPercent(1.06) # class method call from the class\n\n# print(\"emp_1=> \", emp_1.raise_amt) \n# print(emp_1.increasPayPercent(2)) # we increased it by 2 but for rest it is 1.06\n# print( emp_1.__dict__)\n# (emp_1.apply_raise())\n# print( emp_1.__dict__)\n# print( emp_2.__dict__)\n\n\n\n\n\n#first, last, pay = emp_str_1.split('-')\n#new_emp_1 = Employee(first, last, pay)\n\nnew_emp_1 = Employee.changeString(emp_str_1)\n\n# print(\"access garna milyo constructor ko pani=>\", new_emp_1.email)\n# print(\"access garxa, so like second constructor=>\",new_emp_1.pay)\n\n\n\n# import datetime\n# my_date = datetime.date(2016, 7, 11) # 2016-07-11\n\n\n# print(Employee.is_workday(my_date))\n\nprint(Employee.num_of_emps)","sub_path":"coreySch/3-static-class.py","file_name":"3-static-class.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"251850588","text":"from icon_cli.models.Icx import Icx\nfrom icon_cli.utils import hex_to_int\n\n\nclass Cps(Icx):\n def __init__(self, network) -> None:\n super().__init__(network)\n self.CPS_CONTRACT = \"cx9f4ab72f854d3ccdc59aa6f2c3e2215dd62e879f\"\n\n def query_active_proposals(self, address):\n proposals = self.call(\n self.CPS_CONTRACT,\n \"get_active_proposals\",\n {\"_wallet_address\": address},\n )\n if len(proposals) > 0:\n for proposal in proposals:\n proposal[\"new_progress_report\"] = hex_to_int(proposal[\"new_progress_report\"]) # noqa 503\n proposal[\"last_progress_report\"] = hex_to_int(proposal[\"last_progress_report\"]) # noqa 503\n return address, proposals\n\n def query_cps_balance(self):\n balance = self.call(self.CPS_CONTRACT, \"get_remaining_fund\", None)\n return hex_to_int(balance) / 10 ** 18\n\n def query_cps_contributors(self) -> list:\n params = {\"_start_index\": 0, \"_end_index\": 100}\n contributors = self.call(self.CPS_CONTRACT, \"get_contributors\", params)\n return contributors\n\n def query_cps_preps(self):\n preps = self.call(self.CPS_CONTRACT, \"get_PReps\", None)\n for prep in preps:\n prep[\"delegated\"] = hex_to_int(prep[\"delegated\"]) / 10 ** 18\n return preps\n\n def query_period_status(self):\n period_status = self.call(self.CPS_CONTRACT, \"get_period_status\", None)\n for k, v in period_status.items():\n if k in [\n \"current_block\",\n \"next_block\",\n \"remaining_time\",\n \"period_span\",\n ]: # noqa 503\n period_status[k] = hex_to_int(v)\n return period_status\n\n def query_proposal_details(self, address):\n params = {\"_wallet_address\": address}\n proposal_details = self.call(self.CPS_CONTRACT, \"get_proposal_detail_by_wallet\", params)\n proposals = proposal_details[\"data\"]\n for proposal in proposals:\n for k, v in proposal.items():\n if v[:2] == \"0x\" and len(v) != 42:\n proposal[k] = hex_to_int(v)\n return proposals\n","sub_path":"icon_cli/dapps/cps/Cps.py","file_name":"Cps.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"363026466","text":"import random\ndef get_number_or_alphabet():\n number = 1\n lowercase = 2\n uppercase = 3\n result_type = random.randint(1, 3)\n if result_type == number:\n result = chr(48 + random.randint(0, 9))\n elif result_type == lowercase:\n result = chr(97 + random.randint(0, 25))\n elif result_type == uppercase:\n result = chr(65 + random.randint(0, 25))\n return result\n\n\ndef get_string(length):\n result = ''\n for i in range(length):\n result += get_number_or_alphabet()\n return result\n","sub_path":"Python/010/manger/frame/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"602241994","text":"#!/usr/bin/env python\n#\n# Created by: Shawn Chen \n#\n# LICENSE\n# This program is free software; you can redistribute it and/or modify it\n# under the terms of the GNU General Public License as published by the Free\n# Software Foundation; either version 2 of the License, or(at your option)\n# any later version.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or\n# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for\n# more details at http://www.gnu.org/copyleft/gpl.html\n#\n# Brief\n# Solves LeetCode Problem 53: Maximum Subarray\n\nclass Solution(object):\n def maxSubArray(self, nums):\n \"\"\"\n DP\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if not nums:\n return 0\n sums, maxsum = [0] * len(nums), nums[0]\n sums[0] = nums[0]\n for i in xrange(1, len(nums)):\n sums[i] = max(sums[i - 1] + nums[i], nums[i])\n if sums[i] > maxsum:\n maxsum = sums[i]\n return maxsum\n\n def maxSubArray2(self, nums):\n \"\"\"\n Divide and conquer\n :type nums: List[int]\n :rtype: int\n \"\"\"\n def divide(nums, left, right, maxsum):\n if left > right:\n return -999999999\n mid = (left + right) / 2\n lmax = divide(nums, left, mid - 1, maxsum)\n rmax = divide(nums, mid + 1, right, maxsum)\n maxsum = max(lmax, rmax, maxsum)\n m2lmax, m2rmax = 0, 0\n mysum = 0\n for i in xrange(mid - 1, left - 1, -1):\n mysum += nums[i]\n m2lmax = max(m2lmax, mysum)\n for i in xrange(mid + 1, right + 1):\n mysum += nums[i]\n m2rmax = max(m2rmax, mysum)\n maxsum = max(maxsum, m2lmax + m2rmax + nums[mid])\n return maxsum\n if not nums:\n return 0\n return divide(nums, 0, len(nums) - 1, 0)\n","sub_path":"Problem53/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"340783407","text":"import pytest\nfrom functools import reduce\n\n@pytest.mark.skip(\"New SR TE model is not available\")\ndef test_bgp_sr_te_1000_policies(api):\n \"\"\"\n Test BGP SRTE Policy configuration applied properly for 1000 policies\n\n Validate the configuration against RestPy\n \"\"\"\n BGPV4_SR_TE = {\n \"PolicyType\": \"ipv4\",\n \"Distinguisher\": 1,\n \"PolicyColor\": 1,\n \"EndPointV4\": \"10.10.10.2\",\n \"SetNextHop\": \"manually\",\n \"SetNextHopIpType\": \"ipv4\",\n \"Ipv4NextHop\": \"10.10.10.2\",\n }\n BGPV4_SR_TE_TUNNEL = {\n \"PrefValue\": 400,\n \"BindingSIDType\": \"sid4\",\n \"SID4Octet\": 483001,\n \"UseAsMPLSLabel\": \"true\",\n }\n\n BGPV4_SR_TE_TUNNEL_SEGMENTS_LIST = {\n \"Count\": 1000,\n \"NumberOfSegmentsV4\": 5,\n \"EnWeight\": \"True\",\n \"Weight\": 1,\n }\n\n BGPV4_SR_TE_TUNNEL_SEGMENTS = {\n \"SegmentType\": \"mplssid\",\n \"Label\": [1018001, 432999, 1048333, 1048561, 432001],\n }\n\n config = api.config()\n\n # setup port container\n p1 = config.ports.port(name=\"p1\")[-1]\n\n # setup device container\n d = config.devices.device(name=\"d\", container_name=p1.name)[-1]\n\n # setup ethernet\n eth = d.ethernet\n eth.name = \"e\"\n eth.mac = \"00:01:00:00:00:01\"\n\n # setup ipv6\n ip = eth.ipv4\n ip.name = \"i4\"\n ip.address = \"10.10.10.1\"\n ip.gateway = \"10.10.10.2\"\n ip.prefix = 32\n\n # setup bgp basic\n bgp = ip.bgpv4\n bgp.name = \"b4\"\n bgp.router_id = \"193.0.0.1\"\n bgp.as_number = 65511\n bgp.as_type = \"ebgp\"\n bgp.as_number_set_mode = bgp.DO_NOT_INCLUDE_AS\n bgp.local_address = \"10.10.10.1\"\n bgp.dut_address = \"10.10.10.2\"\n\n # setup bgp advanced\n bgp.advanced.hold_time_interval = 90\n bgp.advanced.keep_alive_interval = 30\n\n # setup bgp sr te policy\n for i in range(1, 1001):\n policy = bgp.sr_te_policies.bgpsrtepolicy()[-1]\n policy.policy_type = policy.IPV4\n policy.distinguisher = BGPV4_SR_TE[\"Distinguisher\"]\n policy.color = i\n policy.ipv4_endpoint = BGPV4_SR_TE[\"EndPointV4\"]\n\n hop = policy.next_hop\n hop.next_hop_mode = hop.MANUAL\n hop.next_hop_address_type = hop.IPV4\n hop.ipv4_address = BGPV4_SR_TE[\"Ipv4NextHop\"]\n\n # setup tunnel tlv\n tunnel = policy.tunnel_tlvs.bgptunneltlv(active=True)[-1]\n\n # setup tunnel tlv segment lists\n seglist = tunnel.segment_lists.bgpsegmentlist(active=True)[-1]\n seglist.segment_weight = 1\n\n # setup preference sub tlv\n pref_sub_tlv = tunnel.preference_sub_tlv\n pref_sub_tlv.preference = BGPV4_SR_TE_TUNNEL[\"PrefValue\"]\n\n # setup binding sub tlv\n bind_sub_tlv = tunnel.binding_sub_tlv\n bind_sub_tlv.binding_sid_type = bind_sub_tlv.FOUR_OCTET_SID\n bind_sub_tlv.four_octet_sid = BGPV4_SR_TE_TUNNEL[\"SID4Octet\"]\n bind_sub_tlv.bsid_as_mpls_label = True\n\n # setup segment list segments\n for label in BGPV4_SR_TE_TUNNEL_SEGMENTS[\"Label\"]:\n seg = seglist.segments.bgpsegment(active=True)[-1]\n seg.segment_type = seg.MPLS_SID\n seg.mpls_label = label\n\n api.set_config(config)\n\n validate_sr_te_config(\n api,\n BGPV4_SR_TE,\n BGPV4_SR_TE_TUNNEL,\n BGPV4_SR_TE_TUNNEL_SEGMENTS_LIST,\n BGPV4_SR_TE_TUNNEL_SEGMENTS,\n )\n\n\ndef validate_sr_te_config(\n api,\n BGPV4_SR_TE,\n BGPV4_SR_TE_TUNNEL,\n BGPV4_SR_TE_TUNNEL_SEGMENTS_LIST,\n BGPV4_SR_TE_TUNNEL_SEGMENTS,\n):\n \"\"\"\n Validate BGP SRTE Config\n \"\"\"\n\n ixnetwork = api._ixnetwork\n bgpv4 = (\n ixnetwork.Topology.find()\n .DeviceGroup.find()\n .Ethernet.find()\n .Ipv4.find()\n .BgpIpv4Peer.find()\n )\n\n assert (bgpv4.CapabilitySRTEPoliciesV4.Values)[0] == \"true\"\n\n bgpv4_sr_te = (\n ixnetwork.Topology.find()\n .DeviceGroup.find()\n .Ethernet.find()\n .Ipv4.find()\n .BgpIpv4Peer.find()\n .BgpSRTEPoliciesListV4\n )\n for attr in BGPV4_SR_TE:\n if attr == \"PolicyType\":\n assert [BGPV4_SR_TE[attr] for i in range(1, 1001)] == getattr(\n bgpv4_sr_te, attr\n ).Values\n elif attr == \"PolicyColor\":\n assert [i for i in range(1, 1001)] == (\n [int(value) for value in getattr(bgpv4_sr_te, attr).Values]\n )\n elif attr == \"Distinguisher\":\n assert BGPV4_SR_TE[attr] == int(\n (getattr(bgpv4_sr_te, attr).Values)[0]\n )\n else:\n assert BGPV4_SR_TE[attr] == (getattr(bgpv4_sr_te, attr).Values)[0]\n\n bgpv4_sr_te_tunnel = bgpv4_sr_te.BgpSRTEPoliciesTunnelEncapsulationListV4\n for attr in BGPV4_SR_TE_TUNNEL:\n if attr in [\"PrefValue\", \"SID4Octet\"]:\n assert [BGPV4_SR_TE_TUNNEL[attr] for i in range(1, 1001)] == (\n [\n int(value)\n for value in getattr(bgpv4_sr_te_tunnel, attr).Values\n ]\n )\n else:\n assert [BGPV4_SR_TE_TUNNEL[attr] for i in range(1, 1001)] == (\n getattr(bgpv4_sr_te_tunnel, attr).Values\n )\n\n bgpv4_sr_te_tunnel_seg_lists = (\n bgpv4_sr_te_tunnel.BgpSRTEPoliciesSegmentListV4\n )\n for attr in BGPV4_SR_TE_TUNNEL_SEGMENTS_LIST:\n if attr == \"Weight\":\n assert BGPV4_SR_TE_TUNNEL_SEGMENTS_LIST[attr] == int(\n (getattr(bgpv4_sr_te_tunnel_seg_lists, attr).Values)[0]\n )\n else:\n assert BGPV4_SR_TE_TUNNEL_SEGMENTS_LIST[attr] == (\n getattr(bgpv4_sr_te_tunnel_seg_lists, attr)\n )\n\n bgpv4_sr_te_tunnel_segments = (\n bgpv4_sr_te_tunnel_seg_lists.BgpSRTEPoliciesSegmentsCollectionV4\n )\n for attr in BGPV4_SR_TE_TUNNEL_SEGMENTS:\n if attr == \"Label\":\n lg = [BGPV4_SR_TE_TUNNEL_SEGMENTS[attr] for i in range(1, 1001)]\n assert reduce(lambda x, y: x + y, lg) == (\n [\n int(value)\n for value in getattr(\n bgpv4_sr_te_tunnel_segments, attr\n ).Values\n ]\n )\n else:\n assert [\n BGPV4_SR_TE_TUNNEL_SEGMENTS[attr] for i in range(1, 5001)\n ] == getattr(bgpv4_sr_te_tunnel_segments, attr).Values\n\n\nif __name__ == \"__main__\":\n pytest.main([\"-sv\", __file__])\n","sub_path":"tests/bgp/test_bgp_sr_te_1000_policies.py","file_name":"test_bgp_sr_te_1000_policies.py","file_ext":"py","file_size_in_byte":6409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"169608099","text":"# Copyright (c) 2021, Apple Inc. All rights reserved.\n#\n# Use of this source code is governed by a BSD-3-clause license that can be\n# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause\n\nfrom coremltools.converters.mil.input_types import InputType\nfrom coremltools.converters.mil.mil import Builder as mb, types\nfrom coremltools.converters.mil.mil.ops.defs._utils import parse_einsum_equation\nfrom coremltools.converters.mil.mil.types.symbolic import any_symbolic, is_symbolic\n\n\ndef _reverse_input_einsum_eq(equation):\n \"\"\"\n Reverse the input order of the einsum eqaution\n e.g.:\n input : \"nchw,nwhu->nchu\"\n returns : \"nwhu,nchw->nchu\"\n \"\"\"\n input_output_strings = equation.split('->')\n assert len(input_output_strings) == 2, \"invalid equation\"\n input_strings = input_output_strings[0].split(',')\n assert len(input_strings) == 2, \"invalid equation\"\n equation = input_strings[1] + ',' + input_strings[0] + '->' + input_output_strings[1]\n return equation\n\n\ndef build_einsum_mil(a_var, b_var, equation, name):\n \"\"\"\n Get MIL variables as input and build a variable using MIL builder, that\n contains the output of the einsum equation\n\n :param a_var:\n - var\n - first input variable\n :param b_var:\n - var\n - second input variable\n :param equation:\n - str\n - the einsum equation\n :param name:\n - str\n - name tp be assigned to the output var\n\n :return:\n - var\n - output var that contains the einsum result\n \"\"\"\n\n ## TODO: rdar://73851694 (Update einsum op translation to support generic cases) \n\n parsed_vectors = parse_einsum_equation(equation)\n equation_rev = _reverse_input_einsum_eq(equation)\n parsed_vectors_rev = parse_einsum_equation(equation_rev)\n\n def _swap(a, b):\n return b, a\n\n if parsed_vectors == ([0,1,2,3],[0,1,4,3],[0,1,2,4]) or parsed_vectors_rev == ([0,1,2,3],[0,1,4,3],[0,1,2,4]): # equation == \"bnqd,bnkd->bnqk\"\n if parsed_vectors_rev == ([0,1,2,3],[0,1,4,3],[0,1,2,4]):\n a_var, b_var = _swap(a_var, b_var)\n x = mb.matmul(x=a_var, y=b_var, transpose_x=False, transpose_y=True, name=name)\n elif parsed_vectors == ([0,1,2],[2,3],[0,1,3]) or parsed_vectors_rev == ([0,1,2],[2,3],[0,1,3]): # equation == \"abc,cd->abd\"\n if parsed_vectors_rev == ([0,1,2],[2,3],[0,1,3]):\n a_var, b_var = _swap(a_var, b_var)\n x = mb.matmul(x=a_var, y=b_var, transpose_x=False, transpose_y=False, name=name)\n elif parsed_vectors == ([0,1,2],[2,3,4],[0,1,3,4]) or parsed_vectors_rev == ([0,1,2],[2,3,4],[0,1,3,4]): # equation == \"abc,cde->abde\"\n if parsed_vectors_rev == ([0,1,2],[2,3,4],[0,1,3,4]):\n a_var, b_var = _swap(a_var, b_var)\n x_1 = mb.reshape(x=a_var, shape=[a_var.shape[0] * a_var.shape[1], a_var.shape[2]])\n x_2 = mb.reshape(x=b_var, shape=[b_var.shape[0], b_var.shape[1] * b_var.shape[2]])\n x = mb.matmul(x=x_1, y=x_2, transpose_x=False, transpose_y=False)\n x = mb.reshape(\n x=x, shape=[a_var.shape[0], a_var.shape[1], b_var.shape[1], b_var.shape[2]], name=name\n )\n elif parsed_vectors == ([0,1,2,3],[0,4,2,3],[0,2,4,1]) or parsed_vectors_rev == ([0,1,2,3],[0,4,2,3],[0,2,4,1]): # equation == \"BTNH,BFNH->BNFT\"\n if parsed_vectors_rev == ([0,1,2,3],[0,4,2,3],[0,2,4,1]):\n a_var, b_var = _swap(a_var, b_var)\n x_1 = mb.transpose(x=a_var, perm=[0, 2, 1, 3])\n x_2 = mb.transpose(x=b_var, perm=[0, 2, 1, 3])\n x = mb.matmul(x=x_2, y=x_1, transpose_x=False, transpose_y=True, name=name)\n elif parsed_vectors == ([0,1,2,3],[0,3,1,4],[0,2,1,4]) or parsed_vectors_rev == ([0,1,2,3],[0,3,1,4],[0,2,1,4]): # equation == \"BNFT,BTNH->BFNH\"\n if parsed_vectors_rev == ([0,1,2,3],[0,3,1,4],[0,2,1,4]):\n a_var, b_var = _swap(a_var, b_var)\n b_var = mb.transpose(x=b_var, perm=[0, 2, 1, 3])\n x = mb.matmul(x=a_var, y=b_var, transpose_x=False, transpose_y=False)\n x = mb.transpose(x=x, perm=[0, 2, 1, 3], name=name)\n elif parsed_vectors == ([0,1,2,3],[2,3,4],[0,1,4]) or parsed_vectors_rev == ([0,1,2,3],[2,3,4],[0,1,4]): # equation == \"abcd,cde->abe\"\n if parsed_vectors_rev == ([0,1,2,3],[2,3,4],[0,1,4]):\n a_var, b_var = _swap(a_var, b_var)\n x_1 = mb.reshape(x=a_var, shape=[a_var.shape[0], a_var.shape[1], a_var.shape[2] * a_var.shape[3]])\n x_2 = mb.reshape(x=b_var, shape=[b_var.shape[0] * b_var.shape[1], b_var.shape[2]])\n x = mb.matmul(x=x_1, y=x_2, transpose_x=False, transpose_y=False, name=name)\n elif parsed_vectors == ([0,1,2,3],[0,3,2,4],[0,1,2,4]) or parsed_vectors_rev == ([0,1,2,3],[0,3,2,4],[0,1,2,4]): # equation == \"nchw,nwhu->nchu\"\n if parsed_vectors == ([0,1,2,3],[0,3,2,4],[0,1,2,4]):\n x = mb.einsum(values=(a_var, b_var), equation=equation, name=name)\n else:\n x = mb.einsum(values=(b_var, a_var), equation=equation_rev, name=name)\n elif parsed_vectors == ([0,1,2],[2,1,3],[0,1,3]) or parsed_vectors_rev == ([0,1,2],[2,1,3],[0,1,3]): # equation == \"chw,whu->chu\"\n if parsed_vectors == ([0,1,2],[2,1,3],[0,1,3]):\n x = mb.einsum(values=(a_var, b_var), equation=equation, name=name)\n else:\n x = mb.einsum(values=(b_var, a_var), equation=equation_rev, name=name)\n else:\n raise NotImplementedError(\n \"Einsum unsupported equation format: \", equation\n )\n\n return x\n\n\ndef is_symbolic_dim_in_prog(prog):\n '''\n Takes in a MIL program object, checks if any of the tensors in it contain a symbolic dimension.\n Returns true if it does.\n\n :param prog: coremltools.converters.mil.Program\n :return: bool\n '''\n def _does_block_contain_symbolic_shape(block):\n for op in block.operations:\n for b in op.blocks:\n if _does_block_contain_symbolic_shape(b):\n return True\n for out in op.outputs:\n if types.is_tensor(out.sym_type):\n shape = out.sym_type.get_shape()\n if any_symbolic(shape):\n return True\n elif types.is_scalar(out.sym_type) or types.is_str(out.sym_type):\n if is_symbolic(out.val):\n return True\n elif types.is_list(out.sym_type):\n if types.is_tensor(out.elem_type):\n if any_symbolic(out.elem_type.get_shape()):\n return True\n else:\n raise NotImplementedError(\"\\'{}\\' type in a list not handled\".format(out.elem_type))\n else:\n raise NotImplementedError(\"\\'{}\\' type is not handled\".format(out.sym_type))\n return False\n\n for f in prog.functions.values():\n if _does_block_contain_symbolic_shape(f):\n return True\n return False\n\n\ndef get_output_names(outputs):\n \"\"\"\n :param: list[ct.TensorType/ct.ImageType]\n :return: list[str]\n \"\"\"\n output_names = None\n if outputs is not None:\n assert all([isinstance(t, InputType) for t in outputs]), \\\n \"outputs must be a list of ct.ImageType or ct.TensorType\"\n output_names = [t.name for t in outputs]\n if all([name is None for name in output_names]):\n output_names = None\n return output_names","sub_path":"coremltools/converters/mil/frontend/_utils.py","file_name":"_utils.py","file_ext":"py","file_size_in_byte":7428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"201972151","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 9 22:46:24 2017\r\n\r\n@author: Administrator\r\n\"\"\"\r\nimport pickle\r\nimport numpy as np\r\n\r\ndef get_average_core(l_actor,actor_dict):\r\n l_core=[]\r\n for one in l_actor:\r\n if one in actor_dict:\r\n l_core.append(actor_dict[one])\r\n if len(l_core)>0:\r\n return int(sum(l_core)/len(l_core))\r\n else:\r\n return 0\r\n\r\ndef stringToNum(filmName,cost,theme,director,actor,releaseDate,competition,topic,screen3,screen30=None):\r\n #name=['month', 'director', 'actor', 'genre', 'screen 3day', 'screen 30day', 'search', 'competition', 'budget','boxoffice']\r\n with open(r'director.pkl','rb') as f:\r\n director_dict=pickle.load(f)\r\n with open(r'actor.pkl','rb') as f:\r\n actor_dict=pickle.load(f)\r\n with open(r'genre.pkl','rb') as f:\r\n genre_dict=pickle.load(f)\r\n l=[]\r\n l.append(int(releaseDate.split('.')[1]))\r\n if director in director_dict:\r\n l.append(round(director_dict[director]))\r\n else:\r\n l.append(0)\r\n l.append(get_average_core(actor.split(','),actor_dict))\r\n l.append(get_average_core(theme.split(','),genre_dict))\r\n l.append(int(screen3))\r\n if screen30!=None:\r\n l.append(int(screen30))\r\n l.append(int(topic))\r\n l.append(int(competition))\r\n l.append(int(cost))\r\n return l\r\n\r\n\r\ndef predict(l):\r\n with open('tree.pkl','rb') as f:\r\n tree=pickle.load(f)\r\n l_all=[l]\r\n return tree.classify(np.array(l_all))[0]\r\n\r\nif __name__=='__main__':\r\n l=stringToNum(filmName=\"速度与激情8\",cost='175',theme=\"动作,犯罪\",director=\"F·加里·格雷\", actor=\"范·迪塞尔,道恩·强森,查理兹·塞隆,杰森·斯坦森,米歇尔·罗德里格兹\", \\\r\n releaseDate='2017.4.14',competition='3',topic='80',screen3='520000',screen30='2333999')\r\n label=predict(l)\r\n print(label)\r\n \r\n l_for_labels=[0,0.5,1,1.5,2,3,4,5,10,20,1000]\r\n if label<9:\r\n print(\"{}亿~{}亿\".format(l_for_labels[label],l_for_labels[label+1]))\r\n else:\r\n print(\"{}+亿\".format(l_for_labels[label]))\r\n \r\n ","sub_path":"predictBoxOffice/decision_tree/model_predict_3.py","file_name":"model_predict_3.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"248966511","text":"import serial\nfrom collections import deque\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation\n\n\nclass Communicator(object):\n\t''' \n\tCommunicates with the board and reads data\n\t'''\n\tdef __init__(self, port, maxLen, baud=115200):\n\t\ttry:\n\t\t\t# Not really useful atm\n\t\t\tself.arduino = serial.Serial(port, baud)\n\t\t\tprint(\"Arduino connected!\")\n\t\texcept serial.SerialException as err:\n\t\t\tprint(\"Error: Arduino not found\")\n\t\tself.maxLen = maxLen\n\t\tself.lx = deque([0.0]*maxLen)\n\t\tself.ly = deque([0.0]*maxLen)\n\t\tself.fig = plt.figure()\n\t\t#plt.ion()\n\t\t#line, = plt.plot(self.ly)\n\t\tself.start_time = time.time()\n\t\t#self.ax = plt.axes(xlim=(0, 10), ylim=(0, 5))\n\t\tself.ax = plt.axes(ylim=(0, 5))\n\t\t#l = plt.plot([0, maxLen, 0, 1024])\n\t\tself.l, = self.ax.plot([],[],'r-')\n\t\t\n\t\t#for i in range(1500):\n\t\t\t#ani = animation.FuncAnimation(self.fig, self.update, interval = 0.01, blit = True)\n\t\t\t#self.update()\n\t\t\t#line.set_xdata(self.lx)\n\t\t\t#line.set_ydata(self.ly)\n\t\t\t#plt.draw()\n\t\t\t#plt.show()\n\t\t\t#self.ax.autoscale(True,'both',True)\t\t\t\n\n\t\tself.read_with_threshold()\n\t\t#print(self.lx)\n\t\t#print(self.ly)\n\t\tplt.plot(list(self.lx),list(self.ly))\n\t\tplt.show()\n\n\tdef test(self):\n\t\tself.arduino.flushInput()\n\t\tfor i in range(1000):\n\t\t\tself.lx.appendleft(i)\n\t\t\tplt.plot(self.lx,self.ly, 'b')\n\t\t\ttime.sleep(0.05)\n\n\tdef update(self, num):\n\t\tval = self.readSerial()\n\t\tt = time.time() - self.start_time\n\t\tself.lx = self.addToBuf(self.lx, t)\n\t\tself.ly = self.addToBuf(self.ly, val)\n\t\tself.l.set_data(list(self.lx), list(self.ly))\n\t\treturn self.l,\n\n\tdef read_with_threshold(self):\n\t\tval = self.readSerial()\n\t\twhile val < 1:\n\t\t\tval = self.readSerial()\n\t\t\tprint(val)\n\t\t\tpass\n\t\tif val > 1:\n\t\t\tt0 = time.time()\n\t\t\tt = time.time()\n\t\t\twhile (t - t0) < 0.5:\n\t\t\t\tself.lx = self.addToBuf(self.lx, (t-t0)*1000)\n\t\t\t\tself.ly = self.addToBuf(self.ly, val)\n\t\t\t\tval = self.readSerial()\n\t\t\t\tt = time.time()\n\t\t\t\t\n\tdef readSerial(self):\n\t\tval = \"\"\n\t\ttry:\n\t\t\tval = bytes.decode(self.arduino.read(4))\n\t\t\t#print(val)\n\t\t\tvalue = float(val.strip())\n\t\t\tvalue = (value / 1024) * 5\n\t\t\t#print(val)\n\t\t\tprint(\"Ok\")\n\t\t\treturn value\n\t\texcept ValueError:\n\t\t\tprint(\"Error {0}\".format(val,), val.strip())\n\t\t\treturn self.readSerial()\n\n\n\tdef addToBuf(self, l, v):\n\t\t\n\t\tif len(l) < self.maxLen:\n\t\t\tl.append(v)\n\t\telse:\n\t\t\tl.pop()\n\t\t\tl.appendleft(v)\n\t\treturn l\n\n\t# def test(self):\n\t# \t#print(type(self.arduino))\n\t# \ttime.sleep(0.2)\n\t# \tself.arduino.flushInput()\n\t# \tcounter = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\t# \tfor i in range(10000):\n\t# \t\ttry:\n\t# \t\t\tval = bytes.decode(self.arduino.readline())\n\t# \t\t\tval = int(val.split('\\n')[0])\n\t# \t\t\tcounter[val - 10] += 1\n\t# \t\texcept ValueError:\n\t# \t\t\tprint(\"Warning: Invalid value\")\n\t# \tfor iter in counter:\n\t# \t\tprint(iter)\n\nif __name__ == '__main__':\n\ta = Communicator('/dev/ttyACM0', 1000, 57600)\n\n\n\t#a.test()","sub_path":"old_files/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"568848098","text":"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport inspect\nimport typeguard\n\n\ndef _bail_if_private(candidate: str, allow_private: False):\n    if (\n        candidate.startswith(\"_\")\n        and not allow_private\n        and not (candidate.startswith(\"__\") and candidate.endswith(\"__\"))\n    ):\n        raise ValueError(\n            f\"It's disencouraged to patch/mock private interfaces.\\n\"\n            \"This would result in way too coupled tests and implementation. \"\n            \"Please consider using patterns like dependency injection instead. \"\n            \"If you really need to do this use the allow_private=True argument.\"\n        )\n\n\ndef _validate_function_signature(argspec: inspect.FullArgSpec, args, kwargs):\n    type_errs = []\n    for idx in range(0, len(args)):\n        if argspec.args:\n            arg = argspec.args[idx]\n            try:\n                __validate_argument_type(argspec.annotations, arg, args[idx])\n            except TypeError as te:\n                type_errs.append(te)\n    for k, v in kwargs.items():\n        try:\n            __validate_argument_type(argspec.annotations, k, v)\n        except TypeError as te:\n            type_errs.append(te)\n    return type_errs\n\n\ndef __validate_argument_type(annotations, argname, value):\n    type_information = annotations.get(argname)\n    if type_information:\n        typeguard.check_type(argname, value, type_information)\n","sub_path":"testslide/lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"16070113","text":"from sac2019 import SACAgent as SAC\nimport numpy as np\nimport os\nimport torch\nimport gym\nimport pybullet_envs\nfrom gym import wrappers\n\nif not os.path.exists(\"./models\"):\n os.makedirs(\"./models\")\n\n\ndef evaluate_policy(policy, eval_episodes=10):\n avg_reward = 0.\n for _ in range(eval_episodes):\n obs = env.reset()\n done = False\n while not done:\n action = policy.get_action(obs)\n obs, reward, done, _ = env.step(action)\n avg_reward += reward\n avg_reward /= eval_episodes\n print(\"\\n------------------------------------------\")\n print(f\"SAMPLE: Evaluation Step: {avg_reward}\")\n print(\"------------------------------------------\\n\")\n\n\ndef evaluate_policy_deterministic(policy, eval_episodes=10):\n avg_reward = 0.\n for _ in range(eval_episodes):\n obs = env.reset()\n done = False\n while not done:\n action = policy.get_action_deterministic(obs)\n obs, reward, done, _ = env.step(action)\n avg_reward += reward\n avg_reward /= eval_episodes\n print(\"\\n------------------------------------------\")\n print(f\"DETERMINISTIC: Evaluation Step: {avg_reward}\")\n print(\"------------------------------------------\\n\")\n\n\nenv_name = \"Walker2DBulletEnv-v0\"\nenv = gym.make(env_name)\nstart_timesteps = 10_000\neval_freq = 5_000\nmax_timesteps = 500_000\nbatch_size = 100\nmax_episode_steps = env._max_episode_steps\n\n\ntotal_timesteps = 0\nepisode_reward = 0\nepisode_timesteps = 0\nepisode_num = 0\ndone = False\nobs = env.reset()\n\ngamma = 0.99\ntau = 0.005\nalpha = 0.2\na_lr = 1e-3\nq_lr = 1e-3\np_lr = 1e-3\nbuffer_maxlen = 1_000_000\n\nseed = 0\ntorch.manual_seed(seed)\nnp.random.seed(seed)\nenv.seed(seed)\n\n\npolicy = SAC(env, gamma, tau, alpha, q_lr, p_lr, a_lr, buffer_maxlen)\n\nwhile total_timesteps < max_timesteps:\n\n if total_timesteps < start_timesteps:\n action = env.action_space.sample()\n else:\n action = policy.get_action(obs)\n\n new_obs, reward, done, _ = env.step(action)\n episode_reward += reward\n done_bool = 0.0 if episode_timesteps + 1 == env._max_episode_steps else float(done)\n policy.replay_buffer.add(obs, action, reward, new_obs, done_bool)\n obs = new_obs\n\n episode_timesteps += 1\n total_timesteps += 1\n\n if done:\n if total_timesteps >= start_timesteps:\n policy.train(episode_timesteps, batch_size)\n print(\"Total Timesteps: {} Episode Timesteps {} Episode Num: {} Reward: {}\".format(total_timesteps, episode_timesteps, episode_num, episode_reward))\n obs = env.reset()\n episode_reward = 0\n episode_timesteps = 0\n episode_num += 1\n\n if total_timesteps % eval_freq == 0:\n evaluate_policy(policy)\n evaluate_policy_deterministic(policy)\n policy.save_checkpoint('models/actor', 'models/critic')\n","sub_path":"01-walker/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"562887062","text":"import cv2\nimport matplotlib.pyplot as plt\nfrom skimage.feature import hog\nfrom skimage import data, exposure\nfrom glob import glob\nimport os\nimport numpy as np\n\n\nsource = 'cropped/*.png'\nx = []\ny = []\nfor file in glob(source):\n image = cv2.imread(file)\n size = (320,320)\n image = cv2.resize(image, size)\n\n fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),\n cells_per_block=(1, 1), visualize=True, multichannel=True)\n\n '''\n #stuff for visualizing the hog features\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex=True, sharey=True)\n\n ax1.axis('off')\n ax1.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n ax1.set_title('Input image')\n\n hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 10))\n \n ax2.axis('off')\n ax2.imshow(hog_image_rescaled, cmap=plt.cm.gray)\n ax2.set_title('Histogram of Oriented Gradients')\n plt.show()\n '''\n x.append(fd)\n y.append(1)\n\nsource = 'croppedwrong/*.png'\nfor file in glob(source):\n image = cv2.imread(file)\n size = (320,320)\n image = cv2.resize(image, size)\n\n fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),\n cells_per_block=(1, 1), visualize=True, multichannel=True)\n x.append(fd)\n y.append(0)\nx = np.asarray(x)\ny = np.asarray(y)\nnp.save(\"images\", x)\nnp.save(\"labels\", y)\n","sub_path":"createHOGTrainigSets.py","file_name":"createHOGTrainigSets.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"392212746","text":"import pdal\nimport json\nimport geopandas as gpd\nfrom shapely.geometry import Polygon, Point\nimport sys, os\nsys.path.append(os.path.abspath(os.path.join('../scripts')))\n\nfrom app_logger import App_Logger\nfrom file_handler import FileHandler\n\nclass Fetch3depData:\n\n def __init__(self, public_data_url = \"https://s3-us-west-2.amazonaws.com/usgs-lidar-public/\", pipeline_json_path=\"./getdata.json\") -> None:\n self.logger = App_Logger().get_logger(__name__)\n self.file_handler = FileHandler()\n self.pipeline_json = self.file_handler.read_json(pipeline_json_path)\n self.public_data_url = public_data_url\n self.input_epsg = 3857\n\n def get_polygon_boundaries(self, polygon: Polygon):\n polygon_df = gpd.GeoDataFrame([polygon], columns=['geometry'])\n\n polygon_df.set_crs(epsg=self.output_epsg, inplace=True)\n polygon_df['geometry'] = polygon_df['geometry'].to_crs(epsg=self.input_epsg)\n minx, miny, maxx, maxy = polygon_df['geometry'][0].bounds\n\n polygon_input = 'POLYGON(('\n xcords, ycords = polygon_df['geometry'][0].exterior.coords.xy\n for x, y in zip(list(xcords), list(ycords)):\n polygon_input += f'{x} {y}, '\n polygon_input = polygon_input[:-2]\n polygon_input += '))'\n\n print(polygon_input)\n print(f\"({[minx, maxx]},{[miny,maxy]})\")\n\n return f\"({[minx, maxx]},{[miny,maxy]})\", polygon_input\n\n def get_pipeline(self, region: str, polygon: Polygon, output_filename: str = \"temp\"):\n boundaries, polygon_input = self.get_polygon_boundaries(polygon)\n\n full_dataset_path = f\"{self.public_data_url}{region}/ept.json\"\n\n self.pipeline_json['pipeline'][0]['filename'] = full_dataset_path\n self.pipeline_json['pipeline'][0]['bounds'] = boundaries\n self.pipeline_json['pipeline'][1]['polygon'] = polygon_input\n self.pipeline_json['pipeline'][3]['out_srs'] = f'EPSG:{self.output_epsg}'\n self.pipeline_json['pipeline'][4]['filename'] = \"../data/laz/\" + output_filename + \".laz\"\n self.pipeline_json['pipeline'][5]['filename'] = \"../data/tif/\" + output_filename + \".tif\"\n\n pipeline = pdal.Pipeline(json.dumps(self.pipeline_json))\n\n return pipeline\n\n def run_pipeline(self, polygon: Polygon, epsg, region: str = \"IA_FullState\"):\n self.output_epsg = epsg\n pipeline = self.get_pipeline(region, polygon)\n\n try:\n pipeline.execute()\n self.logger.info(f'Pipeline executed successfully.')\n return pipeline\n except RuntimeError as e:\n self.logger.exception('Pipeline execution failed')\n print(e)\n\n def make_geo_df(self, arr):\n geometry_points = [Point(x, y) for x, y in zip(arr[\"X\"], arr[\"Y\"])]\n elevetions = arr[\"Z\"]\n df = gpd.GeoDataFrame(columns=[\"elevation\", \"geometry\"])\n df['elevation'] = elevetions\n df['geometry'] = geometry_points\n df = df.set_geometry(\"geometry\")\n df.set_crs(self.output_epsg, inplace=True)\n return df\n\n def get_data(self, polygon: Polygon, epsg):\n pipeline = self.run_pipeline(polygon, epsg)\n arr = pipeline.arrays[0]\n return self.make_geo_df(arr)\n\nif(__name__ == '__main__'):\n MINX, MINY, MAXX, MAXY = [-93.756155, 41.918015, -93.756055, 41.918115]\n polygon = Polygon(((MINX, MINY), (MINX, MAXY), (MAXX, MAXY), (MAXX, MINY), (MINX, MINY)))\n data_fetcher = Fetch3depData()\n print(data_fetcher.get_data(polygon, epsg=4326))","sub_path":"scripts/.ipynb_checkpoints/fetch-checkpoint.py","file_name":"fetch-checkpoint.py","file_ext":"py","file_size_in_byte":3511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"353876632","text":"i = 0\nspisok = [1]\nresult = []\nwhile sum(spisok) != 0:\n if i == 0: spisok[i] = int(input())\n else:\n spisok.append(int(input()))\n i+= 1\n\nresult = [num * num for num in spisok]\nprint(sum(result))\n","sub_path":"stepic_exercise/exercise/ex33.py","file_name":"ex33.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"463820216","text":"#!/usr/bin/env python3\r\n\"\"\"House version 1.3\r\n Python 3.7.2\"\"\"\r\n\r\n\r\nimport count_down\r\nfrom time import sleep\r\nimport web\r\n\r\n\r\ndef porch():\r\n \"\"\"porch\"\"\"\r\n while True:\r\n porch_options = ['1 Front', '2 Exit']\r\n print(' \\n'.join(porch_options))\r\n porch_choice = input('Choose an option. ')\r\n if porch_choice in '1':\r\n living_room()\r\n elif porch_choice in '2':\r\n raise SystemExit\r\n else:\r\n print('Invalid answer!')\r\n\r\n\r\ndef stairs():\r\n \"\"\"stairs\"\"\"\r\n while True:\r\n for i in range(1, 7):\r\n print(i)\r\n hall_choice = input(\"\"\"\\nYou're in the upstairs hall.\r\n \\rChoose a door. \"\"\")\r\n if hall_choice in ['1', '2', '3', '5']:\r\n print('This door is locked.')\r\n elif hall_choice in ['4', '6']:\r\n print('This room is empty.')\r\n else:\r\n living_room()\r\n\r\n\r\ndef kitchen():\r\n \"\"\"kitchen\"\"\"\r\n print('The kitchen is being remodeled. Come back later.')\r\n living_room()\r\n\r\n\r\ndef basement():\r\n \"\"\"basement\"\"\"\r\n while True:\r\n laundry_option = ['1 Yes', '2 No']\r\n print(' \\n'.join(laundry_option))\r\n laundry = input('Do you want to do laundry? ')\r\n if laundry in '1':\r\n quarters = int(input('How many quarters do you have? '))\r\n if quarters < 8:\r\n print(f'{quarters} is not enough money.')\r\n elif quarters >= 8:\r\n print('Washing!')\r\n sleep(3)\r\n print('Drying!')\r\n sleep(6)\r\n print('Done!')\r\n else:\r\n living_room()\r\n\r\n\r\ndef living_room():\r\n \"\"\"living_room\"\"\"\r\n while True:\r\n room_select = ['1 Kitchen', '2 Stairs', '3 Porch', '4 Basement',\r\n '5 Browse', '6 Rest']\r\n print(' \\n'.join(room_select))\r\n room_choice = input(\"\"\"\\nYou're in the living room.\r\n \\rChoose a room or activity. \"\"\")\r\n if room_choice in '1':\r\n kitchen()\r\n elif room_choice in '2':\r\n stairs()\r\n elif room_choice in '3':\r\n porch()\r\n elif room_choice in '4':\r\n basement()\r\n elif room_choice in '5':\r\n web.web_site(room_choice)\r\n elif room_choice in '6':\r\n count_down.count(room_choice)\r\n else:\r\n print('Invalid Answer.')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n porch()\r\n","sub_path":"house.py","file_name":"house.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"637462509","text":"# coding: utf-8\n\n# Demonstrator / keine Fehlerbehandlung\n\nimport cherrypy\nimport datetime\nimport time\n\nfrom p3.app.database import Database_cl\nfrom p3.app.view import View_cl\n\n# Method-Dispatching!\n\n# Übersicht Anforderungen / Methoden\n\n\"\"\"\nAnforderung GET \n-----------------------------------------\nprolist/ Auswertung Fehler \n nach Projekt/Komponente/Status \n als Liste anfordern \n-----------------------------------------------------------------------------\nkatlist/ Auswertung Fehler nach\n Kategorie/Status als Liste\n anfordern \n\"\"\"\n\n\n# ----------------------------------------------------------\nclass Prolist_cl(object):\n exposed = True\n\n def __init__(self, path):\n self.db = Database_cl(path)\n self.view_o = View_cl()\n\n @cherrypy.tools.json_out()\n def GET(self):\n data = {'projects': self.db.readFile('project.json')['data'],\n 'components': self.db.readFile('component.json')['data'],\n 'bugs': self.db.readFile('bug.json')['data']}\n reportData = []\n reportList = []\n for p in data['projects']:\n components = []\n for c in data['components']:\n if int(c['project']) == int(p['id']):\n bugs = []\n for b in data['bugs']:\n if int(b['component']) == int(c['id']):\n reportList.append({'project': p['title'], 'component': c['name'], 'bug': b['startdesc'],\n 'status': b['type']})\n bugs.append({'desc': b['startdesc'], 'type': b['type'],'startdate':b['startdate'],'enddate':b['enddate'], 'diff': self.days_between(b['startdate'],b['enddate'])})\n components.append({'name': c['name'], 'childs': bugs})\n reportData.append({'title': p['title'], 'childs': components})\n report = {'data': reportData}\n\n return report\n\n def days_between(self,d1, d2):\n if d2 is None:\n return '-'\n d1 = datetime.datetime.strptime(d1, \"%Y-%m-%d\")\n d2 = datetime.datetime.strptime(d2, \"%Y-%m-%d\")\n return str(abs((d2 - d1).days))+' Tage'\n\nclass Katlist_cl(object):\n exposed = True\n\n def __init__(self, path):\n self.db = Database_cl(path)\n self.view_o = View_cl()\n\n @cherrypy.tools.json_out()\n def GET(self):\n data = {'bug_category': self.db.readFile('bug_category.json')['data'],\n 'bugs': self.db.readFile('bug.json')['data']}\n reportData = []\n reportList = []\n for c in data['bug_category']:\n bugs = []\n for b in data['bugs']:\n if (int(c['id']) in b['bug_category']):\n reportList.append({'category': c['title'], 'bug': b['startdesc'], 'status': b['type']})\n bugs.append({'desc': b['startdesc'], 'type': b['type'],'startdate':b['startdate'],'enddate':b['enddate'], 'diff': self.days_between(b['startdate'],b['enddate'])})\n reportData.append({'category': c['title'], 'childs':bugs})\n report = {'data': reportData}\n return report\n\n def days_between(self,d1, d2):\n if d2 is None:\n return '-'\n d1 = datetime.datetime.strptime(d1, \"%Y-%m-%d\")\n d2 = datetime.datetime.strptime(d2, \"%Y-%m-%d\")\n return str(abs((d2 - d1).days))+' Tage'\n# EOF\n","sub_path":"p3/app/objects/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":3524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"307885515","text":"import requests, re, time\nfrom lxml import etree\n\nclass Spider_CSS:\n def __init__(self):\n self.url = 'http://glidedsky.com/level/web/crawler-css-puzzle-1?page={}'\n self.headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',\n 'Cookie': '******'\n }\n self.totle_num = 0\n\n def get_str(self, url):\n r = requests.get(url, headers=self.headers)\n content_str = r.content.decode()\n return content_str\n\n\n def parse_content_str(self, content_str):\n html = etree.HTML(content_str)\n divs_list = html.xpath('//div[@class=\"col-md-1\"]')\n return divs_list\n\n\n def parse_divs_list(self, divs_list, content_str):\n for divs in divs_list:\n div_list = divs.xpath('./div')\n\n if len(div_list) < 3:\n for div in div_list:\n text = div.xpath(\"./text()\")\n if not text:\n class_name = div.xpath(\"./@class\")[0]\n num1 = re.findall(r\"\\.{}\\:before\\s*.*?\\s*content\\:\\\"(\\d*)\\\"\".format(class_name), content_str, re.S)[0]\n print(num1)\n self.totle_num += int(num1)\n print('总数', self.totle_num)\n\n else:\n if len(div_list) == 4:\n div_list = div_list[1:]\n number = [-1, -1, -1]\n for i in range(0, len(div_list)):\n div = div_list[i]\n class_name = div.xpath(\"./@class\")[0]\n data = div.xpath(\"./text()\")[0]\n left = re.findall(r\"\\.{}\\s.*?\\sleft\\:(.*?)em\".format(class_name), content_str)\n if not left:\n number[i] = data\n\n else:\n index = i + int(left[0])\n number[index] = data\n num2 = \"\".join(number)\n print(num2)\n self.totle_num += int(num2)\n print('总数', self.totle_num)\n\n\n def run(self):\n num = 1\n while True:\n next_url = self.url.format(num)\n content_str = self.get_str(next_url)\n divs_list = self.parse_content_str(content_str)\n self.parse_divs_list(divs_list,content_str)\n print('第 {} 页采集完成'.format(num))\n time.sleep(0.3)\n num += 1\n\n if len(divs_list)<12:\n break\n\nif __name__ == '__main__':\n kunpeng = Spider_CSS()\n kunpeng.run()\n","sub_path":"Glidedsky/CSS.py","file_name":"CSS.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"343110432","text":"from keras.datasets import fashion_mnist\nfrom keras.layers import Conv2D, MaxPooling2D, Dense, Flatten\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom data_fashion import Data\n\nimport sys, os, inspect\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(currentdir)\nsys.path.insert(0, parentdir)\n\nfrom cnn import CNN\n\ndef make_data():\n # Load data and inspect\n data = Data(*fashion_mnist.load_data(), onehot=(\"y_train\", \"y_test\"))\n\n # Reshaping of input data\n data.X_train = data.X_train.reshape(-1, 28, 28, 1)\n data.X_test = data.X_test.reshape(-1, 28, 28, 1)\n\n # Scaling the input data\n data.scale(X=1/255)\n\n # Splitting into training and validation data sets (80% and 20% respectively)\n data.make_validation_set(val_size=0.2)\n \n return data\n\nif __name__ == \"__main__\":\n data = make_data()\n\n cnn = CNN([\n Conv2D(32, kernel_size=(3,3), activation=\"linear\", input_shape=(28, 28, 1), padding=\"same\"),\n LeakyReLU(alpha=0.1),\n MaxPooling2D((2, 2), padding=\"same\"),\n Conv2D(64, (3, 3), activation=\"linear\", padding=\"same\"),\n LeakyReLU(alpha=0.1),\n MaxPooling2D(pool_size=(2, 2), padding=\"same\"),\n Conv2D(128, (3, 3), activation=\"linear\", padding=\"same\"),\n LeakyReLU(alpha=0.1),\n MaxPooling2D(pool_size=(2, 2), padding=\"same\"),\n Flatten(),\n Dense(128, activation=\"linear\"),\n LeakyReLU(alpha=0.1),\n Dense(data.n_classes, activation=\"softmax\")\n ])\n\n cnn.train(data)\n\n cnn.dump(\"models/fashion_mnist_cnn\")","sub_path":"src/fashion_mnist/make_model.py","file_name":"make_model.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"553668968","text":"from fenics import *\n\nmesh = UnitSquareMesh(32,32)\nV = FunctionSpace(mesh, \"Lagrange\", 2)\n\n# Define Dirichlet boundary (x = 0 or x = 1)\ndef boundary(x, on_boundary):\n return on_boundary\n\n# Define boundary condition\nu0 = Constant(0.0)\nbc = DirichletBC(V, u0, boundary)\n\n\n# Define variational problem\nu = TrialFunction(V)\nv = TestFunction(V)\nC = Expression(\"(2+1.8*sin(2*pi*x[0]))/(2+1.8*cos(2*pi*x[1]))+(2+sin(2*pi*x[1]))/(2+1.8*cos(2*pi*x[0]))\",degree=2)\na = C*inner((grad(u)), grad(v))*dx\nL = -C*grad(v)[0]*dx\n\n# Compute solution\nu = Function(V)\nsolve(a == L, u, bc,solver_parameters={\"linear_solver\":\"mumps\"})\n\nhom=assemble(C*dx(mesh));\nhom = hom + assemble(C*grad(u)[0]*dx(mesh))\nprint(hom)\n","sub_path":"Dirichlet/Function1/DirichletCorrectorP.py","file_name":"DirichletCorrectorP.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"282360659","text":"alphabet = \"abcdefghijklmnopqrstuvwxyz\"\r\npartialOne = \"\"\r\npartialTwo = \"\"\r\nnewAlphabet = \"\"\r\nnewMessage = \"\"\r\nmessage = input(\"Please enter a secret message: \").lower()\r\nkey = int(input(\"Please enter a number to shift by: \"))\r\n\r\nif key == 0:\r\n newAlphabet = alphabet\r\nelif key > 0:\r\n partialOne = alphabet[:key]\r\n partialTwo = alphabet[key:]\r\n newAlphabet = partialTwo + partialOne\r\nelse:\r\n partialOne = alphabet[:(26 + key)]\r\n partialTwo = alphabet[(26 + key):]\r\nfor i in range(0, len(message)):\r\n index = alphabet.find(message[i])\r\n if index < 0:\r\n newMessage += message[i]\r\n else:\r\n newMessage += newAlphabet[index]\r\nprint(newMessage)\r\n","sub_path":"learning/idtech/Python/CaeserCipher/Cipher.py","file_name":"Cipher.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"330513433","text":"import datetime\nimport fileinput\nimport re\nfrom typing import List\nimport unittest\n\nBASE_PARSER = re.compile(r'\\[([^]]+)\\] (.*)')\nGUARD_PARSER = re.compile(r'Guard #([0-9]+) begins shift')\n\n\nclass RawEvent(object):\n def __init__(self, time: datetime.datetime, desc: str):\n self.time = time\n self.desc = desc\n\n def __eq__(self, other):\n return (self.time == other.time and\n self.desc == other.desc)\n\n def __repr__(self):\n return 'RawEvent({}, {})'.format(self.time, self.desc)\n\n\nclass Event(object):\n def __init__(self, time: datetime.datetime, guard_num: int):\n self._guard_num = guard_num\n self._time = time\n\n def get_time(self):\n return self._time\n\n def get_guard_num(self):\n return self._guard_num\n\n def __eq__(self, other):\n return (self._time == other.get_time() and\n self._guard_num == other.get_guard_num())\n\n def __repr__(self):\n return 'Event({}, {})'.format(self._time, self._guard_num)\n\n\nclass OnDuty(Event):\n def __init__(self, time: datetime.datetime, guard_num: int):\n super(OnDuty, self).__init__(time, guard_num)\n\n def __repr__(self):\n return 'OnDuty({}, {})'.format(self._time, self._guard_num)\n\n\nclass Sleep(Event):\n def __init__(self, time: datetime.datetime, guard_num: int):\n super(Sleep, self).__init__(time, guard_num)\n\n def __repr__(self):\n return 'Sleep({}, {})'.format(self._time, self._guard_num)\n\n\nclass Wake(Event):\n def __init__(self, time: datetime.datetime, guard_num: int):\n super(Wake, self).__init__(time, guard_num)\n\n def __repr__(self):\n return 'Wake({}, {})'.format(self._time, self._guard_num)\n\n\nclass Window(object):\n def __init__(self, guard_num: int, start_min: int, end_min: int):\n self.guard_num = guard_num\n self.start_min = start_min\n self.end_min = end_min\n\n def __eq__(self, other):\n return (self.guard_num == other.guard_num and\n self.start_min == other.start_min and\n self.end_min == other.end_min)\n\n def __repr__(self):\n return 'Window({}, {}, {})'.format(self.guard_num, self.start_min, self.end_min)\n\n\ndef parse_line_raw(line: str) -> RawEvent:\n timestamp_str, event_str = BASE_PARSER.findall(line)[0]\n timestamp = datetime.datetime.strptime(timestamp_str, '%Y-%m-%d %H:%M')\n return RawEvent(timestamp, event_str)\n\n\ndef parse_line(raw_event: RawEvent, curr_guard_num: int) -> Event:\n if 'Guard' in raw_event.desc:\n return OnDuty(raw_event.time, int(GUARD_PARSER.findall(raw_event.desc)[0]))\n elif raw_event.desc == 'falls asleep':\n return Sleep(raw_event.time, curr_guard_num)\n elif raw_event.desc == 'wakes up':\n return Wake(raw_event.time, curr_guard_num)\n\n\ndef parse(lines: List[str]) -> List[Event]:\n raw_events = []\n for line in lines:\n raw_event = parse_line_raw(line)\n raw_events.append(raw_event)\n\n raw_events = sorted(raw_events, key=lambda x: x.time)\n\n curr_guard_num = None\n events = []\n for raw_event in raw_events:\n event = parse_line(raw_event, curr_guard_num)\n events.append(event)\n if isinstance(event, OnDuty):\n curr_guard_num = event.get_guard_num()\n return events\n\n\ndef build_windows(events: List[Event]) -> List[Window]:\n guards = {}\n windows = []\n for event in events:\n if isinstance(event, Sleep):\n guards[event.get_guard_num()] = event.get_time().minute\n elif isinstance(event, Wake):\n windows.append(Window(\n event.get_guard_num(),\n guards[event.get_guard_num()],\n event.get_time().minute))\n del guards[event.get_guard_num()]\n return windows\n\n\ndef find_sleepiest_minute(windows: List[Window], guard_num: int) -> (int, int):\n minutes = [0] * 60\n for window in windows:\n if window.guard_num == guard_num:\n for i in range(window.start_min, window.end_min):\n minutes[i] += 1\n\n freq_minute = -1\n times_slept = -1\n for i in range(len(minutes)):\n if minutes[i] > times_slept:\n times_slept = minutes[i]\n freq_minute = i\n\n return freq_minute, times_slept\n\n\ndef find_sleepiest_guard_minute(windows: List[Window]) -> (int, int):\n guards = set([x.guard_num for x in windows])\n\n sleepiest = dict((g, find_sleepiest_minute(windows, g)) for g in guards)\n\n guard = -1\n minute = -1\n times_slept = -1\n for g, (min, times) in sleepiest.items():\n if times > times_slept:\n times_slept = times\n minute = min\n guard = g\n\n return guard, minute\n\n\ndef process(lines: List[str]) -> int:\n events = parse(lines)\n windows = build_windows(events)\n sleepiest_minute, sleepiest_guard = find_sleepiest_guard_minute(windows)\n return sleepiest_guard * sleepiest_minute\n\n\nif __name__ == '__main__':\n lines = []\n for line in fileinput.input():\n lines += [line.strip()]\n\n print(process(lines))\n\n\nclass Test042(unittest.TestCase):\n def test_parse_line_raw(self):\n self.assertEqual(\n parse_line_raw('[1518-11-01 00:00] Guard #10 begins shift'),\n RawEvent(datetime.datetime(1518, 11, 1, 0, 0), 'Guard #10 begins shift')\n )\n\n def test_parse_line__guard_init(self):\n self.assertEqual(\n parse_line(\n RawEvent(datetime.datetime(1518, 11, 1, 0, 0), 'Guard #10 begins shift'),\n None),\n OnDuty(datetime.datetime(1518, 11, 1, 0, 0), 10))\n\n def test_parse_line__guard_subsequent(self):\n self.assertEqual(\n parse_line(\n RawEvent(datetime.datetime(1518, 11, 1, 0, 0), 'Guard #10 begins shift'),\n 5),\n OnDuty(datetime.datetime(1518, 11, 1, 0, 0), 10))\n\n def test_parse_line__sleep(self):\n self.assertEqual(\n parse_line(\n RawEvent(datetime.datetime(1518, 11, 1, 0, 5), 'falls asleep'),\n 10),\n Sleep(datetime.datetime(1518, 11, 1, 0, 5), 10))\n\n def test_parse_line__wake(self):\n self.assertEqual(\n parse_line(\n RawEvent(datetime.datetime(1518, 11, 1, 0, 25), 'wakes up'),\n 10),\n Wake(datetime.datetime(1518, 11, 1, 0, 25), 10))\n\n def test_parse(self):\n self.assertEqual(\n parse([\n '[1518-11-01 00:00] Guard #10 begins shift',\n '[1518-11-01 00:05] falls asleep',\n '[1518-11-01 00:25] wakes up',\n '[1518-11-01 23:58] Guard #99 begins shift',\n '[1518-11-02 00:40] falls asleep',\n '[1518-11-02 00:50] wakes up'\n ]),\n [\n OnDuty(datetime.datetime(1518, 11, 1, 0, 0), 10),\n Sleep(datetime.datetime(1518, 11, 1, 0, 5), 10),\n Wake(datetime.datetime(1518, 11, 1, 0, 25), 10),\n OnDuty(datetime.datetime(1518, 11, 1, 23, 58), 99),\n Sleep(datetime.datetime(1518, 11, 2, 0, 40), 99),\n Wake(datetime.datetime(1518, 11, 2, 0, 50), 99)\n ]\n )\n\n def test_parse__out_of_order(self):\n self.assertEqual(\n parse([\n '[1518-11-01 00:05] falls asleep',\n '[1518-11-01 23:58] Guard #99 begins shift',\n '[1518-11-01 00:00] Guard #10 begins shift',\n '[1518-11-01 00:25] wakes up',\n '[1518-11-02 00:50] wakes up',\n '[1518-11-02 00:40] falls asleep',\n ]),\n [\n OnDuty(datetime.datetime(1518, 11, 1, 0, 0), 10),\n Sleep(datetime.datetime(1518, 11, 1, 0, 5), 10),\n Wake(datetime.datetime(1518, 11, 1, 0, 25), 10),\n OnDuty(datetime.datetime(1518, 11, 1, 23, 58), 99),\n Sleep(datetime.datetime(1518, 11, 2, 0, 40), 99),\n Wake(datetime.datetime(1518, 11, 2, 0, 50), 99)\n ]\n )\n\n def test_build_windows(self):\n self.assertEqual(\n build_windows([\n OnDuty(datetime.datetime(1518, 11, 1, 0, 0), 10),\n Sleep(datetime.datetime(1518, 11, 1, 0, 5), 10),\n Wake(datetime.datetime(1518, 11, 1, 0, 25), 10),\n OnDuty(datetime.datetime(1518, 11, 1, 23, 58), 99),\n Sleep(datetime.datetime(1518, 11, 2, 0, 40), 99),\n Wake(datetime.datetime(1518, 11, 2, 0, 50), 99)\n ]),\n [\n Window(10, 5, 25),\n Window(99, 40, 50)\n ]\n )\n\n def test_find_sleepiest_minute(self):\n self.assertEqual(\n find_sleepiest_minute(\n [\n Window(10, 5, 25),\n Window(10, 30, 55),\n Window(99, 
40, 50),\n Window(10, 24, 29),\n Window(99, 36, 46),\n Window(99, 45, 55)\n ],\n 10\n ),\n (24, 2)\n )\n\n def test_find_sleepiest_guard_minute(self):\n self.assertEqual(\n find_sleepiest_guard_minute([\n Window(10, 5, 25),\n Window(10, 30, 55),\n Window(99, 40, 50),\n Window(10, 24, 29),\n Window(99, 36, 46),\n Window(99, 45, 55)\n ]),\n (99, 45)\n )\n\n def test_process(self):\n self.assertEqual(\n process([\n '[1518-11-01 00:00] Guard #10 begins shift',\n '[1518-11-01 00:05] falls asleep',\n '[1518-11-01 00:25] wakes up',\n '[1518-11-01 00:30] falls asleep',\n '[1518-11-01 00:55] wakes up',\n '[1518-11-01 23:58] Guard #99 begins shift',\n '[1518-11-02 00:40] falls asleep',\n '[1518-11-02 00:50] wakes up',\n '[1518-11-03 00:05] Guard #10 begins shift',\n '[1518-11-03 00:24] falls asleep',\n '[1518-11-03 00:29] wakes up',\n '[1518-11-04 00:02] Guard #99 begins shift',\n '[1518-11-04 00:36] falls asleep',\n '[1518-11-04 00:46] wakes up',\n '[1518-11-05 00:03] Guard #99 begins shift',\n '[1518-11-05 00:45] falls asleep',\n '[1518-11-05 00:55] wakes up'\n ]),\n 4455\n )\n","sub_path":"04/main_042.py","file_name":"main_042.py","file_ext":"py","file_size_in_byte":10504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"94513763","text":"# encoding: utf-8\n\n\"\"\"\n.. codeauthor:: Tsuyoshi Hombashi \n\"\"\"\n\nimport datetime\nfrom dateutil.tz import tzoffset\nimport pytest\nimport six\n\nfrom dataproperty import TypeConversionError\nfrom dataproperty import convert_value\nfrom dataproperty import is_nan\nfrom dataproperty.converter import IntegerConverter\nfrom dataproperty.converter import FloatConverter\nfrom dataproperty.converter import DateTimeConverter\n\n\nnan = float(\"nan\")\ninf = float(\"inf\")\n\n\nclass Test_IntegerConverter_convert:\n\n @pytest.mark.parametrize([\"value\", \"is_convert\", \"expected\"], [\n [0.1, True, 0],\n [-0.1, False, -0.1],\n [1, False, 1],\n [-1, True, -1],\n [.5, True, 0],\n [0., False, 0],\n [True, True, 1],\n [True, False, 1],\n [str(six.MAXSIZE), True, six.MAXSIZE],\n [str(six.MAXSIZE), False, str(six.MAXSIZE)],\n [str(-six.MAXSIZE), True, -six.MAXSIZE],\n [str(-six.MAXSIZE), False, str(-six.MAXSIZE)],\n ])\n def test_normal(self, value, is_convert, expected):\n assert IntegerConverter(value, is_convert).convert() == expected\n\n @pytest.mark.parametrize([\"value\", \"expected\"], [\n [\"\", TypeConversionError],\n [None, TypeConversionError],\n [\"test\", TypeConversionError],\n [\"0.0\", TypeConversionError],\n [\"0.1\", TypeConversionError],\n [\"-0.1\", TypeConversionError],\n [\"1e-05\", TypeConversionError],\n [inf, TypeConversionError],\n ])\n def test_exception(self, value, expected):\n with pytest.raises(expected):\n IntegerConverter(value).convert()\n\n\nclass Test_FloatConverter_convert:\n\n @pytest.mark.parametrize([\"value\", \"expected\"], [\n [0.0, 0.0],\n [0.1, 0.1],\n [-0.1, -0.1],\n [1, 1.0],\n [-1, -1.0],\n [\"0.0\", 0.0],\n [\"0.1\", 0.1],\n [\"-0.1\", -0.1],\n [\"1\", 1.0],\n [\"-1\", -1.0],\n [.5, .5],\n [0., 0.0],\n [\"1e-05\", 1e-05],\n [inf, inf],\n [True, 1.0],\n ])\n def test_normal(self, value, expected):\n assert FloatConverter(value).convert() == expected\n\n @pytest.mark.parametrize([\"value\", \"expected\"], [\n [\"\", TypeConversionError],\n [None, TypeConversionError],\n [\"test\", TypeConversionError],\n ])\n def test_exception(self, value, expected):\n with pytest.raises(expected):\n FloatConverter(value).convert()\n\n\nclass Test_DateTimeConverter_convert:\n\n 
@pytest.mark.parametrize([\"value\", \"expected\"], [\n [\n datetime.datetime(\n 2017, 3, 22, 10, 0, tzinfo=tzoffset(None, 32400)),\n datetime.datetime(\n 2017, 3, 22, 10, 0, tzinfo=tzoffset(None, 32400)),\n ],\n [\n \"2017-03-22T10:00:00+0900\",\n datetime.datetime(2017, 3, 22, 10, 0, tzinfo=tzoffset(None, 32400))\n ],\n ])\n def test_normal(self, value, expected):\n dt_converter = DateTimeConverter(value)\n\n assert dt_converter.convert() == expected\n\n @pytest.mark.parametrize([\"value\", \"expected\"], [\n [\n \"2015-03-08T00:00:00-0400\",\n \"2015-03-08 00:00:00-04:00\",\n ],\n [\n \"2015-03-08T12:00:00-0400\",\n \"2015-03-08 12:00:00-03:00\",\n ],\n [\n \"2015-03-08T00:00:00-0800\",\n \"2015-03-08 00:00:00-08:00\",\n ],\n [\n \"2015-03-08T12:00:00-0800\",\n \"2015-03-08 12:00:00-07:00\",\n ],\n ])\n def test_normal_dst(self, value, expected):\n dt_converter = DateTimeConverter(value)\n\n assert str(dt_converter) == expected\n assert str(dt_converter.convert()) == expected\n\n @pytest.mark.parametrize([\"value\", \"expected\"], [\n [\"invalid time string\", TypeConversionError],\n [None, TypeConversionError],\n [11111, TypeConversionError],\n ])\n def test_exception(self, value, expected):\n dt_converter = DateTimeConverter(value)\n\n with pytest.raises(expected):\n dt_converter.convert()\n\n\nclass Test_convert_value:\n\n @pytest.mark.parametrize([\"value\", \"expected\"], [\n [\"0\", 0],\n [str(six.MAXSIZE), six.MAXSIZE],\n [str(-six.MAXSIZE), -six.MAXSIZE],\n [0, 0],\n [six.MAXSIZE, six.MAXSIZE],\n [-six.MAXSIZE, -six.MAXSIZE],\n\n [\"0.0\", 0],\n [0.0, 0],\n\n [\"aaaaa\", \"aaaaa\"],\n\n [inf, inf],\n ])\n def test_normal(self, value, expected):\n assert convert_value(value) == expected\n\n @pytest.mark.parametrize([\"value\", \"none_return_value\", \"expected\"], [\n [None, None, None],\n [\"1\", None, 1],\n [None, \"null\", \"null\"],\n [\"1\", \"null\", 1],\n ])\n def test_none(self, value, none_return_value, expected):\n assert convert_value(value, none_return_value) == expected\n\n def test_abnormal(self):\n assert is_nan(convert_value(nan))\n","sub_path":"test/test_converter.py","file_name":"test_converter.py","file_ext":"py","file_size_in_byte":4915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"525908812","text":"import tornado.ioloop\nimport tornado.web\n\nimport os\nimport sys\nimport time\n\n\nclass StaticHandler(tornado.web.StaticFileHandler):\n def parse_url_path(self, url_path):\n if not url_path or url_path.endswith('/'):\n url_path = url_path + 'index.html'\n return url_path\n\n\napplication = tornado.web.Application([\n (r\"/(.*)\", StaticHandler, {\"path\": os.getcwd() + \"/dist\"})\n])\n\nprint(\"### Server Starting ###\")\nwhile True:\n try:\n application.listen(80)\n tornado.ioloop.IOLoop.current().start()\n except KeyboardInterrupt:\n print(\"### Server Closed ###\")\n sys.exit(0)\n except:\n print(\"### Restarting Server ###\")\n time.sleep(10)\n\n","sub_path":"runDeploy.py","file_name":"runDeploy.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"211924201","text":"# MARK: Imports\nfrom fresh_tomatoes import open_movies_page\nfrom movie import Movie\n\n# MARK: Movie Posters Constants\nIMAGE_ASSET_BASE = \"https://images-na.ssl-images-amazon.com/images/M/\"\nR1_POSTER = IMAGE_ASSET_BASE +\\\n \"MV5BMjEwMzMxODIzOV5BMl5BanBnXkFtZTgwNzg3OTAzMDI@._V1_SY1000_SX675_AL_.jpg\"\n\nMOANA_POSTER = IMAGE_ASSET_BASE 
+\\\n    \"MV5BMjI4MzU5NTExNF5BMl5BanBnXkFtZTgwNzY1MTEwMDI\" +\\\n    \"@._V1_SY1000_CR0,0,674,1000_AL_.jpg\"\n\nFBEASTS_POSTER = IMAGE_ASSET_BASE +\\\n    \"MV5BMjMxOTM1OTI4MV5BMl5BanBnXkFtZTgwODE5OTYxMDI\" +\\\n    \"@._V1_SY1000_CR0,0,674,1000_AL_.jpg\"\n\n# MARK: Create Movie Instances\n# NOTE: You should probably escape your HTML\nrogue_one = Movie(\n    \"Rogue One: A Star Wars Story\",\n    R1_POSTER,\n    \"https://www.youtube.com/watch?v=sC9abcLLQpI\")\n\nmoana = Movie(\n    \"Moana\",\n    MOANA_POSTER,\n    \"https://www.youtube.com/watch?v=M5dnZKrUpdA\")\n\nf_beasts = Movie(\n    \"Fantastic Beasts
And Where To Find Them\",\n FBEASTS_POSTER,\n \"https://youtu.be/Vso5o11LuGU\")\n\n# MARK: Aggregate Movies in List\nmovies = [rogue_one, moana, f_beasts]\n\n# MARK: Main\nopen_movies_page(movies)\n","sub_path":"program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"573472793","text":"# сложение Х + ХХ + ХХХ\nnumber = input('Введите число: ')\nnumber = float(number)\nif int(number) < 0:\n print('Необходимо ввести положительное число!')\n exit(0)\nif (number % 1) > 0:\n print('! введённое число округлено до: ', int(number))\nnumber = str(int(number))\nnum_1 = number\nprint('число_1: ', num_1)\nnum_2 = number + number\nprint('число_2: ', num_2)\nnum_3 = number + number + number\nprint('число_3: ', num_3)\nnum_sum = int(num_1) + int(num_3) + int(num_3)\nprint(num_1, '+', num_2, '+', num_3, '=', num_sum)","sub_path":"Less1-3.py","file_name":"Less1-3.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"541726870","text":"import math\r\nimport smbus\r\nimport time\r\nfrom gps3 import gps3\r\nimport numpy as np\r\nimport RPi.GPIO as GPIO\r\nimport time\r\n\r\n#****************************************************************************************************************\r\n\r\n\r\nGPIO.setmode(GPIO.BCM)\r\nGPIO.setwarnings(False)\r\n\r\ndist=0\r\ncheckA=0\r\ncheckB=0\r\nheadA=0.0\r\nheadB=0.0\r\nheadC=0.0\r\nheadD=0.0\r\n\r\ni=0\r\nj=0\r\n\r\nl=360-185\r\nx_manual=1.5465\r\ny_manual=1.4085\r\n\r\n#left forward\r\n\r\ntrigPin1 = 27\r\nechoPin1 = 17\r\n\r\n# right forward\r\n\r\ntrigPin2 = 14\r\nechoPin2 = 15\r\n\r\n#left back\r\n\r\ntrigPin3 = 26\r\nechoPin3 = 19\r\n\r\n#right back\r\n\r\ntrigPin4 = 20\r\nechoPin4 = 21\r\n\r\nthreshold = 30\r\n\r\nGPIO.setup(trigPin1, GPIO.OUT)\r\nGPIO.setup(echoPin1, GPIO.IN)\r\n\r\nGPIO.setup(trigPin2, GPIO.OUT)\r\nGPIO.setup(echoPin2, GPIO.IN)\r\n\r\nGPIO.setup(trigPin3, GPIO.OUT)\r\nGPIO.setup(echoPin3, GPIO.IN)\r\n\r\nGPIO.setup(trigPin4, GPIO.OUT)\r\nGPIO.setup(echoPin4, GPIO.IN)\r\n\r\n#*********************************************************************************************************************\r\n\r\n#DIRECTION PINS\r\nldir=9\r\nrdir=23\r\n#SPEED PINS\r\n\r\nlspeed=11\r\nrspeed=24\r\n\r\n\r\nGPIO.setup(ldir, GPIO.OUT)\r\nGPIO.setup(rdir,GPIO.OUT)\r\nGPIO.setup(lspeed, GPIO.OUT)\r\nGPIO.setup(rspeed,GPIO.OUT)\r\n\r\n\r\n#frequency\r\np=GPIO.PWM(lspeed,100)\r\nq=GPIO.PWM(rspeed,100)\r\n\r\np.start(0.00)\r\nq.start(0.00)\r\n\r\n\r\n#********************************************************************************************************************\r\nt1 = 0.000000\r\nt2 = 0.000000\r\nt3 = 0.000000\r\nt4 = 0.000000\r\nt5 = 0.000000\r\nt6 = 0.000000\r\n\r\ntax1 = 0.000000\r\ntax2 = 0.000000\r\ntax3 = 0.000000\r\ntax4 = 0.000000\r\n\r\n\r\n#***************************************************************************************************************\r\n\r\n\r\n\r\n\r\nmin_x=0\r\nmax_x=0\r\nmin_y=0\r\nmax_y=0\r\nmin_z=0\r\nmax_z=0\r\n\r\n\r\nt=0.0000\r\n\r\nbus = smbus.SMBus(1)\r\n\r\ndef twos_complement(val, bits):\r\n if (val & (1 << (bits - 1))) != 0:\r\n val = val - (1 << bits)\r\n return val\r\n\r\nbus.write_byte_data(0x1E, 0x20, 0b01111100)\r\nbus.write_byte_data(0x1E, 0x21, 0b00000000)\r\nbus.write_byte_data(0x1E, 0x22, 0b00000000)\r\nbus.write_byte_data(0x1E, 0x23, 0b00001100)\r\n\r\n\r\nlat2 = 13.347906667\r\nlon2 = 
74.792238333\r\nlat1 = 0.0000000\r\nlon1 = 0.0000000\r\n\r\nangle=0.0\r\n\r\nx=0.00\r\ny=0.00\r\n\r\ndef short_angle(x,y):\r\n if abs(x-y)<180.0:\r\n return (abs(x-y))\r\n\r\n else:\r\n return (360.0-abs(x-y))\r\n\r\n\r\n#********************************************************************************************************************\r\ndef forward():\r\n GPIO.output(ldir,True)\r\n GPIO.output(rdir,True)\r\n GPIO.output(lspeed,True)\r\n GPIO.output(rspeed,True)\r\n #p.ChangeDutyCycle(50)\r\n #q.ChangeDutyCycle(50)\r\n\r\n\r\ndef backward():\r\n GPIO.output(ldir,False)\r\n GPIO.output(rdir,False)\r\n GPIO.output(lspeed,True)\r\n GPIO.output(rspeed,True)\r\n #p.ChangeDutyCycle(50)\r\n #q.ChangeDutyCycle(50)\r\n\r\n\r\ndef right():\r\n GPIO.output(ldir,True)\r\n GPIO.output(rdir,False)\r\n GPIO.output(lspeed,True)\r\n GPIO.output(rspeed,True)\r\n #p.ChangeDutyCycle(50)\r\n #q.ChangeDutyCycle(50)\r\n\r\n\r\ndef left():\r\n GPIO.output(ldir,False)\r\n GPIO.output(rdir,True)\r\n GPIO.output(lspeed,True)\r\n GPIO.output(rspeed,True)\r\n # p.ChangeDutyCycle(50)\r\n # q.ChangeDutyCycle(50)\r\n\r\n\r\ndef brutestop():\r\n GPIO.output(ldir, False)\r\n GPIO.output(rdir, True)\r\n GPIO.output(lspeed, False)\r\n GPIO.output(rspeed, False)\r\n\r\ndef backwardright():\r\n GPIO.output(ldir,False)\r\n GPIO.output(rdir,False)\r\n GPIO.output(lspeed,True)\r\n GPIO.output(rspeed,True)\r\n p.ChangeDutyCycle(100)\r\n q.ChangeDutyCycle(50)\r\n\r\n\r\ndef backwardleft():\r\n GPIO.output(ldir, False)\r\n GPIO.output(rdir, False)\r\n GPIO.output(lspeed, True)\r\n GPIO.output(rspeed, True)\r\n p.ChangeDutyCycle(50)\r\n q.ChangeDutyCycle(100)\r\n\r\n\r\ndef sideleftcheck():\r\n GPIO.output(trigPin3, True)\r\n time.sleep(10e-6)\r\n GPIO.output(trigPin3, False)\r\n\r\n while GPIO.input(echoPin3) == 0:\r\n pass\r\n\r\n t5 = time.time()\r\n\r\n while GPIO.input(echoPin3) == 1:\r\n t6 = time.time()\r\n tax3 = t6 - t5\r\n get = 0\r\n if tax3 > 0.005:\r\n get = 1\r\n break\r\n\r\n t6 = time.time()\r\n\r\n if get == 1:\r\n pass\r\n return 1\r\n\r\n duration3 = t6 - t5\r\n\r\n distance3 = duration3 * 17000\r\n\r\n if distance3 < threshold+10:\r\n print(\"ULTRASONIC OVERRIDE: FORWARD---\")\r\n forward()\r\n p.ChangeDutyCycle(50)\r\n q.ChangeDutyCycle(50)\r\n\r\n return -1\r\n\r\ndef siderightcheck():\r\n GPIO.output(trigPin4, True)\r\n time.sleep(10e-6)\r\n GPIO.output(trigPin4, False)\r\n\r\n while GPIO.input(echoPin4) == 0:\r\n pass\r\n\r\n t7 = time.time()\r\n\r\n while GPIO.input(echoPin4) == 1:\r\n t8 = time.time()\r\n tax4 = t8 - t7\r\n put = 0\r\n if tax4 > 0.005:\r\n put = 2\r\n break\r\n\r\n t8 = time.time()\r\n\r\n if put == 2:\r\n pass\r\n return 1\r\n\r\n duration4 = t8 - t7\r\n\r\n distance4 = duration4 * 17000\r\n\r\n if distance4 < threshold+10:\r\n print(\"ULTRASONIC OVERRIDE: FORWARD---\")\r\n forward()\r\n p.ChangeDutyCycle(50)\r\n q.ChangeDutyCycle(50)\r\n\r\n return -1\r\n\r\n\r\n#.....................................................................................................................\r\ndef ultrasonic():\r\n while 1:\r\n\r\n time.sleep(0.1)\r\n GPIO.output(trigPin1, True)\r\n time.sleep(10e-6)\r\n GPIO.output(trigPin1, False)\r\n\r\n while GPIO.input(echoPin1) == 0:\r\n pass\r\n\r\n t1 = time.time()\r\n\r\n while GPIO.input(echoPin1) == 1:\r\n t2 = time.time()\r\n tax1 = t2 - t1\r\n i = 0\r\n if tax1 > 0.005: #TIMEOUT AT 85cm\r\n i = 1\r\n break\r\n\r\n t2 = time.time()\r\n\r\n duration1 = t2 - t1\r\n\r\n distance1 = duration1 * 17000\r\n # .............................................\r\n 
GPIO.output(trigPin2, True)\r\n time.sleep(10e-6)\r\n GPIO.output(trigPin2, False)\r\n\r\n while GPIO.input(echoPin2) == 0:\r\n pass\r\n\r\n t3 = time.time()\r\n\r\n while GPIO.input(echoPin2) == 1:\r\n t4 = time.time()\r\n tax2 = t4 - t3\r\n j = 0\r\n if tax2 > 0.005:\r\n j = 2\r\n break\r\n\r\n t4 = time.time()\r\n\r\n #if (i + j) == 3:\r\n # pass\r\n\r\n duration2 = t4 - t3\r\n\r\n distance2 = duration2 * 17000\r\n#......................................................................................................................\r\n#BACKWARD\r\n\r\n if (distance1 > threshold) and (distance2 > threshold):\r\n pass\r\n\r\n elif (distance1 < threshold+5) or (distance2 < threshold+5):\r\n backward()\r\n p.ChangeDutyCycle(50)\r\n q.ChangeDutyCycle(50)\r\n time.sleep(1)\r\n\r\n if distance1 < distance2:\r\n print(\"ULTRASONIC OVERRIDE: BACKWARD RIGHT\")\r\n\r\n headC = np.round(roverheading(min_x, max_x, min_y, max_y, min_z, min_z))\r\n headD = headC\r\n\r\n while short_angle(headC, headD) < 90:\r\n print(\"ULTRASONIC OVERRIDE: BACKWARD RIGHT\")\r\n right()\r\n p.ChangeDutyCycle(50)\r\n q.ChangeDutyCycle(50)\r\n headD = np.round(roverheading(min_x, max_x, min_y, max_y, min_z, min_z))\r\n\r\n brutestop()\r\n checkB = sideleftcheck()\r\n\r\n while checkB == -1:\r\n checkB = sideleftcheck()\r\n\r\n return -1\r\n\r\n else:\r\n print(\"ULTRASONIC OVERRIDE: BACKWARD LEFT\")\r\n\r\n headA = np.round(roverheading(min_x, max_x, min_y, max_y, min_z, min_z))\r\n headB = headA\r\n\r\n while short_angle(headA, headB) < 90:\r\n print(\"ULTRASONIC OVERRIDE: BACKWARD LEFT\")\r\n left()\r\n p.ChangeDutyCycle(50)\r\n q.ChangeDutyCycle(50)\r\n headB = np.round(roverheading(min_x, max_x, min_y, max_y, min_z, min_z))\r\n\r\n brutestop()\r\n checkA = siderightcheck()\r\n\r\n while checkA == -1:\r\n checkA = siderightcheck()\r\n\r\n return -1\r\n\r\n return -1\r\n\r\n#LEFT90\r\n\r\n elif (distance2 < threshold) and (distance2 < distance1):\r\n print(\"ULTRASONIC OVERRIDE: LEFT90\")\r\n\r\n headA = np.round(roverheading(min_x, max_x, min_y, max_y, min_z, min_z))\r\n headB=headA\r\n\r\n while short_angle(headA,headB) < 90:\r\n print(\"ULTRASONIC OVERRIDE: LEFT90\")\r\n left()\r\n p.ChangeDutyCycle(50)\r\n q.ChangeDutyCycle(50)\r\n headB = np.round(roverheading(min_x, max_x, min_y, max_y, min_z, min_z))\r\n\r\n\r\n brutestop()\r\n checkA = siderightcheck()\r\n\r\n while checkA == -1:\r\n checkA = siderightcheck()\r\n\r\n return -1\r\n#.....................................................................................................................\r\n# RIGHT90\r\n\r\n elif (distance1 < threshold) and (distance1 < distance2):\r\n print(\"ULTRASONIC OVERRIDE: RIGHT90\")\r\n\r\n headC = np.round(roverheading(min_x, max_x, min_y, max_y, min_z, min_z))\r\n headD=headC\r\n\r\n while short_angle(headC,headD) < 90:\r\n print(\"ULTRASONIC OVERRIDE: RIGHT90\")\r\n right()\r\n p.ChangeDutyCycle(50)\r\n q.ChangeDutyCycle(50)\r\n headD = np.round(roverheading(min_x, max_x, min_y, max_y, min_z, min_z))\r\n\r\n brutestop()\r\n checkB = sideleftcheck()\r\n\r\n while checkB == -1:\r\n checkB = sideleftcheck()\r\n\r\n return -1\r\n\r\n # SIDE ULTRASONICS..........................................................................................\r\n\r\n GPIO.output(trigPin3, True)\r\n time.sleep(10e-6)\r\n GPIO.output(trigPin3, False)\r\n\r\n while GPIO.input(echoPin3) == 0:\r\n pass\r\n\r\n t5 = time.time()\r\n\r\n while GPIO.input(echoPin3) == 1:\r\n t6 = time.time()\r\n tax3 = t6 - t5\r\n get = 0\r\n if tax3 > 0.005:\r\n get = 
1\r\n break\r\n\r\n t6 = time.time()\r\n\r\n if get == 1:\r\n pass\r\n return 1\r\n\r\n duration3 = t6 - t5\r\n\r\n distance3 = duration3 * 17000\r\n\r\n if distance3 < threshold+10:\r\n print(\"ULTRASONIC OVERRIDE: FORWARD---\")\r\n forward()\r\n p.ChangeDutyCycle(50)\r\n q.ChangeDutyCycle(50)\r\n\r\n return -1\r\n ##......................................................................................................\r\n\r\n GPIO.output(trigPin4, True)\r\n time.sleep(10e-6)\r\n GPIO.output(trigPin4, False)\r\n\r\n while GPIO.input(echoPin4) == 0:\r\n pass\r\n\r\n t7 = time.time()\r\n\r\n while GPIO.input(echoPin4) == 1:\r\n t8 = time.time()\r\n tax4 = t8 - t7\r\n put = 0\r\n if tax4 > 0.005:\r\n put = 2\r\n break\r\n\r\n t8 = time.time()\r\n\r\n if put == 2:\r\n pass\r\n return 1\r\n\r\n duration4 = t8 - t7\r\n\r\n distance4 = duration4 * 17000\r\n\r\n if distance4 < threshold:\r\n print(\"ULTRASONIC OVERRIDE: FORWARD---\")\r\n forward()\r\n p.ChangeDutyCycle(50)\r\n q.ChangeDutyCycle(50)\r\n\r\n return -1\r\n\r\n\r\n#......................................................................................................................\r\n\r\ndef haversine(lat1, lon1, lat2, lon2):\r\n # distance between latitudes\r\n # and longitudes\r\n dLat = (lat2 - lat1) * math.pi / 180.0\r\n dLon = (lon2 - lon1) * math.pi / 180.0\r\n\r\n # convert to radians\r\n lat1 = (lat1) * math.pi / 180.0\r\n lat2 = (lat2) * math.pi / 180.0\r\n\r\n # apply formulae\r\n a = (pow(math.sin(dLat / 2), 2) +\r\n pow(math.sin(dLon / 2), 2) *\r\n math.cos(lat1) * math.cos(lat2))\r\n rad = 6378.1*1000\r\n c = 2 * math.asin(math.sqrt(a))\r\n dist= rad*c\r\n\r\n if dist < 7:\r\n print(\"GATE REACHED\")\r\n quit()\r\n\r\n return dist\r\n\r\n#......................................................................................................................\r\ndef bearing(lat1, lon1, lat2, lon2):\r\n dLon = lon2 - lon1\r\n y = math.sin(dLon) * math.cos(lat2)\r\n x = math.cos(lat1) * math.sin(lat2) \\\r\n - math.sin(lat1) * math.cos(lat2) * math.cos(dLon)\r\n\r\n degree = math.atan2(y, x) * 180 / math.pi\r\n\r\n if degree < 0:\r\n degree += 360\r\n return degree\r\n#....................................................................................................................\r\ndef roverheading(min_x,max_x,min_y,max_y,min_z,max_z):\r\n out_x_m_l = bus.read_byte_data(0x1E, 0x28)\r\n out_x_m_h = bus.read_byte_data(0x1E, 0x29)\r\n x = twos_complement((out_x_m_h << 8) | out_x_m_l, 16) / 1e3\r\n\r\n\r\n out_y_m_l = bus.read_byte_data(0x1E, 0x2A)\r\n out_y_m_h = bus.read_byte_data(0x1E, 0x2B)\r\n y = twos_complement((out_y_m_h << 8) | out_y_m_l, 16) / 1e3\r\n\r\n out_z_m_l = bus.read_byte_data(0x1E, 0x2C)\r\n out_z_m_h = bus.read_byte_data(0x1E, 0x2D)\r\n z = twos_complement((out_z_m_h << 8) | out_z_m_l, 16) / 1e3\r\n\r\n print(\"\")\r\n\r\n if x < min_x:\r\n min_x = x\r\n if x > max_x:\r\n max_x = x\r\n\r\n if y < min_y:\r\n min_y = y\r\n if y > max_y:\r\n max_y = y\r\n\r\n if z < min_z:\r\n min_z = z\r\n if z > max_z:\r\n max_z = z\r\n\r\n offset_x = (max_x + min_x) / 2\r\n offset_y = (max_y + min_y) / 2\r\n offset_z = (max_z + min_z) / 2\r\n\r\n x = x - x_manual\r\n y = y - y_manual\r\n z = z - offset_z\r\n\r\n heading = math.atan2(y, x) * 180 / math.pi\r\n\r\n if heading < 0:\r\n heading += 360\r\n\r\n heading = (heading+l)% 360\r\n\r\n #print(\"HEADING=\",heading)\r\n return heading\r\n\r\n#....................................................................................................................\r\n\r\ndef 
displaydata(t,dist):\r\n\r\n if t < 10 and t > -10:\r\n print(\"STRAIGHT\",\"DISTANCE=\",dist)\r\n angle=0\r\n forward()\r\n p.ChangeDutyCycle(50)\r\n q.ChangeDutyCycle(50)\r\n #return\r\n\r\n elif t <= -180:\r\n angle=360+t\r\n print(\"ANTICLOCKWISE\",angle,\"DISTANCE=\",dist)\r\n left()\r\n p.ChangeDutyCycle(30)\r\n q.ChangeDutyCycle(30)\r\n #return\r\n\r\n elif t < 0 and t > -180:\r\n angle=-t\r\n print(\"CLOCKWISE\", angle,\"DISTANCE=\",dist)\r\n right()\r\n p.ChangeDutyCycle(30)\r\n q.ChangeDutyCycle(30)\r\n #return\r\n\r\n elif t >= 180:\r\n angle=360-t\r\n print(\"CLOCKWISE\", angle,\"DISTANCE=\",dist)\r\n right()\r\n p.ChangeDutyCycle(30)\r\n q.ChangeDutyCycle(30)\r\n #return\r\n\r\n elif t > 0 and t < 180:\r\n angle=t\r\n print(\"ANTICLOCKWISE\", angle,\"DISTANCE=\",dist)\r\n left()\r\n p.ChangeDutyCycle(30)\r\n q.ChangeDutyCycle(30)\r\n #return\r\n\r\ngps_socket = gps3.GPSDSocket()\r\ndata_stream = gps3.DataStream()\r\ngps_socket.connect()\r\ngps_socket.watch()\r\n\r\n\r\n\r\n\r\n\r\ntry:\r\n for new_data in gps_socket:\r\n v = ultrasonic()\r\n if v==1:\r\n pass\r\n else:\r\n continue\r\n\r\n if new_data:\r\n data_stream.unpack(new_data)\r\n lat1= data_stream.TPV['lat']\r\n lon1= data_stream.TPV['lon']\r\n if lat1 == 'n/a':\r\n continue\r\n if lon1 == 'n/a':\r\n continue\r\n\r\n dist=haversine(lat1, lon1, lat2, lon2)\r\n a = np.round(bearing(lat1, lon1, lat2, lon2))\r\n b = np.round(roverheading(min_x, max_x, min_y, max_y, min_z, min_z))\r\n t = b - a\r\n displaydata(t,dist) #put in for\r\n\r\nexcept KeyboardInterrupt:\r\n GPIO.cleanup()\r\n","sub_path":"Leander_Stephen_D'Souza/Autonomous GPS+Magnetometer+Ultrasonics/Final_Autonomous_Task.py","file_name":"Final_Autonomous_Task.py","file_ext":"py","file_size_in_byte":16040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"78553364","text":"\r\ncharacters_to_merge = {\r\n'Dorincourt':'Dorinate', 'Misselthwaite':'Dorinate', 'Coombe':'Dorinate', 'Manor':'Castle',\r\n'Dr.':'Mr.', 'Fräulein':'Miss', 'Mademoiselle':'Miss',\r\n'Craven':'Haverford', 'Hobbs':'Haverford', 'Havisham':'Haverford', 'Barrow':'Haverford', 'Carrisford':'Haverford',\r\n'Crewe':'Lennox', 'Gareth-Lawless':'Lennox', 'Mary':'Rose', 'Robin':'Rose', 'Sara':'Rose',\r\n'Ram':'Dickard', 'Dass':'Howe', 'Dick': 'Dickard', 'Dickon':'Dickard',\r\n'Ceddie':'Connor', 'Cedric':'Connor', 'Donal':'Connor', 'Muir':'Errol', 'Colin':'Connor',\r\n'Roach':'Weatherstaff', 'Higgins':'Weatherstaff', 'Michael':'Ben',\r\n'Medlock':'Howe', 'Amabel':'Felicia', 'Amelia':'Felicia', 'Feather':'Felicia', 'Sowerby':'Howe',\r\n'Becky':'Betty', 'Susan':'Betty', 'Dawson':'Betty', 'Mariette':'Betty', 'Louisa':'Betty', 'Anne':'Betty', 'Dowson':'Betty', 'Dowie': 'Betty', 'Martha':'Betty',\r\n'Vallé':'Minchin', 'Hirsch':'Minchin', 'Andrews':'Minchin',\r\n'Ermengarde':'Lottie', 'Lugh': 'St. John', 'Lavinia':'Lottie', 'Janet':'Lottie', 'Large':'St. 
John'\r\n}\r\n\r\ndef merge(nnp):\r\n\treturn characters_to_merge.get(nnp, nnp)\r\n","sub_path":"character_merge.py","file_name":"character_merge.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"567381964","text":"# Author: kk.Fang(fkfkbill@gmail.com)\n\nfrom schema import Optional, Schema\n\nfrom new_rule.rule import *\nfrom utils.schema_utils import *\nfrom restful_api.views.base import PrivilegeReq\nfrom utils.const import *\n\n\nclass TicketRuleHandler(PrivilegeReq):\n\n def get(self):\n \"\"\"线下工单规则列表\"\"\"\n params = self.get_query_args(Schema({\n Optional(\"db_type\"): scm_one_of_choices(ALL_SUPPORTED_DB_TYPE),\n Optional(\"keyword\", default=None): scm_str,\n **self.gen_p()\n }))\n keyword = params.pop(\"keyword\")\n p = self.pop_p(params)\n rules = TicketRule.objects(**params)\n if keyword:\n rules = self.query_keyword(rules, keyword, \"desc\", \"name\")\n items, p = self.paginate(rules, **p)\n self.resp([i.to_dict() for i in items], **p)\n\n def post(self):\n \"\"\"新增规则\"\"\"\n self.acquire(PRIVILEGE.PRIVILEGE_RULE)\n\n params = self.get_json_args(Schema({\n \"db_type\": scm_one_of_choices(ALL_SUPPORTED_DB_TYPE),\n \"input_params\": [\n {\n \"desc\": scm_str,\n \"name\": scm_unempty_str,\n \"unit\": scm_str,\n \"value\": object,\n }\n ],\n \"max_score\": scm_num,\n \"output_params\": [\n {\n \"desc\": scm_str,\n \"name\": scm_unempty_str,\n \"unit\": scm_str\n }\n ],\n \"desc\": scm_str,\n \"name\": scm_unempty_str,\n \"code\": scm_str,\n \"status\": scm_bool,\n \"summary\": scm_str,\n \"sql_type\": scm_one_of_choices(ALL_SQL_TYPE),\n \"solution\": [scm_unempty_str],\n \"weight\": scm_num,\n \"analyse_type\": scm_one_of_choices(ALL_TICKET_ANALYSE_TYPE)\n }))\n new_rule = TicketRule(**params)\n new_rule.run(test_only=True)\n new_rule.save()\n self.resp_created(new_rule.to_dict())\n\n def patch(self):\n \"\"\"修改规则\"\"\"\n self.acquire(PRIVILEGE.PRIVILEGE_RULE)\n\n params = self.get_json_args(Schema({\n\n # 这二个字段是不能改的\n \"db_type\": scm_one_of_choices(ALL_SUPPORTED_DB_TYPE),\n \"name\": scm_unempty_str,\n\n Optional(\"input_params\"): [\n {\n \"desc\": scm_str,\n \"name\": scm_unempty_str,\n \"unit\": scm_str,\n \"value\": object,\n }\n ],\n Optional(\"max_score\"): scm_num,\n Optional(\"output_params\"): [\n {\n \"desc\": scm_str,\n \"name\": scm_unempty_str,\n \"unit\": scm_str\n }\n ],\n Optional(\"desc\"): scm_str,\n Optional(\"code\"): scm_str,\n Optional(\"status\"): scm_bool,\n Optional(\"summary\"): scm_str,\n Optional(\"type\"): scm_str,\n Optional(\"sql_type\"): scm_one_of_choices(ALL_SQL_TYPE),\n Optional(\"solution\"): [scm_unempty_str],\n Optional(\"weight\"): scm_num\n }))\n\n rule = TicketRule.objects(\n name=params.pop(\"name\"),\n db_type=params.pop(\"db_type\")).first()\n rule.from_dict(params)\n rule.run(test_only=True)\n rule.save()\n self.resp_created(rule.to_dict())\n\n\nclass TicketRuleCodeHandler(PrivilegeReq):\n\n def get(self):\n \"\"\"获取空的规则code的模板\"\"\"\n self.resp({\n \"code\": TicketRule.code_template()\n })\n","sub_path":"restful_api/views/offline/rule.py","file_name":"rule.py","file_ext":"py","file_size_in_byte":3639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"78290696","text":"import numpy as np\n\n\nclass Policy:\n\n def __init__(self, d_state, dm_act, pdict):\n self.d_state = d_state\n self.dm_act = dm_act\n\n self.type = pdict['type']\n\n if 'beta' in pdict:\n self.beta = pdict['beta']\n 
if 'eps' in pdict:\n self.eps = pdict['eps']\n if 'weights' in pdict:\n self.weights = pdict['weights']\n\n def anneal(self, n=0):\n if self.type == 'softmax':\n self.beta = self.beta * 0.9995\n elif self.type == 'greedy':\n self.eps = 1.0 / (1.0 * (n + 1.0))\n else:\n pass\n\n def action(self, qfunc, x):\n if self.type == 'softmax':\n pmf = np.exp(np.clip(qfunc[x, :] / self.beta, -700, 700))\n return np.random.choice(self.dm_act, p=pmf/np.sum(pmf))\n elif self.type == 'greedy':\n if self.eps >= np.random.rand():\n return np.random.choice(self.dm_act)\n else:\n return np.argmax(qfunc[x, :])\n else:\n return np.random.choice(self.dm_act, p=self.weights)\n\n\nclass QLearning:\n\n def __init__(self, env, discount, alpha, pdict):\n self.env = env\n\n self.d_state = 16 # self.env.observation_space.shape[0]\n self.dm_act = 4 # self.env.action_space.shape[0]\n\n self.discount = discount\n\n self.ctl = Policy(self.d_state, self.dm_act, pdict)\n\n self.alpha = alpha\n\n self.vfunc = np.zeros((self.d_state, ))\n self.qfunc = np.zeros((self.d_state, self.dm_act))\n\n self.td_error = []\n self.rollouts = None\n\n def run(self, nb_samples):\n score = np.empty((0, 1))\n\n rollouts = []\n\n n_samp = 0\n n_eps = 0\n while True:\n roll = {'x': np.empty((0, ), np.int64),\n 'u': np.empty((0, ), np.int64),\n 'xn': np.empty((0, ), np.int64),\n 'done': np.empty((0,), np.int64),\n 'r': np.empty((0,))}\n\n # reset env\n x = self.env.reset()\n\n # anneal policy\n self.ctl.anneal(n=n_eps)\n\n done = False\n while not done:\n u = self.ctl.action(self.qfunc, x)\n\n roll['x'] = np.hstack((roll['x'], x))\n roll['u'] = np.hstack((roll['u'], u))\n\n xn, r, done, _ = self.env.step(u)\n roll['xn'] = np.hstack((roll['xn'], xn))\n roll['done'] = np.hstack((roll['done'], done))\n roll['r'] = np.hstack((roll['r'], r))\n\n err = 0.0\n if not done:\n err = r + self.discount * np.max(self.qfunc[xn, :]) - self.qfunc[x, u]\n if done:\n err = r - self.qfunc[x, u]\n\n self.qfunc[x, u] += self.alpha * err\n self.td_error = np.append(self.td_error, err)\n\n x = xn\n\n if len(score) < 100:\n score = np.append(score, r)\n else:\n score[n_eps % 100] = r\n\n n_samp += 1\n if n_samp >= nb_samples:\n roll['done'][-1] = True\n rollouts.append(roll)\n return rollouts\n\n print(\"eps: {} step: {} rwd:{} score:{}\".format(n_eps, len(roll['r']), r, np.mean(score, axis=0)))\n\n n_eps += 1\n rollouts.append(roll)\n","sub_path":"rl/td/qlearning.py","file_name":"qlearning.py","file_ext":"py","file_size_in_byte":3428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"545332906","text":"# -*- coding: utf-8 -*-\n#\n# This file is part of the amtFSM (AI Mechanics & Tech FSM) project\n# \n# Copyright 2019 AI Mechanics & Tech\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n__author__ = \"aimktech\"\n__email__ = \"code@aimechanics.tech\"\n__version__ = \"0.1.0\"\n\n\n# imports\n#----------\nimport os\nimport yaml\nimport queue\n\nfrom enum import Enum, auto\n\n\n# classes\n#----------\nclass 
StateType(Enum):\n \"\"\" supported types for all the FSM states \"\"\"\n FSM_BEGIN_STATE = auto()\n FSM_NORMAL_STATE = auto()\n FSM_END_STATE = auto()\n\n\nclass State(object):\n \"\"\" definition of a FSM state \"\"\"\n\n def __init__(self, name, state_type:StateType, enter_action=\"\", exit_action=\"\"):\n \"\"\"\n Create a new state\n\n :param name: the name for this state\n :param state_type: the type for this state\n :param enter_action: action performed when entering the state\n :param exit_action: action performed when leaving the state\n \"\"\"\n self.name = name\n self.state_type = state_type\n self.enter_action = enter_action\n self.exit_action = exit_action\n\n\nclass Event(object):\n \"\"\" definition of a FSM event \"\"\"\n\n def __init__(self, name):\n \"\"\"\n Create a new event\n\n :param name: the event name\n \"\"\"\n self.name = name\n\n\nclass Transition(object):\n \"\"\" definition of a FSM transition \"\"\"\n\n def __init__(self, event, begin_state, end_state):\n \"\"\"\n Create a new transition\n\n :param event: the event to trigger the transition\n :param begin_state: the initial state before the transition\n :param end_state: the final state after the transition\n \"\"\"\n self.event = event\n self.begin_state = begin_state\n self.end_state = end_state\n\n\nclass FSMError(Exception):\n \"\"\" generic Exception for the FSM \"\"\"\n pass\n\n\nclass FSM(object):\n \"\"\" the main FSM class \"\"\"\n\n def __init__(self):\n \"\"\" Create a new FSM \"\"\"\n self.has_ended = True # True when the FSM has ended\n self.states = dict() # internal objects \n self.current = None # current state\n self.user_callback = None # the user callback method\n self.user_queue = None # the user callback queue\n\n def setup(self, user_callback, user_queue):\n \"\"\"\n Setup the user callback / queue\n\n :param user_callback: callback used to request action from the user\n :param user_queue: queue for pushing user_callback to the user\n \"\"\"\n if callable(user_callback):\n self.user_callback = user_callback\n else:\n raise FSMError(\"user_callback must be a callable object.\")\n\n if isinstance(user_queue, queue.Queue):\n self.user_queue = user_queue\n else:\n raise FSMError(\"user_queue must be an instance of queue.Queue.\")\n\n def add(self, transitions):\n \"\"\"\n Add transitions to the FSM\n\n :param transitions: a list of transition\n \"\"\"\n if not isinstance(transitions,list):\n transitions = [transitions]\n\n for transition in transitions:\n # record the begin state in the map\n if transition.begin_state.name not in self.states:\n self.states[transition.begin_state.name] = dict()\n self.states[transition.begin_state.name]['__object'] = transition.begin_state\n \n # record the end state in the map\n if transition.end_state is not None:\n if transition.end_state.name not in self.states:\n self.states[transition.end_state.name] = dict()\n self.states[transition.end_state.name]['__object'] = transition.end_state\n \n # associate both states with the event\n self.states[transition.begin_state.name][transition.event.name] = transition.end_state\n\n def state(self):\n \"\"\" \n Return the name of the current state \n \n :return: the name of the current state or \"\" if not state are defined\n \"\"\"\n if self.current:\n return self.current.name\n else:\n return \"\"\n\n def start(self):\n \"\"\" Set the FSM on the starting state \"\"\"\n for state_name in self.states:\n state = self.states[state_name]['__object']\n if state.state_type == StateType.FSM_BEGIN_STATE:\n self.current = 
state\n self.has_ended = False\n return\n \n raise FSMError(\"FSM has no begin state.\")\n\n def stop(self):\n \"\"\" Set the FSM on the stopping state \"\"\"\n for state_name in self.states:\n state = self.states[state_name]['__object']\n if state.state_type == StateType.FSM_END_STATE:\n self.current = state\n self.has_ended = True\n return\n \n raise FSMError(\"FSM has no end state.\")\n\n def update(self, event):\n \"\"\" \n Update the FSM with a new event \n \n :param event: the event \n \"\"\"\n def _sendUserAction(action):\n if action == \"\":\n return\n \n if self.user_queue is None or self.user_callback is None:\n return\n \n self.user_queue.put(\n lambda: self.user_callback(action)\n )\n\n if self.has_ended:\n return\n\n if isinstance(event, Event):\n event_name = event.name\n else:\n event_name = event\n\n if event_name not in self.states[self.current.name]:\n raise FSMError(f\"Event {event_name} is not defined for the current state {self.current.name}.\")\n\n # check for invalid move\n if self.states[self.current.name][event_name] is None:\n raise FSMError(f\"Invalid transition for state {self.current.name} and event {event_name}.\")\n\n # move to new state\n _sendUserAction(self.current.exit_action)\n self.current = self.states[self.current.name][event_name]\n _sendUserAction(self.current.enter_action)\n\n # check for completeness\n if self.current.state_type == StateType.FSM_END_STATE:\n self.has_ended = True\n\n def can(self, state):\n \"\"\"\n Check if the state is valid from the current state\n \n :param state: the targeted state\n :return: True if the state is a valid state\n \"\"\"\n for event in self.states[self.current.name]:\n if event == '__object':\n continue\n if self.states[self.current.name][event] == state:\n return True\n \n return False\n\n def cannot(self, state):\n \"\"\"\n Check if the state is not valid from the current state\n \n :param state: the targeted state\n :return: True if the state is not a valid state\n \"\"\"\n for event in self.states[self.current.name]:\n if event == '__object':\n continue\n if self.states[self.current.name][event] == state:\n return False\n \n return True \n\n\nclass FSMBuilderError(Exception):\n \"\"\" Generic Exception class when building FSM from YAML file \"\"\"\n pass\n\n\nclass FSMBuilderComposite(object):\n \"\"\" Composite object returned by the builder \"\"\"\n pass\n\n\nclass FSMBuilder(object):\n \"\"\" Build a FSM from a YAML definition file \"\"\"\n def __init__(self, filename):\n \"\"\"\n Construct an object of class FSMBuilder\n\n :param filename: the name of the YAML file to read\n \"\"\"\n\n if not os.path.exists(filename):\n raise FSMBuilderError(f\"Could not find the YAML file {filename}.\")\n \n self.filename = filename\n\n def _makeVersion(self, version):\n \"\"\"\n Create a number from a string version\n\n :param version: the version as a string\n \"\"\"\n try:\n major, minor, patch = version.split('.')\n version = (int(major) * (2**16)) + (int(minor) * (2**8)) + int(patch)\n except ValueError:\n raise FSMBuilderError(f\"Cannot parse the version (major.minor.patch) from the definition file.\")\n \n return version\n\n def _buildEvents(self, events):\n \"\"\" \n Build event objects from a list of name \n \n :param events: a list containing names of the events\n \"\"\"\n self.events = dict()\n for event in events:\n if event in self.events:\n raise FSMBuilderError(f\"Event {event} is already defined.\")\n else:\n self.events[event] = Event(event)\n \n def _buildStates(self, states):\n \"\"\"\n Build state 
objects from a list of definition\n\n :param states: a list containing states definition\n \"\"\"\n self.states = dict()\n for state in states:\n if 'name' not in state:\n raise FSMBuilderError(f\"Found a state with no name in the definition file.\")\n \n if state['name'] in self.states:\n raise FSMBuilderError(f\"Found duplicated state name {state['name']} in the defintion file.\")\n\n if 'type' not in state:\n state_type = StateType.FSM_NORMAL_STATE\n else:\n if state['type'].lower() == 'begin':\n state_type = StateType.FSM_BEGIN_STATE\n elif state['type'].lower() == 'end':\n state_type = StateType.FSM_END_STATE\n else:\n raise FSMBuilderError(f\"Unknown state type <{state['type']}>\")\n \n if 'enter' not in state:\n enter_action = \"\"\n else:\n enter_action = state['enter']\n \n if 'exit' not in state:\n exit_action = \"\"\n else:\n exit_action = state['exit']\n\n self.states[state['name']] = State(state['name'],state_type,enter_action,exit_action)\n\n def _buildTransitions(self, transitions):\n \"\"\"\n Build transition objects for a list of definition\n\n :param transitions: a list containing transitions definition\n \"\"\"\n self.transitions = list()\n for transition in transitions:\n if 'event' not in transition:\n raise FSMBuilderError(f\"Found a transition with no event associated.\")\n else:\n if transition['event'] not in self.events:\n raise FSMBuilderError(f\"Cannot find event {transition['event']} in the list of defined events.\")\n \n if 'begin' not in transition:\n raise FSMBuilderError(f\"Found a transition with no begin state.\")\n else:\n if transition['begin'] not in self.states:\n raise FSMBuilderError(f\"Cannot find state {transition['begin']} in the list of defined states.\")\n\n if 'end' not in transition:\n raise FSMBuilderError(f\"Found a transition with no end state.\")\n else:\n if transition['end'] not in self.states:\n raise FSMBuilderError(f\"Cannot find state {transition['end']} in the list of defined states.\")\n\n event = self.events[transition['event']]\n begin_state = self.states[transition['begin']]\n end_state = self.states[transition['end']]\n self.transitions.append(Transition(event,begin_state,end_state))\n \n def parse(self, event_objects=True):\n \"\"\"\n Parse the YAML file and return composite object with the FSM and the list of events\n\n :return: a FSM composite object\n \"\"\"\n\n with open(self.filename, 'r') as stream:\n data = yaml.load(stream, Loader=yaml.BaseLoader)\n\n # check the version\n if 'Version' not in data:\n raise FSMBuilderError(f\"Cannot find the version in the file.\")\n else:\n if self._makeVersion(__version__) < self._makeVersion(data['Version']):\n raise FSMBuilderError(f\"Builder cannot parse file with version > {__version__}.\")\n\n # build events\n if 'Events' not in data:\n raise FSMBuilderError(f\"Cannot find the list of Events in {self.filename}.\")\n else:\n self._buildEvents(data['Events'])\n\n # build states\n if 'States' not in data:\n raise FSMBuilderError(f\"Cannot find the list of States in {self.filename}.\")\n else:\n self._buildStates(data['States'])\n\n # build transitions\n if 'Transitions' not in data:\n raise FSMBuilderError(f\"Cannot find the list of Transitions in {self.filename}.\")\n else:\n self._buildTransitions(data['Transitions'])\n \n # create the FSM\n obj = FSMBuilderComposite()\n obj.FSM = FSM()\n obj.FSM.add(self.transitions)\n\n # set the events\n obj.events = data['Events']\n\n # create syntaxic sugar strings for events\n if event_objects:\n count = 0\n for event in data['Events']:\n 
setattr(obj, f\"E{count}\", event)\n count = count + 1\n\n return obj \n\n","sub_path":"amtfsm/amtfsm.py","file_name":"amtfsm.py","file_ext":"py","file_size_in_byte":13814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"95172046","text":"# -----------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# -----------------------------------------------------------------------------\n\n\"\"\"Telemetry related methods and classes\"\"\"\n\nfrom sys import platform, version\nfrom subprocess import Popen\nimport inspect\nimport os\nimport psutil\nfrom knack.log import get_logger\nfrom sfctl.config import get_telemetry_config\nfrom sfctl.util import is_help_command\n\n# knack CLIConfig has been re-purposed to handle state instead.\nSF_CLI_NAME = 'sfctl'\nSF_CLI_TELEMETRY_DIR = os.path.join('~', '.{0}'.format(SF_CLI_NAME))\nTELEMETRY_FILE_NAME = 'telemetry'\n\ndef check_and_send_telemetry(args_list, invocation_ret_val, exception=None):\n \"\"\"\n Check if telemetry should be sent, and if so, send the telemetry\n Telemetry should be sent only if the configuration allows for sending telemetry and if\n the commandline window does not have too many child processes already running.\n\n :param args_list: a list of strings, representing the command called along\n with its parameters\n :param invocation_ret_val: (int) The return value of calling invoke on the command in args_list\n :param exception: (str) the string version of the Exception object returned by invoking the\n command in args_list\n\n :return: None\n \"\"\"\n\n logger = get_logger(__name__)\n\n if get_telemetry_config():\n\n try:\n # If there are more than 15 python processes, do not create another process\n # (do not send telemetry)\n # len(psutil.Process().parent().children(recursive=True)) does not work, since it is\n # not able to find orphaned children\n python_processes_count = 0\n\n for process in psutil.process_iter():\n if process.name().find('python') != -1:\n python_processes_count += 1\n\n if python_processes_count > 15:\n logger.info('Not sending telemetry because python process count exceeds '\n 'allowable number')\n return\n\n # In the background, do some work on checking and sending telemetry for the current call\n command_return_tuple = (invocation_ret_val, str(exception))\n\n send_telemetry(args_list, command_return_tuple)\n\n except: # pylint: disable=bare-except\n\n import sys\n ex = sys.exc_info()[0]\n\n # Allow telemetry to fail silently.\n logger.info(\n str.format('Not sending telemetry because python process due to error: {0}', ex))\n\n\ndef send_telemetry(command, command_return):\n \"\"\"\n Send telemetry to the provided instrumentation key. This does not includes a check to\n previously unsent telemetry for offline work.\n\n :param command: list representing the command which is given, including the parameters.\n For example, ['node', 'list', '--max-results', '10']\n :param command_return: (int, str). int is the returned code,\n str is the error message on command failure.\n\n :return: None\n \"\"\"\n\n command_return_code = command_return[0]\n command_return_msg = command_return[1]\n\n command_without_params = []\n\n # Mark commands which retrieve help text (ex. 
sfctl -h or sfctl node list -h)\n is_help_cmd = is_help_command(command)\n\n # Remove the parameters and keep only the command name\n # Do this by finding the first item which starts with \"-\"\n for segment in command:\n if segment.startswith('-'):\n break\n command_without_params.append(segment)\n\n # If the commands_without_params is empty, this means that\n # either sfctl is called, or sfctl -h is called. Don't record this.\n # Don't record commands asking for help.\n if is_help_cmd or not command_without_params:\n # Do not send telemetry\n return\n\n command_as_str = ' '.join(command_without_params)\n\n call_success = True\n\n if command_return_code != 0:\n call_success = False\n\n # Get the path of where this file (telemetry.py) is.\n current_file_location = \\\n os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n\n # This is the absolute path.\n send_telemetry_background_path = \\\n os.path.join(current_file_location, 'send_telemetry_background.py')\n\n # subprocess.run is the newer version of the call command (python 3.5)\n # If you close the terminal, this process will end as well.\n Popen(['python', send_telemetry_background_path, command_as_str,\n str(call_success), platform, version, command_return_msg], close_fds=True)\n\n return\n","sub_path":"src/sfctl/telemetry.py","file_name":"telemetry.py","file_ext":"py","file_size_in_byte":4772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"375359222","text":"# coding=utf-8\n\"\"\"Module docstring.\nThis serves as a long usage message.\n\"\"\"\n\n# 程序主函数入口\nimport sys\n\nfrom disassembly import dis_assembly\nimport constant as Constant\nfrom simulation import simulate\n\n\ndef write_file(txt, file_path):\n with open(file_path, 'wt') as f:\n print(txt, file=f, end='')\n\n\ndef main(sample_file, dis_assembly_file, simulation_file):\n dis_assembly_txt, dis_assembly_list, mem_line_num = dis_assembly(sample_file)\n write_file(dis_assembly_txt, dis_assembly_file)\n simulate_txt = simulate(dis_assembly_list, mem_line_num)\n write_file(simulate_txt, simulation_file)\n\n\nif __name__ == \"__main__\":\n argv_length = len(sys.argv)\n sample_file = sys.argv[1] if argv_length > 1 else Constant.MIPS_MACHINE_CODE_PATH\n dis_assembly_file = sys.argv[2] if argv_length > 2 else Constant.MIPS_DIS_ASSEMBLY_PATH\n simulation_file = sys.argv[3] if argv_length > 3 else Constant.MIPS_SIMULATION_PATH\n\n main(sample_file, dis_assembly_file, simulation_file)\n","sub_path":"proj2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"239807807","text":"\"\"\"\n\n\n\n\nconda install -c ankurankan pgmpy\n\n#conda create -n env_BNLEARN python=3.6\n#conda activate env_BNLEARN\n\nconda install pytorch\npgmpy\nnumpy\npandas\ntqdm\nstatsmodels\ncommunity\nnetworkx==1.11\nmatplotlib==2.2.3\n\n#conda install spyder\n\n\n\n\n\"\"\"\n####################################################################################################\n####################################################################################################\nimport pandas as pd, numpy as np\n\n\ndef load(filename, globs=None, varname=None) :\n import numpy as np, pandas as pd, pickle, os\n ### Name\n # print(filename)\n x = os.path.basename(filename)\n x = x if varname is None else varname\n x = x.split(\".\")[0].replace(\"-\",\"_\")\n\n if \".pkl\" in filename : obj = pickle.load( open(filename, 
mode=\"rb\") , encoding=\"utf-8\" )\n elif \".npy\" in filename : obj = np.load(filename)\n else : obj = pd.read_csv(filename)\n\n if globs is not None :\n print(x, filename)\n globs[x] = obj\n else :\n return obj\n\n\ndef load_all(folder, globs, recursive=True) :\n import glob\n for f in glob.glob(folder, recursive=recursive):\n try :\n load(f, globs=globs)\n # print(f)\n except Exception as e:\n print(\"error\", f, e)\n\n\n\n#####################################\ndir1 =\"data/sachs/sachs/\"\n\n\nload_all( f\"{dir1}/*.*\", globs= globals() )\n\n\n\n####################################################################################################\n####### Details ##################################################################################\nload( f\"{dir1}/categories.npy\", globs= globals() )\n\nload( f\"{dir1}/idx2name.pkl\", globs= globals() )\n\nload( f\"{dir1}/name2idx.pkl\", globs= globals() )\n\nload( f\"{dir1}/sachs-header.npy\", globs= globals() )\n\n\n\n\n\n\n\n\n\n####################################################################################################\n####################################################################################################\nload_all( f\"{dir1}/continuous/*.*\", globs= globals() )\n\n\n\ndata1 = data1\n\nCPDAG1\n\nDAG1\n\na=1\n\n\n\na = DAG1\n\n\n\n\nsachs_header\n\n\n\nsachs-header\n\n\ncategories\n\n\n\n\n\nimport bnlearn as bn\n\n\n\n# Example dataframe sprinkler_data.csv can be loaded with: \ndf = bn.import_example()\n# df = pd.read_csv('sprinkler_data.csv')\n\nmodel = bn.structure_learning(df)\nG = bn.plot(model)\n\n\n\n\n\nimport pgmpy\n\n\nimport numpy as np\n\n\na =1\n\n","sub_path":"viz/bnlearn_test.py","file_name":"bnlearn_test.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"529346229","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/1/30 0030 上午 11:15\n# @Author : adowu\n# @Site : \n# @File : coin_change.py\n# @Software: PyCharm\n\n\ndef coin_change(coins, amount):\n \"\"\"\n :type coins: List[int]\n :type amount: int\n :rtype: int\n \"\"\"\n\n dp = list()\n\n for i in range(amount + 1):\n # 默认不可到达\n dp.append(-1)\n\n # 初始化最优解\n dp[0] = 0\n\n for i in range(amount + 1):\n for coin in coins:\n if i >= coin and dp[i - coin] != -1:\n # 当前面值大于等于coin的值 and 小面值的coin是可以取到\n if dp[i] == -1 or dp[i] > dp[i - coin] + 1:\n # dp[i]==-1 表示第一次小面值的加进来\n # dp[i] > dp[i-coin] +1 表示大面值(coin比前次的coin肯定要大)的加进来,大面值的个数肯定比小面值的要小\n\n # 重新赋值dp[i]\n dp[i] = dp[i - coin] + 1\n # else:\n # break\n # coins 是无序的话就不行\n\n return dp[amount]\n\n\nif __name__ == '__main__':\n print(coin_change([1, 2, 5, 7, 10], 25))\n","sub_path":"dynamic_programming/322_coin_change.py","file_name":"322_coin_change.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"650180844","text":"#!/usr/bin/env python\n#-*- coding=utf-8 -*-\n\"\"\"\n@author:Wllen\n@file:homework2.py\n@time:2018/5/10 20:52\n\"\"\"\ndic = {'k1':'v1',\n 'k2':'v2',\n 'k3':'v3',\n }\n# question 1&2&3\n# for i in dic:\n# print(i,dic[i])\n\n# question 4\n# dic.setdefault('k4','v4') # 查找key,有就返回value,无则新增key和value\n# print(dic)\n\n\n# question 5\n# dic.pop('k1')\n# print(dic)\n\n\n# question 6\n# if dic.__contains__('k5'):\n# dic.pop('k5')\n# else:\n# print(dic.get('k5'))\n\n# question 7\n# print(dic.get('k2'))\n#\n# question 8\n# print(dic.get('k6'))\n\n# question 9\n\ndic2 
={'k1':'v111','a':'b'}\n\ndic2.update(dic)\nprint(dic2)\n\n# 10、组合嵌套题,有如下列表,按照要求实现每一个功能\n\n","sub_path":"test/homework2.py","file_name":"homework2.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"324852447","text":"from app import app, db\nfrom flask import redirect, url_for, request, jsonify\nfrom app.models import User\nimport time\nimport jwt\nimport stripe\n\n\nstripe.api_key = app.config['STRIPE_SECRET_KEY']\n\n# when you have a token always have an expiration date\n# so use the time module\n@app.route('/')\n@app.route('/index')\ndef index():\n return ''\n\n@app.route('/api/payment', methods=['GET', 'POST'])\ndef payment():\n token = request.headers.get('token')\n email = request.headers.get('email')\n amount = request.headers.get('amount')\n\n print('*************')\n print('*************')\n print('*************')\n print(token)\n print(email)\n print(amount)\n print('*********************************')\n print('*********************************')\n # stripes hasn't really saved yet\n # it has saved card information and token information\n\n # create a Customer\n customer = stripe.Customer.create(\n email=email,\n source=token\n )\n print(customer.id)\n\n # now set up the actual stripe charge\n # which completes the purchase\n\n # create a Charge from the customer and token\n charge = stripe.Charge.create(\n customer=customer.id,\n amount=amount,\n currency='usd',\n description='This was a test purchase on Dec 9 2019 from React'\n )\n\n print(charge)\n\n return jsonify({'message' : 'success'})\n\n\n@app.route('/api/register', methods=[\"GET\", \"POST\"])\ndef register():\n try:\n # the front end encodes user and pw and sends in header\n token = request.headers.get('token')\n print(token)\n # decode token to a dictionary\n data = jwt.decode(\n token,\n app.config['SECRET_KEY'],\n algorithm='HS256'\n )\n print(data)\n # create User and save\n user = User(email=data['email'])\n user.set_password(data['password'])\n db.session.add(user)\n db.session.commit()\n return jsonify({ 'message': 'Success' })\n except:\n return jsonify({ 'message': 'User not crated' })\n\n@app.route('/api/login', methods=[\"GET\", \"POST\"])\ndef login():\n try:\n # this token is log-in credentials\n # tokenise passwords always\n token = request.headers.get('token')\n print(token)\n data = jwt.decode(\n token,\n app.config['SECRET_KEY'],\n algorithm=['H2256']\n )\n print(data)\n # query db to get User and check password\n user = User.query.filter_by(email=data['email']).first()\n # if user doesn't exist or password incorrect, send fall message\n if user is None or not user.check_password(data['password']):\n return jsonify({'message': 'invalid credentials'})\n return jsonify({'message': 'success', 'token': user.get_token()})\n except:\n return jsonify({'message': 'failure to login'})\n\n# need to be authenticated to see this page\n@app.route('/api/data', methods=[\"GET\", \"POST\"])\ndef data():\n try:\n token = request.headers.get('token')\n print(token)\n # get user id or nothing\n user = User.verify_token(token)\n if not user:\n return jsonify({ 'message': 'Invalid user'})\n data = {\n 'name': 'John Smith',\n 'age': 27\n }\n return jsonify({'info': data})\n #\n except:\n return jsonify({'message': 'invalid token'})\n","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} 
+{"seq_id":"198127582","text":"# #-*- coding: utf-8 -*-\n\nimport folium\nimport webbrowser\nimport requests\nimport json\n\n# 네이티브 앱 키\n# f04c74adfc7f52935732eb312d3c98f5\n# REST API 키\n# 83bee4a2cc30ecb8dc09921b53417214\n# JavaScript 키\n# d945efe23d65b9cba53d60375315617c\n# Admin 키\n# 708ecd59aca5f4c51908a7a2b8bd98fc\n\n\ndef getLatLng(addr):\n url = 'https://dapi.kakao.com/v2/local/search/address.json?query=' + addr\n\n headers = {\"Authorization\": \"KakaoAK 83bee4a2cc30ecb8dc09921b53417214\"}\n\n result = json.loads(str(requests.get(url, headers=headers).text))\n\n match_first = result['documents'][0]['address']\n\n return float(match_first['y']), float(match_first['x'])\n\n\ndef FindRocation(rocationAddr):\n #rocationList=[35.1345653,128.9260548]\n\n rocationList = getLatLng(rocationAddr)\n map_osm = folium.Map (location = rocationList, zoom_start=16)\n # 마커 지정\n folium.Marker(rocationList).add_to(map_osm)\n # html 파일로 저장\n map_osm.save('osm.html')\n\n webbrowser.open(\"osm.html\")","sub_path":"ScriptTermProject/foliumTest.py","file_name":"foliumTest.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"219016492","text":"from __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport io\nimport mako.lookup\nimport ordereddict\nimport simplejson\n\n\ntemplate_lookup = mako.lookup.TemplateLookup(\n directories=['.'],\n)\n\n\nALL_TEMPLATES = ('index', 'hooks')\n\n\ndef get_env():\n all_hooks = simplejson.loads(\n io.open('all-hooks.json').read(),\n object_pairs_hook=ordereddict.OrderedDict,\n )\n\n return {'all_hooks': all_hooks}\n\n\ndef main():\n env = get_env()\n for template in ALL_TEMPLATES:\n env['template_name'] = template\n with io.open('{0}.html'.format(template), 'w') as html_file:\n template_obj = template_lookup.get_template(\n '{0}.mako'.format(template),\n )\n html_file.write(template_obj.render(**env))\n\n\nif __name__ == '__main__':\n exit(main())\n","sub_path":"make_templates.py","file_name":"make_templates.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"622416896","text":"import pandas as pd\nimport numpy as np\n\ntsv=pd.read_table('Python100knock-master/order.tsv', header=None)\n#print tsv\n\ntotal=0\nvalues=list(tsv[5])\n\nfor num in values:\n value=num.replace(',','')\n total+=int(value)\n\nprint(total)\n","sub_path":"25.py","file_name":"25.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"419103058","text":"import uvicorn\nfrom blacksheep.server import Application\nfrom blacksheep.server.bindings import FromForm\nfrom blacksheep.server.bindings import Request\nfrom blacksheep.server.templating import use_templates\nfrom blacksheep.server.responses import redirect, status_code, text, json as res_json\nfrom jinja2 import PackageLoader\nfrom event import Event\nfrom fips import get_fips\nimport json\nfrom event_risk import event_risk\nimport database as db\nfrom datetime import datetime, date\nfrom nanoid import generate\n\napp = Application()\nget = app.router.get\npost = app.router.post\nview = use_templates(app, loader=PackageLoader('app', 'templates'))\n\n\n@app.route('/')\ndef home():\n return view('index', {})\n\n\n@app.route('/create-event')\ndef create_event():\n return view('create-event', {})\n\n\n@get('/event-layout')\ndef event_layout(id: 
str, seats: int):\n evt = db.load_event(id)\n if evt is None:\n return status_code(422, f'unknown event with id {id}')\n\n risk = event_risk(evt, seats)\n return view('event-layout', {'id': id, 'seats': seats, 'risk': risk, 'name': evt.name})\n\n\n@post('/event-layout')\nasync def post_event_layout(request: Request):\n form = await request.json()\n print(form)\n evt = db.load_event(form['id'])\n if evt is None:\n return status_code(422, f'unknown event with id {id}')\n\n evt.seats = form['layout']['seats']\n db.store_event(evt)\n return status_code(200, 'success')\n\n\n@get('/event-created')\ndef event_created(id: str):\n evt = db.load_event(id)\n if evt is None:\n return status_code(422, f'unknown event with id {id}')\n\n return view('event-created', {'event': evt})\n\n\n@app.route('/about')\ndef about():\n return view('about', {})\n\n\n@app.route('/join-event')\ndef join_event():\n return view('join-event', {})\n\n\nclass EventCreationInput:\n event_name: str\n event_date: str\n event_location: str\n event_organizer: str\n event_population: int\n event_seats: int\n\n def __init__(self, event_name: str, event_date: str, event_location: str, event_organizer: str,\n event_population: int, event_seats: int) -> None:\n self.event_name = event_name\n self.event_date = event_date\n self.event_location = event_location\n self.event_organizer = event_organizer\n self.event_population = event_population\n self.event_seats = event_seats\n\n\n@app.router.post('/event-creation-endpoint')\nasync def create_event_from_post(input: FromForm[EventCreationInput]):\n data = input.value\n fips = get_fips(data.event_location)\n print(f'fips = {fips}')\n\n evt = Event(\n id=generate(size=8),\n name=data.event_name,\n creator_email=data.event_organizer,\n date=datetime.strptime(data.event_date, '%b %d, %Y').date(),\n fips=int(fips),\n seats=[]\n )\n db.store_event(evt)\n return redirect(f'/event-layout?id={evt.id}&seats={data.event_seats}')\n\n\nclass EventJoinData:\n seat_number: int\n\n def __init__(self, seat_number: int) -> None:\n self.seat_number = seat_number\n\n\n@get('/join-event/{event_id}')\nasync def join_event_from_post(request: Request):\n event_id = request.route_values['event_id']\n evt = db.load_event(event_id)\n if evt is None:\n return status_code(422, f'unknown event id: {event_id}')\n\n print(evt.seats)\n return view('join-event', {'event': evt, 'risk': event_risk(evt, len(evt.seats))})\n\n\n@post('/join-event-endpoint')\nasync def join_event_post(request: Request):\n js = await request.json()\n seat = js['seat_number']\n _id = js['id']\n evt = db.load_event(_id)\n for i, s_o in enumerate(evt.seats):\n if s_o['number'] == seat:\n evt.seats[i]['occupied'] = True\n db.store_event(evt)\n return status_code(200, 'success')\n\n\n@get('/event-joined')\ndef event_joined(id: str):\n evt = db.load_event(id)\n return view('event-joined', {'event': evt, 'risk': event_risk(evt, len(evt.seats))})\n\n\napp.serve_files('static')\n\nif __name__ == '__main__':\n uvicorn.run(app, host='0.0.0.0', port=42069)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"325967608","text":"# 특정 원소가 속한 집합을 찾기\ndef find_parent(parent, x):\n # 루트 노드가 아니라면, 루트 노드를 찾을 때까지 재귀적으로 호출\n if parent[x] != x:\n parent[x] = find_parent(parent, parent[x])\n return parent[x]\n\n# 두 원소가 속한 집합을 합치기\ndef union_parent(parent, a, b):\n a = find_parent(parent, a)\n b = find_parent(parent, b)\n if a < 
b:\n parent[b] = a\n else:\n parent[a] = b\n\n# n, m을 공백으로 구분하여 입력받음\nn, m = map(int, input().split())\n\n# parent 리스트, 각 노드에 대하여 부모를 자기 자신으로 초기화 \nparent = [0] * (n + 1)\nfor i in range(1, n + 1):\n parent[i] = i\n\n# 길의 정보를 담을 리스트\narray = []\nfor _ in range(m):\n a, b, cost = map(int, input().split())\n array.append((cost, a, b))\n\n# 크루스칼 알고리즘을 적용하기 위해 유지비를 기준으로 오름차순으로 정렬\narray.sort()\n\n# 크루스칼 알고리즘에 의해 선택된 간선들의 유지비를 담을 리스트\nresult = []\n# 크루스칼 알고리즘 수행\nfor i in range(m):\n if find_parent(parent, array[i][1]) != find_parent(parent, array[i][2]):\n union_parent(parent, array[i][1], array[i][2])\n result.append(array[i][0])\n else:\n continue\n\n# 마을을 두개로 분리하기 위해 마지막 선택된 간선을 제거\nresult.pop()\n\n# 결과 출력\nprint(sum(result))\n\n# 해설\n\"\"\"\n# 특정 원소가 속한 집합을 찾기\ndef find_parent(parent, x):\n # 루트 노드가 아니라면, 루트 노드를 찾을 때까지 재귀적으로 호출\n if parent[x] != x:\n parent[x] = find_parent(parent, parent[x])\n return parent[x]\n\n# 두 원소가 속한 집합을 합치기\ndef union_parent(parent, a, b):\n a = find_parent(parent, a)\n b = find_parent(parent, b)\n if a < b:\n parent[b] = a\n else:\n parent[a] = b\n\n# 노드의 개수와 간선(Union 연산)의 개수 입력받기\nv, e = map(int, input().split())\nparent = [0] * (v + 1) # 부모 테이블 초기화\n\n# 모든 간선을 담을 리스트와, 최종 비용을 담을 변수\nedges = []\nresult = 0\n\n# 부모 테이블상에서, 부모를 자기 자신으로 초기화\nfor i in range(1, v + 1):\n parent[i] = i\n\n# 모든 간선에 대한 정보를 입력받기\nfor _ in range(e):\n a, b, cost = map(int, input().split())\n # 비용순으로 정렬하기 위해서 튜플의 첫 번째 원소를 비용으로 설정\n edges.append((cost, a, b))\n\n# 간선을 비용순으로 정렬\nedges.sort()\nlast = 0 # 최소 신장 트리에 포함되는 간선 중에서 가장 비용이 큰 간선\n\n# 간선을 하나씩 확인하며\nfor edge in edges:\n cost, a, b = edge\n # 사이클이 발생하지 않는 경우에만 집합에 포함\n if find_parent(parent, a) != find_parent(parent, b):\n union_parent(parent, a, b)\n result += cost\n last = cost\n\nprint(result - last)\n\"\"\"\n","sub_path":"10-6 도시 분할 계획.py","file_name":"10-6 도시 분할 계획.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"300956549","text":"\"\"\"
Face UI Generator
\nCreates a faceUI and connects other modules inputs\n\"\"\"\n\nimport maya.cmds as cmds\n\nimport gurka.extensions.maya.lib.controlshape as controlshape\nimport gurka.extensions.maya.core.mgurkrigobj as mgurkrigobj\nimport gurka.extensions.maya.lib.mcurves as mcurves\nreload(mcurves)\n\n\nimport gurka.lib.gurkex as gurkex\n\n\nimport logging\n\n#TODO: Remake the UI so it creates shapes and then plugs the outputs.\n# Make it intuitive. Like easy grouping for controls and easy to change shapes etc.\n\nclass FaceUIGenerator(mgurkrigobj.MGurkRigObj):\n\n def __init__(self):\n super(FaceUIGenerator, self).__init__()\n\n self.add(\"faceuidata\", {}, public=True, ui={\"layer\":\"Add Components\",\n \"custom\":[\"gurka.extensions.maya.ui.customwidgets.faceuigeneratorwidget\",\n \"FaceUIGeneratorWidget\"]})\n\n\n def pre(self):\n super(FaceUIGenerator, self).pre()\n\n faceuidata = self.get(\"faceuidata\")\n maingrp = self.getMainGroup()\n\n guidegrp = cmds.group(em=True, n=\"{}Guide_{}\".format(self.get(\"name\"), self.suffix(\"group\")))\n self.add(\"guidegroup\", guidegrp)\n\n shapelist = [\"square\", \"circle\"]\n\n guides = []\n for keyid in faceuidata.keys():\n\n # grab data\n data = faceuidata[keyid]\n name = data[\"name\"]\n useparent = data[\"parent\"]\n childdict = data[\"inputsdata\"]\n parent = None\n\n if useparent:\n parent = cmds.group(em=True, n=\"{}{}_{}\".format(self.get(\"name\"),\n name,\n self.suffix(\"guide\")))\n controlshape.ControlShape.square(parent)\n cmds.parent(parent, guidegrp)\n if len(childdict)>1:\n cmds.setAttr(parent + \".sx\", len(childdict)+1.0)\n cmds.setAttr(parent + \".sy\", 2.0)\n cmds.setAttr(parent + \".tx\", len(childdict)/2.0)\n cmds.makeIdentity(parent, s=True)\n\n\n self.reg.add(\"transform\", parent)\n self.reg.add(\"shape\", parent)\n\n\n for e, childid in enumerate(childdict):\n childdata = childdict[childid]\n childshapeindex = childdata[\"shape\"]\n childname = childdata[\"name\"]\n\n childctl = cmds.group(em=True, n=\"{}{}_{}\".format(self.get(\"name\"),\n childname,\n self.suffix(\"guide\")))\n\n fnc = getattr(controlshape.ControlShape, shapelist[childshapeindex])\n fnc(childctl)\n\n\n if parent: cmds.parent(childctl, parent)\n else: cmds.parent(childctl, guidegrp)\n\n cmds.xform(childctl, t=[e,0,0])\n\n self.reg.add(\"transform\", childctl)\n self.reg.add(\"shape\", childctl)\n self.reg.add(\"hierarchy\", childctl)\n\n def build(self):\n super(FaceUIGenerator, self).build()\n\n guidegrp = self.get(\"guidegroup\")\n cmds.setAttr(guidegrp + \".v\", 0)\n\n guides = []\n for keyid in faceuidata.keys():\n\n # grab data\n data = faceuidata[keyid]\n name = data[\"name\"]\n useparent = data[\"parent\"]\n childdict = data[\"inputsdata\"]\n parent = None\n\n if useparent:\n parent = cmds.group(em=True, n=\"{}{}_{}\".format(self.get(\"name\"),\n name,\n self.suffix(\"control\")))\n guide = \"{}{}_{}\".format(self.get(\"name\"),\n name, self.suffix(\"guide\"))\n\n if cmds.objExists(guide):\n\n cmds.delete(cmds.parentConstraint(guide, parent, mo=False))\n dup = cmds.duplicate(guide)[0]\n\n cmds.makeIdentity(dup, a=True)\n cmds.makeIdentity(parent, a=True)\n\n shape = cmds.listRelatives(dup, shapes=True)\n cmds.parent(shape, parent, r=True, s=True)\n cmds.delete(dup)\n\n\n for e, childid in enumerate(childdict):\n childdata = childdict[childid]\n childshapeindex = childdata[\"shape\"]\n childname = childdata[\"name\"]\n\n childctl = cmds.group(em=True, n=\"{}{}_{}\".format(self.get(\"name\"),\n childname,\n self.suffix(\"control\")))\n childguide = 
\"{}{}_{}\".format(self.get(\"name\"),\n childname, self.suffix(\"guide\"))\n\n if cmds.objExists(guide):\n cmds.delete(cmds.parentConstraint(childguide, parent, mo=False))\n dup = cmds.duplicate(childguide)[0]\n cmds.makeIdentity(dup, a=True)\n cmds.makeIdentity(parent, a=True)\n\n shape = cmds.listRelatives(dup, shapes=True)\n cmds.parent(shape, parent, r=True, s=True)\n\n cmds.delete(dup)\n\n if parent: cmds.parent(childctl, parent)\n cmds.xform(childctl, t=[e,0,0])\n\n\n def post(self):\n super(FaceUIGenerator, self).post()\n","sub_path":"gurka/extensions/maya/modules/face/faceuigenerator.py","file_name":"faceuigenerator.py","file_ext":"py","file_size_in_byte":5520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"565987793","text":"from django.conf.urls import url, patterns\nfrom proyecto import views\n\nurlpatterns = patterns('proyecto.views',\n url(r'^index$', 'index', name='index'),\n url(r'^mapa$', 'mapa', name='mapa'),\n url(r'^contacto$', 'contacto', name='contacto'),\n url(r'^discapacidadesPastel$', 'discapacidadesPastel', name='discapacidadesPastel'),\n url(r'^analfabetismo$', 'analfabetismo', name='analfabetismo'),\n url(r'^mental$', 'mental', name='mental'),\n url(r'^visual$', 'visual' , name='visual'),\n url(r'^auditiva$', 'auditiva', name='auditiva'),\n url(r'^fisica$', 'fisica', name='fisica'),\n url(r'^intelectual$', 'intelectual', name='intelectual'),\n)\n\n","sub_path":"proyecto/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"545753449","text":"import kxgui.src.KXG_Util as Util\nimport kxgui.src.Label as Label\n\nclass new(Label.new):\n\n def __init__(self, x=0, y=0, width=0.2, height=0.05, text=\"\", checked=False):\n Label.new.__init__(self, x, y, width, height, text, 0.04)\n self.checked = checked\n self.margin = 16\n self.verticalAlign = 1\n self.autoSize = False\n\n self.theme = {\n \"textColor\": (0, 0, 0, 1),\n \"backColor\": (0.62, 0.61, 0.6, 1),\n \"borderColor\": (0, 0, 0, 1),\n \"borderWidth\": 1,\n \"checked\": (0, 0, 0, 1),\n \"unchecked\": (1, 1, 1, 1)\n }\n\n self.__once = False\n\n def onMouseClick(self, sender):\n if not self.__once:\n self.checked = not self.checked\n self.__once = True\n\n def onMouseRelease(self, sender):\n self.__once = False\n\n def onDeactivate(self):\n self.__once = False\n\n def onDraw(self):\n Label.new.onDraw(self)\n\n isImageBackground = (isinstance(self.theme[\"checked\"], Util.Image) and\n isinstance(self.theme[\"unchecked\"], Util.Image))\n\n if not isImageBackground:\n chkb = [self.bounds.x+1, self.bounds.y+6, 16, 16]\n self.margin = 18\n\n Util.setColor(self.theme[\"backColor\"])\n Util.fillRect(*chkb)\n\n Util.setWidth(self.theme[\"borderWidth\"])\n Util.setColor(self.theme[\"borderColor\"])\n Util.drawRect(*chkb)\n\n if self.checked:\n chk = [chkb[0]+2, chkb[1]+2, 12, 12]\n Util.setColor((0,0,0,1))\n Util.fillRect(*chk)\n else:\n w, h = self.theme[\"checked\"].size\n h2 = int(self.bounds.height/2-h/2)+1\n chkb = (self.bounds.x+1, self.bounds.y+h2, w, h)\n self.margin = w+2\n\n if self.checked:\n Util.drawTexture(self.theme[\"checked\"].id, w, h,\n [chkb[0], chkb[1], chkb[2], chkb[3]],\n [(0,0), (1,0), (1,1), (0,1)], invert=True)\n else:\n Util.drawTexture(self.theme[\"unchecked\"].id, w, h,\n [chkb[0], chkb[1], chkb[2], chkb[3]],\n [(0,0), (1,0), (1,1), (0,1)], 
invert=True)\n","sub_path":"src/CheckBox.py","file_name":"CheckBox.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"182317511","text":"# coding: utf-8\n\nimport time\nimport operator\nimport tensorflow as tf\nfrom datetime import datetime\nfrom keras.utils.np_utils import to_categorical\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"../MNIST_data/\")\nX_train1 = mnist.train.images[mnist.train.labels < 5]\ny_train1 = mnist.train.labels[mnist.train.labels < 5]\nX_valid1 = mnist.validation.images[mnist.validation.labels < 5]\ny_valid1 = mnist.validation.labels[mnist.validation.labels < 5]\nX_test1 = mnist.test.images[mnist.test.labels < 5]\ny_test1 = mnist.test.labels[mnist.test.labels < 5]\ny_train1 = to_categorical(y_train1, num_classes=5)\ny_valid1 = to_categorical(y_valid1, num_classes=5)\ny_test1 = to_categorical(y_test1, num_classes=5)\n\n\ndef split_folds(num_folds=3):\n train_length = len(y_train1)\n fold_size = int(train_length / num_folds)\n X_train_folds = []\n y_train_folds = []\n for i in range(num_folds):\n start = i * fold_size\n end = (i + 1) * fold_size if i + 1 < num_folds else train_length\n images = X_train1[start: end]\n labels = y_train1[start: end]\n X_train_folds.append(images)\n y_train_folds.append(labels)\n\n return X_train_folds, y_train_folds\n\n\ndef init_input():\n with tf.name_scope('input'):\n X = tf.placeholder('float', [None, 784], name='input_x')\n y = tf.placeholder('float', [None, 5], name='label_y')\n return X, y\n\n\ndef add_layer(input_dim, output_dim, inputs, name, activation_function=None):\n name_W = 'W_' + name\n name_b = 'b_' + name\n\n with tf.name_scope(name):\n with tf.name_scope('weight'):\n W = tf.get_variable(shape=[input_dim, output_dim], initializer=tf.contrib.keras.initializers.he_normal(), name=name_W)\n tf.summary.histogram(name + '/weight', W)\n with tf.name_scope('bias'):\n b = tf.Variable(tf.constant(0.1, shape=[1, output_dim]), name=name_b)\n tf.summary.histogram(name + '/bias', b)\n with tf.name_scope('Wx_plus_b'):\n Wx_plus_b = tf.add(tf.matmul(inputs, W), b)\n if activation_function is None:\n outputs = Wx_plus_b\n else:\n outputs = activation_function(Wx_plus_b)\n tf.summary.histogram(name + '/output', outputs)\n return outputs\n\n\ndef build_network(X, n, activation):\n h1 = add_layer(input_dim=784, output_dim=n, inputs=X, name='hidden_layer_1', activation_function=activation)\n h2 = add_layer(input_dim=n, output_dim=n, inputs=h1, name='hidden_layer_2', activation_function=activation)\n h3 = add_layer(input_dim=n, output_dim=n, inputs=h2, name='hidden_layer_3', activation_function=activation)\n h4 = add_layer(input_dim=n, output_dim=n, inputs=h3, name='hidden_layer_4', activation_function=activation)\n h5 = add_layer(input_dim=n, output_dim=n, inputs=h4, name='hidden_layer_5', activation_function=activation)\n y_hat = add_layer(input_dim=n, output_dim=5, inputs=h5, name='output_layer', activation_function=tf.nn.softmax)\n return y_hat\n\n\ndef init_evaluation(y, y_hat, learning_rate):\n with tf.name_scope('cross_entropy'):\n loss_function = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=y_hat))\n tf.summary.scalar('cross_entropy', loss_function)\n\n with tf.name_scope('train'):\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss_function)\n\n with tf.name_scope('accuracy'):\n with 
tf.name_scope('correction_prediction'):\n correct_prediction_count = tf.equal(tf.argmax(y, 1), tf.argmax(y_hat, 1))\n with tf.name_scope('accuracy'):\n acc = tf.reduce_mean(tf.cast(correct_prediction_count, 'float'))\n tf.summary.scalar('accuracy', acc)\n\n return loss_function, optimizer, acc\n\n\ndef DNN(epoch, n_neurons, learning_rate, activation, batch_size, early_stopping, restore_model_name=None):\n X_train_folds, y_train_folds = split_folds()\n\n tf.reset_default_graph()\n act_str = str(activation).split()[1]\n\n training_id = '[n_neurons_%d, learning_rate_%f, activation_%s, batch_size_%d]' % (\n n_neurons, learning_rate, act_str, batch_size)\n print(training_id)\n training_id += \"-\" + datetime.utcnow().strftime(\"%Y%m%d%H%M%S\")\n logdir = \"tf_logs/{}/\".format(training_id)\n\n X, y = init_input()\n y_hat = build_network(X, n_neurons, activation)\n loss_function, optimizer, acc = init_evaluation(y, y_hat, learning_rate)\n\n batch = int(mnist.train.num_examples / batch_size)\n\n start_epoch = 0\n iteration = 0\n\n with tf.Session() as sess:\n if restore_model_name:\n saver.restore(sess, \"regular_train/\" + restore_model_name)\n start_epoch = int(restore_model_name.split('.')[0].split('_')[-1])\n else:\n sess.run(tf.global_variables_initializer())\n\n merged = tf.summary.merge_all()\n writer = tf.summary.FileWriter(logdir, sess.graph)\n\n MSE = 0.0\n folds_count = len(X_train_folds)\n\n # X_train_folds[i] is the validation fold\n for i in range(folds_count):\n sess.run(tf.global_variables_initializer())\n n = 0\n # Always takes the first fold as validation fold, than move the fold to the bottom after each iteration\n print('FOLD %d' % (i + 1))\n vali_X = X_train_folds.pop(0)\n vali_y = y_train_folds.pop(0)\n best_vali_loss = 99999.0\n best_vali_acc = 0.0\n saver = tf.train.Saver()\n\n for i in range(start_epoch, epoch):\n # zip(X_train_folds, y_train_folds) are training folds (index = [1] & [2])\n for X_train_fold, y_train_fold in zip(X_train_folds, y_train_folds):\n for j in range(batch):\n batch_x = X_train_fold[j * batch_size: (j + 1) * batch_size]\n batch_y = y_train_fold[j * batch_size: (j + 1) * batch_size]\n sess.run(optimizer, feed_dict={X: batch_x, y: batch_y})\n\n iteration += 1\n result = sess.run(merged, feed_dict={X: vali_X, y: vali_y})\n writer.add_summary(result, iteration)\n\n vali_loss = sess.run(loss_function, feed_dict={X: vali_X, y: vali_y})\n vali_acc = sess.run(acc, feed_dict={X: vali_X, y: vali_y})\n\n if best_vali_acc > vali_acc:\n n += 1\n best_vali_loss = vali_loss if best_vali_loss > vali_loss else best_vali_loss\n file_name = 'regular_training_epoch_%d.ckpt' % (i + 1)\n saver.save(sess, \"regular_train/%s/%s\" % (training_id, file_name))\n else:\n best_vali_loss = vali_loss if best_vali_loss > vali_loss else best_vali_loss\n best_vali_acc = vali_acc\n n += 0\n\n print(\"Epoch: %2d, Validation loss: %9.4f, Best loss:%9.4f, Accuracy: %.4f\" %\n (i + 1, vali_loss, best_vali_loss, vali_acc))\n\n if n > early_stopping:\n print('Early Stopping at epoch %d' % i)\n break\n\n MSE += (best_vali_loss ** 2) / folds_count\n X_train_folds.append(vali_X)\n y_train_folds.append(vali_y)\n\n file_name = 'final_model'\n save_path = saver.save(sess, \"regular_train/%s/%s\" % (training_id, file_name))\n print(\"Model saved %s\" % training_id)\n\n test_acc = sess.run(acc, feed_dict={X: X_test1, y: y_test1})\n print(\"Final test accuracy: %.4f\" % test_acc)\n\n return MSE\n\n\ndef DNN_testacc(epoch, n_neurons, learning_rate, activation, batch_size, early_stopping, 
restore_model_name=None):\n tf.reset_default_graph()\n\n act_str = str(activation).split()[1]\n training_id = '[n_neurons_%d, learning_rate_%f, activation_%s, batch_size_%d]' % (\n n_neurons, learning_rate, act_str, batch_size)\n print(training_id)\n training_id += \"-\" + datetime.utcnow().strftime(\"%Y%m%d%H%M%S\")\n logdir = \"tf_logs/{}/\".format(training_id)\n\n X, y = init_input()\n y_hat = build_network(X, n_neurons, activation)\n loss_function, optimizer, acc = init_evaluation(y, y_hat, learning_rate)\n\n batch = int(mnist.train.num_examples / batch_size)\n\n best_vali_loss = 9999.0\n best_vali_acc = 0.0\n\n saver = tf.train.Saver()\n start_epoch = 0\n\n with tf.Session() as sess:\n if restore_model_name:\n saver.restore(sess, \"regular_train/\" + restore_model_name)\n start_epoch = int(restore_model_name.split('.')[0].split('_')[-1])\n else:\n sess.run(tf.global_variables_initializer())\n\n merged = tf.summary.merge_all()\n writer = tf.summary.FileWriter(logdir, sess.graph)\n\n for i in range(start_epoch, epoch):\n for j in range(batch):\n batch_x = X_train1[j * batch_size: (j + 1) * batch_size]\n batch_y = y_train1[j * batch_size: (j + 1) * batch_size]\n sess.run(optimizer, feed_dict={X: batch_x, y: batch_y})\n\n result = sess.run(merged, feed_dict={X: X_valid1, y: y_valid1})\n writer.add_summary(result, i + 1)\n\n vali_loss = sess.run(loss_function, feed_dict={X: X_valid1, y: y_valid1})\n vali_acc = sess.run(acc, feed_dict={X: X_valid1, y: y_valid1})\n\n if best_vali_acc < vali_acc:\n n = 0\n best_vali_acc = vali_acc\n else:\n n += 1\n\n if n > early_stopping:\n print('Early Stopping at epoch %d' % i)\n break\n\n file_name = 'regular_training_epoch_testacc_%d.ckpt' % (i + 1)\n save_path = saver.save(sess, \"regular_train/%s/%s\" % (training_id, file_name))\n\n print(\"Epoch: %2d, Validation loss: %9.4f, Accuracy: %.4f, Best Accuracy:%.4f\" %\n (i + 1, vali_loss, vali_acc, best_vali_acc))\n\n file_name = 'final_model'\n save_path = saver.save(sess, \"regular_train/%s/%s\" % (training_id, file_name))\n print(\"Model saved in path: %s\" % save_path)\n\n test_acc = sess.run(acc, feed_dict={X: X_test1, y: y_test1})\n print(\"Final test accuracy: %.4f\" % test_acc)\n\n\n\"\"\"\nneurons_list = [10, 30, 50, 70, 90, 100, 120, 140, 160]\nbatch_size_list = [10, 50, 100, 500]\nlearning_rate_list = [0.01, 0.02, 0.05, 0.1]\nactivation_function_list = [tf.nn.relu, tf.nn.elu, tf.nn.leaky_relu, tf.nn.tanh]\n\"\"\"\nparams = [(160, 0.1, tf.nn.elu, 500),\n (100, 0.1, tf.nn.elu, 100),\n (10, 0.05, tf.nn.elu, 50),\n (160, 0.05, tf.nn.relu, 500),\n (100, 0.1, tf.nn.relu, 100),\n (10, 0.05, tf.nn.relu, 50),\n (160, 0.05, tf.nn.leaky_relu, 500),\n (100, 0.1, tf.nn.leaky_relu, 100),\n (10, 0.05, tf.nn.leaky_relu, 50),\n (160, 0.05, tf.nn.tanh, 500),\n (100, 0.1, tf.nn.tanh, 100),\n (10, 0.05, tf.nn.tanh, 50)]\nresult = {}\nfor param in params:\n (n, lr, af, bs) = param\n MSE = DNN(epoch=1000, n_neurons=n, learning_rate=lr, activation=af, batch_size=bs, early_stopping=10)\n result[param] = MSE\nparams, MSE = sorted(result.items(), key=operator.itemgetter(1))[0]\nn, lr, af, bs = params\nprint('--------BEST PARAMETERS--------')\ntest_acc = DNN_testacc(epoch=1000, n_neurons=n, learning_rate=lr, activation=af, batch_size=bs, early_stopping=10)\n","sub_path":"Deep-Learning/hw2/1C/MNIST_cross_entropy.py","file_name":"MNIST_cross_entropy.py","file_ext":"py","file_size_in_byte":11381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"8858707","text":"\"\"\" 
Functions used by calibration the calibration routines \"\"\"\n\nfrom scipy.optimize import least_squares\nimport numpy\n\ndef fit_sphere_least_squares(coordinates, initial_parameters,\n bounds=\n ((-numpy.inf, -numpy.inf, -numpy.inf, -numpy.inf),\n (numpy.inf, numpy.inf, numpy.inf, numpy.inf))):\n \"\"\"\n Uses scipy's least squares optimisor to fit a sphere to a set\n of 3D Points\n\n :param coordinates: (x,y,z) n x 3 array of point coordinates\n :param initial parameters: 1 x 4 array containing four initial\n values (centre, and radius)\n :return: x: an array containing the four fitted parameters\n :return: ier: int An integer flag. If it is equal to 1, 2, 3 or 4, the\n solution was found.\n \"\"\"\n x_values = coordinates[:, 0]\n y_values = coordinates[:, 1]\n z_values = coordinates[:, 2]\n return least_squares(_calculate_residual_sphere, initial_parameters,\n bounds=bounds,\n method='trf',\n jac='3-point',\n args=(x_values, y_values, z_values))\n\ndef _calculate_residual_sphere(parameters, x_values, y_values, z_values):\n \"\"\"\n Calculates the residual error for an x,y,z coordinates, fitted\n to a sphere with centre and radius defined by the parameters tuple\n\n :param: A tuple of the parameters to be optimised, should contain\n [x_centre, y_centre, z_centre, radius]\n :param: arrays containing the x,y, and z coordinates.\n :return: The residual error\n\n \"\"\"\n #extract the parameters\n x_centre, y_centre, z_centre, radius = parameters\n\n #use numpy's sqrt function here, which works by element on arrays\n distance_from_centre = numpy.sqrt((x_values - x_centre)**2 +\n (y_values - y_centre)**2 +\n (z_values - z_centre)**2)\n\n return distance_from_centre - radius\n","sub_path":"sksurgerycalibration/algorithms/sphere_fitting.py","file_name":"sphere_fitting.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"336937399","text":"# -*- coding: utf-8 -*-\n\nimport time\nimport math\n\nfrom helper import Helper\nfrom ..output.output import Output\n\n\nclass ProgressHelper(Helper):\n \"\"\"\n The Progress class providers helpers to display progress output.\n \"\"\"\n\n FORMAT_QUIET = ' %percent%%'\n FORMAT_NORMAL = ' %current%/%max% [%bar%] %percent%%'\n FORMAT_VERBOSE = ' %current%/%max% [%bar%] %percent% Elapsed: %elapsed%'\n FORMAT_QUIET_NOMAX = ' %{current}%'\n FORMAT_NORMAL_NOMAX = ' %current% [%bar%]'\n FORMAT_VERBOSE_NOMAX = ' %current% [%bar%] Elapsed: %elapsed%'\n\n # options\n bar_width = 28\n bar_char = '='\n empty_bar_char = '-'\n progress_char = '>'\n display_format = None\n redraw_freq = 1\n\n last_messages_length = None\n bar_char_original = None\n\n output = None\n current_step = 0\n max_steps = 0\n start_time = None\n\n default_format_vars = [\n 'current',\n 'max',\n 'bar',\n 'percent',\n 'elapsed'\n ]\n\n format_vars = []\n\n widths = {\n 'current': 4,\n 'max': 4,\n 'percent': 3,\n 'elapsed': 6\n }\n\n time_formats = [\n (0, '???'),\n (2, '1 sec'),\n (59, 'secs', 1),\n (60, '1 min'),\n (3600, 'mins', 60),\n (5400, '1 hr'),\n (86400, 'hrs', 3600),\n (129600, '1 day'),\n (604800, 'days', 86400)\n ]\n\n def set_bar_width(self, size):\n \"\"\"\n Sets the progress bar with\n\n @param size: The progress bar size\n @type size: int\n \"\"\"\n self.bar_width = size\n\n def set_bar_character(self, char):\n \"\"\"\n Sets the progress bar character\n\n @param char: The progress bar character\n @type char: str\n \"\"\"\n self.bar_char = char\n\n def set_empty_bar_character(self, char):\n 
\"\"\"\n Sets the empty bar character\n\n @param char: A character\n @type char: str\n \"\"\"\n self.empty_bar_char = char\n\n def set_progress_character(self, char):\n \"\"\"\n Sets the progress character\n\n @param char: A character\n @type char: str\n \"\"\"\n self.progress_char = char\n\n def set_display_format(self, display_format):\n \"\"\"\n Sets the progress bar format\n\n @param display_format: The display format\n @type display_format: str\n \"\"\"\n self.display_format = display_format\n\n def set_redraw_frequency(self, freq):\n \"\"\"\n Sets the redraw frequency\n\n @param freq: The redraw frequency in seconds\n @type freq: int\n \"\"\"\n self.bar_char = freq\n\n def start(self, output_, max_steps=None):\n \"\"\"\n Starts the progress output\n\n @param output_: An Output instance\n @type output_: Output\n @param max_steps: Maximum steps\n @type max_steps: int\n \"\"\"\n self.start_time = time.time()\n self.current_step = 0\n self.max_steps = int(max_steps or 0)\n self.output = output_\n\n if self.display_format is None:\n if self.output.get_verbosity() == Output.VERBOSITY_QUIET:\n self.display_format = self.FORMAT_QUIET_NOMAX\n if self.max_steps > 0:\n self.display_format = self.FORMAT_QUIET\n elif self.output.get_verbosity() == Output.VERBOSITY_VERBOSE:\n self.display_format = self.FORMAT_VERBOSE_NOMAX\n if self.max_steps > 0:\n self.display_format = self.FORMAT_VERBOSE\n else:\n self.display_format = self.FORMAT_NORMAL_NOMAX\n if self.max_steps > 0:\n self.display_format = self.FORMAT_NORMAL\n\n self.initialize()\n\n def advance(self, step=1, redraw=False):\n \"\"\"\n Advances the progress output X steps\n\n @param step: Number of steps to advance\n @type step: int\n @param redraw: Whether to redraw or not\n @type redraw: bool\n \"\"\"\n if self.start_time is None:\n raise Exception('You must start the progress bar before calling advance().')\n\n if self.current_step == 0:\n redraw = True\n\n self.current_step += step\n if redraw or self.current_step % self.redraw_freq == 0:\n self.display()\n\n def display(self, finish=False):\n \"\"\"\n Ouputs the current progress string\n\n @param finish: Forces the end result\n @type finish: bool\n \"\"\"\n if self.start_time is None:\n raise Exception('You must start the progress bar before calling display().')\n\n message = self.display_format\n for name, value in self.generate(finish).items():\n message = message.replace('%' + name + '%', str(value))\n\n self.overwrite(self.output, message)\n\n def finish(self):\n \"\"\"\n Finishes the progress output\n \"\"\"\n if self.start_time is None:\n raise Exception('You must start the progress bar before calling finish().')\n\n if not self.max_steps:\n self.bar_char = self.bar_char_original\n self.display(True)\n\n self.start_time = None\n self.output.writeln('')\n self.output = None\n\n def initialize(self):\n \"\"\"\n Initializes the progress output\n \"\"\"\n self.format_vars = []\n for v in self.default_format_vars:\n if self.display_format.find('%' + v + '%') != -1:\n self.format_vars.append(v)\n\n if self.max_steps > 0:\n self.widths['max'] = len(str(self.max_steps))\n self.widths['current'] = self.widths['max']\n else:\n self.bar_char_original = self.bar_char\n self.bar_char = self.empty_bar_char\n\n def generate(self, finish=False):\n \"\"\"\n Generates the array map of format variables to values.\n\n @param finish: Forces the end result\n @type finish: bool\n\n @return: A dict of format vars and values\n @rtype: dict\n \"\"\"\n format_vars = {}\n percent = 0\n if self.max_steps > 0:\n 
percent = round(float(self.current_step) / self.max_steps, 2)\n\n # bar\n if 'bar' in self.format_vars:\n if self.max_steps > 0:\n complete_bars = math.floor(percent * self.bar_width)\n else:\n if not finish:\n complete_bars = math.floor(self.current_step % self.bar_width)\n else:\n complete_bars = self.bar_width\n\n empty_bars = self.bar_width - complete_bars - len(self.progress_char)\n bar = self.bar_char * int(complete_bars)\n if complete_bars < self.bar_width:\n bar += self.progress_char\n bar += self.empty_bar_char * int(empty_bars)\n\n format_vars['bar'] = bar\n\n # elapsed\n if 'elapsed' in self.format_vars:\n elapsed = time.time() - self.start_time\n format_vars['elapsed'] = self.humane_time(elapsed).rjust(self.widths['elapsed'], ' ')\n\n # current\n if 'current' in self.format_vars:\n format_vars['current'] = str(self.current_step).rjust(self.widths['current'], ' ')\n\n # max steps\n if 'max' in self.format_vars:\n format_vars['max'] = self.max_steps\n\n # percent\n if 'percent' in self.format_vars:\n format_vars['percent'] = str(int(round(percent * 100))).rjust(self.widths['percent'], ' ')\n\n return format_vars\n\n def humane_time(self, secs):\n \"\"\"\n Converts seconds into human-readable format\n\n @param secs: Number of seconds\n @type secs: int\n\n @return: Time in human-readable format\n @rtype: str\n \"\"\"\n text = ''\n for time_format in self.time_formats:\n if secs < time_format[0]:\n if len(time_format) == 2:\n text = time_format[1]\n break\n else:\n text = str(math.ceil(secs / time_format[2])) + ' ' + time_format[1]\n break\n\n return text\n\n def overwrite(self, output_, messages):\n \"\"\"\n Overwrites a previous message to the output.\n\n @param output_: An Output instance\n @type output_: Output\n @param messages: The message as an array of lines or a single string\n @type messages: list or str\n \"\"\"\n # carriage return\n output_.write('\\x0D')\n if self.last_messages_length is not None:\n # clear the line with the text of the last message\n output_.write('\\x20' * self.last_messages_length)\n # carriage return\n output_.write('\\x0D')\n\n output_.write(messages)\n\n self.last_messages_length = len(messages)\n\n def get_name(self):\n return 'progress'","sub_path":"console/helper/progress_helper.py","file_name":"progress_helper.py","file_ext":"py","file_size_in_byte":8858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"135125973","text":"\" Oxygen tank repair droid min steps\"\n\nfrom itertools import permutations\nimport random\n\n\nclass Intcode:\n def __init__(self, prog, input, output):\n self.prog = prog.copy()\n self.prog.extend([0]*100000)\n self.input = input\n self.output = output\n self.i = 0\n self.base = 0\n\n def get_param(self,mode,index):\n if mode == 0:\n return self.prog[self.prog[index]]\n elif mode == 1:\n return self.prog[index]\n else:\n return self.prog[self.prog[index] + self.base]\n\n\n def exec(self):\n \"\"\"Returns true if the self.program has halted, continues running\n the self.program till it finds a load and no input and returns false\"\"\"\n while self.i < len(self.prog):\n opcode = self.prog[self.i] % 100\n if opcode == 99:\n return True\n mode_first = (self.prog[self.i] // 100) % 10\n mode_second = (self.prog[self.i] // 1000) % 10\n mode_third = (self.prog[self.i] // 10000) % 10\n param1 = self.get_param(mode_first,self.i+1)\n if opcode in [7,8,1,2]:\n if mode_third == 0:\n index_third = self.prog[self.i + 3]\n elif mode_third == 2:\n index_third = self.prog[self.i + 3] + 
self.base\n if opcode != 3 and opcode != 4 and opcode != 9:\n param2 = self.get_param(mode_second,self.i+2)\n if opcode == 1:\n self.prog[index_third] = param1 + param2\n self.i += 4\n elif opcode == 2:\n self.prog[index_third] = param1 * param2\n self.i += 4\n elif opcode == 3:\n if len(self.input) == 0:\n return False\n if mode_first == 0:\n self.prog[self.prog[self.i + 1]] = self.input.pop(0)\n elif mode_first == 2:\n self.prog[self.prog[self.i + 1] + self.base] = self.input.pop(0)\n self.i += 2\n elif opcode == 4:\n # print(param1)\n self.i += 2\n self.output.append(param1)\n elif opcode == 5:\n if param1 != 0:\n self.i = param2\n else:\n self.i += 3\n elif opcode == 6:\n if param1 == 0:\n self.i = param2\n else:\n self.i += 3\n elif opcode == 7:\n if param1 < param2:\n self.prog[index_third] = 1\n else:\n self.prog[index_third] = 0\n self.i += 4\n elif opcode == 8:\n if param1 == param2:\n self.prog[index_third] = 1\n else:\n self.prog[index_third] = 0\n self.i += 4\n elif opcode == 9:\n self.base += param1\n self.i += 2\n else:\n print(\"unknown opcode\", opcode)\n # print(self.prog)\n print(\"Error: reached end of program without halt instruction\")\n return True\n\ndef read_input():\n file = open('input_aoc15.txt', 'r')\n lst = []\n for line in file:\n lst.append(line)\n lst = str.split(lst[0], ',')\n intlist = []\n for l in lst:\n intlist.append(int(l))\n return intlist\n\ndef count_steps(prog):\n NORTH = (0,1)\n EAST = (1,0)\n WEST = (-1,0)\n SOUTH = (0,-1)\n direction = [NORTH, SOUTH, WEST, EAST]\n\n input = []\n output = []\n intcode = Intcode(prog, input, output)\n curr_pos = (0, 0)\n curr_dir = 0\n directions_at_wall = dict()\n room = dict()\n while True:\n input.append(curr_dir+1)\n intcode.exec()\n # print(output)\n next_pos = (curr_pos[0]+direction[curr_dir][0], curr_pos[1]+direction[curr_dir][1])\n if output[0] == 0 or output[0] == 1:\n list = [0,1,2,3]\n random.shuffle(list)\n if next_pos in directions_at_wall:\n if len(directions_at_wall[next_pos]) == 0:\n raise ValueError(\"Exhausted directions\")\n# directions_at_wall[next_pos] = list\n curr_dir = directions_at_wall[next_pos].pop(0)\n else:\n directions_at_wall[next_pos] = list\n if output[0] == 0:\n room[next_pos] = '#'\n elif output[0] == 1:\n room[next_pos] = '.'\n curr_pos = next_pos\n elif output[0] == 2:\n room[next_pos] = 'o'\n curr_pos = next_pos\n print_room(room)\n return(room)\n output.pop(0)\n print_room(room)\n return(room)\n\ndef print_room(tiles):\n print(\"room\")\n tiles[(0,0)] = 'D'\n for j in range(min([ x[1] for x in tiles.keys() ]), max([ x[1] for x in tiles.keys() ])+1):\n for i in range(min([ x[0] for x in tiles.keys()]),max([ x[0] for x in tiles.keys()])+1):\n if (i,j) in tiles.keys():\n print(tiles[(i,j)],end=\"\")\n else:\n print(' ',end=\"\")\n print()\n\n\nprint(count_steps(read_input()))","sub_path":"2019/aoc15.py","file_name":"aoc15.py","file_ext":"py","file_size_in_byte":5190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"585337726","text":"import re\nimport pymorphy2\n\nmorph = pymorphy2.MorphAnalyzer()\n\ndef camel_case_split(str):\n words = [[str[0]]]\n\n for c in str[1:]:\n if words[-1][-1].islower() and c.isupper():\n words.append(list(c))\n else:\n words[-1].append(c)\n\n return [''.join(word) for word in words]\n\ndef split_camel_case(tokens):\n splited = []\n added = []\n\n for token in tokens:\n words = camel_case_split(token)\n if len(words) > 0:\n splited.append(token)\n added.extend(words)\n\n tokens -= set(splited)\n 
tokens |= set(added)\n return tokens\n\ndef to_normal_form(word):\n p = morph.parse(word)[0]\n if p.normalized.is_known:\n normal_form = p.normal_form\n else:\n normal_form = word.lower()\n return normal_form\n\ndef is_cyrillic(text):\n return bool(re.fullmatch('[а-яА-ЯёЁ]+', text))\n\ndef is_english(text):\n return bool(re.fullmatch('[a-zA-Z]+', text))","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"508064611","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# - __ECE 657A: Data and Knowledge Modelling and Analysis__\n# - __Winter 2019__\n# - __WATIAM:roozara ID: 20801583__\n# - __Homework 3:Eigenvector Decomposition__\n# \n# Reference used :https://hadrienj.github.io/posts/Preprocessing-for-deep-learning/\n# https://archive.ics.uci.edu/ml/machine-learning-databases/communities/\n\n# In[62]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n#for eigen value\nfrom numpy import cov\nfrom numpy import linalg as LA\n#pd.options.display.max_columns = None\npd.options.display.max_rows = None\n\ndef readFromFile(name):\n with open(name) as f: \n features= [line.split(' ')[1] for line in f.readlines()\n if line.startswith('@attr')]\n return features \n \n\n\n \n \n\n\n# # Importing the crime dataset and storing in a matrix\n# The crime dataset was loaded into a matrix ,its observed that the data is already normalized but so many missing values.\n# About 1675 out of 1993 missing values each in columns 101 to 126 were replaced by the mean of those features. the first\n# 5 columns including two (county & community of which contained about 1174 missing values each were not included in \n# creating the matrix.These first five non predictive attributes were left out of the analysis. 128 scaled to 123 features\n# being analysed. \n\n# In[67]:\n\n\n#importing the communities dataset into variable cdata\ncdata = pd.read_csv('data/communities.data',sep= ',', header = None, na_values=[\"?\"]) \n\n\n\n#print(Acdata)\n#examining the data to correct for missing valiue\n\n#IDENTIFYING COLUMNS WITH null value and filling with the mean\n#nan_col=pd.DataFrame(cdata.isnull().sum(axis=0))\n#nan_col.index = list(range(128))\n\ncdata.iloc[:,4:] = cdata.iloc[:,4:].apply(lambda x: x.fillna(x.mean()),axis=0)\ncdata.columns = readFromFile('data/communities.names')\nprint('Crime dataset dimension', cdata.shape)\ncdata.head(5)\n\n\n# In[66]:\n\n\n#matrix creating with numpy\ncdata_matrix = np.matrix(cdata.iloc[:,5:])\nprint('Matrix created' ,'(dimension' ,cdata_matrix.shape,')','\\n',cdata_matrix)\n\n\n# # Compute the eigenvectors and eigenvalue and Reporting the top 20 eigenvalues\n# We compute the eigenvectors and thus eigen value by first calculating the covaraince matrix. We project\n# any data onto the principal subspace that is spanned by the eigenvectors that belong to the largest eigenvalues.\n\n# In[46]:\n\n\n\ncov_matrix = np.cov(cdata_matrix, rowvar=False, bias=True)\neigenvalues, eigenVector = LA.eig(cov_matrix)\nx = np.arange(1, 124,1)\neig_valTable = pd.DataFrame(eigenvalues, index = x, columns = ['Eigenvalues'])\neig_valTable.sort_values(by='Eigenvalues', ascending=False, inplace=True)\nprint(eig_valTable.head(20))\n\n\n# As it can be seen from the first plot (left plot) below, it is hard to have a clear cut off since the\n# curve is more shallow. The first 20 eigenvalues count for ~85% of the variance. 
The 95% were\n# calculated below and it turned out that we need approximately 39 eigenvalues to process 95% of\n# the data.\n\n# In[61]:\n\n\ni = 1\nj = 0 \ny = np.zeros(shape=(123,1))\n\nfor index, row in eig_valTable.iterrows():\n y[j] = eig_valTable['Eigenvalues'][i] + y[j-1]\n i += 1\n j += 1 \nsum_eigen = pd.DataFrame(y, index = x)\nsum_eigen['Eigenvalue No'] = x \n \nf, (ax1, ax2) = plt.subplots(1, 2)\nax1.plot(x[:21], y[:21])\nax1.set_title('plot showing\\n20 eigenvalues')\nax1.set_xlabel('Eigenvalues')\nax1.set_ylabel('sum(Eigenvalues)')\n\nax2.plot(x, y)\nax2.set_title(' plot showing \\nall eigenvalues')\nax2.set_xlabel('Eigenvalues')\nax2.set_ylabel('sum(Eigenvalues)')\n\n\n# In[59]:\n\n\nprint(sum_eigen[(sum_eigen[0] > 3.98) & (sum_eigen[0] < 3.992268253580597)])\n\n","sub_path":"HW3/roozara_hw3.py","file_name":"roozara_hw3.py","file_ext":"py","file_size_in_byte":3685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"121000250","text":"from os import environ\nfrom pathlib import Path\nfrom shutil import copyfileobj\nfrom typing import Tuple, Optional\n\nimport click\nfrom requests import Session\n\nfrom .base import AbstractStorage\n\n\nclass WebDAVStorage(AbstractStorage):\n endpoint: str\n _credentials: Optional[Tuple[str, str]]\n\n def __init__(self, project: str):\n super().__init__(project)\n endpoint = environ.get(\"RUMPICO_DAV_SERVER\")\n credentials = (environ.get(\"RUMPICO_DAV_USER\"), environ.get(\"RUMPICO_DAV_PASSWORD\"))\n\n if endpoint is None:\n click.echo(click.style('Specify WebDAV endpoint in RUMPICO_DAV_SERVER environment variable', fg='red'))\n exit(1)\n\n self.endpoint = endpoint\n self._credentials = credentials if all(credentials) else None\n\n @property\n def _client(self) -> Session:\n session = Session()\n session.auth = self._credentials\n session.headers['Content-Type'] = 'application/octet-stream'\n return session\n\n def key(self, name: str, image_hash: str) -> str:\n return f'{self.project}/{name}.{image_hash}.image'\n\n def upload(self, *, source: Path, key: str):\n with source.open('rb') as f, self._client as client:\n client.request('MKCOL', f'{self.endpoint}/${self.project}')\n client.post(f'{self.endpoint}/${key}', f)\n\n def download(self, *, destination: Path, key: str):\n with destination.open('wb') as f, self._client as client:\n response = client.get(f'{self.endpoint}/${key}', stream=True)\n copyfileobj(response.raw, f)\n","sub_path":"rumpico/storage/webdav.py","file_name":"webdav.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"299000699","text":"\"\"\"\nCode ideas from https://github.com/Newmu/dcgan and tensorflow mnist dataset reader\n\"\"\"\nimport numpy as np\nimport scipy.misc as misc\nimport cv2\nimport random\n\nclass BatchDatset:\n files = []\n image_options = {}\n batch_offset = 0\n epochs_completed = 0\n\n def __init__(self, records_list, image_options={}):\n \"\"\"\n Intialize a generic file reader with batching for list of files\n :param records_list: list of file records to read -\n sample record: {'image': f, 'annotation': annotation_file, 'filename': filename}\n :param image_options: A dictionary of options for modifying the output image\n Available options:\n resize = True/ False\n resize_size = #size of output image - does bilinear resize\n color=True/False\n \"\"\"\n print(\"Initializing Batch Dataset Reader...\")\n print(image_options)\n self.files = records_list\n 
self.image_options = image_options\n self.total_files = len(self.files)\n\n def load_images(self, indexes, indexesCrop):\n images = []\n for i, index in enumerate(indexes):\n image = cv2.imread(self.files[index]['image'], cv2.IMREAD_COLOR)\n #image = misc.imread(self.files[index]['image'])\n image = self.crop(image, indexesCrop[i])\n image = self._transform_image(image)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n images.append(image)\n return images\n\n def load_annotations(self, indexes, indexesCrop):\n images = []\n for i, index in enumerate(indexes):\n image = cv2.imread(self.files[index]['annotation'], cv2.IMREAD_GRAYSCALE)\n #image = misc.imread(self.files[index]['annotation'])\n image = self.crop(image, indexesCrop[i])\n image = self._transform_image(image)\n images.append(np.expand_dims(image, axis=2))\n return images\n\n def crop(self, image, cropIndex):\n h, w = image.shape[:2]\n\n less_value = h if h < w else w\n\n if less_value > 448:\n less_value = 448\n else:\n less_value = 336\n\n if h > (less_value*2) or w > (less_value*2):\n raise Exception('Problem')\n\n if cropIndex == 0:\n image = image[0:less_value, 0:less_value]\n elif cropIndex == 1:\n image = image[0:less_value, w-less_value:w]\n elif cropIndex == 2:\n image = image[h-less_value:h, 0:less_value]\n elif cropIndex == 3:\n image = image[h-less_value:h, w-less_value:w]\n\n if image.shape[:2] != (less_value, less_value):\n raise Exception('Problem')\n\n return image\n\n def _transform(self, filename):\n image = misc.imread(filename)\n if self.__channels and len(image.shape) < 3: # make sure images are of shape(h,w,3)\n image = np.array([image for i in range(3)])\n\n if self.image_options.get(\"resize\", False) and self.image_options[\"resize\"]:\n resize_size = int(self.image_options[\"resize_size\"])\n resize_image = misc.imresize(image,\n [resize_size, resize_size], interp='nearest')\n else:\n resize_image = image\n\n return np.array(resize_image)\n\n def _transform_image(self, image):\n if self.image_options.get(\"resize\", False) and self.image_options[\"resize\"]:\n resize_size = int(self.image_options[\"resize_size\"])\n resize_image = cv2.resize(image, (resize_size, resize_size), interpolation=cv2.INTER_NEAREST)\n #resize_image = misc.imresize(image,\n # [resize_size, resize_size], interp='nearest')\n else:\n resize_image = image\n\n return resize_image\n\n def reset_batch_offset(self, offset=0):\n self.batch_offset = offset\n\n def next_batch(self, batch_size):\n start = self.batch_offset\n self.batch_offset += batch_size\n if self.batch_offset > self.total_files:\n # Finished epoch\n self.epochs_completed += 1\n print(\"****************** Epochs completed: \" + str(self.epochs_completed) + \"******************\")\n # Shuffle the data\n #perm = np.arange(self.images.shape[0])\n #np.random.shuffle(perm)\n #self.images = self.images[perm]\n #self.annotations = self.annotations[perm]\n random.shuffle(self.files)\n # Start next epoch\n start = 0\n self.batch_offset = batch_size\n\n end = self.batch_offset\n indexes = list(range(start, end))\n cropes = np.random.randint(0, 4, size=[batch_size]).tolist()\n return self.load_images(indexes, cropes), self.load_annotations(indexes, cropes)\n\n def get_random_batch(self, batch_size):\n indexes = np.random.randint(0, self.total_files, size=[batch_size]).tolist()\n indexesCrop = np.random.randint(0, 4, size=[batch_size]).tolist()\n return self.load_images(indexes, indexesCrop), self.load_annotations(indexes, 
indexesCrop)","sub_path":"BatchDatsetReader.py","file_name":"BatchDatsetReader.py","file_ext":"py","file_size_in_byte":5101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"521498286","text":"# -*- coding: utf-8 -*-\n\n# Copyright (C) 2017 Luis López \n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,\n# USA.\n\n\nimport collections\n\n\nfrom appkit import Null\n\n\nimport arroyo\nimport arroyo.exc\nimport arroyo.extensions\n\n\nclass Engine:\n def __init__(self, filters=None, sorter=None, logger=None):\n if filters is None:\n errmsg = \"At least one filter is required\"\n raise ValueError(filters, errmsg)\n\n for filter in filters:\n if not isinstance(filter, arroyo.extensions.FilterExtension):\n errmsg = \"Not a filter extension\"\n raise TypeError(filter, errmsg)\n\n if not isinstance(sorter, arroyo.extensions.SorterExtension):\n errmsg = \"Not a sorter extension\"\n raise TypeError(sorter, errmsg)\n\n self.registry = {}\n self.logger = logger or Null\n\n for filter in filters or []:\n self.register(filter)\n\n self.sorter = sorter\n\n def register(self, filter):\n s1 = set(filter.HANDLES)\n s2 = set(self.registry.keys())\n collisions = tuple(s2.intersection(s1))\n\n if collisions:\n raise arroyo.exc.ConflictingFilterError(collisions)\n\n self.registry.update({\n handle: filter for handle in filter.HANDLES\n })\n\n # msg = \"Filter {f} has conflicting handles: {collisions}\"\n # msg = msg.format(f=name, collisions=', '.join(e.args[1]))\n # self.logger.warning(msg)\n # raise\n\n def apply(self, filter, handler, value, results):\n if isinstance(results, list):\n prev = len(results)\n else:\n prev = '?? (iterable)'\n\n results = filter.apply(handler, value, results)\n\n if isinstance(results, list):\n curr = len(results)\n else:\n curr = '?? 
(iterable)'\n\n msg = \"Applied {name} over {prev} items: {curr} results\"\n msg = msg.format(name=handler, prev=prev, curr=curr)\n self.logger.debug(msg)\n\n return results\n\n def filter(self, results, query):\n if not isinstance(query, arroyo.Query):\n raise TypeError(query)\n if not isinstance(results, collections.Iterable):\n raise TypeError(results)\n\n filters, missing = self.get_for_query(query)\n\n if not filters:\n err = \"No matching filters\"\n self.logger.error(err)\n return []\n\n for key in missing:\n msg = \"Missing filter for key '{key}'\"\n msg = msg.format(key=key)\n self.logger.warning(msg)\n\n for (handler, filter) in filters:\n results = self.apply(filter,\n handler, getattr(query, handler),\n results)\n # fn = functools.partial(filter.apply,\n # handler,\n # getattr(query, handler))\n\n # prev = len(results)\n # results = list(fn(results))\n # curr = len(results)\n\n # msg = \"Applied {name} over {prev} items: {curr} results\"\n # msg = msg.format(name=handler, prev=prev, curr=curr)\n # self.logger.debug(msg)\n\n if not isinstance(results, list):\n results = list(results)\n\n return results\n\n def filter_by(self, sources, **params):\n return self.filter(sources, arroyo.Query(**params))\n\n def get_for_handler(self, handler):\n try:\n return self.registry[handler]\n except KeyError as e:\n raise arroyo.exc.MissingFilterError() from e\n\n def get_for_query(self, query):\n matches = []\n missing = []\n\n for (key, value) in query.asdict().items():\n try:\n filter = self.get_for_handler(key)\n matches.append((key, filter))\n except arroyo.exc.MissingFilterError:\n missing.append(key)\n\n return matches, missing\n\n def sorted(self, sources, query):\n return self.sorter.sort(sources, query)\n\n def sorted_by(self, sources, **params):\n return self.sorted(sorted, arroyo.Query(**params))\n","sub_path":"arroyo/helpers/filterengine.py","file_name":"filterengine.py","file_ext":"py","file_size_in_byte":4833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"566878056","text":"# Function to accept number of lines and print a pattern\ndef pattern(n):\n\n # outer loop through number of rows: n\n for i in range(0, n):\n # Set the value at the start of each loop\n value = 1\n # inner loop to handle number of columns 0 to i+1\n for j in range(0, i+1):\n # printing first and the last number except when i == n-1 \n if(value == 1 or value == i+1):\n print(value, end=\" \")\n else:\n #printing pattern when i == n-1\n if(i == n-1):\n print(value, end=\" \")\n else:\n #printing blank otherwise\n print(\" \", end=\" \")\n value += 1\n print('\\r')\n \n# Calling Code\n# Accept the number of lines you want to print in 1,2... formatn\nnum = int(input(\"Enter the number of lines you want to print : \"))\n\n# define a function to print the pattern\npattern(num)\n\n","sub_path":"CBSE Python/class11/annualexams/AniruddhBAnnuals3.py","file_name":"AniruddhBAnnuals3.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"275202580","text":"import numpy as np\nfrom . import ProblemDefinition\n\nclass ComplexProblemDefinition(ProblemDefinition):\n \"\"\"Defines a complex boundary value problem.\n This is wrapper for complex differential equations:\n dy/dx = f(x,y)\n where x is real and y(x) and f can be complex. The original solver takes only real values, \n so we decompose the equations into real and imaginary parts. 
We use the following for a complex\n    vector y:\n\n    y=[y0, y1, ..., yN1] -> [y0.real,y1.real,...,yN1.real, y0.imag, y1.imag, ..., yN1.imag],\n\n    where yN1 denotes y_{N-1}, the last of the N elements of y.\n    \n    Linear ODEs:\n    For N linear ODEs, y' = My, where M\n    is a NxN matrix, we have 2 x N real equations. That is \n        dy_big/dx = M_big y_big,\n    where y_big = [y_r, y_i] and \n        M_big = [ M_r -M_i ]\n                [ M_i  M_r ].\n\n    Note:\n    1. The class ``ComplexProblemDefinition\" basically replaces ProblemDefinition and allows users to\n    define functions with complex arguments. \n    2. The original ``solve\" function is used. Therefore, the ``parameter_guess\" argument for ``solve\"\n    should be real. \n    \"\"\"\n\n    def __init__(self,\n                 num_ODE_c,\n                 num_parameters_c,\n                 num_left_boundary_conditions_c,\n                 boundary_points,\n                 function_c,\n                 boundary_conditions_c,\n                 function_derivative_c = None,\n                 boundary_conditions_derivative_c = None):\n        \n        self._num_ODE_c = num_ODE_c\n        self._num_parameters_c = num_parameters_c\n        self._num_left_boundary_conditions_c = num_left_boundary_conditions_c\n        self._function_c = function_c\n        self._boundary_conditions_c = boundary_conditions_c\n        self._function_derivative_c = function_derivative_c\n        self._boundary_conditions_derivative_c = boundary_conditions_derivative_c\n\n        if self._num_parameters_c == 0:\n            self._function_r = self._function_dummy\n            self._boundary_conditions_r = self._boundary_conditions_dummy\n            if self._function_derivative_c is None:\n                self._function_derivative_r = None\n            else:\n                self._function_derivative_r = self._function_derivative_dummy\n            if self._boundary_conditions_derivative_c is None:\n                self._boundary_conditions_derivative_r = None\n            else:\n                self._boundary_conditions_derivative_r = self._boundary_conditions_derivative_dummy\n        \n        else:\n            self._function_r = self._functionp_dummy\n            self._boundary_conditions_r = self._boundary_conditionsp_dummy\n\n            if self._function_derivative_c is None:\n                self._function_derivative_r = None\n            else:\n                self._function_derivative_r = self._function_derivativep_dummy\n\n            if self._boundary_conditions_derivative_c is None:\n                self._boundary_conditions_derivative_r = None\n            else:\n                self._boundary_conditions_derivative_r = self._boundary_conditionsp_derivative_dummy\n\n        ProblemDefinition.__init__(self,\n                  num_ODE = num_ODE_c * 2,\n                  num_parameters = num_parameters_c * 2,\n                  num_left_boundary_conditions = num_left_boundary_conditions_c * 2,\n                  boundary_points = boundary_points,\n                  function = self._function_r,\n                  boundary_conditions = self._boundary_conditions_r,\n                  function_derivative = self._function_derivative_r,\n                  boundary_conditions_derivative = self._boundary_conditions_derivative_r)\n\n    def _function_dummy(self, T, Y):\n        \"\"\"dummy real function without parameter\n        \"\"\"\n        dYc = self._function_c(T, real_to_complex(Y))\n        return complex_to_real(dYc)\n\n    def _functionp_dummy(self, T, P, Y):\n        \"\"\"dummy real function with parameter\n        \"\"\"\n        dYc = self._function_c(T, real_to_complex(P), real_to_complex(Y))\n        return complex_to_real(dYc)\n\n    def _boundary_conditions_dummy(self, Ya, Yb):\n        \"\"\"dummy real BC without parameter\n        \"\"\"\n        BCac, BCbc = self._boundary_conditions_c(\n                real_to_complex(Ya), \n                real_to_complex(Yb))\n        return complex_to_real(BCac), complex_to_real(BCbc)\n\n    def _boundary_conditionsp_dummy(self, Ya, Yb, P):\n        \"\"\"dummy real BC with parameter\n        \"\"\"\n        BCac, BCbc = self._boundary_conditions_c(\n                real_to_complex(Ya), \n                real_to_complex(Yb),\n                real_to_complex(P) )\n        return complex_to_real(BCac), complex_to_real(BCbc)\n\n    def 
_function_derivative_dummy(self, T, Y):\n        \"\"\"function derivative w/o parameter\n        \"\"\"\n        dFdYc = self._function_derivative_c(T, real_to_complex(Y))\n        return complex_to_real_matrix(dFdYc)\n\n    def _function_derivativep_dummy(self, T, Y, P):\n        \"\"\"function derivative w/ parameter\n        \"\"\"\n        dFdYc, dFdPc = self._function_derivative_c(T, real_to_complex(Y), real_to_complex(P))\n        return complex_to_real_matrix(dFdYc), complex_to_real_matrix(dFdPc)\n\n    def _boundary_conditions_derivative_dummy(self, Ya, Yb):\n        \"\"\"boundary conditions derivative w/o parameter\n        \"\"\"\n        dBCa, dBCb = self._boundary_conditions_derivative_c(\n                real_to_complex(Ya), \n                real_to_complex(Yb))\n        return complex_to_real_matrix(dBCa), complex_to_real_matrix(dBCb)\n    \n    def _boundary_conditionsp_derivative_dummy(self, Ya, Yb, P):\n        \"\"\"boundary conditions derivative w/ parameter\n        \"\"\"\n\n        dBCa, dBCb, dBPa, dBPb = self._boundary_conditions_derivative_c(\n                real_to_complex(Ya), \n                real_to_complex(Yb),\n                real_to_complex(P)\n                )\n        return complex_to_real_matrix(dBCa), complex_to_real_matrix(dBCb), \\\n               complex_to_real_matrix(dBPa), complex_to_real_matrix(dBPb)\n\n# some helper functions\ndef complex_to_real(Yc):\n    \"\"\"convert a complex vector into a real vector\n    \"\"\"\n    return np.hstack([Yc.real, Yc.imag])\n\ndef real_to_complex(Y):\n    \"\"\"convert a real vector into a complex vector\n    \"\"\"\n    N = Y.shape[0]\n    return Y[:int(N/2)] + 1j*Y[int(N/2):]\n\ndef complex_to_real_matrix(Mc):\n    \"\"\"compute an ``enlarged\" real matrix\n    \"\"\"\n    dim1, dim2 = Mc.shape\n    M = np.zeros((dim1*2, dim2*2), dtype=float)\n    M[:dim1,:dim2] = Mc.real\n    M[:dim1,dim2:] = -Mc.imag\n    M[dim1:,:dim2] = Mc.imag\n    M[dim1:,dim2:] = Mc.real\n    return M\n\ndef real_to_complex_matrix(M):\n    dim1, dim2 = M.shape\n    Mc = M[:int(dim1/2),:int(dim2/2)] - 1j * M[:int(dim1/2),int(dim2/2):]\n    return Mc\n\n\n\n\n\n","sub_path":"scikits/bvp_solver/ComplexProblemDefinition.py","file_name":"ComplexProblemDefinition.py","file_ext":"py","file_size_in_byte":6909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"357966145","text":"#!/usr/bin/env python\n\nimport datetime\nimport json\n\nfrom flask import Flask\nfrom flask_mysqldb import MySQL\n\napp = Flask(__name__)\napp.config['MYSQL_HOST'] = 'localhost'\napp.config['MYSQL_DB'] = 'ebuff_vst'\napp.config['MYSQL_USER'] = 'root'\napp.config['MYSQL_PASSWORD'] = 'root'\n\nmysql = MySQL(app)\n\n@app.route('/')\ndef api_root():\n    return 'Server'\n\n\n@app.route('/registers')\ndef api_registers():\n    cursor = mysql.connection.cursor()\n    cursor.execute('SELECT * FROM registers')\n    result = []\n    for i in cursor.fetchall():\n        result.append({\n            'id': i[0],\n            'start': i[1].strftime('%Y-%m-%d %H:%M:%S') if i[1] is not None else i[1],\n            'stop': i[2].strftime('%Y-%m-%d %H:%M:%S') if i[2] is not None else i[2],\n            'calls': i[3]\n        })\n    return json.dumps(result)\n\n\n@app.route('/registers/<registerid>')\ndef api_register(registerid):\n    cursor = mysql.connection.cursor()\n    cursor.execute('SELECT * FROM registers WHERE ID=%s', (registerid,))\n    r = cursor.fetchone()\n    result = {}\n    result['id'] = r[0]\n    result['start'] = r[1].strftime('%Y-%m-%d %H:%M:%S') if r[1] is not None else r[1]\n    result['stop'] = r[2].strftime('%Y-%m-%d %H:%M:%S') if r[2] is not None else r[2]\n    result['calls'] = r[3]\n    return json.dumps(result)\n\n\n@app.route('/start')\ndef api_start():\n    now = datetime.datetime.now().__str__()\n    cursor = mysql.connection.cursor()\n    cursor.execute('INSERT INTO registers (start) VALUES 
(%s)', (now,))\n    registerid = cursor.lastrowid\n    mysql.connection.commit()\n    return '%s' % registerid\n\n\n@app.route('/keep/<registerid>')\ndef api_keep(registerid):\n    cursor = mysql.connection.cursor()\n    cursor.execute('SELECT calls FROM registers WHERE id=%s', (registerid,))\n    calls = cursor.fetchone()[0] + 1\n    cursor.execute('UPDATE registers SET calls=%s WHERE id=%s', (calls, registerid))\n    mysql.connection.commit()\n    return '%s' % calls\n\n\n@app.route('/stop/<registerid>')\ndef api_stop(registerid):\n    now = datetime.datetime.now().__str__()\n    cursor = mysql.connection.cursor()\n    cursor.execute('UPDATE registers SET stop=%s WHERE id=%s', (now, registerid))\n    mysql.connection.commit()\n    return 'Stop %s' % registerid\n\n\nif __name__ == '__main__':\n    app.run(port=5050)\n\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"455409088","text":"# Each iteration's result is used as the starting value of the next iteration\r\n# For iteration, Python has two built-in functions: iter() and next()\r\n# Calling iter() on an object returns its iterator; next() returns the next value, and raises StopIteration when the iterator is exhausted\r\n# Example operations below\r\ndef diedai(*args):\r\n    it = iter(args)\r\n    while True:\r\n        try:\r\n            each = next(it)\r\n            print(each)\r\n        except StopIteration:\r\n            break\r\nlist = [1,2,3,4,5,6,7]\r\nset = {1,2,3,4,5,6,7}\r\ndict = {'5':3,'6':4}\r\nstring = '1234567'\r\ndiedai(*list)\r\ndiedai(*set)\r\ndiedai(*dict)  # iterating a dict yields its keys\r\ndiedai(*string)","sub_path":"python自带的更多方法/属性访问/5.迭代器.py","file_name":"5.迭代器.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"24409703","text":"\"\"\"Operations on MAC addresses.\"\"\"\nimport os\nimport uuid\nimport random\nimport numbers\nimport subprocess\nimport distutils.spawn\n\n\ndef get_mac():\n    \"\"\"Get your own device's MAC address using uuid.getnode().\n    Returns a Mac object, or None on failure.\"\"\"\n    #TODO: uuid.getnode() arbitrarily chooses a MAC when the device has more than one. May have to use\n    # another method to make sure it's the MAC of the NIC in use. Probably have to create a\n    # dummy socket using a public IP.\n    # uuid.getnode() returns the MAC as an integer.\n    uuid_mac = Mac(uuid.getnode())\n    # On failure, uuid.getnode() returns a random MAC, with the eighth bit set:\n    # https://docs.python.org/2/library/uuid.html#uuid.getnode\n    # Check the eighth bit to determine whether it failed.\n    if uuid_mac.byte_ints[0] & 0b00000001:\n        # Try harder. 
(Use the \"ip\" command.)\n        device = get_default_device()\n        mac_str = get_mac_of_device(device)\n        if mac_str:\n            return Mac(mac_str)\n        else:\n            return None\n    else:\n        return uuid_mac\n\n\ndef get_mac_of_device(device):\n    output = execute_cmd(['ip', 'link', 'show', 'dev', device], bin_path='/sbin/ip')\n    line_num = 0\n    for line in output.splitlines():\n        line_num += 1\n        if line_num == 2:\n            fields = line.split()\n            if len(fields) > 2:\n                return fields[1]\n    return None\n\n\ndef get_default_device():\n    output = execute_cmd(['ip', 'route', 'show', '0/0'], bin_path='/sbin/ip')\n    fields = output.split()\n    if len(fields) < 6:\n        return None\n    else:\n        return fields[4]\n\n\ndef execute_cmd(cmd, bin_path=None):\n    if bin_path:\n        if 'PATH' not in os.environ or not distutils.spawn.find_executable(cmd[0]):\n            cmd[0] = bin_path\n    devnull = open(os.devnull, 'w')\n    try:\n        output = subprocess.check_output(cmd, stderr=devnull)\n    except (OSError, subprocess.CalledProcessError):\n        return None\n    finally:\n        devnull.close()\n    return output\n\n\ndef get_random_mac():\n    \"\"\"Generate a valid, random MAC address.\"\"\"\n    # In the first byte, the two least-significant bits must be 10:\n    # The 1 means it's a local MAC address (not globally assigned and unique).\n    # The 0 means it's not a multicast address.\n    # https://superuser.com/questions/725467/set-mac-address-fails-rtnetlink-answers-cannot-assign-requested-address/725472#725472\n    # 0-63 * 4 + 2 gives a number whose last two binary digits are 10.\n    number = random.randint(0, 63) * 4 + 2\n    # Append the remaining 5 random bytes.\n    for i in range(5):\n        number = number * 256 + random.randint(0, 255)\n    return Mac(number)\n\n\ndef eui64_to_mac(eui64):\n    \"\"\"Convert an EUI-64 to a MAC address by removing the middle two bytes.\"\"\"\n    _bytes = eui64.split(':')\n    return Mac(_bytes[:3] + _bytes[5:])\n\n\n# Subclass tuple to make Mac immutable.\nclass Mac(tuple):\n    \"\"\"An object representing a MAC address.\n    Initialize with one argument: a MAC address in one of 4 representations:\n    1. a string of the colon-delimited hexadecimal bytes\n    2. an iterable of the bytes as hex strings\n    3. an iterable of the bytes as integers\n    4. 
a single integer representing the 48-bit value of the address\n When a representation is requested that wasn't given initially, it is computed and cached.\n Mac objects are immutable.\"\"\"\n\n # Need to override tuple's __new__().\n def __new__(cls, mac):\n return tuple.__new__(cls, ())\n\n def __init__(self, mac):\n self._string = None\n self._number = None\n self._bytes = None\n self._byte_ints = None\n if isinstance(mac, basestring):\n assert len(mac) == 17, 'Mac string must be 17 characters (6 colon-delimited hex bytes).'\n self._string = mac.upper()\n elif isinstance(mac, numbers.Integral):\n self._number = mac\n else:\n try:\n _bytes = tuple(mac)\n except TypeError:\n raise AssertionError('Mac object must be initialized with a string, integer, or iterable.')\n assert len(_bytes) == 6, 'Mac must consist of 6 bytes.'\n if isinstance(_bytes[0], basestring):\n self._bytes = _bytes\n elif isinstance(_bytes[0], numbers.Integral):\n self._byte_ints = _bytes\n else:\n raise AssertionError('Mac bytes must be numbers or strings.')\n\n @property\n def string(self):\n \"\"\"A string representing the MAC address as the standard colon-delimited hex bytes.\n If it doesn't exist, derive it from the bytes.\"\"\"\n if self._string is None:\n self._string = ':'.join(self.bytes)\n return self._string\n\n @property\n def number(self):\n \"\"\"An int representing the MAC address value as a number.\n If it doesn't exist, derive it from the bytes.\"\"\"\n if self._number is None:\n hexadecimal = ''.join(self.bytes)\n self._number = int(hexadecimal, 16)\n return self._number\n\n @property\n def byte_ints(self):\n \"\"\"A tuple representing the MAC address as a series of bytes (ints).\n If it doesn't exist, derive it from the bytes.\"\"\"\n if self._byte_ints is None:\n self._byte_ints = tuple(int(o, 16) for o in self.bytes)\n return self._byte_ints\n\n @property\n def bytes(self):\n \"\"\"An tuple representing the MAC address as a series of hex bytes (strings).\n If it doesn't exist, try deriving it from one of the other representations.\"\"\"\n if self._bytes is None:\n if self._string is not None:\n self._bytes = tuple(self._string.split(':'))\n elif self._byte_ints is not None:\n self._bytes = tuple('{:02X}'.format(o) for o in self._byte_ints)\n elif self._number is not None:\n hexadecimal = '{:012X}'.format(self._number)\n self._bytes = tuple(hexadecimal[i:i+2] for i in range(0, 12, 2))\n else:\n raise AssertionError('Mac object is uninitialized.')\n return self._bytes\n\n def __str__(self):\n return self.string\n\n def __repr__(self):\n return \"{}.{}('{}')\".format(type(self).__module__, type(self).__name__, self.string)\n\n def __eq__(self, mac2):\n return self.string == mac2.string\n\n def __ne__(self, mac2):\n return self.string != mac2.string\n\n def to_eui64(self, is_mac48=False):\n \"\"\"Convert the MAC address to an EUI-64 address.\n This expands the address to 64 bits by adding 'FF:FE' as the middle two bytes, by default.\n This is the procedure when the MAC address is considered an EUI-48 (as is done when creating an\n IPv6 address from a MAC address). If the MAC address should be considered a MAC-48 instead, so\n that 'FF:FF' is used as the middle bytes, set 'is_mac48' to True.\n N.B.: This is part 1 of how IPv6 generates addresses from MAC addresses. 
The second part is\n        flipping the locally administered bit.\"\"\"\n        if is_mac48:\n            middle = 'FF:FF'\n        else:\n            middle = 'FF:FE'\n        # Join into a colon-delimited string; eui64_to_mac() splits on ':'.\n        return ':'.join(self.bytes[:3] + (middle,) + self.bytes[3:])\n\n    def is_broadcast(self):\n        \"\"\"Check whether the MAC address is the broadcast FF:FF:FF:FF:FF:FF address.\"\"\"\n        return self.string == 'FF:FF:FF:FF:FF:FF'\n\n    def is_local(self):\n        \"\"\"Check whether the \"locally administered\" bit in a MAC address is set to 1.\"\"\"\n        return bool(self.byte_ints[0] & 0b00000010)\n\n    def is_global(self):\n        \"\"\"Check whether the \"locally administered\" bit in a MAC address is set to 0.\n        This means that the MAC address should be \"globally unique\".\"\"\"\n        return not bool(self.byte_ints[0] & 0b00000010)\n\n    def is_multicast(self):\n        \"\"\"Check whether the \"multicast\" bit in a MAC address is set to 1.\"\"\"\n        return bool(self.byte_ints[0] & 0b00000001)\n\n    def is_unicast(self):\n        \"\"\"Check whether the \"multicast\" bit in a MAC address is set to 0.\n        This means the MAC address is unicast.\"\"\"\n        return not bool(self.byte_ints[0] & 0b00000001)\n\n    def is_normal(self):\n        \"\"\"Check whether the MAC address is the common type used by networking hardware.\n        Returns false if it's a locally administered, multicast, or broadcast address.\"\"\"\n        # Is broadcast address?\n        if self.is_broadcast():\n            return False\n        # Is locally administered bit set?\n        if self.is_local():\n            return False\n        # Is multicast bit set?\n        if self.is_multicast():\n            return False\n        return True\n\n    def to_local(self):\n        \"\"\"Set the \"locally administered\" bit to 1 and return the result.\"\"\"\n        byte_ints = list(self.byte_ints)\n        # OR the first byte with 00000010.\n        byte_ints[0] = byte_ints[0] | 0b00000010\n        return Mac(byte_ints)\n\n    def to_global(self):\n        \"\"\"Set the \"locally administered\" bit to 0 and return the result.\"\"\"\n        byte_ints = list(self.byte_ints)\n        # AND the first byte with 11111101.\n        byte_ints[0] = byte_ints[0] & 0b11111101\n        return Mac(byte_ints)\n\n    def to_multicast(self):\n        \"\"\"Set the \"multicast\" bit to 1 and return the result.\"\"\"\n        byte_ints = list(self.byte_ints)\n        # OR the first byte with 00000001.\n        byte_ints[0] = byte_ints[0] | 0b00000001\n        return Mac(byte_ints)\n\n    def to_unicast(self):\n        \"\"\"Set the \"multicast\" bit to 0 and return the result.\"\"\"\n        byte_ints = list(self.byte_ints)\n        # AND the first byte with 11111110.\n        byte_ints[0] = byte_ints[0] & 0b11111110\n        return Mac(byte_ints)\n","sub_path":"maclib.py","file_name":"maclib.py","file_ext":"py","file_size_in_byte":8926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"388855266","text":"\"\"\"Statement base classes for courseware blocks.\"\"\"\n\n\nfrom tincan import Activity, ActivityDefinition, ActivityList, ContextActivities, LanguageMap\n\nfrom . import base\nfrom . 
import course\nfrom xapi_bridge import constants, settings\n\n\nclass BlockActivityDefinition(ActivityDefinition):\n def __init__(self, event, *args, **kwargs):\n try:\n display_name = event['context']['module']['display_name']\n except KeyError:\n # not all events will have in the context\n display_name = \"Course Block\"\n kwargs.update({\n 'type': constants.XAPI_ACTIVITY_MODULE,\n 'name': LanguageMap({'en': display_name}),\n 'description': LanguageMap({'en': 'A course block in a course delivered through Open edX'})\n })\n super(BlockActivityDefinition, self).__init__(*args, **kwargs)\n\n\nclass BaseCoursewareBlockStatement(base.LMSTrackingLogStatement):\n \"\"\"Base for any interaction with a courseware block.\"\"\"\n\n def _get_activity_id(self, event):\n format_str = constants.BLOCK_OBJECT_ID_FORMAT\n platform_str = settings.OPENEDX_PLATFORM_URI\n block_id = event['context']['module']['usage_key']\n return format_str.format(platform=platform_str, block_usage_key=block_id)\n\n def get_context_activities(self, event):\n parent_activities = [\n Activity(\n id='{}/courses/{}'.format(settings.OPENEDX_PLATFORM_URI, event['context']['course_id']),\n definition=course.CourseActivityDefinition(event)\n ),\n ]\n # browser source events don't know as much about their context\n if event['event_source'].lower() == 'server':\n parent_activities.append(\n Activity(\n id=event['referer'],\n definition=BlockActivityDefinition(event)\n ),\n )\n\n other_activities = [\n Activity(\n id=event['referer'],\n definition=base.ReferringActivityDefinition(event)\n ),\n ]\n\n return ContextActivities(\n parent=ActivityList(parent_activities),\n other=ActivityList(other_activities)\n )\n\n def get_context(self, event):\n \"\"\"Get Context for the statement.\n\n For problems this can include the course and the block id.\n \"\"\"\n context = super(BaseCoursewareBlockStatement, self).get_context(event)\n context.context_activities=self.get_context_activities(event)\n return context\n","sub_path":"xapi_bridge/statements/block.py","file_name":"block.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"534597554","text":"'''\nGiven a collection of integers that might contain duplicates, nums, return all possible subsets (the power set).\n\nNote: The solution set must not contain duplicate subsets.\n\nFor example,\nIf nums = [1,2,2], a solution is:\n\n[\n [2],\n [1],\n [1,2,2],\n [2,2],\n [1,2],\n []\n]\n'''\nclass Solution(object):\n def subsetsWithDup(self, arr):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n self.result = []\n self.dfs(sorted(arr), [], 0)\n return self.result\n\n def dfs(self, arr, singleResult, index):\n self.result.append(singleResult[:])\n for i in range(index, len(arr)):\n if i != index and arr[i] == arr[i - 1]:\n continue\n singleResult.append(arr[i])\n self.dfs(arr, singleResult, i + 1)\n singleResult.pop()\n","sub_path":"Python/leetcode/90-SubsetII.py","file_name":"90-SubsetII.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"215812860","text":"# This file is Copyright (c) 2019 Florent Kermarrec \n# License: BSD\n\nfrom migen import *\n\nfrom litex.soc.interconnect import stream\n\nfrom usb3_pipe.common import *\nfrom usb3_pipe.lfps import LFPSUnit\nfrom usb3_pipe.training import TSUnit\nfrom usb3_pipe.ltssm import LTSSM\nfrom usb3_pipe.scrambling import Scrambler, 
Descrambler\n\n# USB3 PIPE ----------------------------------------------------------------------------------------\n\n@ResetInserter()\nclass USB3PIPE(Module):\n def __init__(self, serdes, sys_clk_freq, with_scrambling=True, with_endianness_swap=True):\n assert sys_clk_freq >= 125e6\n self.ready = Signal() # o\n\n self.sink = stream.Endpoint([(\"data\", 32), (\"ctrl\", 4)])\n self.source = stream.Endpoint([(\"data\", 32), (\"ctrl\", 4)])\n\n # # #\n\n # Endianness Swap --------------------------------------------------------------------------\n if with_endianness_swap:\n sink = stream.Endpoint([(\"data\", 32), (\"ctrl\", 4)])\n source = stream.Endpoint([(\"data\", 32), (\"ctrl\", 4)])\n sink_swap = EndiannessSwap(self.sink, sink)\n source_swap = EndiannessSwap(source, self.source)\n self.submodules += sink_swap, source_swap\n else:\n sink = self.sink\n source = self.source\n\n # LFPS -------------------------------------------------------------------------------------\n lfps = LFPSUnit(sys_clk_freq=sys_clk_freq, serdes=serdes)\n self.submodules.lfps = lfps\n\n # TS----------------------------------------------------------------------------------------\n ts = TSUnit(serdes=serdes)\n self.submodules.ts = ts\n\n # LTSSM ------------------------------------------------------------------------------------\n ltssm = LTSSM(serdes=serdes, lfps_unit=lfps, ts_unit=ts, sys_clk_freq=sys_clk_freq)\n self.submodules.ltssm = ltssm\n self.comb += self.ready.eq(ltssm.polling.idle)\n\n # Scrambling -------------------------------------------------------------------------------\n if with_scrambling:\n scrambler = Scrambler()\n scrambler = ResetInserter()(scrambler)\n self.comb += scrambler.reset.eq(~ltssm.polling.tx_ready)\n self.submodules.scrambler = scrambler\n self.comb += [\n sink.connect(scrambler.sink),\n If(ltssm.polling.tx_ready, scrambler.source.connect(serdes.sink))\n ]\n\n descrambler = Descrambler()\n descrambler = ResetInserter()(descrambler)\n descrambler = stream.BufferizeEndpoints({\"source\": stream.DIR_SOURCE})(descrambler)\n self.comb += descrambler.reset.eq(~ltssm.polling.rx_ready)\n self.submodules.descrambler = descrambler\n self.comb += [\n serdes.source.connect(descrambler.sink, keep={\"data\", \"ctrl\"}),\n If(ltssm.polling.rx_ready, serdes.source.connect(descrambler.sink, omit={\"data\", \"ctrl\"})),\n descrambler.source.connect(source),\n ]\n else:\n self.comb += If(ltssm.polling.tx_ready, sink.connect(serdes.sink))\n self.comb += If(ltssm.polling.rx_ready, serdes.source.connect(source))\n","sub_path":"usb3_pipe/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"128066324","text":"\"\"\"scripts to save new discovered service, update ip, env_val in configuration.ini in smoke_details\"\"\"\nimport sys\nsys.path = ['..'] + sys.path\nimport traceback\nfrom _datetime import datetime\nfrom datetime import timedelta\nfrom parse import get_configuration_section\nfrom login import request_login, header1, session\n\ninput_details = get_configuration_section('smoke_details','configuration.ini',\"'\")\n\n\ntime_list = timedelta(minutes=2)\nstart_time = (datetime.now() - time_list).strftime('%Y-%m-%dT%H:%M:%S')\nend_time = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')\n\ndef check_disc_servc():\n try:\n req_services_url = 'http://' + input_details['ip'] + '/v1/traffic/group/' + input_details['env_val'] + '/app/com.nano.temp/services?endTime=' + end_time + '&startTime=' 
+ start_time\n        request_obj = session.get(url=req_services_url)\n        if request_obj.status_code == 200:\n            print(\"checking for new discovered services over the last {}\".format(time_list))\n            data = len(request_obj.text)\n            if data == 2:\n                print(\"sorry, no discovered services to display for the last {}\".format(time_list))\n                return None, 1\n            else:\n                print('list of discovered services:\\n')\n                serv_detail = request_obj.text\n                print(serv_detail)\n                return serv_detail, 0\n        else:\n            return None, 1\n    except:\n        print(traceback.format_exc())\n        return None, 1\n\ndef main():\n    try:\n        data, rst = check_disc_servc()\n        if rst == 0:\n            payload = '{\"services\":[' + data.rstrip('}]') + ',\"isSelected\":true}]]}'\n            print(payload)\n            # payload = '{\"services\":[[{\"groupId\":\"test\",\"serviceId\":\"207f9697a9\",\"name\":\"com.nano.app2.ubuntu\",\"isSelected\":true}]]}'\n            post_saved_serv_url = 'http://' + input_details['ip'] + '/v1/traffic/group/' + input_details['env_val'] + '/app/com.nano.temp/services'\n            obj = session.post(post_saved_serv_url, headers = header1, data=payload)\n            if obj.status_code == 200:\n                print(\"saved discovered services\")\n                return 0\n            else:\n                print(\"failed to save discovered service with response \" + str(obj.status_code) + ' and text ' + obj.text)\n                return 1\n        else:\n            print('no discovered services available')\n            return 1\n    except:\n        print(traceback.format_exc())\n        return 1\n\n\nif __name__ == '__main__':\n    login_ui = request_login()\n    if login_ui == 0:\n        return_code = main()\n        if return_code == 1:\n            print('testcase failed with code', return_code)\n        else:\n            print('testcase passed with code', return_code)\n    else:\n        print(\"login_ui failed with code\", login_ui)","sub_path":"scripts/save_discovered_servc.py","file_name":"save_discovered_servc.py","file_ext":"py","file_size_in_byte":2795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"101695505","text":"#!/usr/bin/env python3\n\nfrom matplotlib import pyplot as plt\nimport util\n\ninstance_file = \"../../data/pbn423.tsp\"\ninstance_file = \"../../data/xrb14233.tsp\"\n\noptimal_tour_file = \"../../data/pbn423.tour\"\noptimal_tour_file = \"../../data/xrb14233.tour\"\n\ncycle1_file = \"../output/cycle0_margin_1.txt\"\ncycle2_file = \"../output/cycle1_margin_1.txt\"\n\ncoordinates = util.read_point_file_path(instance_file)\ncycle1 = util.read_edge_list(cycle1_file)\ncycle2 = util.read_edge_list(cycle2_file)\n\ndef plot_cycle(coordinates, edges, markers):\n    xx = [coordinates[e[0]][0] for e in edges]\n    xx.append(xx[0])\n    yy = [coordinates[e[0]][1] for e in edges]\n    yy.append(yy[0])\n    plt.plot(xx, yy, markers)\n\nplot_cycle(coordinates, cycle1, \"g-x\")\nplot_cycle(coordinates, cycle2, \"r-x\")\nutil.read_and_plot_tour(coordinates, optimal_tour_file, \":k\")\n\nplt.gca().set_aspect(\"equal\")\nplt.show()\n\n","sub_path":"plot/cycles.py","file_name":"cycles.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"645492609","text":"import shutil\nimport os\nfrom utils.data import get_tsv_data\n\nos.makedirs(\"data/ARSC-fixed\", exist_ok=True)\n\nwith open(\"data/ARSC-Yu/raw/workspace.filtered.list\", \"r\") as file:\n    train_labels = [line.strip() for line in file.readlines()]\nwith open(\"data/ARSC-Yu/raw/workspace.target.list\", \"r\") as file:\n    test_labels = [line.strip() for line in file.readlines()]\n\ntrain_data = list()\n\nfor lab in train_labels:\n    for ix in (2, 4, 5):\n        for split_type in (\"train\", 
\"dev\", \"test\"):\n            train_data += get_tsv_data(f\"data/ARSC-Yu/raw/{lab}.t{ix}.{split_type}\", label=lab)\n\ntest_data = list()\n\nfor lab in test_labels:\n    for ix in (2, 4, 5):\n        for split_type in (\"train\", \"dev\", \"test\"):\n            test_data += get_tsv_data(f\"data/ARSC-Yu/raw/{lab}.t{ix}.{split_type}\", label=lab)\n","sub_path":"utils/scripts/prepare-ARSC-fixed.py","file_name":"prepare-ARSC-fixed.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"118147873","text":"# Subscription\n\nnome = str(input(\"Enter your name: \"))\nplano = str(input(f\"Hello Mr./Ms. {nome}, please enter your subscription plan: \"))\nif plano != \"basic\" and plano != \"silver\" and plano != \"gold\" and plano != \"platinum\":\n    print(f\"The plan {plano} does not exist.\")\n    exit()\nfaturamento = float(input(\"Enter your annual revenue: R$ \"))\n\nif plano == 'basic':\n    bonusI = faturamento * 0.3\n    print(f\"The bonus to be paid will be R$ {round(bonusI,2)}\")\nelif plano == 'silver':\n    bonusII = faturamento * 0.2\n    print(f\"The bonus to be paid will be R$ {round(bonusII,2)}\")\nelif plano == 'gold':\n    bonusIII = faturamento * 0.1\n    print(f\"The bonus to be paid will be R$ {round(bonusIII,2)}\")\nelif plano == 'platinum':\n    bonusIV = faturamento * 0.05\n    print(f\"The bonus to be paid will be R$ {round(bonusIV,2)}\")","sub_path":"FIAP/Fase2_cap2/RM86567_EX02.py","file_name":"RM86567_EX02.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"572669951","text":"# -*- coding: utf-8 -*-\n# pp/utils/test_optionlines.py\n\nfrom datetime import datetime\nfrom dateutil.parser import parse as dt\nfrom pprint import pprint\n\nimport pytest\nimport mock\n\n# import pp.utils.optionlines.OptionLineFactory as OptLineFactory\nfrom pp.utils.optionlines import (CommentLine, OptionLine,\n                                  TaskContinuationLine,\n                                  TaskLine, BlankLine,\n                                  OptionLines,\n                                  OptionLineFactory,\n                                  OptionLineError, OptionSubsetError)\n\n\n@pytest.mark.parametrize(\"source_line, indent, text, is_valid_comment\", [\n    (\"# This is a comment\", 0, \"# This is a comment\", True),\n    (\"    # Comment\", 4, \"# Comment\", True),\n    (\"\\t\\t \\t\\t # Comment 2\", 6, \"# Comment 2\", True),\n    (\"    Missing the hash\", 4, \"Missing the hash\", False),\n])\ndef test_comments(source_line, indent, text, is_valid_comment):\n    comment_line = CommentLine(source_line)\n    assert comment_line.indent == indent\n    assert comment_line.text == text\n    assert comment_line.validates() == is_valid_comment\n\n\ndef test_valid_option_line():\n    source_line = \"importance :: a | b | c\"\n    option_line = OptionLine(source_line)\n    assert option_line.indent == 0\n    assert option_line.key == 'importance'\n    assert option_line.options == set([\"a\", \"b\", \"c\"])\n    assert option_line.validates()\n    assert option_line.text == source_line\n    assert str(option_line) == source_line\n    assert repr(option_line) == source_line\n\n\ndef test_empty_option_items_ignored():\n    source_line = \"urgency::1| 2 || 3\"\n    option_line = OptionLine(source_line)\n    assert option_line.key == 'urgency'\n    assert option_line.options == set([\"1\", \"2\", \"3\"])\n    assert option_line.validates()\n    assert option_line.text == \"urgency :: 1 | 2 | 3\"\n\n\ndef test_no_options_ok_to_declare_key():\n    source_line = \"some_input_any_value_ok ::\"\n    option_line = OptionLine(source_line)\n    assert option_line.key == 
'some_input_any_value_ok'\n assert option_line.options == set()\n assert option_line.validates()\n\n\ndef test_multiple_double_colons_accepted():\n source_line = \"bar:: a | bbb:27| c::95| d||\"\n option_line = OptionLine(source_line)\n assert option_line.key == 'bar'\n assert option_line.options == set([\"a\", \"bbb:27\", \"c::95\", \"d\"])\n assert option_line.text == \"bar :: a | bbb:27 | c::95 | d\"\n\n\n@pytest.mark.parametrize(\"source_line, is_task_line\", [\n (\" [ ] Fix the bathroom door\", True),\n (\"* [>] Apply for deed of variation\", True),\n (\"* [x] Contact agency re contract\", True),\n (\"! [-] No such emphasis char\", False),\n (\"**[@] No such status char\", False),\n (\"[]\", False), # Task text missing\n (\"][\", False),\n (\"[Badly formatted task]\", False),\n (\"No brackets\", False),\n (\"[ Only one left bracket\", False),\n (\"] Only one right bracket\", False),\n (\"* [ ] Two status chars possible?\", True),\n])\ndef test_task_line(source_line, is_task_line):\n # print(source_line)\n task_line = TaskLine(source_line)\n assert task_line.validates() == is_task_line\n if is_task_line:\n assert task_line.text == source_line.rstrip()\n\n\n@pytest.mark.parametrize(\"source_line, status_ch, status, emph_chrs, emph\", [\n (\"* [x] Contact agency re contract\", \"x\", \"finished\", \"*\", \"this-week\"),\n (\" [>] Do this later\", \">\", \"later\", \"\", \"sometime\"),\n (\" [-] Cancelled this one\", \"-\", \"cancelled\", \"\", \"sometime\"),\n (\"**[ ] Desperate measures\", \"\", \"to-do\", \"**\", \"today\"),\n])\ndef test_task_line_status_text(source_line, status_ch, status,\n emph_chrs, emph):\n task_line = TaskLine(source_line)\n assert task_line.status_ch == status_ch\n assert task_line.status == status\n assert task_line.emphasis_chars == emph_chrs\n assert task_line.emphasis == emph\n # assert task_line.task_text == \"Contact agency re contract\"\n assert task_line.text == source_line.rstrip()\n\n\ndef test_not_an_option_line():\n source_line = \" Continuation line for some task\"\n option_line = OptionLine(source_line)\n assert option_line.indent == 0 # Options indents are always 0\n assert option_line.key is None\n assert not option_line.validates()\n\n\ndef test_option_line_forces_lower_case_key_and_options():\n source_line = \"Importance :: A | b | C\"\n option_line = OptionLine(source_line)\n assert option_line.text == \"importance :: a | b | c\"\n\n\n@pytest.mark.parametrize(\"source_line, key\", [\n (\":: d | e | f\", \"\"),\n (\"time value :: 1 | 2 | 3\", \"time value\"),\n])\ndef test_invalid_option_line_with_bad_keys_rejected(source_line, key):\n with pytest.raises(OptionLineError) as exc:\n option_line = OptionLine(source_line)\n assert exc.value.message.startswith('Bad key \"{}\"'.format(key))\n\n\n@pytest.mark.parametrize(\"source_line, class_name\", [\n (\"# Starting with a comment\", 'CommentLine'),\n (\"importance :: a | b | c\", 'OptionLine'),\n (\"[ ] Some task\", 'TaskLine'),\n (\"Mary had a little lamb\", 'TaskContinuationLine'),\n (\"\", 'BlankLine'),\n (\" \", 'BlankLine'),\n (\"\\n\", 'BlankLine'),\n])\ndef test_option_line_factory_individual_lines(source_line, class_name):\n factory = OptionLineFactory()\n # lines = []\n line_obj = factory.make_line(source_line)\n assert line_obj.__class__.__name__ == class_name\n\n\n# Make the list of classes to scan an empty list, for this test only\n@mock.patch.object(OptionLineFactory, 'line_classes', [])\ndef test_no_such_line_type():\n factory = OptionLineFactory()\n with 
pytest.raises(OptionLineError) as exc:\n line_obj = factory.make_line(\"foo bar baz\")\n assert exc.value.message.startswith(\"Unknown line type for option line\")\n\n\ndef test_option_line_factory_from_text_block():\n source_text = \"\"\"\\\n# Starting with a comment\nimportance :: a | b | c\n[ ] Some task\nMary had a little lamb\n [x] This task has been finished\n\n\n# asdasd asdasd\n\"\"\"\n factory = OptionLineFactory()\n class_names = []\n # print(source_text.splitlines())\n for source_line in source_text.rstrip().splitlines():\n line_obj = factory.make_line(source_line)\n class_names.append(line_obj.__class__.__name__)\n assert class_names == ['CommentLine', 'OptionLine', 'TaskLine',\n 'TaskContinuationLine',\n 'TaskLine',\n 'BlankLine', 'BlankLine', 'CommentLine']\n\ndef test_task_lines_with_indents():\n source_text = \"\"\"\\\n [ ] Task 1\n [ ] Mary had a little lamb,\n Its fleece was white as snow.\n And everywhere that Mary went\n The lamb was sure to go.\n [ ] There once was a man with a beard\n Who said it is just as I feared.\n\"\"\"\n option_lines = OptionLines(source_text)\n assert str(option_lines) == source_text.rstrip()\n\n\ndef test_task_lines_with_missing_two_space_indent():\n # Initial two spaces for asterisks have been left out\n source_text = \"\"\"\\\n[ ] Task 3 with indent missing\n Continuation line for task 3\n\"\"\"\n option_lines = OptionLines(source_text)\n expected = \"\"\"\\\n [ ] Task 3 with indent missing\n Continuation line for task 3\n\"\"\"\n assert str(option_lines) == expected.rstrip()\n\n\ndef test_option_lines_correct_indents():\n source_text = \"\"\"\\\nimportance :: a | b | c\n urgency :: 2 | 1 | 2 | 3\n\"\"\"\n expected = \"\"\"\\\nimportance :: a | b | c\nurgency :: 1 | 2 | 3\n\"\"\"\n option_lines = OptionLines(source_text)\n assert str(option_lines) == expected.rstrip()\n\n\ndef test_option_lines_from_text_block():\n source_text = \"\"\"\\\n# This is a comment\nimportance :: a | b | c\n urgency :: 2 | 1 | 2 | 3\n [x] This task has been finished\n* [ ] This task is more urgent\n Do it this week\n**[ ] Do this one today\n But what is it?\n [ ] Mary had a little lamb,\n Its fleece was white as snow.\n And everywhere that Mary went\n The lamb was sure to go.\n [ ] There once was a man with a beard\n Who said it is just as I feared.\n[ ] Task 3 with indent missing\n Continuation line for task 3\n\n [ ] Another final task\n\"\"\"\n expected = \"\"\"\\\n# This is a comment\nimportance :: a | b | c\nurgency :: 1 | 2 | 3\n [x] This task has been finished\n* [ ] This task is more urgent\n Do it this week\n**[ ] Do this one today\n But what is it?\n [ ] Mary had a little lamb,\n Its fleece was white as snow.\n And everywhere that Mary went\n The lamb was sure to go.\n [ ] There once was a man with a beard\n Who said it is just as I feared.\n [ ] Task 3 with indent missing\n Continuation line for task 3\n\n [ ] Another final task\n\"\"\"\n option_lines = OptionLines(source_text)\n assert str(option_lines) == expected.rstrip()\n assert option_lines.lines[0] == \"# This is a comment\"\n assert option_lines.lines[1].startswith(\"importance\")\n assert len(option_lines.lines) == 18\n\n\ndef test_bad_task_line_sequence():\n source_text = \"\"\"\\\nimportance :: a | b | c\nMary had a little lamb\n\"\"\"\n with pytest.raises(OptionLineError) as exc:\n option_lines = OptionLines(source_text)\n assert exc.value.message.startswith(\"Bad task line sequence\")\n\n\ndef test_duplicate_option_keys_rejected():\n source_text = \"\"\"\\\nnames :: x | y | z\nfoo :: 1 | 2 | 
3\nnames :: adam | bill | charlie\n\"\"\"\n with pytest.raises(OptionLineError) as exc:\n option_lines = OptionLines(source_text)\n assert exc.value.message.startswith(\"Duplicate option keys\")\n\n\ndef test_duplicate_options_across_keys_rejected():\n source_text = \"\"\"\\\navailability :: am | eve | pm\nimportance :: b | c | a\nnames :: adam | eve | bill\n\"\"\"\n with pytest.raises(OptionLineError) as exc:\n option_lines = OptionLines(source_text)\n assert exc.value.message.startswith(\"Duplicate options\")\n\n\ndef test_options_set_in_parse_text():\n source_text = \"\"\"\\\nstatus :: queued | started | nearly-done | finished | on-hold\nsupermarket :: morrisons | sainsburys | tesco\n\"\"\"\n option_lines = OptionLines(\"# This will be overwritten\")\n option_lines.parse_text(source_text)\n assert 'started' in option_lines.option_keys['status'].options\n assert len(option_lines.option_keys['status'].options) == 5\n assert len(option_lines.all_options) == 8\n\n\ndef test_parse_text_2():\n source_text = \"\"\"\\\navailability :: am | eve | pm\nimportance :: b | c | a\n# Note: intentional duplication removed from output\ninternet :: offline | connected | offline\n\"\"\"\n option_lines = OptionLines(source_text)\n assert option_lines.option_keys['availability'\n ].options == set(['am', 'eve', 'pm'])\n assert option_lines.option_keys['importance'\n ].options == set(['a', 'b', 'c'])\n assert option_lines.option_keys['internet'\n ].options == set(['connected', 'offline'])\n assert option_lines.lines[1] == \"importance :: a | b | c\"\n assert str(option_lines) == \"\"\"\\\navailability :: am | eve | pm\nimportance :: a | b | c\n# Note: intentional duplication removed from output\ninternet :: connected | offline\"\"\"\n\n\ndef test_full_text_input_3():\n source_text = \"\"\"\\\n# environment.txt\n# For ease of reading and editing, using options format.\n\navailability :: pm |am | eve | pm\nimportance :: a | c| b\ninternet :: connected | offline\nlocation :: banbury | isleworth | kings-sutton | south-bank-centre\n | whitnash | bognor-regis | glasgow | worthing\n | lands-end||| shopping-in-leamington || deddington\n# Status uses words rather than dates now\nstatus :: queued | started | nearly-done | finished | on-hold\nsupermarket :: morrisons | sainsburys | tesco | asda | m&s\nurgency :: sometime | this-month | this-week | today | tomorrow\nweather :: fine | rain | showers\n\"\"\"\n option_lines = OptionLines(source_text)\n assert len(option_lines) == 12\n assert str(option_lines) == \"\"\"\\\n# environment.txt\n# For ease of reading and editing, using options format.\n\navailability :: am | eve | pm\nimportance :: a | b | c\ninternet :: connected | offline\nlocation :: banbury | bognor-regis | deddington | glasgow\n | isleworth | kings-sutton | lands-end\n | shopping-in-leamington | south-bank-centre\n | whitnash | worthing\n# Status uses words rather than dates now\nstatus :: finished | nearly-done | on-hold | queued | started\nsupermarket :: asda | m&s | morrisons | sainsburys | tesco\nurgency :: sometime | this-month | this-week | today | tomorrow\nweather :: fine | rain | showers\"\"\"\n\ndef test_full_text_input_4():\n source_text = \"\"\"\\\n# Try out some tasks. 
Where should the comments be attached?\n # Indented comment\n# TO-DO Make this a heading: Business\n* [/] Details of competitors\n Send the spreadsheet of 60 online web apps\n**[ ] Prepare data for P11D\n# TO-DO Make this a heading: Personal\n [x] Buy cat food\n\navailability :: am | eve | pm\nimportance :: a | b | c\ninternet :: connected | offline\nurgency :: sometime | this-month | this-week | today | tomorrow\n\"\"\"\n option_lines = OptionLines(source_text)\n print(option_lines)\n assert str(option_lines) == source_text.rstrip()\n\n\ndef test_option_lines_check_is_option_subset_of():\n outer_text = \"\"\"\\\n# environment, listing all possibilities.\navailability :: am | eve | pm\ninternet :: connected | offline\nweather :: fine | rain | showers\"\"\"\n inner_text = \"\"\"\\\ninternet :: connected\nweather :: rain | showers\"\"\"\n opt_lines_outer = OptionLines(outer_text)\n opt_lines_inner = OptionLines(inner_text)\n # Raises exception if not subset\n opt_lines_inner.check_is_option_subset_of(opt_lines_outer)\n\n\ndef test_option_lines_check_is_option_subset_of_bad_option():\n outer_text = \"weather :: fine | rain | showers\"\n inner_text = \"weather :: cloudy\"\n opt_lines_outer = OptionLines(outer_text)\n opt_lines_inner = OptionLines(inner_text)\n with pytest.raises(OptionSubsetError) as exc:\n opt_lines_inner.check_is_option_subset_of(opt_lines_outer)\n assert exc.value.message.startswith(\n \"['cloudy'] not found in \\\"weather\\\" options\")\n\n\ndef test_option_lines_check_is_option_subset_of_bad_key():\n outer_text = \"weather :: fine | rain | showers\"\n inner_text = \"foo :: bar\"\n opt_lines_outer = OptionLines(outer_text)\n opt_lines_inner = OptionLines(inner_text)\n with pytest.raises(OptionSubsetError) as exc:\n opt_lines_inner.check_is_option_subset_of(opt_lines_outer)\n assert exc.value.message.startswith('\"foo\" not found as option key')\n\n\n@pytest.mark.parametrize(\"source_line, max_option_length, expected\", [\n (\"a | b | c\", 20, [\"a | b | c\"]),\n (\"a | b | c | d | e\", 17, [\"a | b | c | d | e\"]),\n (\"a | b | c | d | e\", 13, [\"a | b | c | d\", \"| e\"]),\n (\"a | b | c | d | e\", 7, [\"a | b\", \"| c | d\", \"| e\"]),\n (\"a | b | c\", 3, [\"a\", \"| b\", \"| c\"]),\n (\"banbury | isleworth | kings-sutton | south-bank-centre\", 58,\n [\"banbury | isleworth | kings-sutton | south-bank-centre\"]),\n])\ndef test_wrap_options(source_line, max_option_length, expected):\n opt_line = OptionLine()\n opts_gen = (opt.strip() for opt in source_line.split('|'))\n opt_line.options = set(opt for opt in opts_gen if len(opt))\n assert opt_line._wrap_options(max_option_length) == expected\n\n\ndef test_handles_option_continuation_lines():\n source_text = \"\"\"\\\nlocation :: banbury | isleworth | kings-sutton | south-bank-centre\n | whitnash\n\"\"\"\n opt_lines = OptionLines(source_text)\n assert len(opt_lines) == 1\n assert len(opt_lines.option_keys['location'].options) == 5\n assert opt_lines.all_options['whitnash'].key == 'location'\n # Check that repr() gives the same result as str()\n assert repr(opt_lines) == \"\"\"\\\nlocation :: banbury | isleworth | kings-sutton | south-bank-centre\n | whitnash\"\"\"\n\n\ndef test_option_continuation_lines_2():\n source_text = \"\"\"\\\nlocation1234 :: banbury | isleworth | kings-sutton | south-bank-centre\n | whitnash | bognor-regis | glasgow | worthing\n | lands-end||| shopping-in-leamington || deddington\n\"\"\"\n expected = \"\"\"\\\nlocation1234 :: banbury | bognor-regis | deddington | glasgow\n | isleworth | 
kings-sutton | lands-end\n | shopping-in-leamington | south-bank-centre\n | whitnash | worthing\"\"\"\n opt_lines = OptionLines(source_text)\n assert str(opt_lines) == expected\n\n\ndef test_cant_start_text_with_option_continuation_char():\n source_text = \"| more-options\"\n with pytest.raises(OptionLineError) as exc:\n opt_lines = OptionLines(source_text)\n assert exc.value.message.startswith(\"Bad option line sequence\")\n\n\ndef test_handles_task_continuation_lines():\n source_text = \"\"\"\\\n [ ] Some task to-do\n More details of task\n And some more info\nalphabet :: a | b | c\n | d | e | f\n* [ ] This is a second task\n Need to look at this web site: http://www.example.com\n\"\"\"\n opt_lines = OptionLines(source_text)\n print(opt_lines)\n assert len(opt_lines) == 3\n assert len(opt_lines.tasks) == 2\n assert len(opt_lines.option_keys) == 1\n assert len(opt_lines.all_options) == 6\n\n\ndef test_init_with_long_lines():\n source_text = \"\"\"\\\nlocation :: banbury | isleworth | south-bank-centre\n | kings-sutton\n | whitnash\n\"\"\"\n opt_lines1 = OptionLines(source_text, 77)\n assert str(opt_lines1) == (\"location :: banbury | isleworth | \" +\n \"kings-sutton | south-bank-centre | whitnash\")\n opt_lines2 = OptionLines(source_text) # Default max_line_length = 70\n assert str(opt_lines2) == \"\"\"\\\nlocation :: banbury | isleworth | kings-sutton | south-bank-centre\n | whitnash\"\"\"\n opt_lines3 = OptionLines(source_text, 46)\n assert str(opt_lines3) == \"\"\"\\\nlocation :: banbury | isleworth | kings-sutton\n | south-bank-centre | whitnash\"\"\"\n opt_lines4 = OptionLines(source_text, 45)\n assert str(opt_lines4) == \"\"\"\\\nlocation :: banbury | isleworth\n | kings-sutton\n | south-bank-centre | whitnash\"\"\"\n opt_lines5 = OptionLines(source_text, 31)\n assert str(opt_lines5) == \"\"\"\\\nlocation :: banbury | isleworth\n | kings-sutton\n | south-bank-centre\n | whitnash\"\"\"\n\n\ndef test_task_list_text_1b27():\n source_text = \"\"\"\\\n [x] Cancel BT line: Business line --> 0800 800152\n Only 8-6, M-F. 
Need phone bill to hand.\n          Collect phone bill from Isleworth\n    [>] Cancel Zen Internet DD after 30 July\n          Foo bar\n    [ ] Contact Virgin Media, re transfer of broadband line\n*   [ ] PythonPro VAT return\n    [ ] https://www.123-reg.co.uk/secure domains\n    [/] daily.co.uk domains\n\"\"\"\n    option_lines = OptionLines(source_text)\n    assert str(option_lines) == source_text.rstrip()\n\n","sub_path":"pp/utils/tests/test_optionlines.py","file_name":"test_optionlines.py","file_ext":"py","file_size_in_byte":18908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"43548330","text":"import praw\n\nAPP_ID = 'YourAppId'\nAPP_SECRET = 'YourAppSecret'\nAPP_URI = 'https://127.0.0.1:65010/authorize_callback'\nUSER_AGENT = 'YourUserAgent'\nAPP_SCOPES = 'account creddits edit flair history identity livemanage modconfig modcontributors modflair modlog modothers modposts modself modwiki mysubreddits privatemessages read report save submit subscribe vote wikiedit wikiread'\nAPP_ACCOUNT_CODE = 'YourAppAccountCode'\nAPP_REFRESH = 'YourAppRefresh'\n\ndef login():\n    r = praw.Reddit(USER_AGENT)\n    r.set_oauth_app_info(APP_ID, APP_SECRET, APP_URI)\n    r.refresh_access_information(APP_REFRESH)\n    return r\n\n","sub_path":"obot.py","file_name":"obot.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"630131108","text":"# -*- coding: utf-8 -*-\nimport os\n\n__author__ = 'lxj'\n\n# Project root path\nBASE_DIR = os.path.abspath(os.path.dirname(__file__))\n\n# Enable protection against cross-site request forgery\nWTF_CSRF_ENABLED = True\n# Secret key carried by cross-site form tokens; should be a uniquely generated string\nWTF_CSRF_SECRET_KEY = os.urandom(16)\n# Used by sessions\nSECRET_KEY = WTF_CSRF_SECRET_KEY\n\n# db\nSQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(BASE_DIR, 'app.db')\n# Directory where database migration files are stored\nSQLALCHEMY_MIGRATE_DIR = os.path.join(BASE_DIR, 'db_migrate')\n\n# Directory where uploaded files are saved: /static/uploads\nUPLOADS_DIR = 'uploads'\n\n# Number of blog posts per page\nBLOG_PAGE_COUNT = 5\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"82153431","text":"arquivo = open(\"texto.txt\", 'r')\n\nn = int(input(\"Enter the number n: \")) \n\nlista = []\nl = []\n\nfor linha in arquivo:\n    l = linha.split()\n    for palavra in l:\n        lista.append(palavra)\n\nfor c in lista:\n    if len(c) <= n:\n        print(c)\n\n\narquivo.close()","sub_path":"Exericicios e provas/Prova7/problema2.py","file_name":"problema2.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"614565428","text":"import re\n\ndef string_to_coords(string):\n\tassert re.match(r'^[nsew]+$', string)\n\tx, y = 0, 0\n\ti = 0\n\twhile i < len(string):\n\t\tchar = string[i]\n\t\tif char == 'n' or char == 's':\n\t\t\tassert i + 1 < len(string)\n\t\t\tchar2 = string[i + 1]\n\t\t\tif char == 'n':\n\t\t\t\ty += 2\n\t\t\telse:\n\t\t\t\ty -= 2\n\t\t\tif char2 == 'w':\n\t\t\t\tx -= 1\n\t\t\telse:\n\t\t\t\tassert char2 == 'e'\n\t\t\t\tx += 1\n\t\t\ti += 1\n\t\telif char == 'w':\n\t\t\tx -= 2\n\t\telif char == 'e':\n\t\t\tx += 2\n\t\telse:\n\t\t\tassert False\n\n\t\ti += 1\n\treturn 
len(black)\n\nprint(main())\n","sub_path":"2020/24a.py","file_name":"24a.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"282874193","text":"from Crypto.Cipher import AES, PKCS1_OAEP\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Random import get_random_bytes\nfrom pathlib import Path\n\nfrom pickle import dumps, loads\n\n\ndef encode_encrypt(cmd: str, public_key: RSA.RsaKey):\n    \"\"\"Encode a command then encrypt with RSA public key\n\n    Parameters:\n        cmd: str << a command\n        public_key: RSA.RsaKey << RSA.import_key()\n\n    Returns:\n        An encrypted byte string << pickle.dumps()\n    \"\"\"\n    encoded_cmd = cmd.encode()\n    session_key = get_random_bytes(16)\n\n    cipher_RSA = PKCS1_OAEP.new(key=public_key)\n    encrypted_session_key = cipher_RSA.encrypt(session_key)\n\n    cipher_AES = AES.new(session_key, AES.MODE_EAX)\n    encrypted_cmd, tag = cipher_AES.encrypt_and_digest(encoded_cmd)\n\n    bundle = (encrypted_session_key, cipher_AES.nonce, tag, encrypted_cmd)\n    return dumps(bundle)\n\n\ndef decrypt_decode(bundle: bytes, private_key: RSA.RsaKey):\n    \"\"\"Decrypt a command with RSA private key then decode\n\n    Parameters:\n        bundle: bytes << an encrypted byte string\n        private_key: RSA.RsaKey << RSA.import_key()\n\n    Returns:\n        A string\n    \"\"\"\n    session_key, nonce, tag, encrypted_cmd = loads(bundle)\n\n    cipher_RSA = PKCS1_OAEP.new(key=private_key)\n    session_key = cipher_RSA.decrypt(session_key)\n\n    cipher_AES = AES.new(session_key, AES.MODE_EAX, nonce)\n    encoded_cmd = cipher_AES.decrypt_and_verify(encrypted_cmd, tag)\n    \n    return encoded_cmd.decode()\n\n\ndef import_key(private_key_file: str, public_key_file):\n    \"\"\"Import keys from a directory\n\n    Parameters:\n        private_key_file: str << Filepath where private key is saved\n        public_key_file: str << Filepath to where public key is saved\n    \n    Returns:\n        private_key, public_key: RSA.RsaKey\n    \"\"\"\n    private_key_file = Path(private_key_file)\n    public_key_file = Path(public_key_file)\n\n    private_key = RSA.import_key(open(private_key_file, 'r').read())\n    public_key = RSA.import_key(open(public_key_file, 'r').read())\n    return private_key, public_key\n\n\nif __name__ == '__main__':\n    private_key, public_key = import_key('client/private.pigeon.txt', 'client/public.pigeon.txt')\n\n    cmd = 'ls'\n\n    print(decrypt_decode(encode_encrypt(cmd, public_key), private_key))","sub_path":"pigeon-shell/_utils.py","file_name":"_utils.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"579961265","text":"import os\nimport re\n\nfrom datetime import datetime\nfrom airflow.models import BaseOperator\nfrom airflow.plugins_manager import AirflowPlugin\nfrom airflow.utils.decorators import apply_defaults\nfrom airflow.operators.sensors import BaseSensorOperator\n\n\nfrom airflow import DAG\nfrom airflow.operators import PythonOperator\nfrom datetime import datetime, timedelta\nfrom airflow.models import Variable\n\n\nclass OmegaFileSensor(BaseSensorOperator):\n    @apply_defaults\n    def __init__(self, filepath, filepattern, *args, **kwargs):\n        super(OmegaFileSensor, self).__init__(*args, **kwargs)\n        self.filepath = filepath\n        self.filepattern = filepattern\n\n    def poke(self, context):\n        full_path = self.filepath\n        file_pattern = re.compile(self.filepattern)\n\n        directory = os.listdir(full_path)\n\n        # Succeed on the first matching file; keep scanning otherwise.\n        for files in directory:\n            if re.match(file_pattern, files):\n                context['task_instance'].xcom_push('file_name', files)\n                return True\n        return False\n\n\ndefault_args = {\n    'owner': 'owner',\n    'depends_on_past': False,\n    'start_date': datetime.now(),\n    'provide_context': True,\n    'retries': 100,\n    'retry_delay': timedelta(seconds=30),\n    'max_active_runs': 1,\n    'schedule_interval': timedelta(seconds=5),\n}\n\ndag = DAG('test_sensing_for_a_file', default_args=default_args)\n\nfilepath = '/usr/local/airflow/logs/ft'\nfilepattern = 'ft'\n\nsensor_task = OmegaFileSensor(\n    task_id='file_sensor_task',\n    filepath=filepath,\n    filepattern=filepattern,\n    poke_interval=3,\n    dag=dag)\n\n\ndef process_file(**context):\n    file_to_process = context['task_instance'].xcom_pull(\n        key='file_name', task_ids='file_sensor_task')\n    file = open(os.path.join(filepath, file_to_process), 'w')\n    file.write('This is a test\\n')\n    file.write('of processing the file')\n    file.close()\n\n\nprocess_task = PythonOperator(\n    task_id='process_the_file', python_callable=process_file, dag=dag)\n\n\nsensor_task >> process_task\n","sub_path":"example/airflow-exporter-python2/airflow/dags/ff_dag.py","file_name":"ff_dag.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"570496222","text":"import scrapy\nimport links_from_header\nimport json\nimport os\n\nclass Repo(scrapy.Item):\n    id = scrapy.Field()\n    languages_url = scrapy.Field()\n    name = scrapy.Field()\n\nclass Scraper(scrapy.Spider):\n    name = \"github\"\n    access_token = os.environ['github_token']\n\n    def __init__(self, *args, **kwargs):\n        super(Scraper, self).__init__(*args, **kwargs)\n        self.count = 0\n\n    start_urls = ['https://api.github.com/repositories?since=76761293&access_token={}'.format(access_token)]\n    \n    def parse(self, response):\n        \n        self.logger.info(self.count)\n        if self.count == 100:\n            return\n        \n        self.count += 1\n\n        json_body = json.loads(response.body.decode('utf-8'))\n\n        for item in json_body:\n            current_item = Repo()\n            current_item['id'] = item['id']\n            current_item['name'] = item['name']\n            current_item['languages_url'] = item['languages_url']\n            yield current_item\n        \n        link_header = links_from_header.extract(response.headers['Link'].decode('utf-8'))\n        if 'next' in link_header:\n            next_page = link_header['next']\n            yield scrapy.Request(next_page, callback=self.parse)","sub_path":"github_scraper/github_scraper/spiders/repo_scraper.py","file_name":"repo_scraper.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"414559128","text":"import random\nimport string\nimport httplib2\nimport json\nimport requests\nfrom flask import Flask, render_template, request, redirect, url_for, flash, \\\n    jsonify\nfrom flask_bootstrap import Bootstrap\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Base, Category, CategoryItem, User\nfrom flask import session as login_session\nfrom oauth2client.client import flow_from_clientsecrets\nfrom oauth2client.client import FlowExchangeError\nfrom flask import make_response\n\napp = Flask(__name__)\napp.config['SESSION_TYPE'] = 'memcached'\napp.config['SECRET_KEY'] = 'super_secret_key'\nBootstrap(app)\n\nCLIENT_ID = json.loads(\n    open('client_secrets.json', 'r').read())['web']['client_id']\nAPPLICATION_NAME = \"Udacity FSND Catalog Project\"\n\n#engine = create_engine('sqlite:///database.db',\n#                       connect_args={'check_same_thread': False})\n\nengine = create_engine('postgresql://catalog:grader@localhost/catalog')\n\nBase.metadata.bind = 
engine\n\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n\n# Create a state token to prevent request forgery.\n# Store it in the session for later validation.\n@app.route('/login')\ndef showLogin():\n # Convert Python2 xrange() to Python3 range()\n xrange = range\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\n for x in xrange(32))\n login_session['state'] = state\n return render_template('login.html', STATE=state)\n\n\n@app.route('/gconnect', methods=['POST'])\ndef gconnect():\n # Validate state token\n if request.args.get('state') != login_session['state']:\n response = make_response(json.dumps('Invalid state parameter.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n # Obtain authorization code\n code = request.data\n\n try:\n # Upgrade the authorization code into a credentials object\n oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')\n oauth_flow.redirect_uri = 'postmessage'\n credentials = oauth_flow.step2_exchange(code)\n except FlowExchangeError:\n response = make_response(\n json.dumps('Failed to upgrade the authorization code.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Check that the access token is valid.\n access_token = credentials.access_token\n url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'\n % access_token)\n h = httplib2.Http()\n result = json.loads(h.request(url, 'GET')[1])\n # If there was an error in the access token info, abort.\n if result.get('error') is not None:\n response = make_response(json.dumps(result.get('error')), 500)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is used for the intended user.\n gplus_id = credentials.id_token['sub']\n if result['user_id'] != gplus_id:\n response = make_response(\n json.dumps(\"Token's user ID doesn't match given user ID.\"), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is valid for this app.\n if result['issued_to'] != CLIENT_ID:\n response = make_response(\n json.dumps(\"Token's client ID does not match app's.\"), 401)\n print(\"Token's client ID does not match app's.\")\n response.headers['Content-Type'] = 'application/json'\n return response\n\n stored_access_token = login_session.get('access_token')\n stored_gplus_id = login_session.get('gplus_id')\n if stored_access_token is not None and gplus_id == stored_gplus_id:\n response = make_response(\n json.dumps('Current user is already connected.'),\n 200)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Store the access token in the session for later use.\n login_session['access_token'] = credentials.access_token\n login_session['gplus_id'] = gplus_id\n\n # Get user info\n userinfo_url = \"https://www.googleapis.com/oauth2/v1/userinfo\"\n params = {'access_token': credentials.access_token, 'alt': 'json'}\n answer = requests.get(userinfo_url, params=params)\n\n data = answer.json()\n\n login_session['username'] = data['name']\n login_session['picture'] = data['picture']\n login_session['email'] = data['email']\n\n # see if user exists, if it doesn't make a new one:\n user_id = getUserID(login_session['email'])\n if not user_id:\n user_id = createUser(login_session)\n login_session['user_id'] = user_id\n\n output = ''\n output += '

<h1>Welcome, '\n    output += login_session['username']\n    output += '!</h1>
'\n output += ' '\n flash(\"you are now logged in as %s\" % login_session['username'])\n print(\"done!\")\n return output\n\n\n# User Helper Functions\ndef createUser(login_session):\n newUser = User(name=login_session['username'], email=login_session[\n 'email'], picture=login_session['picture'])\n session.add(newUser)\n session.commit()\n user = session.query(User).filter_by(email=login_session['email']).one()\n return user.id\n\n\ndef getUserInfo(user_id):\n user = session.query(User).filter_by(id=user_id).one()\n return user\n\n\ndef getUserID(email):\n try:\n user = session.query(User).filter_by(email=email).one()\n return user.id\n except Exception:\n return None\n\n\n# DISCONNECT - Revoke a current user's token and reset their login_session\n@app.route('/gdisconnect')\ndef gdisconnect():\n access_token = login_session.get('access_token')\n if access_token is None:\n print('Access Token is None')\n response = make_response(json.dumps(\n 'Current user not connected.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n print('In gdisconnect access token is %s', access_token)\n print('User name is: ')\n print(login_session['username'])\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % \\\n login_session['access_token']\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n print('result is ')\n print(result)\n if result['status'] == '200':\n del login_session['access_token']\n del login_session['gplus_id']\n del login_session['username']\n del login_session['email']\n del login_session['picture']\n response = make_response(json.dumps('Successfully disconnected.'), 200)\n response.headers['Content-Type'] = 'application/json'\n flash(\"Successfully logged out!\")\n return redirect(url_for('categories_home'))\n # return response\n else:\n response = make_response(\n json.dumps('Failed to revoke token for given user.', 400))\n response.headers['Content-Type'] = 'application/json'\n return response\n\n\n# Making an API Endpoint for the Category Items:\n# ALLOW PUBLIC ACCESS\n@app.route('//JSON/')\ndef category_items_json(category_id):\n category = session.query(Category).filter_by(id=category_id).one()\n items = session.query(CategoryItem).filter_by(category_id=category_id)\n return jsonify(CategoryItem=[i.serialize for i in items])\n\n\n# Making an API Endpoint for ONE Category Item:\n# ALLOW PUBLIC ACCESS\n@app.route('///JSON/')\ndef category_item_json(category_id, item_id):\n item = session.query(CategoryItem).filter_by(id=category_id).one()\n return jsonify(CategorItem=item.serialize)\n\n\n# Home page to list all of our Categories:\n# ALLOW PUBLIC ACCESS\n@app.route('/')\ndef categories_home():\n category = session.query(Category).all()\n items = session.query(CategoryItem).order_by(CategoryItem.id.desc()).limit(\n 5)\n if 'username' not in login_session:\n return render_template('public_catalog.html', category=category,\n items=items)\n else:\n return render_template('catalog.html', category=category, items=items)\n\n\n# Page to add a new category to the app:\n# RESTRICT TO LOGGED IN USERS ONLY\n@app.route('/new/', methods=['GET', 'POST'])\ndef new_category():\n if 'username' not in login_session:\n return redirect('login')\n if request.method == 'POST':\n newCategory = Category(name=request.form['name'],\n user_id=login_session['user_id'])\n session.add(newCategory)\n session.commit()\n flash(\"new category created!\")\n return redirect(url_for('categories_home'))\n else:\n return render_template('newCategory.html')\n\n\n# Page to edit 
a category:\n# RESTRICT TO LOGGED IN USERS ONLY\n@app.route('//edit/', methods=['GET', 'POST'])\ndef edit_category(category_id):\n updatedCategory = session.query(Category).filter_by(id=category_id).one()\n category = session.query(Category).filter_by(id=category_id).one()\n if 'username' not in login_session:\n return redirect('login')\n creator = getUserInfo(category.user_id)\n if creator.id != login_session['user_id']:\n flash(\n \"You cannot edit this category. This category belongs to %s\" %\n creator.name)\n return redirect(url_for('categories_home'))\n if request.method == 'POST':\n if request.form['name']:\n updatedCategory.name = request.form['name']\n session.add(updatedCategory)\n session.commit()\n flash(\"category updated!\")\n return redirect(url_for('categories_home'))\n else:\n return render_template('editCategory.html', category=updatedCategory)\n # category=updatedCategory is the database category at id=category_id\n # i.e. updatedCategory.name would = Baseball for example\n\n\n# #Page to delete a category:\n# RESTRICT TO LOGGED IN USERS ONLY\n@app.route('//delete/', methods=['GET', 'POST'])\ndef delete_category(category_id):\n deleteCategory = session.query(Category).filter_by(id=category_id).one()\n category = session.query(Category).filter_by(id=category_id).one()\n if 'username' not in login_session:\n return redirect('login')\n creator = getUserInfo(category.user_id)\n if creator.id != login_session['user_id']:\n flash(\n \"You cannot delete this category. This category belongs to %s\" %\n creator.name)\n return redirect(url_for('categories_home'))\n if request.method == 'POST':\n session.delete(deleteCategory)\n session.commit()\n flash(\"category deleted!\")\n return redirect(url_for('categories_home'))\n else:\n return render_template('deleteCategory.html', category=deleteCategory)\n\n\n# Page to show a category and all of its associated items:\n# ALLOW PUBLIC ACCESS\n@app.route('//')\ndef category_items(category_id):\n category = session.query(Category).filter_by(id=category_id).one()\n items = session.query(CategoryItem).filter_by(category_id=category_id)\n a_category = session.query(Category).filter_by(id=category_id).one()\n creator = getUserInfo(category.user_id)\n if 'username' not in login_session:\n return render_template('public_category.html', category=category,\n items=items)\n elif 'username' in login_session and a_category.user_id != \\\n login_session['user_id']:\n flash(\"You can only view these category items. This category \"\n \"belongs to %s\" % creator.name)\n return render_template('public_category.html', category=category,\n items=items)\n else:\n return render_template('category.html', category=category, items=items)\n creator = getUserInfo(a_category.category_id)\n # TODO delete if app works if creator.id != login_session['user_id']:\n # flash (\"You can only view these category items. 
This category belongs\n # to %s\" % creator.name) return render_template('public_category.html',\n # category=category,\\ items=items)\n\n\n# Page to add a new item to a specific category\n# RESTRICT TO LOGGED IN USERS ONLY\n@app.route('//new/', methods=['GET', 'POST'])\ndef new_category_item(category_id):\n category = session.query(Category).filter_by(id=category_id).one()\n if 'username' not in login_session:\n return redirect('login')\n if request.method == 'POST':\n newItem = CategoryItem(name=request.form['name'],\n description=request.form['description'],\n user_id=login_session['user_id'],\n category_id=category_id)\n session.add(newItem)\n session.commit()\n flash(\"new category item created!\")\n return redirect(url_for('category_items', category_id=category_id))\n else:\n return render_template('newItem.html', category=category,\n category_id=category_id)\n\n\n# Create route to edit a menu item:\n# RESTRICT TO LOGGED IN USERS ONLY\n@app.route('///edit/', methods=['GET', 'POST'])\ndef edit_category_item(category_id, item_id):\n updatedItem = session.query(CategoryItem).filter_by(id=item_id).one()\n category = session.query(CategoryItem).filter_by(id=category_id).one()\n item_user = session.query(CategoryItem).filter_by(id=item_id).one()\n if 'username' not in login_session:\n return redirect('login')\n creator = getUserInfo(item_user.user_id)\n if creator.id != login_session['user_id']:\n flash(\"You cannot edit this category item. This category item \"\n \"belongs to %s\" % creator.name)\n return redirect(url_for('categories_home'))\n if request.method == 'POST':\n if request.form['name'] and request.form['description']:\n updatedItem.name = request.form['name']\n updatedItem.description = request.form['description']\n session.add(updatedItem)\n session.commit()\n flash(\"category item updated!\")\n return redirect(url_for('category_items', category_id=category_id))\n elif request.form['name'] and not request.form['description']:\n updatedItem.name = request.form['name']\n session.add(updatedItem)\n session.commit()\n flash(\"category item updated!\")\n return redirect(url_for('category_items', category_id=category_id))\n elif request.form['description'] and not request.form['name']:\n updatedItem.description = request.form['description']\n session.add(updatedItem)\n session.commit()\n flash(\"category item updated!\")\n return redirect(url_for('category_items', category_id=category_id))\n else:\n return render_template('editCategoryItem.html',\n category_id=category_id, item_id=item_id,\n item=updatedItem)\n\n\n# Create route to delete a menu item:\n# RESTRICT TO LOGGED IN USERS ONLY\n@app.route('///delete/', methods=['GET', 'POST'])\ndef delete_category_item(category_id, item_id):\n deletedItem = session.query(CategoryItem).filter_by(id=item_id).one()\n category = session.query(Category).filter_by(id=category_id).one()\n if 'username' not in login_session:\n return redirect('login')\n creator = getUserInfo(category.user_id)\n if creator.id != login_session['user_id']:\n flash(\"You cannot delete this category item. 
This category item \"\n \"belongs to %s\" % creator.name)\n return redirect(url_for('categories_home'))\n if request.method == 'POST':\n session.delete(deletedItem)\n session.commit()\n flash(\"category item deleted!\")\n return redirect(url_for('category_items', category_id=category_id))\n else:\n return render_template('deleteCategoryItem.html',\n category_id=category_id, item_id=item_id,\n item=deletedItem)\n\n\nif __name__ == '__main__':\n app.secret_key = 'super_secret_key'\n app.debug = True\n app.run(host='0.0.0.0', port=5000)\n","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":16382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"144888591","text":"\"\"\"\nDate: 20200316\nUser: Alan Viegas\nProject: Search on site ReclameAqui.com.br for score-points and reclamations of the Company\n\n\"\"\"\nimport pytest\nfrom unittest import TestCase\nfrom src.dao.reclamations import CompanyReclamations\n\nclass TestDao(TestCase):\n \"\"\"\n Test for class on Dao layer\n \"\"\"\n def test_get_scores(self):\n \"\"\"\n :assert: Verify\n \"\"\"\n cr = CompanyReclamations()\n cr.search_company('Cielo')\n scores = cr.get_scores\n\n answered = \"5914\"\n reclamations = \"6160\"\n response_time = \"14 dias e 8 horas \"\n unanswered = \"246\"\n\n assert (answered == scores[\"answered\"])\n assert (reclamations == scores[\"reclamations\"])\n assert (response_time == scores[\"response_time\"])\n assert (unanswered == scores[\"unanswered\"])\n\n#if __name__ == '__main__':\n# TestDao().test_get_scores()\n\n\n'''\nreclamations = cr.get_reclamations\nprint(reclamations)\ncr.close()\n'''\n","sub_path":"apps/API_consultaReclameAqui/test/dao/dao_test.py","file_name":"dao_test.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"163079716","text":"import sys\nsys.stdin = open('input_5105.txt', 'r')\nsys.stdout = open('output_5105.txt', 'w')\n\nT = int(input())\nfor t in range(1, T+1):\n n = int(input())\n miro = []\n for q in range(n + 2):\n\n if q == 0:\n miro.append(list(1 for _ in range(n + 2)))\n elif q == n + 1:\n miro.append(list(1 for _ in range(n + 2)))\n else:\n k = str(input())\n miro.append([1] + list(int(x) for x in k) + [1])\n\n\n idx2 = []\n idx3 = []\n for i in range(n + 2):\n if 2 in miro[i]:\n idx2 = [i, miro[i].index(2)]\n elif 3 in miro[i]:\n idx3 = [i, miro[i].index(3)]\n\n a = idx2[0]\n b = idx2[1]\n stack = [[a, b]]\n x = idx3[0]\n y = idx3[1]\n\n while len(stack) >= 1:\n # 3을 만났을 때\n if miro[a - 1][b] == 3 or miro[a + 1][b] == 3 or miro[a][b - 1] == 3 or miro[a][b + 1] == 3:\n break\n\n # 위\n if miro[a - 1][b] == 0:\n a = a - 1\n b = b\n miro[a][b] = 4\n stack += [[a, b]]\n\n\n\n # 아래\n elif miro[a + 1][b] == 0:\n a = a + 1\n b = b\n miro[a][b] = 4\n stack += [[a, b]]\n\n # 왼\n elif miro[a][b - 1] == 0:\n a = a\n b = b - 1\n miro[a][b] = 4\n stack += [[a, b]]\n\n\n # 오\n elif miro[a][b + 1] == 0:\n a = a\n b = b + 1\n miro[a][b] = 4\n stack += [[a, b]]\n\n # 모두 0이 아닐 때\n else:\n stack.pop()\n miro[a][b] = 5\n if len(stack) != 0:\n a = stack[-1][0]\n b = stack[-1][1]\n else:\n break\n\n cnt = 0\n for o in range(n + 2):\n for p in range(n + 2):\n if 4 == miro[o][p]:\n cnt += 1\n print(f'#{t} {cnt}')\n\n\n\n\n","sub_path":"SWEA/5105_미로의거리.py","file_name":"5105_미로의거리.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} 
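A note on the maze record above (`5105_미로의거리.py`): it runs an iterative depth-first search from the cell marked 2, marking open cells with 4 as it steps onto them and overwriting them with 5 when it backtracks off a dead end, then counts the remaining 4s once a cell adjacent to the goal (3) is found; its Korean comments label the cases ("3을 만났을 때" = "when 3 is encountered", "위/아래/왼/오" = up/down/left/right, "모두 0이 아닐 때" = "when no neighbour is 0"). A minimal sketch of the same stack-based DFS follows; the 5x5 grid is a made-up stand-in for the parsed puzzle input, not data from the record:

```python
# Sketch of the stack-based DFS used in the maze record above.
# Grid values: 0 = open, 1 = wall, 2 = start, 3 = goal (made-up example).
grid = [
    [1, 1, 1, 1, 1],
    [1, 2, 0, 0, 1],
    [1, 1, 1, 0, 1],
    [1, 3, 0, 0, 1],
    [1, 1, 1, 1, 1],
]

# locate the start cell (value 2)
start = next((r, c) for r, row in enumerate(grid)
             for c, v in enumerate(row) if v == 2)

DIRS = ((-1, 0), (1, 0), (0, -1), (0, 1))  # up, down, left, right
stack = [start]
reached_goal = False

while stack:
    r, c = stack[-1]
    # stop as soon as the goal (3) is adjacent, as the original does
    if any(grid[r + dr][c + dc] == 3 for dr, dc in DIRS):
        reached_goal = True
        break
    # step onto the first open neighbour, marking it 4 (on current path)
    for dr, dc in DIRS:
        if grid[r + dr][c + dc] == 0:
            grid[r + dr][c + dc] = 4
            stack.append((r + dr, c + dc))
            break
    else:
        # dead end: mark 5 and backtrack, like the record's stack.pop()
        grid[r][c] = 5
        stack.pop()

# cells still marked 4 are the path found when the goal came into reach
path_len = sum(row.count(4) for row in grid)
print(reached_goal, path_len)  # -> True 5 for this example grid
```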
+{"seq_id":"364485399","text":"class RepeatingDecimal(object):\n def __new__(cls, v, rep=None):\n from amath.testing.types import intQ, isReal\n self = object.__new__(cls)\n if rep is not None:\n if not intQ(v):\n raise TypeError(\"If repeating decimal is defined, value must have an integer value\")\n if not isReal(rep):\n raise TypeError(\"Repeating decimal must be a float or a string\")\n\n else:\n if not isReal(v):\n raise TypeError(\"value must be a float or a string\")\n self.value = int(v)\n v = str(v)\n index = v.find('.')\n if index == -1:\n raise ValueError(\"Value must be a float or a string, not an int\")\n self.rep = v[index + 1:]\n\n return self\n","sub_path":"amath/DataTypes/Repeating_Decimal.py","file_name":"Repeating_Decimal.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"366459657","text":"#\n# MIT License\n#\n# Copyright (c) 2020 Airbyte\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\nfrom enum import Enum\nfrom string import Template\nfrom typing import Any, List, Mapping\n\nfrom google.ads.googleads.client import GoogleAdsClient\nfrom google.ads.googleads.v7.services.types.google_ads_service import GoogleAdsRow, SearchGoogleAdsResponse\n\nREPORT_MAPPING = {\"ad_group_ad_report\": \"ad_group_ad\"}\n\n\nclass GoogleAds:\n DEFAULT_PAGE_SIZE = 1000\n\n def __init__(self, credentials: Mapping[str, Any], customer_id: str):\n self.client = GoogleAdsClient.load_from_dict(credentials)\n self.customer_id = customer_id\n self.ga_service = self.client.get_service(\"GoogleAdsService\")\n\n def send_request(self, query: str) -> SearchGoogleAdsResponse:\n client = self.client\n search_request = client.get_type(\"SearchGoogleAdsRequest\")\n search_request.customer_id = self.customer_id\n search_request.query = query\n search_request.page_size = self.DEFAULT_PAGE_SIZE\n\n return self.ga_service.search(search_request)\n\n @staticmethod\n def get_fields_from_schema(schema: Mapping[str, Any]) -> List[str]:\n properties = schema.get(\"properties\")\n\n return [*properties]\n\n @staticmethod\n def convert_schema_into_query(schema: Mapping[str, Any], report_name: str, from_date: str, to_date: str) -> str:\n from_category = REPORT_MAPPING[report_name]\n fields = GoogleAds.get_fields_from_schema(schema)\n fields = \",\\n\".join(fields)\n\n query = Template(\n \"\"\"\n SELECT\n $fields\n FROM $from_category\n WHERE segments.date > '$from_date'\n AND segments.date < '$to_date'\n 
ORDER BY segments.date\n \"\"\"\n )\n query = query.substitute(fields=fields, from_category=from_category, from_date=from_date, to_date=to_date)\n\n return query\n\n @staticmethod\n def get_field_value(result: GoogleAdsRow, field: str) -> str:\n field_name = field.split(\".\")\n try:\n field_value = result\n for level_attr in field_name:\n field_value = field_value.__getattr__(level_attr)\n if isinstance(field_value, Enum):\n field_value = field_value.name\n field_value = str(field_value)\n except Exception:\n field_value = None\n\n return field_value\n\n @staticmethod\n def parse_single_result(schema: Mapping[str, Any], result: GoogleAdsRow):\n fields = GoogleAds.get_fields_from_schema(schema)\n single_record = {}\n for field in fields:\n single_record[field] = GoogleAds.get_field_value(result, field)\n return single_record\n","sub_path":"airbyte-integrations/connectors/source-google-ads/source_google_ads/google_ads.py","file_name":"google_ads.py","file_ext":"py","file_size_in_byte":3741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"40350678","text":"import numpy as np\n\nclass NearestNeighbor(object):#\n def __init__(self):\n pass\n \n def train(self, X, y):\n #save all trainind data\n self.Xtr = X\n self.ytr = y\n def predict(self, X):\n num_test = X.shape[0]\n Ypred = np.zeros(num_test, dtype = self.ytr.dtype)\n for i in range(num_test):\n distances = np.sum(np.abs(self.Xtr - X[i,:]), axis = 1)\n min_index = np.argmin(distances)\n Ypred[i] = self.ytr[min_index]\n return Ypred\n\ninput1 = np.load('../input/06.Kaggle_CIFAR-10_train.npz')\ninput2 = np.load('../input/06.Kaggle_CIFAR-10_test.npz')\n\ntmpXtr = input1['x_train']\nXtr, Ytr = tmpXtr.reshape(tmpXtr.shape[0],32*32*3)[100:],input1['y_train'][100:]\ntmpXte = input2['x_test']\nXte, Yte = tmpXtr.reshape(tmpXtr.shape[0],32*32*3)[:100],input1['y_train'][:100]\n\nnn = NearestNeighbor()\nnn.train(Xtr, Ytr)\nYte_predict = nn.predict(Xte)\n\nprint('accuracy : %f' % (np.mean(Yte_predict == Yte)))\n\n\n\n\n","sub_path":"NearestNeighbourCifar.py","file_name":"NearestNeighbourCifar.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"631342308","text":"import os\r\nfrom instance import Instance\r\nfrom data_parser import DataParser\r\nimport argparse\r\nimport time\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument(\"-o\", \"--option\", type=str,\r\n help=\"case to compare\")\r\nargs = parser.parse_args()\r\n\r\n'''\r\ncompares different methods:\r\n\r\n-comparewithneh - compares simulated annealing with neh, returns cmaxes for datasets and checks if optimal orders are compatible\r\n-compareinsertswap - compares neigbour generation methods, returns cmaxes for datasets and checks if optimal orders are compatible\r\n-comparecoolingoption - compares cooling forms(temprature and iteration), returns cmaxes for datasets and checks if optimal orders are compatible\r\n-comparecooling - compares cooling factors, returns cmaxes for datasets\r\n-comparemove - compares classic method with method which always calculates prob from exponent formula, returns cmaxes for datasets and checks if optimal orders are compatible\r\n-comparemovewithonlydiff - compares classic method with method which checks only different cmaxes, returns cmaxes for datasets and checks if optimal orders are compatible\r\n-comparestart - compares start orders - neh_prio and neh_result, returns cmaxes for datasets and 
checks if optimal orders are compatible\r\n\r\nclassic parameters: temperature=50, min_value=0.000001, cooling=0.8, method=swap\r\n'''\r\n\r\n\r\nif args.option == 'comparewithneh':\r\n with open('compare_ex3/comparewithneh.txt', 'w') as file:\r\n print(\"INFO: Started comparing neh with simulated annealing...\")\r\n for filename in os.listdir('data'):\r\n if filename.startswith('data'):\r\n data_parser = DataParser('data/{}'.format(filename))\r\n jobs, machines, tasks, neh_prio = data_parser.get_instance_parameters()\r\n instance = Instance('Roxanne', machines, jobs, tasks, neh_prio)\r\n instance.print_info()\r\n neh_queue, neh_cmax = instance.neh()\r\n simann_queues = []\r\n simann_cmaxes = []\r\n for i in range(3):\r\n simann_queue, simann_cmax = instance.simulated_annealing(50, instance.neh_prio, 0.000001, 0.8, 'swap')\r\n simann_cmaxes.append(simann_cmax)\r\n simann_queues.append(simann_queue)\r\n simann_cmax_avg = sum(simann_cmaxes)/len(simann_cmaxes)\r\n if neh_queue in simann_queues:\r\n compatibility = True\r\n else:\r\n compatibility = False\r\n file.write(\"--- {} ---\\n\".format(filename))\r\n file.write(\"NEH cmax: {}\\n\".format(neh_cmax))\r\n file.write(\"SIMULATED ANNEALING average cmax: {}\\n\".format(simann_cmax_avg))\r\n file.write(\"Order compatibility: {}\\n\".format(compatibility))\r\n print(\"INFO: Finished!\")\r\n\r\nelif args.option == 'compareinsertswap':\r\n with open('compare_ex3/compareinsertswap.txt', 'w') as file:\r\n print(\"INFO: Started comparing insert with swap...\")\r\n for filename in os.listdir('data'):\r\n if filename.startswith('data'):\r\n data_parser = DataParser('data/{}'.format(filename))\r\n jobs, machines, tasks, neh_prio = data_parser.get_instance_parameters()\r\n instance = Instance('Roxanne', machines, jobs, tasks, neh_prio)\r\n instance.print_info()\r\n simann_queues_insert = []\r\n simann_cmaxes_insert = []\r\n simann_queues_swap = []\r\n simann_cmaxes_swap = []\r\n for i in range(3):\r\n simann_queue_ins, simann_cmax_ins = instance.simulated_annealing(50, instance.neh_prio, 0.000001, 0.8, 'insert')\r\n simann_cmaxes_insert.append(simann_cmax_ins)\r\n simann_queues_insert.append(simann_queue_ins)\r\n simann_queue_swap, simann_cmax_swap = instance.simulated_annealing(50, instance.neh_prio, 0.000001, 0.8, 'swap')\r\n simann_cmaxes_swap.append(simann_cmax_swap)\r\n simann_queues_swap.append(simann_queue_swap)\r\n simann_cmax_swap_avg = sum(simann_cmaxes_swap)/len(simann_cmaxes_swap)\r\n simann_cmax_ins_avg = sum(simann_cmaxes_insert)/len(simann_cmaxes_insert)\r\n if len([queue for queue in simann_queues_swap if queue in simann_queues_insert]) != 0:\r\n compatibility = True\r\n else:\r\n compatibility = False\r\n file.write(\"--- {} ---\\n\".format(filename))\r\n file.write(\"SIMULATED ANNEALING insert average cmax:: {}\\n\".format(simann_cmax_ins_avg))\r\n file.write(\"SIMULATED ANNEALING swap average cmax: {}\\n\".format(simann_cmax_swap_avg))\r\n file.write(\"Order compatibility: {}\\n\".format(compatibility))\r\n print(\"INFO: Finished!\")\r\n\r\nelif args.option == 'comparecooling':\r\n with open('compare_ex3/comparecooling.txt', 'w') as file:\r\n print(\"INFO: Started comparing cooling factors...\")\r\n for filename in os.listdir('data'):\r\n if filename.startswith('data'):\r\n data_parser = DataParser('data/{}'.format(filename))\r\n jobs, machines, tasks, neh_prio = data_parser.get_instance_parameters()\r\n instance = Instance('Roxanne', machines, jobs, tasks, neh_prio)\r\n instance.print_info()\r\n coolings = [0.8, 0.9, 0.95, 
0.99]\r\n results = {}\r\n for cooling in coolings:\r\n print(\"INFO: Cooling factor: {}\".format(cooling))\r\n results['cmaxes_{}'.format(cooling)] = []\r\n results['queues_{}'.format(cooling)] = []\r\n for i in range(3):\r\n simann_queue_swap, simann_cmax_swap = instance.simulated_annealing(50, instance.neh_prio, 0.000001, cooling, 'swap')\r\n results['cmaxes_{}'.format(cooling)].append(simann_cmax_swap)\r\n results['queues_{}'.format(cooling)].append(simann_queue_swap)\r\n results['cmax_avg_{}'.format(cooling)] = sum(results['cmaxes_{}'.format(cooling)])/len(results['cmaxes_{}'.format(cooling)])\r\n file.write(\"--- {} ---\\n\".format(filename))\r\n file.write(\"SIMULATED ANNEALING cooling 0.8 average cmax:: {}\\n\".format(results['cmax_avg_0.8']))\r\n file.write(\"SIMULATED ANNEALING cooling 0.9 average cmax: {}\\n\".format(results['cmax_avg_0.9']))\r\n file.write(\"SIMULATED ANNEALING cooling 0.95 average cmax: {}\\n\".format(results['cmax_avg_0.95']))\r\n file.write(\"SIMULATED ANNEALING cooling 0.99 average cmax: {}\\n\".format(results['cmax_avg_0.99']))\r\n print(\"INFO: Finished!\")\r\n\r\nelif args.option == 'comparecoolingoption':\r\n with open('compare_ex3/comparecoolingoption.txt', 'w') as file:\r\n print(\"INFO: Started comparing cooling options...\")\r\n for filename in os.listdir('data'):\r\n if filename.startswith('data'):\r\n data_parser = DataParser('data/{}'.format(filename))\r\n jobs, machines, tasks, neh_prio = data_parser.get_instance_parameters()\r\n instance = Instance('Roxanne', machines, jobs, tasks, neh_prio)\r\n instance.print_info()\r\n cool_queues = []\r\n cool_cmaxes = []\r\n iter_cmaxes = []\r\n iter_queues = []\r\n for i in range(3):\r\n cool_queue, cool_cmax = instance.simulated_annealing(50, instance.neh_prio, 0.000001, 0.8, 'swap')\r\n cool_cmaxes.append(cool_cmax)\r\n cool_queues.append(cool_queue)\r\n iter_queue, iter_cmax = instance.simulated_annealing_iter(50, instance.neh_prio, 150, 'swap')\r\n iter_cmaxes.append(iter_cmax)\r\n iter_queues.append(iter_cmax)\r\n cool_cmax_avg = sum(cool_cmaxes)/len(cool_cmaxes)\r\n iter_cmax_avg = sum(iter_cmaxes)/len(iter_cmaxes)\r\n if len([queue for queue in cool_queues if queue in iter_queues]) != 0:\r\n compatibility = True\r\n else:\r\n compatibility = False\r\n file.write(\"--- {} ---\\n\".format(filename))\r\n file.write(\"SIMULATED ANNEALING cool option cmax: {}\\n\".format(cool_cmax_avg))\r\n file.write(\"SIMULATED ANNEALING iter option cmax: {}\\n\".format(iter_cmax_avg))\r\n file.write(\"Order compatibility: {}\\n\".format(compatibility))\r\n print(\"INFO: Finished!\")\r\n\r\nelif args.option == 'comparemove':\r\n with open('compare_ex3/comparemove.txt', 'w') as file:\r\n print(\"INFO: Started comparing move options...\")\r\n for filename in os.listdir('data'):\r\n if filename.startswith('data'):\r\n data_parser = DataParser('data/{}'.format(filename))\r\n jobs, machines, tasks, neh_prio = data_parser.get_instance_parameters()\r\n instance = Instance('Roxanne', machines, jobs, tasks, neh_prio)\r\n instance.print_info()\r\n without_reject_queues = []\r\n without_reject_cmaxes = []\r\n with_reject_cmaxes = []\r\n with_reject_queues = []\r\n for i in range(3):\r\n wo_queue, wo_cmax = instance.simulated_annealing(50, instance.neh_prio, 0.2, 0.8, 'swap')\r\n without_reject_cmaxes.append(wo_cmax)\r\n without_reject_queues.append(wo_queue)\r\n w_queue, w_cmax = instance.simulated_annealing_reject_prob(50, instance.neh_prio, 0.2, 0.8, 'swap')\r\n with_reject_cmaxes.append(w_cmax)\r\n 
with_reject_queues.append(w_queue)\r\n w_cmax_avg = sum(with_reject_cmaxes)/len(with_reject_cmaxes)\r\n wo_cmax_avg = sum(without_reject_cmaxes)/len(without_reject_cmaxes)\r\n if len([queue for queue in without_reject_queues if queue in with_reject_queues]) != 0:\r\n compatibility = True\r\n else:\r\n compatibility = False\r\n file.write(\"--- {} ---\\n\".format(filename))\r\n file.write(\"SIMULATED ANNEALING without prob=1 cmax: {}\\n\".format(w_cmax_avg))\r\n file.write(\"SIMULATED ANNEALING with prob=1 cmax: {}\\n\".format(wo_cmax_avg))\r\n file.write(\"Order compatibility: {}\\n\".format(compatibility))\r\n print(\"INFO: Finished!\")\r\n\r\nelif args.option == 'comparestart':\r\n with open('compare_ex3/comparestart.txt', 'w') as file:\r\n print(\"INFO: Started comparing start options...\")\r\n for filename in os.listdir('data'):\r\n if filename.startswith('data'):\r\n data_parser = DataParser('data/{}'.format(filename))\r\n jobs, machines, tasks, neh_prio = data_parser.get_instance_parameters()\r\n instance = Instance('Roxanne', machines, jobs, tasks, neh_prio)\r\n instance.print_info()\r\n neh_prio_queues = []\r\n neh_prio_cmaxes = []\r\n neh_result_cmaxes = []\r\n neh_result_queues = []\r\n neh_queue, neh_cmax = instance.neh()\r\n for i in range(3):\r\n np_queue, np_cmax = instance.simulated_annealing(50, instance.neh_prio, 0.000001, 0.8, 'swap')\r\n neh_prio_cmaxes.append(np_cmax)\r\n neh_prio_queues.append(np_queue)\r\n nr_queue, nr_cmax = instance.simulated_annealing(50, neh_queue, 0.000001, 0.8, 'swap')\r\n neh_result_cmaxes.append(nr_cmax)\r\n neh_result_queues.append(nr_queue)\r\n nr_cmax_avg = sum(neh_result_cmaxes)/len(neh_result_cmaxes)\r\n np_cmax_avg = sum(neh_prio_cmaxes)/len(neh_prio_cmaxes)\r\n if len([queue for queue in neh_prio_queues if queue in neh_result_queues]) != 0:\r\n compatibility = True\r\n else:\r\n compatibility = False\r\n file.write(\"--- {} ---\\n\".format(filename))\r\n file.write(\"SIMULATED ANNEALING with neh_prio start option cmax: {}\\n\".format(np_cmax_avg))\r\n file.write(\"SIMULATED ANNEALING with neh_result start option cmax: {}\\n\".format(nr_cmax_avg))\r\n file.write(\"Order compatibility: {}\\n\".format(compatibility))\r\n print(\"INFO: Finished!\")\r\n\r\nelif args.option == 'comparemovewithonlydiff':\r\n with open('compare_ex3/comparemovewithonlydiff.txt', 'w') as file:\r\n print(\"INFO: Started comparing move options (check only different cmaxes)...\")\r\n for filename in os.listdir('data'):\r\n if filename.startswith('data'):\r\n data_parser = DataParser('data/{}'.format(filename))\r\n jobs, machines, tasks, neh_prio = data_parser.get_instance_parameters()\r\n instance = Instance('Roxanne', machines, jobs, tasks, neh_prio)\r\n instance.print_info()\r\n simann_cmaxes = []\r\n simann_queues = []\r\n onlydiff_cmaxes = []\r\n onlydiff_queues = []\r\n for i in range(3):\r\n normal_queue, normal_cmax = instance.simulated_annealing(50, instance.neh_prio, 0.000001, 0.8, 'swap')\r\n simann_cmaxes.append(normal_cmax)\r\n simann_queues.append(normal_queue)\r\n onlydiff_queue, onlydiff_cmax = instance.simulated_annealing_only_diff(50, instance.neh_prio, 0.000001, 0.8, 'swap')\r\n onlydiff_cmaxes.append(onlydiff_cmax)\r\n onlydiff_queues.append(onlydiff_queue)\r\n normal_cmax_avg = sum(simann_cmaxes)/len(simann_cmaxes)\r\n onlydiff_cmax_avg = sum(onlydiff_cmaxes)/len(onlydiff_cmaxes)\r\n if len([queue for queue in simann_queues if queue in onlydiff_queues]) != 0:\r\n compatibility = True\r\n else:\r\n compatibility = False\r\n file.write(\"--- {} 
---\\n\".format(filename))\r\n file.write(\"SIMULATED ANNEALING normal move option cmax: {}\\n\".format(normal_cmax_avg))\r\n file.write(\"SIMULATED ANNEALING check only different cmaxes option cmax: {}\\n\".format(onlydiff_cmax_avg))\r\n file.write(\"Order compatibility: {}\\n\".format(compatibility))\r\n print(\"INFO: Finished!\")\r\n\r\n \r\n\r\n \r\n\r\n","sub_path":"compare_lab3.py","file_name":"compare_lab3.py","file_ext":"py","file_size_in_byte":14634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"63765353","text":"from src.music_attribute import MusicAttribute\nimport re\n\n\nclass Operation:\n ops = {\"=\", \"==\", \"!=\", \">\", \">=\", \"<\", \"<=\"}\n op_rex = \"({})\".format(\"|\".join(ops))\n\n def __init__(self, s):\n if s in self.ops:\n self.op = s\n else:\n raise Exception(\"Did not recognize operation {}\".format(s))\n\n def matches(self, criteria, value):\n if self.op == \"=\":\n return value == criteria\n if self.op == \"==\":\n return value == criteria\n if self.op == \"!=\":\n return value != criteria\n if self.op == \">\":\n return value > criteria\n if self.op == \"<\":\n return value < criteria\n if self.op == \">=\":\n return value >= criteria\n if self.op == \"<=\":\n return value <= criteria\n raise Exception(\"How did you get here\")\n\n\nclass Filter:\n rex = r\"^([a-zA-Z_]+){}(.+)$\".format(Operation.op_rex)\n\n def __init__(self, s):\n match = re.match(self.rex, s)\n if match is None:\n raise Exception(\"Couldn't parse filter {}. rex: {}\".format(s, self.rex))\n\n attr = match.group(1)\n value = match.group(3)\n self.attr = MusicAttribute.create_of_string(attr, value)\n self.op = Operation(match.group(2))\n\n def matches(self, candidate):\n # I mean, if it works\n name = self.attr._machine_name\n candidate = getattr(candidate, name)\n return self.op.matches(self.attr, candidate)\n","sub_path":"src/search_filter.py","file_name":"search_filter.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"320458543","text":"from functools import reduce\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom models.util import concat_set\nfrom models.dae import xavier_init\nfrom models.sdae import SDAE\n\n\nclass MPRP(object):\n \"\"\"\n Simple implementation of multi-problem risk prediction for ACS\n \"\"\"\n def __init__(self, n_input, hiddens, n_class, data_set_name, transfer=tf.nn.sigmoid, corrupt_level=0.3,\n optimizer=tf.train.AdamOptimizer(), epochs=1000, pre_train=True, name=\"mprp\", sess=None):\n \"\"\"implementation of `multi-problem risk predictor`\n\n All the hyper parameters that can be adjusted are `{hiddens, transfer, optimizer, epochs, corrupt_level,\n pre_train}`. 
n_input and n_class are not adjustable because these are determined by the sample data and\n the problem you are trying to solve\n\n :param n_input: number of input\n :param hiddens: list of the number of units in each hidden layer\n :param n_class: the number of classes to predict\n :param data_set_name: list of , for example : ``[\"UA\", \"SA\", \"MI\"]``\n :param transfer: transfer function\n :param corrupt_level: the ratio of input to corrupt when pre_train\n :param optimizer: tf.nn.optimizer\n :param epochs: training epochs\n :param pre_train: whether to choose pre-training\n :param name: variable scope name\n :param sess: tf.Session()\n \"\"\"\n self.n_input = n_input\n self.hiddens = hiddens\n self.n_class = n_class\n self.data_set_name = data_set_name\n self.set_num = len(self.data_set_name)\n self.corrupt_level = corrupt_level\n self.epochs = epochs\n self._pre_train = pre_train\n self.name = name\n with tf.variable_scope(self.name):\n self.transfer_func = transfer\n self.optimizer = optimizer\n\n self.sess = sess if sess is not None else tf.Session()\n\n self.sdaes = self._init_sdaes()\n\n self.inputs = dict()\n self.hidden_reps = dict()\n self.weights = dict()\n self.biases = dict()\n self.outputs = dict()\n self.prediction = dict() # model predictionion\n self.y_ = dict() # real label\n self.losses = dict()\n for _name in self.data_set_name:\n self.inputs[_name] = tf.placeholder(tf.float32, [None, n_input], name=\"{}_input\".format(_name))\n self.hidden_reps[_name] = tf.concat((self.sdaes[_name](self.inputs[_name]),\n self.sdaes[\"share\"](self.inputs[_name])),\n axis=1)\n self.weights[_name] = tf.Variable(xavier_init(2*hiddens[-1], n_class),\n dtype=tf.float32,\n name=\"{}_out_weight\".format(_name))\n self.biases[_name] = tf.Variable(tf.zeros(self.n_class), name=\"{}_out_bias\".format(_name))\n self.outputs[_name] = self.hidden_reps[_name] @ self.weights[_name] + self.biases[_name]\n self.prediction[_name] = tf.nn.softmax(self.outputs[_name])\n self.y_[_name] = tf.placeholder(tf.float32, [None, n_class], name=\"{}_label\".format(_name))\n self.losses[_name] = tf.reduce_mean(tf.losses.softmax_cross_entropy(self.y_[_name], self.outputs[_name]))\n\n # combine all the losses\n g_vars = [v for _name in self.data_set_name for v in self.sdaes[_name].vars]\n g_vars[len(g_vars):] = [self.weights[_name] for _name in self.data_set_name]\n g_vars[len(g_vars):] = [self.biases[_name] for _name in self.data_set_name]\n g_vars[len(g_vars):] = [v for v in self.sdaes['share'].vars]\n\n self.tol_loss = reduce(lambda x, y: x + y, [self.losses[k] for k in self.data_set_name])\n self.train_op = self.optimizer.minimize(self.tol_loss, var_list=g_vars)\n\n # init\n init = tf.global_variables_initializer()\n self.sess.run(init)\n\n def _init_sdaes(self):\n sdaes = dict()\n sdaes['share'] = SDAE(self.n_input,\n self.hiddens,\n transfer=self.transfer_func,\n corrupt_level=self.corrupt_level,\n optimizer=self.optimizer,\n epochs=self.epochs,\n name=\"share_sdae\",\n sess=self.sess)\n\n for name in self.data_set_name:\n sdaes[name] = SDAE(self.n_input,\n self.hiddens,\n transfer=self.transfer_func,\n corrupt_level=self.corrupt_level,\n optimizer=self.optimizer,\n epochs=self.epochs,\n name=\"{}_sdae\".format(name),\n sess=self.sess)\n return sdaes\n\n def pre_train_op(self, data_sets, batch_size):\n \"\"\"at first, pre_train the stacked denoising autoencoders\n\n :param data_sets: a dict of data_set, for example ``{'UA': set1, 'SA': set2, 'MI': set3}``, and the name of key\n is best to be uppercase\n 
:param batch_size: `batch size` of data\n \"\"\"\n for key, value in data_sets.items():\n self.sdaes[key].pre_train(value, batch_size)\n\n new_set = concat_set(data_sets)\n self.sdaes['share'].pre_train(new_set, batch_size)\n\n def train_process(self, data_sets, batch_size=128):\n if self._pre_train:\n self.pre_train_op(data_sets, batch_size)\n for key, value in data_sets.items():\n value.epoch_completed = 0\n\n # while epochs less than the epoch_complete of the data_set which has most examples, continue training the model\n name = self.data_set_name[0]\n max_n = -1\n for set_name in self.data_set_name:\n if data_sets[set_name].num_examples > max_n:\n max_n = data_sets[set_name].num_examples\n name = set_name\n\n while data_sets[name].epoch_completed < self.epochs:\n example_s = [data_sets[_name].next_batch(batch_size) for _name in self.data_set_name]\n example_dict = {}\n label_dict = {}\n for i in range(self.set_num):\n example_dict[self.inputs[self.data_set_name[i]]] = example_s[i][0]\n label_dict[self.y_[self.data_set_name[i]]] = example_s[i][1]\n feed_dict = {**example_dict, **label_dict}\n loss, _ = self.sess.run((self.tol_loss, self.train_op), feed_dict=feed_dict)\n\n def predict(self, x, **kwargs):\n \"\"\"make prediction for examples\n\n :param x: if you give the set_name ,x represents examples from a certain data set\n else the x should be given in dict() format ``{\"UA\": set1, \"SA\": set2, \"MI\": set3}``\n :param kwargs: receive the 'set_name' parameter\n :return prediction: the prediction of given examples\n \"\"\"\n if \"set_name\" in kwargs:\n set_name = kwargs['set_name']\n if set_name not in self.data_set_name:\n raise KeyError(\"This model can not make prediction for example from the {} data set\".format(set_name))\n return self.sess.run(self.prediction[set_name], feed_dict={self.inputs[set_name]: x})\n\n else:\n pred = tuple([self.sess.run(self.prediction[name], feed_dict={self.inputs[name]: x[name].examples})\n for name in self.data_set_name])\n return np.vstack(pred)\n","sub_path":"models/mprp.py","file_name":"mprp.py","file_ext":"py","file_size_in_byte":7735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"465669211","text":"## Sid Meier's Civilization 4\r\n## Copyright Firaxis Games 2005\r\n## Improvements to this screen by Almightix - thanks\r\nfrom CvPythonExtensions import *\r\nfrom PyHelpers import PyPlayer\r\nimport CvUtil\r\nimport ScreenInput\r\nimport CvScreenEnums\r\n\r\n# BUG - Better Espionage - start\r\nimport BugCore\r\nimport BugUtil\r\nimport ColorUtil\r\nimport FontUtil\r\nimport SpyUtil\r\nimport BugScreen\r\nEspionageOpt = BugCore.game.BetterEspionage\r\n# BUG - Better Espionage - end\r\n\r\n# globals\r\ngc = CyGlobalContext()\r\nArtFileMgr = CyArtFileMgr()\r\nlocalText = CyTranslator()\r\n\r\nCITYMISSION_CITY = 0\r\nCITYMISSION_MISSION = 1\r\n\r\nclass CvEspionageAdvisor:\r\n\r\n\tdef __init__(self):\r\n\t\tself.SCREEN_NAME = \"EspionageAdvisor\"\r\n\t\tself.DEBUG_DROPDOWN_ID = \"EspionageAdvisorDropdownWidget\"\r\n\t\tself.WIDGET_ID = \"EspionageAdvisorWidget\"\r\n\t\tself.WIDGET_HEADER = \"EspionageAdvisorWidgetHeader\"\r\n\t\tself.EXIT_ID = \"EspionageAdvisorExitWidget\"\r\n\t\tself.BACKGROUND_ID = \"EspionageAdvisorBackground\"\r\n\t\tself.X_SCREEN = 500\r\n\t\tself.Y_SCREEN = 396\r\n\t\tself.W_SCREEN = 1024\r\n\t\tself.H_SCREEN = 768\r\n\t\tself.Y_TITLE = 12\r\n\t\tself.BORDER_WIDTH = 4\r\n\t\tself.PANE_HEIGHT = 450\r\n\t\tself.PANE_WIDTH = 283\r\n\t\tself.X_SLIDERS = 
50\r\n\t\tself.X_INCOME = 373\r\n\t\tself.X_EXPENSES = 696\r\n\t\tself.Y_TREASURY = 90\r\n\t\tself.H_TREASURY = 100\r\n\t\tself.Y_LOCATION = 230\r\n\t\tself.Y_SPACING = 30\r\n\t\tself.TEXT_MARGIN = 15\r\n\t\tself.Z_BACKGROUND = -2.1\r\n\t\tself.Z_CONTROLS = self.Z_BACKGROUND - 0.2\r\n\t\tself.DZ = -0.2\r\n\r\n\t\tself.X_EXIT = 994\r\n\t\tself.Y_EXIT = 726\r\n\r\n\t\tself.nWidgetCount = 0\r\n\r\n\t\tself.iDirtyBit = 0\r\n\r\n\t\tself.iTargetPlayer = -1\r\n\r\n\t\tself.iActiveCityID = -1\r\n\t\tself.iActiveMissionID = -1\r\n\r\n\t\tself.bShowAISpending = True\r\n\t\tself.ShowAISpendingWidget = \"\"\r\n\r\n\t\tself.drawMissionTabConstantsDone = 0\r\n\t\tself.drawSpyvSpyTabConstantsDone = 0\r\n\t\tself.CityMissionToggle = CITYMISSION_CITY\r\n\r\n\t\tself.MissionsTabWidget = self.SCREEN_NAME + \"MissionTab\"\r\n\t\tself.SpyvSpyTabWidget = self.SCREEN_NAME + \"SpyvSpyTab\"\r\n\r\n\t\tself.iIncreaseButtonID = 555\r\n\t\tself.iDecreaseButtonID = 556\r\n\t\tself.iLeaderImagesID = 456\r\n\r\n\t\t# mission / city widgets - initialized to avoid errors with 'handle input'\r\n\t\t# they get set to proper values in def drawMissionTab(self)\r\n\t\tself.szMissionsTitleText = \"\"\r\n\t\tself.szCitiesTitleText = \"\"\r\n\r\n\t\tself.EPScreenTab = -1\r\n\r\n\tdef getScreen(self):\r\n\t\treturn CyGInterfaceScreen(self.SCREEN_NAME, CvScreenEnums.ESPIONAGE_ADVISOR)\r\n\r\n\tdef interfaceScreen (self):\r\n\t\tself.iTargetPlayer = -1\r\n\t\tself.iActiveCityID = -1\r\n\t\tself.iActiveMissionID = -1\r\n\t\tself.iActivePlayer = CyGame().getActivePlayer()\r\n\r\n\t\tscreen = self.getScreen()\r\n\t\tif screen.isActive():\r\n\t\t\treturn\r\n\t\tscreen.setRenderInterfaceOnly(True);\r\n\t\tscreen.showScreen( PopupStates.POPUPSTATE_IMMEDIATE, False)\r\n\r\n# attempting to call BugScreen class here\r\n\r\n\t\tself.EPScreen = BugScreen.BugScreen(self.SCREEN_NAME, screen, self.W_SCREEN, self.H_SCREEN)\r\n\t\tself.EPScreen.addBackground(self.BACKGROUND_ID, \"SCREEN_BG_OPAQUE\")\r\n\t\tself.EPScreen.addTitle(self.WIDGET_HEADER, \"TXT_KEY_ESPIONAGE_SCREEN\", \"4b\", True, self.X_SCREEN, self.Y_TITLE, self.Z_CONTROLS)\r\n\r\n\t\tbShow = EspionageOpt.isEnabled()\r\n\t\tself.EPScreen.addTab(self.MissionsTabWidget, \"TXT_KEY_ESPIONAGE_MISSIONS_TAB\", \"4\", True, 50, self.Y_EXIT, 0, bShow, True, True, self.drawMissionTab, self.refreshMissionTab, WidgetTypes.WIDGET_GENERAL)\r\n\t\tself.EPScreen.addTab(self.SpyvSpyTabWidget, \"TXT_KEY_ESPIONAGE_SPYVSPY_TAB\", \"4\", False, 350, self.Y_EXIT, 0, bShow, True, False, self.drawSpyvSpyTab, None, WidgetTypes.WIDGET_GENERAL)\r\n\t\tself.EPScreen.addTab(self.EXIT_ID, \"TXT_KEY_PEDIA_SCREEN_EXIT\", \"4\", True, self.X_EXIT, self.Y_EXIT, self.Z_CONTROLS, True, True, False, None, None, WidgetTypes.WIDGET_CLOSE_SCREEN)\r\n\t\tself.EPScreen.evenlySpaceTabs()\r\n\r\n\t\tif (self.EPScreenTab == -1\r\n\t\tor self.EPScreenTab == 1):\r\n\t\t\tself.EPScreen.updateTabStatus(self.MissionsTabWidget)\r\n\t\t\tself.EPScreenTab = 1\r\n\t\telif self.EPScreenTab == 2:\r\n\t\t\tself.EPScreen.updateTabStatus(self.SpyvSpyTabWidget)\r\n\t\t\tself.EPScreenTab = 2\r\n\r\n\t\tself.EPScreen.draw()\r\n\r\n\t\tif (CyGame().isDebugMode()):\r\n\t\t\tself.iDebugDropdownID = 554\r\n\t\t\tself.szDropdownName = self.DEBUG_DROPDOWN_ID\r\n\t\t\tscreen.addDropDownBoxGFC(self.szDropdownName, 22, 12, 300, WidgetTypes.WIDGET_GENERAL, self.iDebugDropdownID, -1, FontTypes.GAME_FONT)\r\n\t\t\tfor j in range(gc.getMAX_PLAYERS()):\r\n\t\t\t\tif (gc.getPlayer(j).isAlive()):\r\n\t\t\t\t\tscreen.addPullDownString(self.szDropdownName, 
gc.getPlayer(j).getName(), j, j, False )\r\n\r\n\t\t# draw the contents\r\n\t\tself.drawContents()\r\n\r\n\tdef drawContents(self):\r\n\r\n\t\tself.deleteAllWidgets()\r\n\r\n\t\tscreen = self.getScreen()\r\n\r\n\t\tif not EspionageOpt.isEnabled():\r\n\t\t\tself.CityMissionToggle = CITYMISSION_CITY\r\n\t\t\tself.EPScreen.updateTabStatus(self.MissionsTabWidget)\r\n\r\n\t\t# draw tab details\r\n\t\tself.EPScreen.drawActiveTab()\r\n\t\tself.EPScreen.refreshActiveTab()\r\n\r\n\t\t# draw tabs\r\n\t\tself.EPScreen.drawTabs()\r\n\r\n\tdef drawMissionTab(self):\r\n\t\tscreen = self.getScreen()\r\n\r\n#\t\tBugUtil.debug(\"CvEspionage Advisor: drawMissionsTab\")\r\n\r\n\t\tpActivePlayer = gc.getPlayer(self.iActivePlayer)\r\n\t\tpActiveTeam = gc.getTeam(pActivePlayer.getTeam())\r\n\r\n\t\tself.drawMissionTabConstants()\r\n\r\n\t\tself.szLeftPaneWidget = self.getNextWidgetName()\r\n\t\tscreen.addPanel( self.szLeftPaneWidget, \"\", \"\", true, true,\r\n\t\t\tself.X_LEFT_PANE, self.Y_LEFT_PANE, self.W_LEFT_PANE, self.H_LEFT_PANE, PanelStyles.PANEL_STYLE_MAIN)\r\n\r\n\t\tself.szScrollPanel = self.getNextWidgetName()\r\n\t\tscreen.addPanel( self.szScrollPanel, \"\", \"\", true, true,\r\n\t\t\tself.X_SCROLL, self.Y_SCROLL, self.W_SCROLL, self.H_SCROLL, PanelStyles.PANEL_STYLE_EMPTY)\r\n\r\n\t\tself.aiKnownPlayers = []\r\n\t\tself.aiUnknownPlayers = []\r\n\t\tself.iNumEntries= 0\r\n\r\n\t\tfor iLoop in range(gc.getMAX_PLAYERS()):\r\n\t\t\tpPlayer = gc.getPlayer(iLoop)\r\n\t\t\tif (pPlayer.getTeam() != pActivePlayer.getTeam() and not pPlayer.isBarbarian()):\r\n\t\t\t\tif (pPlayer.isAlive()):\r\n\t\t\t\t\tif (pActiveTeam.isHasMet(pPlayer.getTeam())):\r\n\t\t\t\t\t\tself.aiKnownPlayers.append(iLoop)\r\n\t\t\t\t\t\tself.iNumEntries = self.iNumEntries + 1\r\n\r\n\t\t\t\t\t\tif (self.iTargetPlayer == -1):\r\n\t\t\t\t\t\t\tself.iTargetPlayer = iLoop\r\n\r\n\t\twhile(self.iNumEntries < 17):\r\n\t\t\tself.iNumEntries = self.iNumEntries + 1\r\n\t\t\tself.aiUnknownPlayers.append(self.iNumEntries)\r\n\r\n\t\t############################\r\n\t\t#### Total EPs Per Turn Text\r\n\t\t############################\r\n\r\n\t\tif not EspionageOpt.isEnabled():\r\n\t\t\tself.szTotalPaneWidget = self.getNextWidgetName()\r\n\t\t\tscreen.addPanel( self.szTotalPaneWidget, \"\", \"\", true, true,\r\n\t\t\t\tself.X_TOTAL_PANE, self.Y_TOTAL_PANE, self.W_TOTAL_PANE, self.H_TOTAL_PANE, PanelStyles.PANEL_STYLE_MAIN )\r\n\r\n\t\t\tself.szMakingText = self.getNextWidgetName()\r\n\t\t\tszText = u\"\" + localText.getText(\"TXT_KEY_ESPIONAGE_SCREEN_TOTAL_NUM_EPS\", (pActivePlayer.getCommerceRate(CommerceTypes.COMMERCE_ESPIONAGE), )) + \"\"\r\n\t\t\tscreen.setLabel(self.szMakingText, \"Background\", szText, CvUtil.FONT_LEFT_JUSTIFY, self.X_MAKING_TEXT, self.Y_MAKING_TEXT, self.Z_CONTROLS, FontTypes.GAME_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 )\r\n\r\n\t\t############################\r\n\t\t#### Right Panel\r\n\t\t############################\r\n\r\n\t\tself.szRightPaneWidget = self.getNextWidgetName()\r\n\t\tscreen.addPanel( self.szRightPaneWidget, \"\", \"\", true, true,\r\n\t\t\tself.X_RIGHT_PANE, self.Y_RIGHT_PANE, self.W_RIGHT_PANE, self.H_RIGHT_PANE, PanelStyles.PANEL_STYLE_MAIN )\r\n\r\n\t\tif (self.iTargetPlayer != -1):\r\n\t\t\tself.szCitiesTitleText = self.getNextWidgetName()\r\n\t\t\tif self.CityMissionToggle == CITYMISSION_CITY:\r\n\t\t\t\tszText = u\"\" + localText.getText(\"TXT_KEY_CONCEPT_CITIES\", ()) + \"\"\r\n\t\t\telse:\r\n\t\t\t\tszText = u\"\" + localText.getText(\"TXT_KEY_ESPIONAGE_SCREEN_MISSIONS\", ()) + 
\"\"\r\n\r\n\t\t\tif EspionageOpt.isEnabled():\r\n\t\t\t\tszText = localText.changeTextColor(szText, gc.getInfoTypeForString(\"COLOR_YELLOW\"))\r\n\t\t\t\tscreen.setText(self.szCitiesTitleText, \"Background\", szText, CvUtil.FONT_LEFT_JUSTIFY, self.X_CITY_LIST, self.Y_CITY_LIST - 40, self.Z_CONTROLS, FontTypes.GAME_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 )\r\n\t\t\telse:\r\n\t\t\t\tscreen.setLabel(self.szCitiesTitleText, \"Background\", szText, CvUtil.FONT_LEFT_JUSTIFY, self.X_CITY_LIST, self.Y_CITY_LIST - 40, self.Z_CONTROLS, FontTypes.GAME_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 )\r\n\r\n\t\t\tself.szEffectsTitleText = self.getNextWidgetName()\r\n\t\t\tszText = u\"\" + localText.getText(\"TXT_KEY_ESPIONAGE_SCREEN_PASSIVE_EFFECTS\", ()) + \"\"\r\n\t\t\tscreen.setLabel(self.szEffectsTitleText, \"Background\", szText, CvUtil.FONT_LEFT_JUSTIFY, self.X_EFFECTS_LIST, self.Y_EFFECTS_LIST - 40, self.Z_CONTROLS, FontTypes.GAME_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 )\r\n\r\n\t\t\tself.szMissionsTitleText = self.getNextWidgetName()\r\n\t\t\tif self.CityMissionToggle == CITYMISSION_MISSION:\r\n\t\t\t\tszText = u\"\" + localText.getText(\"TXT_KEY_CONCEPT_CITIES\", ()) + \"\"\r\n\t\t\telse:\r\n\t\t\t\tszText = u\"\" + localText.getText(\"TXT_KEY_ESPIONAGE_SCREEN_MISSIONS\", ()) + \"\"\r\n\r\n\t\t\tif EspionageOpt.isEnabled():\r\n\t\t\t\tszText = localText.changeTextColor(szText, gc.getInfoTypeForString(\"COLOR_YELLOW\"))\r\n\t\t\t\tscreen.setText(self.szMissionsTitleText, \"Background\", szText, CvUtil.FONT_LEFT_JUSTIFY, self.X_MISSIONS_LIST, self.Y_MISSIONS_LIST - 40, self.Z_CONTROLS, FontTypes.GAME_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 )\r\n\t\t\telse:\r\n\t\t\t\tscreen.setLabel(self.szMissionsTitleText, \"Background\", szText, CvUtil.FONT_LEFT_JUSTIFY, self.X_MISSIONS_LIST, self.Y_MISSIONS_LIST - 40, self.Z_CONTROLS, FontTypes.GAME_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 )\r\n\r\n\t\t\tself.szEffectsCostTitleText = self.getNextWidgetName()\r\n\t\t\tszText = u\"\" + localText.getText(\"TXT_KEY_ESPIONAGE_SCREEN_COST\", ()) + \"\"\r\n\t\t\tscreen.setLabel(self.szEffectsCostTitleText, \"Background\", szText, CvUtil.FONT_LEFT_JUSTIFY, self.X_EFFECTS_COSTS_LIST, self.Y_EFFECTS_COSTS_LIST - 40, self.Z_CONTROLS, FontTypes.GAME_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 )\r\n\r\n\t\t\tself.szMissionsCostTitleText = self.getNextWidgetName()\r\n\t\t\tszText = u\"\" + localText.getText(\"TXT_KEY_ESPIONAGE_SCREEN_COST\", ()) + \"\"\r\n\t\t\tscreen.setLabel(self.szMissionsCostTitleText, \"Background\", szText, CvUtil.FONT_LEFT_JUSTIFY, self.X_MISSIONS_COSTS_LIST, self.Y_MISSIONS_COSTS_LIST - 40, self.Z_CONTROLS, FontTypes.GAME_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 )\r\n\r\n\t\t\t############################\r\n\t\t\t#### Left Leaders Panel\r\n\t\t\t############################\r\n\r\n\t\t\tself.drawMissionTab_LeftLeaderPanal(screen)\r\n\r\n\t\treturn\r\n\r\n\tdef drawMissionTabConstants(self):\r\n\r\n\t\t# skip this is we have already done it\r\n\t\tif EspionageOpt.isEnabled():\r\n\t\t\tif self.drawMissionTabConstantsDone == 2:\r\n\t\t\t\treturn\r\n\t\telse:\r\n\t\t\tif self.drawMissionTabConstantsDone == 1:\r\n\t\t\t\treturn\r\n\r\n\t\tif EspionageOpt.isEnabled():\r\n\t\t\tself.drawMissionTabConstantsDone = 2\r\n\t\telse:\r\n\t\t\tself.drawMissionTabConstantsDone = 1\r\n\r\n\t\tself.MissionLeaderPanel_X_LeaderIcon = 21\r\n\t\tself.MissionLeaderPanel_X_LeaderNamePanel = 5\r\n\t\tself.MissionLeaderPanel_X_LeaderName = 55\r\n\t\tself.MissionLeaderPanel_X_Multiplier = 
190\r\n\t\tself.MissionLeaderPanel_X_CounterEP = 220\r\n\t\tself.MissionLeaderPanel_X_EPoints = 300\r\n\t\tself.MissionLeaderPanel_X_PassiveMissions = 380\r\n\t\tself.MissionLeaderPanel_X_WghtInc = 53\r\n\t\tself.MissionLeaderPanel_X_WghtDec = 68\r\n\t\tself.MissionLeaderPanel_X_Wght = 85\r\n\t\tself.MissionLeaderPanel_X_EPointsTurn = self.MissionLeaderPanel_X_EPoints + 4\r\n\t\tself.MissionLeaderPanel_X_EspionageIcon = 3\r\n\r\n\t\tif EspionageOpt.isEnabled():\r\n\t\t\tself.MissionLeaderPanel_X_EPointsTurn = self.MissionLeaderPanel_X_EPoints + 4\r\n\t\telse:\r\n\t\t\tself.MissionLeaderPanel_X_EPointsTurn = 247\r\n\r\n\t\tself.MissionLeaderPanelTopRow = 0\r\n\t\tself.MissionLeaderPanelBottomRow = 15\r\n\t\tself.MissionLeaderPanelMiddle = 6\r\n\r\n\t\t# mission constants\r\n\t\tfor iMissionLoop in range(gc.getNumEspionageMissionInfos()):\r\n\t\t\tpMission = gc.getEspionageMissionInfo(iMissionLoop)\r\n\t\t\tif (pMission.getCost() != -1\r\n\t\t\tand pMission.isPassive()):\r\n\t\t\t\tif pMission.isInvestigateCity():\r\n\t\t\t\t\tself.MissionInvestigateCity = iMissionLoop\r\n\t\t\t\telif pMission.isSeeDemographics():\r\n\t\t\t\t\tself.MissionSeeDemo = iMissionLoop\r\n\t\t\t\telif pMission.isSeeResearch():\r\n\t\t\t\t\tself.MissionSeeResearch = iMissionLoop\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.MissionCityVisibility = iMissionLoop\r\n\r\n\t\tif EspionageOpt.isEnabled():\r\n\t\t\tself.X_LEFT_PANE = 25\r\n\t\t\tself.Y_LEFT_PANE = 70 - 5\r\n\t\t\tself.W_LEFT_PANE = 400 + 60\r\n\t\t\tself.H_LEFT_PANE = 620\r\n\r\n\t\t\tself.X_SCROLL = self.X_LEFT_PANE + 20\r\n\t\t\tself.Y_SCROLL= 90 - 5\r\n\t\t\tself.W_SCROLL= 360 + 60\r\n\t\t\tself.H_SCROLL= 580\r\n\r\n\t\t\t############################\r\n\t\t\t#### Right Panel\r\n\t\t\t############################\r\n\r\n\t\t\tself.X_RIGHT_PANE = self.X_LEFT_PANE + self.W_LEFT_PANE + 10\r\n\t\t\tself.Y_RIGHT_PANE = self.Y_LEFT_PANE\r\n\t\t\tself.W_RIGHT_PANE = 550 - 50\r\n\t\t\tself.H_RIGHT_PANE = self.H_LEFT_PANE\r\n\r\n\t\t\tself.X_CITY_LIST = self.X_RIGHT_PANE + 20\r\n\t\t\tself.Y_CITY_LIST = self.Y_RIGHT_PANE + 60\r\n\t\t\tself.W_CITY_LIST = 160\r\n\t\t\tself.H_CITY_LIST = self.H_RIGHT_PANE - 90\r\n\r\n\t\t\tself.X_EFFECTS_LIST = self.X_CITY_LIST + self.W_CITY_LIST + 10\r\n\t\t\tself.Y_EFFECTS_LIST = self.Y_CITY_LIST\r\n\t\t\tself.W_EFFECTS_LIST = 210\r\n\t\t\tself.H_EFFECTS_LIST = 100\r\n\r\n\t\t\tself.X_EFFECTS_COSTS_LIST = self.X_EFFECTS_LIST + self.W_EFFECTS_LIST + 10\r\n\t\t\tself.Y_EFFECTS_COSTS_LIST = self.Y_EFFECTS_LIST\r\n\t\t\tself.W_EFFECTS_COSTS_LIST = 60\r\n\t\t\tself.H_EFFECTS_COSTS_LIST = self.H_EFFECTS_LIST\r\n\r\n\t\t\tself.X_MISSIONS_LIST = self.X_CITY_LIST + self.W_CITY_LIST + 10\r\n\t\t\tself.Y_MISSIONS_LIST = self.Y_EFFECTS_LIST + self.H_EFFECTS_LIST + 50\r\n\t\t\tself.W_MISSIONS_LIST = self.W_EFFECTS_LIST\r\n\t\t\tself.H_MISSIONS_LIST = self.H_CITY_LIST - + self.H_EFFECTS_LIST - 50\r\n\r\n\t\t\tself.X_MISSIONS_COSTS_LIST = self.X_MISSIONS_LIST + self.W_MISSIONS_LIST + 10\r\n\t\t\tself.Y_MISSIONS_COSTS_LIST = self.Y_MISSIONS_LIST\r\n\t\t\tself.W_MISSIONS_COSTS_LIST = self.W_EFFECTS_COSTS_LIST\r\n\t\t\tself.H_MISSIONS_COSTS_LIST = self.H_MISSIONS_LIST\r\n\r\n\t\t\t############################\r\n\t\t\t#### Left Leaders Panel\r\n\t\t\t############################\r\n\r\n\t\t\tself.W_LEADER = 128\r\n\t\t\tself.H_LEADER = 128\r\n\r\n\t\t\tself.W_NAME_PANEL = 220\r\n\t\t\tself.H_NAME_PANEL = 30\r\n\r\n\t\telse:\r\n\t\t\tself.X_LEFT_PANE = 25\r\n\t\t\tself.Y_LEFT_PANE = 70\r\n\t\t\tself.W_LEFT_PANE = 400\r\n\t\t\tself.H_LEFT_PANE = 
620\r\n\r\n\t\t\tself.X_SCROLL = self.X_LEFT_PANE + 20\r\n\t\t\tself.Y_SCROLL= 90\r\n\t\t\tself.W_SCROLL= 360\r\n\t\t\tself.H_SCROLL= 580\r\n\r\n\t\t\t############################\r\n\t\t\t#### Total EPs Per Turn Text\r\n\t\t\t############################\r\n\r\n\t\t\tself.X_TOTAL_PANE = self.X_LEFT_PANE + self.W_LEFT_PANE + 20\r\n\t\t\tself.Y_TOTAL_PANE = self.Y_LEFT_PANE\r\n\t\t\tself.W_TOTAL_PANE = 550\r\n\t\t\tself.H_TOTAL_PANE = 60\r\n\r\n\t\t\tself.X_MAKING_TEXT = 490\r\n\t\t\tself.Y_MAKING_TEXT = 85\r\n\r\n\t\t\t############################\r\n\t\t\t#### Right Panel\r\n\t\t\t############################\r\n\r\n\t\t\tself.X_RIGHT_PANE = self.X_TOTAL_PANE\r\n\t\t\tself.Y_RIGHT_PANE = self.Y_TOTAL_PANE + self.H_TOTAL_PANE + 20\r\n\t\t\tself.W_RIGHT_PANE = self.W_TOTAL_PANE\r\n\t\t\tself.H_RIGHT_PANE = self.H_LEFT_PANE - self.H_TOTAL_PANE - 20\r\n\r\n\t\t\tself.X_CITY_LIST = self.X_RIGHT_PANE + 40\r\n\t\t\tself.Y_CITY_LIST = self.Y_RIGHT_PANE + 60\r\n\t\t\tself.W_CITY_LIST = 160\r\n\t\t\tself.H_CITY_LIST = self.H_RIGHT_PANE - 90\r\n\r\n\t\t\tself.X_EFFECTS_LIST = self.X_CITY_LIST + self.W_CITY_LIST + 20\r\n\t\t\tself.Y_EFFECTS_LIST = self.Y_CITY_LIST\r\n\t\t\tself.W_EFFECTS_LIST = 210\r\n\t\t\tself.H_EFFECTS_LIST = (self.H_CITY_LIST / 3) - 50\r\n\r\n\t\t\tself.X_EFFECTS_COSTS_LIST = self.X_EFFECTS_LIST + self.W_EFFECTS_LIST + 10\r\n\t\t\tself.Y_EFFECTS_COSTS_LIST = self.Y_EFFECTS_LIST\r\n\t\t\tself.W_EFFECTS_COSTS_LIST = 60\r\n\t\t\tself.H_EFFECTS_COSTS_LIST = self.H_EFFECTS_LIST\r\n\r\n\t\t\tself.X_MISSIONS_LIST = self.X_CITY_LIST + self.W_CITY_LIST + 20\r\n\t\t\tself.Y_MISSIONS_LIST = self.Y_EFFECTS_LIST + self.H_EFFECTS_LIST + 50\r\n\t\t\tself.W_MISSIONS_LIST = self.W_EFFECTS_LIST\r\n\t\t\tself.H_MISSIONS_LIST = (self.H_CITY_LIST * 2 / 3) #- 45\r\n\r\n\t\t\tself.X_MISSIONS_COSTS_LIST = self.X_MISSIONS_LIST + self.W_MISSIONS_LIST + 10\r\n\t\t\tself.Y_MISSIONS_COSTS_LIST = self.Y_MISSIONS_LIST\r\n\t\t\tself.W_MISSIONS_COSTS_LIST = self.W_EFFECTS_COSTS_LIST\r\n\t\t\tself.H_MISSIONS_COSTS_LIST = self.H_MISSIONS_LIST\r\n\r\n\t\t\t############################\r\n\t\t\t#### Left Leaders Panel\r\n\t\t\t############################\r\n\r\n\t\t\tself.W_LEADER = 128\r\n\t\t\tself.H_LEADER = 128\r\n\r\n\t\t\tself.W_NAME_PANEL = 220\r\n\t\t\tself.H_NAME_PANEL = 30\r\n\r\n\t\treturn\r\n\r\n\tdef drawMissionTab_LeftLeaderPanal(self, screen):\r\n\t\tpActivePlayer = gc.getPlayer(self.iActivePlayer)\r\n\t\tpActiveTeam = gc.getTeam(pActivePlayer.getTeam())\r\n\r\n\t\t# the following are needed for each leader\r\n\t\tself.MissionLeaderPanelWidgets = [\"\"] * gc.getMAX_PLAYERS() # updated by refresh\r\n\t\tself.EPWeightWidgets = [\"\"] * gc.getMAX_PLAYERS() # updated by refresh\r\n\t\tself.EPSpendingWidgets = [\"\"] * gc.getMAX_PLAYERS() # updated by refresh\r\n\t\tself.EPIconWidgets = [\"\"] * gc.getMAX_PLAYERS() # updated by refresh\r\n\t\tself.LeaderImageWidgets = [\"\"] * gc.getMAX_PLAYERS() # updated by handle input\r\n\r\n\t\t# The following only occur once\r\n\t\tself.CityListBoxWidget = self.getNextWidgetName() # updated by refresh\r\n\t\tself.EffectsTableWidget = self.getNextWidgetName() # updated by refresh\r\n\t\tself.MissionsTableWidget = self.getNextWidgetName() # updated by refresh\r\n\r\n\t\t# only required for BUG\r\n\t\tif EspionageOpt.isEnabled():\r\n\t\t\tiRatioColor = EspionageOpt.getDefaultRatioColor()\r\n\t\t\tiGoodRatioColor = EspionageOpt.getGoodRatioColor()\r\n\t\t\tiBadRatioColor = EspionageOpt.getBadRatioColor()\r\n\r\n\t\t\t# show AI spending 
toggle\r\n\t\t\tself.ShowAISpendingWidget = self.getNextWidgetName()\r\n\t\t\tszText = u\"\" + localText.getText(\"TXT_KEY_ESPIONAGE_AI_SPENDING_TOGGLE\", ()) + \"\"\r\n\t\t\tif self.bShowAISpending:\r\n\t\t\t\tszText = localText.changeTextColor(szText, gc.getInfoTypeForString(\"COLOR_YELLOW\"))\r\n\t\t\tscreen.setText(self.ShowAISpendingWidget, \"Background\", szText, CvUtil.FONT_RIGHT_JUSTIFY, self.X_LEFT_PANE + self.W_LEFT_PANE - 20, self.Y_LEFT_PANE + 8, self.Z_CONTROLS, FontTypes.GAME_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)\r\n\r\n\t\tfor iPlayerID in self.aiKnownPlayers:\r\n\t\t\tpTargetPlayer = gc.getPlayer(iPlayerID)\r\n\t\t\tiTargetTeam = pTargetPlayer.getTeam()\r\n\r\n\t\t\t# leader panel / container\r\n\t\t\tszLeaderPanel = self.getNextWidgetName()\r\n\t\t\tself.MissionLeaderPanelWidgets[iPlayerID] = szLeaderPanel\r\n\r\n\t\t\tscreen.attachPanel(self.szScrollPanel, szLeaderPanel, \"\", \"\", True, False, PanelStyles.PANEL_STYLE_STANDARD)\r\n\r\n\t\t\t# EP Spending, Weight. EP Icon - all of these are handled by the 'refresh' procedure\r\n\t\t\tself.EPWeightWidgets[iPlayerID] = self.getNextWidgetName()\r\n\t\t\tself.EPSpendingWidgets[iPlayerID] = self.getNextWidgetName()\r\n\t\t\tself.EPIconWidgets[iPlayerID] = self.getNextWidgetName()\r\n\r\n\t\t\t# leader image\r\n\t\t\tszName = self.getNextWidgetName()\r\n\t\t\tscreen.attachSeparator(szLeaderPanel, szName, true, 30)\r\n\r\n\t\t\tszName = self.getNextWidgetName()\r\n\t\t\tself.LeaderImageWidgets[iPlayerID] = szName # updated by handle input so needs to be stored\r\n\r\n\t\t\tscreen.addCheckBoxGFCAt(szLeaderPanel, szName, gc.getLeaderHeadInfo(gc.getPlayer(iPlayerID).getLeaderType()).getButton(), ArtFileMgr.getInterfaceArtInfo(\"BUTTON_HILITE_SQUARE\").getPath(),\r\n\t\t\t\tself.MissionLeaderPanel_X_LeaderIcon, self.MissionLeaderPanelTopRow, 32, 32, WidgetTypes.WIDGET_GENERAL, self.iLeaderImagesID, iPlayerID, ButtonStyles.BUTTON_STYLE_LABEL, False)\r\n\t\t\tif (self.iTargetPlayer == iPlayerID):\r\n\t\t\t\tscreen.setState(szName, true)\r\n\r\n\t\t\t# leader name\r\n\t\t\tszName = self.getNextWidgetName()\r\n\t\t\tscreen.attachPanelAt( szLeaderPanel, szName, \"\", \"\", true, false, PanelStyles.PANEL_STYLE_MAIN,\r\n\t\t\t\tself.MissionLeaderPanel_X_LeaderNamePanel, self.MissionLeaderPanelTopRow, self.W_NAME_PANEL, self.H_NAME_PANEL, WidgetTypes.WIDGET_GENERAL, -1, -1 )\r\n\r\n\t\t\tszName = self.getNextWidgetName()\r\n\r\n\t\t\tif EspionageOpt.isEnabled():\r\n\t\t\t\tszTempBuffer = u\"%s\" %(pTargetPlayer.getPlayerTextColorR(), pTargetPlayer.getPlayerTextColorG(), pTargetPlayer.getPlayerTextColorB(), pTargetPlayer.getPlayerTextColorA(), pTargetPlayer.getName())\r\n\t\t\telse:\r\n\t\t\t\tszMultiplier = self.getEspionageMultiplierText(self.iActivePlayer, iPlayerID)\r\n\t\t\t\tszTempBuffer = u\"%s (%s)\" %(pTargetPlayer.getPlayerTextColorR(), pTargetPlayer.getPlayerTextColorG(), pTargetPlayer.getPlayerTextColorB(), pTargetPlayer.getPlayerTextColorA(), pTargetPlayer.getName(), szMultiplier)\r\n\r\n\t\t\tszText = u\"\" + szTempBuffer + \"\"\r\n\t\t\tscreen.setLabelAt( szName, szLeaderPanel, szText, 0, self.MissionLeaderPanel_X_LeaderName, self.MissionLeaderPanelTopRow, self.Z_CONTROLS, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 );\r\n\r\n\t\t\t# EPoints Multiplier\r\n\t\t\tif EspionageOpt.isEnabled():\r\n\t\t\t\tiMultiplier = self.getEspionageMultiplier(self.iActivePlayer, iPlayerID)\r\n\t\t\t\tszName = self.getNextWidgetName()\r\n\t\t\t\tszText = u\"%i%s\" %(iMultiplier, \"%\")\r\n\r\n\t\t\t\tif (iBadRatioColor >= 0 and 
iMultiplier >= EspionageOpt.getBadRatioCutoff()):\r\n\t\t\t\t\tszText = localText.changeTextColor(szText, iBadRatioColor)\r\n\t\t\t\telif (iGoodRatioColor >= 0 and iMultiplier <= EspionageOpt.getGoodRatioCutoff()):\r\n\t\t\t\t\tszText = localText.changeTextColor(szText, iGoodRatioColor)\r\n\t\t\t\telif (iRatioColor >= 0):\r\n\t\t\t\t\tszText = localText.changeTextColor(szText, iRatioColor)\r\n\r\n\t\t\t\tscreen.setLabelAt( szName, szLeaderPanel, szText, CvUtil.FONT_RIGHT_JUSTIFY, self.MissionLeaderPanel_X_Multiplier, self.MissionLeaderPanelTopRow, self.Z_CONTROLS, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 );\r\n\r\n\t\t\t# EPoints Multiplier Against\r\n\t\t\tif (EspionageOpt.isEnabled()\r\n\t\t\tand EspionageOpt.isShowCalculatedInformation()\r\n\t\t\tand self.bShowAISpending):\r\n\t\t\t\tiMultiplier = self.getEspionageMultiplier(iPlayerID, self.iActivePlayer)\r\n\t\t\t\tszName = self.getNextWidgetName()\r\n\t\t\t\tszText = u\"%i%s\" %(iMultiplier, \"%\")\r\n\t\t\t\tscreen.setLabelAt( szName, szLeaderPanel, szText, CvUtil.FONT_RIGHT_JUSTIFY, self.MissionLeaderPanel_X_Multiplier, self.MissionLeaderPanelBottomRow, self.Z_CONTROLS, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 );\r\n\r\n\t\t\t# Counter Espionage (both for and against)\r\n\t\t\tif EspionageOpt.isEnabled():\r\n\t\t\t\t# for\r\n\t\t\t\tiCounterEsp = self.getCounterEspionageTurnsLeft(self.iActivePlayer, iPlayerID)\r\n\t\t\t\tself.showCounterEspionage(screen, szLeaderPanel, iCounterEsp, self.MissionLeaderPanelTopRow)\r\n\r\n\t\t\t\t# against\r\n\t\t\t\tif self.bShowAISpending:\r\n\t\t\t\t\tiCounterEsp = self.getCounterEspionageTurnsLeft(iPlayerID, self.iActivePlayer)\r\n\t\t\t\t\tself.showCounterEspionage(screen, szLeaderPanel, iCounterEsp, self.MissionLeaderPanelBottomRow)\r\n\r\n\t\t\t# EPs\r\n\t\t\tszName = self.getNextWidgetName()\r\n\t\t\tiPlayerEPs = self.getPlayerEPs(self.iActivePlayer, iPlayerID)\r\n\t\t\tszText = u\"\" + localText.getText(\"TXT_KEY_ESPIONAGE_NUM_EPS\", (iPlayerEPs ,)) + \"\"\r\n\t\t\tscreen.setLabelAt( szName, szLeaderPanel, szText, CvUtil.FONT_RIGHT_JUSTIFY, self.MissionLeaderPanel_X_EPoints, self.MissionLeaderPanelTopRow, self.Z_CONTROLS, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 );\r\n\r\n\t\t\t# EPs Against\r\n\t\t\tif (EspionageOpt.isEnabled()\r\n\t\t\tand self.bShowAISpending):\r\n\t\t\t\tszName = self.getNextWidgetName() #\"PointsAgainstText%d\" %(iPlayerID)\r\n\t\t\t\tiTargetEPs = self.getPlayerEPs(iPlayerID, self.iActivePlayer)\r\n\t\t\t\tszText = u\"\" + localText.getText(\"TXT_KEY_ESPIONAGE_NUM_EPS\", (iTargetEPs, )) + \"\"\r\n\t\t\t\tscreen.setLabelAt( szName, szLeaderPanel, szText, CvUtil.FONT_RIGHT_JUSTIFY, self.MissionLeaderPanel_X_EPoints, self.MissionLeaderPanelBottomRow, self.Z_CONTROLS, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 );\r\n\r\n\t\t\t# EP Spending Against (Points per turn)\r\n\t\t\tif (EspionageOpt.isEnabled()\r\n\t\t\tand self.bShowAISpending):\r\n\t\t\t\tszName = self.getNextWidgetName() #\"AmountAgainstText%d\" %(iPlayerID)\r\n\t\t\t\tiSpending = SpyUtil.getDifferenceByPlayer(iPlayerID, self.iActivePlayer)\r\n\t\t\t\tif (iSpending is None\r\n\t\t\t\tor iSpending == 0):\r\n\t\t\t\t\tszText = u\"\"\r\n\t\t\t\telse:\r\n\t\t\t\t\tif iSpending > 0:\r\n\t\t\t\t\t\tszText = u\"(+%i)\" %(iSpending)\r\n\t\t\t\t\t\tszText = localText.changeTextColor(szText, gc.getInfoTypeForString(\"COLOR_GREEN\"))\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tszText = u\"(%i)\" %(iSpending)\r\n\t\t\t\t\t\tszText = localText.changeTextColor(szText, 
gc.getInfoTypeForString(\"COLOR_YELLOW\"))\r\n\t\t\t\tscreen.setLabelAt( szName, szLeaderPanel, szText, CvUtil.FONT_LEFT_JUSTIFY, self.MissionLeaderPanel_X_EPointsTurn, self.MissionLeaderPanelBottomRow, self.Z_CONTROLS, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 );\r\n\r\n\t\t\t# EP Weights\r\n\t\t\tiSize = 16\r\n\t\t\tszName = self.getNextWidgetName()\r\n\t\t\tscreen.setImageButtonAt( szName, szLeaderPanel, ArtFileMgr.getInterfaceArtInfo(\"INTERFACE_BUTTONS_PLUS\").getPath(), self.MissionLeaderPanel_X_WghtInc, self.MissionLeaderPanelBottomRow, iSize, iSize, WidgetTypes.WIDGET_GENERAL, self.iIncreaseButtonID, iPlayerID );\r\n\t\t\tszName = self.getNextWidgetName()\r\n\t\t\tscreen.setImageButtonAt( szName, szLeaderPanel, ArtFileMgr.getInterfaceArtInfo(\"INTERFACE_BUTTONS_MINUS\").getPath(), self.MissionLeaderPanel_X_WghtDec, self.MissionLeaderPanelBottomRow, iSize, iSize, WidgetTypes.WIDGET_GENERAL, self.iDecreaseButtonID, iPlayerID );\r\n\r\n\t\t\t# Symbols for 'Demographics' and 'Research'\r\n\t\t\tif EspionageOpt.isEnabled():\r\n\t\t\t\t# Active Player\r\n\t\t\t\tiDemoCost = pActivePlayer.getEspionageMissionCost(self.MissionSeeDemo, iPlayerID, None, -1)\r\n\t\t\t\tiTechCost = pActivePlayer.getEspionageMissionCost(self.MissionSeeResearch, iPlayerID, None, -1)\r\n\t\t\t\tself.showPassiveMissionIcons(screen, szLeaderPanel, iPlayerEPs, iDemoCost, iTechCost, self.MissionLeaderPanelTopRow)\r\n\r\n\t\t\t\t# Target Player\r\n\t\t\t\tif (EspionageOpt.isShowCalculatedInformation()\r\n\t\t\t\tand self.bShowAISpending):\r\n\t\t\t\t\tiDemoCost = pTargetPlayer.getEspionageMissionCost(self.MissionSeeDemo, self.iActivePlayer, None, -1)\r\n\t\t\t\t\tiTechCost = pTargetPlayer.getEspionageMissionCost(self.MissionSeeResearch, self.iActivePlayer, None, -1)\r\n\t\t\t\t\tself.showPassiveMissionIcons(screen, szLeaderPanel, iTargetEPs, iDemoCost, iTechCost, self.MissionLeaderPanelBottomRow)\r\n\r\n\t\tfor iPlayerID in self.aiUnknownPlayers:\r\n\t\t\tszLeaderPanel = self.getNextWidgetName()\r\n\t\t\tszName = self.getNextWidgetName()\r\n\t\t\tscreen.attachPanel(self.szScrollPanel, szLeaderPanel, \"\", \"\", True, False, PanelStyles.PANEL_STYLE_STANDARD)\r\n\t\t\tscreen.attachSeparator(szLeaderPanel, szName, true, 30)\r\n\r\n\r\n\r\n\r\n\tdef getEspionageMultiplier(self, iCurrentPlayer, iTargetPlayer):\r\n\t\tpCurrentPlayer = gc.getPlayer(iCurrentPlayer)\r\n\t\tiCurrentTeamID = pCurrentPlayer.getTeam()\r\n\t\tpTargetPlayer = gc.getPlayer(iTargetPlayer)\r\n\t\tiTargetTeamID = pTargetPlayer.getTeam()\r\n\r\n\t\tiMultiplier = getEspionageModifier(iCurrentTeamID, iTargetTeamID)\r\n\t\treturn iMultiplier\r\n\r\n\tdef getEspionageMultiplierText(self, iCurrentPlayer, iTargetPlayer):\r\n\t\tpCurrentPlayer = gc.getPlayer(iCurrentPlayer)\r\n\t\tiCurrentTeamID = pCurrentPlayer.getTeam()\r\n\t\tpTargetPlayer = gc.getPlayer(iTargetPlayer)\r\n\t\tiTargetTeamID = pTargetPlayer.getTeam()\r\n\r\n\t\tiMultiplier = getEspionageModifier(iCurrentTeamID, iTargetTeamID)\r\n\t\tszMultiplier = localText.getText(\"TXT_KEY_ESPIONAGE_COST\", (iMultiplier, ))\r\n\r\n\t\tif self.getCounterEspionageTurnsLeft(iCurrentPlayer, iTargetPlayer) > 0:\r\n\t\t\tszMultiplier += u\"*\"\r\n\r\n\t\tif self.getCounterEspionageTurnsLeft(iTargetPlayer, iCurrentPlayer) > 0:\r\n\t\t\tszMultiplier += u\"+\"\r\n\r\n\t\treturn szMultiplier\r\n\r\n\tdef getCounterEspionageTurnsLeft(self, iCurrentPlayer, iTargetPlayer):\r\n\t\tpCurrentTeam = gc.getTeam(gc.getPlayer(iCurrentPlayer).getTeam())\r\n\t\tiTargetTeamID = 
gc.getPlayer(iTargetPlayer).getTeam()\r\n\r\n\t\tiCurrentCounterEsp = pCurrentTeam.getCounterespionageTurnsLeftAgainstTeam(iTargetTeamID)\r\n\t\treturn iCurrentCounterEsp\r\n\r\n\tdef showCounterEspionage(self, screen, szLeaderPanel, iCounterEspTurns, iRow):\r\n\t\tszName = self.getNextWidgetName()\r\n\t\tif iCounterEspTurns > 0:\r\n\t\t\tszText = u\"[%i]\" %(iCounterEspTurns)\r\n\t\telse:\r\n\t\t\tszText = u\"\"\r\n\t\tscreen.setLabelAt(szName, szLeaderPanel, szText, CvUtil.FONT_RIGHT_JUSTIFY, self.MissionLeaderPanel_X_CounterEP, iRow, self.Z_CONTROLS, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 )\r\n\r\n\tdef showPassiveMissionIcons(self, screen, szLeaderPanel, iEPoints, iDemoCost, iTechCost, iRow):\r\n\t\t# can see demographics icon\r\n\t\tif iEPoints >= iDemoCost:\r\n\t\t\tszText = FontUtil.getChar(\"ss life support\")\r\n\t\telse:\r\n\t\t\tszText = FontUtil.getChar(\"space\")\r\n\r\n\t\t# can see research icon\r\n\t\tif iEPoints >= iTechCost:\r\n\t\t\tszText += FontUtil.getChar(\"commerce research\")\r\n\t\telse:\r\n\t\t\tszText += FontUtil.getChar(\"space\")\r\n\r\n\t\tszName = self.getNextWidgetName()\r\n\t\tscreen.setLabelAt(szName, szLeaderPanel, szText, CvUtil.FONT_LEFT_JUSTIFY, self.MissionLeaderPanel_X_PassiveMissions, iRow, self.Z_CONTROLS, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 );\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tdef refreshMissionTab(self):\r\n\t\tif (self.iTargetPlayer != -1):\r\n\t\t\t# Create a new screen, called EspionageAdvisor, using the file EspionageAdvisor.py for input\r\n\t\t\tscreen = self.getScreen()\r\n\r\n\t\t\tpActivePlayer = gc.getPlayer(self.iActivePlayer)\r\n\t\t\tpActiveTeam = gc.getTeam(pActivePlayer.getTeam())\r\n\r\n\t\t\tfor iPlayerID in self.aiKnownPlayers:\r\n\t\t\t\tself.refreshMissionTab_LeftLeaderPanel(screen, pActivePlayer, iPlayerID)\r\n\r\n\t\t\t# Is there any other players which have been met?\r\n\t\t\tif (self.iTargetPlayer != -1):\r\n\t\t\t\tpTargetPlayer = gc.getPlayer(self.iTargetPlayer)\r\n\t\t\t\tpyTargetPlayer = PyPlayer(self.iTargetPlayer)\r\n\r\n\t\t\t\t# List of Cities\r\n\t\t\t\tscreen.addListBoxGFC(self.CityListBoxWidget, \"\", self.X_CITY_LIST, self.Y_CITY_LIST, self.W_CITY_LIST, self.H_CITY_LIST, TableStyles.TABLE_STYLE_STANDARD)\r\n\t\t\t\tscreen.enableSelect(self.CityListBoxWidget, True)\r\n\t\t\t\tscreen.setStyle(self.CityListBoxWidget, \"Table_StandardCiv_Style\")\r\n\r\n\t\t\t\tif self.CityMissionToggle == CITYMISSION_CITY:\r\n\t\t\t\t\t# Loop through target's cities, see which are visible and add them to the list\r\n\t\t\t\t\tapCityList = pyTargetPlayer.getCityList()\r\n\r\n\t\t\t\t\tiLoop = 0\r\n\t\t\t\t\tfor pyCity in apCityList:\r\n\t\t\t\t\t\tpCity = pyCity.GetCy()\r\n\t\t\t\t\t\tszCityName = self.getCityNameText(pCity, self.iActivePlayer, self.iTargetPlayer)\r\n\r\n\t\t\t\t\t\tif ((EspionageOpt.isEnabled()\r\n\t\t\t\t\t\t\tand pCity.isRevealed(pActivePlayer.getTeam(), false))\r\n\t\t\t\t\t\tor (not EspionageOpt.isEnabled()\r\n\t\t\t\t\t\t\tand pCity.isRevealed(pActivePlayer.getTeam(), false))):\r\n\t\t\t\t\t\t\tscreen.appendListBoxString(self.CityListBoxWidget, szCityName, WidgetTypes.WIDGET_GENERAL, pCity.getID(), 0, CvUtil.FONT_LEFT_JUSTIFY )\r\n\r\n\t\t\t\t\t\t\tif (self.iActiveCityID == -1 or pTargetPlayer.getCity(self.iActiveCityID).isNone()):\r\n\t\t\t\t\t\t\t\tself.iActiveCityID = pCity.getID()\r\n\r\n\t\t\t\t\t\t\tif (self.iActiveCityID == pCity.getID()):\r\n\t\t\t\t\t\t\t\tscreen.setSelectedListBoxStringGFC(self.CityListBoxWidget, iLoop)\r\n\r\n\t\t\t\t\t\t\tiLoop += 
1\r\n\r\n\t\t\t\telif self.CityMissionToggle == CITYMISSION_MISSION:\r\n\t\t\t\t\t# active missions only\r\n\t\t\t\t\tiLoop = 0\r\n\t\t\t\t\tfor iMissionLoop in range(gc.getNumEspionageMissionInfos()):\r\n\t\t\t\t\t\tpMission = gc.getEspionageMissionInfo(iMissionLoop)\r\n\t\t\t\t\t\tif (pMission.getCost() != -1):\r\n\t\t\t\t\t\t\tif pMission.isTargetsCity():\r\n\t\t\t\t\t\t\t\tscreen.appendListBoxString(self.CityListBoxWidget, pMission.getDescription(), WidgetTypes.WIDGET_GENERAL, iMissionLoop, 0, CvUtil.FONT_LEFT_JUSTIFY )\r\n\r\n\t\t\t\t\t\t\t\tif (self.iActiveMissionID == -1):\r\n\t\t\t\t\t\t\t\t\tself.iActiveMissionID = iMissionLoop\r\n\r\n\t\t\t\t\t\t\t\tif (self.iActiveMissionID == iMissionLoop):\r\n\t\t\t\t\t\t\t\t\tscreen.setSelectedListBoxStringGFC(self.CityListBoxWidget, iLoop)\r\n\r\n\t\t\t\t\t\t\t\tiLoop += 1\r\n\r\n\t\t\t\tself.W_TABLE_0 = self.W_EFFECTS_LIST\r\n\t\t\t\tself.W_TABLE_1 = 0\r\n\t\t\t\tself.W_TABLE_2 = self.W_EFFECTS_COSTS_LIST\r\n\t\t\t\tself.W_TABLE_3 = 20\r\n\r\n\t\t\t\tszHelpText = localText.getText(\"TXT_KEY_ESPIONAGE_PASSIVE_AUTOMATIC\", ())\r\n\t\t\t\tscreen.addTableControlGFCWithHelp(self.EffectsTableWidget, 4, self.X_EFFECTS_LIST, self.Y_EFFECTS_LIST, self.W_EFFECTS_LIST + self.W_EFFECTS_COSTS_LIST + self.W_TABLE_1 + self.W_TABLE_3, self.H_EFFECTS_LIST, False, False, 32,32, TableStyles.TABLE_STYLE_STANDARD, szHelpText)\r\n\t\t\t\tscreen.setTableColumnHeader(self.EffectsTableWidget, 0, \"\", self.W_TABLE_0)\r\n\t\t\t\tscreen.setTableColumnHeader(self.EffectsTableWidget, 1, \"\", self.W_TABLE_1)\r\n\t\t\t\tscreen.setTableColumnHeader(self.EffectsTableWidget, 2, \"\", self.W_TABLE_2)\r\n\t\t\t\tscreen.setTableColumnHeader(self.EffectsTableWidget, 3, \"\", self.W_TABLE_3)\r\n\r\n\t\t\t\tif self.CityMissionToggle == CITYMISSION_CITY:\r\n\t\t\t\t\tszHelpText = localText.getText(\"TXT_KEY_ESPIONAGE_MISSIONS_SPY\", ())\r\n\t\t\t\telse:\r\n\t\t\t\t\tszHelpText = \"\"\r\n\t\t\t\tscreen.addTableControlGFCWithHelp(self.MissionsTableWidget, 4, self.X_MISSIONS_LIST, self.Y_MISSIONS_LIST, self.W_MISSIONS_LIST + self.W_MISSIONS_COSTS_LIST + self.W_TABLE_1 + self.W_TABLE_3, self.H_MISSIONS_LIST, False, False, 32,32, TableStyles.TABLE_STYLE_STANDARD, szHelpText)\r\n\t\t\t\tscreen.setTableColumnHeader(self.MissionsTableWidget, 0, \"\", self.W_TABLE_0)\r\n\t\t\t\tscreen.setTableColumnHeader(self.MissionsTableWidget, 1, \"\", self.W_TABLE_1)\r\n\t\t\t\tscreen.setTableColumnHeader(self.MissionsTableWidget, 2, \"\", self.W_TABLE_2)\r\n\t\t\t\tscreen.setTableColumnHeader(self.MissionsTableWidget, 3, \"\", self.W_TABLE_3)\r\n\r\n\t\t\t\t# Loop through passive Missions\r\n\t\t\t\tfor iMissionLoop in range(gc.getNumEspionageMissionInfos()):\r\n\t\t\t\t\tpMission = gc.getEspionageMissionInfo(iMissionLoop)\r\n\t\t\t\t\tif (pMission.getCost() != -1):\r\n\t\t\t\t\t\tif (pMission.isPassive()):\r\n\t\t\t\t\t\t\tif (self.CityMissionToggle == CITYMISSION_CITY\r\n\t\t\t\t\t\t\tor (self.CityMissionToggle == CITYMISSION_MISSION\r\n\t\t\t\t\t\t\tand not pMission.isTargetsCity())):\r\n\t\t\t\t\t\t\t\tszTable = self.EffectsTableWidget\r\n\t\t\t\t\t\t\t\tszText, szCost = self.getTableTextCost(self.iActivePlayer, self.iTargetPlayer, iMissionLoop, self.iActiveCityID)\r\n\t\t\t\t\t\t\t\tiRow = screen.appendTableRow(szTable)\r\n\t\t\t\t\t\t\t\tscreen.setTableText(szTable, 0, iRow, szText, \"\", WidgetTypes.WIDGET_GENERAL, -1, -1, CvUtil.FONT_LEFT_JUSTIFY)\r\n\t\t\t\t\t\t\t\tscreen.setTableText(szTable, 2, iRow, szCost, \"\", WidgetTypes.WIDGET_GENERAL, -1, -1, 
CvUtil.FONT_RIGHT_JUSTIFY)\r\n\r\n\t\t\t\tif self.CityMissionToggle == CITYMISSION_CITY:\r\n\t\t\t\t\t# Loop through active Missions\r\n\t\t\t\t\t# Primary list is cities, secondary list is missions\r\n\t\t\t\t\tfor iMissionLoop in range(gc.getNumEspionageMissionInfos()):\r\n\t\t\t\t\t\tpMission = gc.getEspionageMissionInfo(iMissionLoop)\r\n\t\t\t\t\t\tif (pMission.getCost() != -1):\r\n\t\t\t\t\t\t\tif (not pMission.isPassive()):\r\n\t\t\t\t\t\t\t\tszTable = self.MissionsTableWidget\r\n\t\t\t\t\t\t\t\tszText, szCost = self.getTableTextCost(self.iActivePlayer, self.iTargetPlayer, iMissionLoop, self.iActiveCityID)\r\n\t\t\t\t\t\t\t\tiRow = screen.appendTableRow(szTable)\r\n\t\t\t\t\t\t\t\tscreen.setTableText(szTable, 0, iRow, szText, \"\", WidgetTypes.WIDGET_GENERAL, -1, -1, CvUtil.FONT_LEFT_JUSTIFY)\r\n\t\t\t\t\t\t\t\tscreen.setTableText(szTable, 2, iRow, szCost, \"\", WidgetTypes.WIDGET_GENERAL, -1, -1, CvUtil.FONT_RIGHT_JUSTIFY)\r\n\r\n\t\t\t\telif self.CityMissionToggle == CITYMISSION_MISSION:\r\n\t\t\t\t\t# Loop through target's cities, see which are visible and add them to the list\r\n\t\t\t\t\t# Primary list is missions, secondary list is cities\r\n\t\t\t\t\tapCityList = pyTargetPlayer.getCityList()\r\n\r\n\t\t\t\t\tfor pyCity in apCityList:\r\n\t\t\t\t\t\tpCity = pyCity.GetCy()\r\n\t\t\t\t\t\tif (pCity.isRevealed(pActivePlayer.getTeam(), false)):\r\n\r\n\t\t\t\t\t\t\tszTable = self.MissionsTableWidget\r\n\t\t\t\t\t\t\tszText, szCost = self.getTableTextCost(self.iActivePlayer, self.iTargetPlayer, self.iActiveMissionID, pCity.getID())\r\n\t\t\t\t\t\t\tiRow = screen.appendTableRow(szTable)\r\n\t\t\t\t\t\t\tscreen.setTableText(szTable, 0, iRow, szText, \"\", WidgetTypes.WIDGET_GENERAL, -1, -1, CvUtil.FONT_LEFT_JUSTIFY)\r\n\t\t\t\t\t\t\tif (pCity.isRevealed(pActivePlayer.getTeam(), false)):\r\n\t\t\t\t\t\t\t\tscreen.setTableText(szTable, 2, iRow, szCost, \"\", WidgetTypes.WIDGET_GENERAL, -1, -1, CvUtil.FONT_RIGHT_JUSTIFY)\r\n\r\n\t\treturn 0\r\n\r\n\tdef refreshMissionTab_LeftLeaderPanel(self, screen, pActivePlayer, iPlayerID):\r\n\t\tpTargetPlayer = gc.getPlayer(iPlayerID)\r\n\t\tiTargetTeam = pTargetPlayer.getTeam()\r\n\r\n\t\tszLeaderPanel = self.MissionLeaderPanelWidgets[iPlayerID]\r\n\t\tszEPWeight = self.EPWeightWidgets[iPlayerID]\r\n\t\tszEPSpending = self.EPSpendingWidgets[iPlayerID]\r\n\t\tszEPIcon = self.EPIconWidgets[iPlayerID]\r\n\r\n\t\t# EP Weight\r\n\t\tszText = u\"\" + localText.getText(\"TXT_KEY_ESPIONAGE_SCREEN_SPENDING_WEIGHT\", ()) + \": %d\" %(pActivePlayer.getEspionageSpendingWeightAgainstTeam(iTargetTeam))\r\n\t\tscreen.setLabelAt(szEPWeight, szLeaderPanel, szText, 0, self.MissionLeaderPanel_X_Wght, self.MissionLeaderPanelBottomRow, self.Z_CONTROLS, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 );\r\n\r\n\t\t# EP Spending (Points per turn)\r\n\t\tiSpending = pActivePlayer.getEspionageSpending(iTargetTeam)\r\n\t\tif EspionageOpt.isEnabled():\r\n\t\t\tiY = self.MissionLeaderPanelTopRow\r\n\t\t\tif (iSpending > 0):\r\n\t\t\t\tszText = u\"(+%i)\" %(iSpending)\r\n\t\t\t\tszText = localText.changeTextColor(szText, gc.getInfoTypeForString(\"COLOR_GREEN\"))\r\n\t\t\telse:\r\n\t\t\t\tszText = u\"\"\r\n\t\telse:\r\n\t\t\tiY = self.MissionLeaderPanelBottomRow\r\n\t\t\tif (iSpending > 0):\r\n\t\t\t\tszText = u\"%s\" %(localText.getText(\"TXT_KEY_ESPIONAGE_NUM_EPS_PER_TURN\", (iSpending, )))\r\n\t\t\telse:\r\n\t\t\t\tszText = u\"%s\" %(localText.getText(\"TXT_KEY_ESPIONAGE_NUM_EPS_PER_TURN\", (iSpending, )))\r\n\t\tscreen.setLabelAt(szEPSpending, szLeaderPanel, 
szText, CvUtil.FONT_LEFT_JUSTIFY, self.MissionLeaderPanel_X_EPointsTurn, iY, self.Z_CONTROLS, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 );\r\n\r\n\t\t# Espionage Icon\r\n\t\tif (iSpending > 0):\r\n\t\t\tszText = u\"%c\" %(gc.getCommerceInfo(CommerceTypes.COMMERCE_ESPIONAGE).getChar())\r\n\t\telse:\r\n\t\t\tszText = u\"\"\r\n\t\tscreen.setLabelAt(szEPIcon, szLeaderPanel, szText, 0, self.MissionLeaderPanel_X_EspionageIcon, self.MissionLeaderPanelMiddle, self.Z_CONTROLS, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 );\r\n\r\n\r\n\tdef getCityNameText(self, pCity, iActivePlayer, iTargetPlayer):\r\n\t\tif not EspionageOpt.isEnabled():\r\n\t\t\treturn pCity.getName()\r\n\r\n\t\tszCityName = pCity.getName()\r\n\t\tiPlayerEPs = self.getPlayerEPs(iActivePlayer, iTargetPlayer)\r\n\t\tpActivePlayer = gc.getPlayer(iActivePlayer)\r\n\t\tpPlot = pCity.plot()\r\n\r\n\t\tif not pCity.isRevealed(pActivePlayer.getTeam(), false):\r\n\t\t\tszCityName = \"-- %s --\" %(szCityName)\r\n\r\n\t\treturn szCityName\r\n\r\n\tdef getPlayerEPs(self, iCurrentPlayer, iTargetPlayer):\r\n\t\tpCurrentPlayer = gc.getPlayer(iCurrentPlayer)\r\n\t\tpCurrentTeam = gc.getTeam(pCurrentPlayer.getTeam())\r\n\t\tpTargetPlayer = gc.getPlayer(iTargetPlayer)\r\n\t\tiTargetTeam = pTargetPlayer.getTeam()\r\n\t\tEPs = pCurrentTeam.getEspionagePointsAgainstTeam(iTargetTeam)\r\n\t\treturn EPs\r\n\r\n\tdef getTableTextCost(self, iActivePlayer, iTargetPlayer, iMission, iCity):\r\n\r\n\t\tpActivePlayer = gc.getPlayer(iActivePlayer)\r\n\t\tpActiveTeam = gc.getTeam(pActivePlayer.getTeam())\r\n\t\tpTargetPlayer = gc.getPlayer(iTargetPlayer)\r\n\t\tpMission = gc.getEspionageMissionInfo(iMission)\r\n\r\n\t\tszText = \"\"\r\n\t\tszCost = \"\"\r\n\r\n\t\tif pMission.getCost() == -1:\r\n\t\t\treturn szText, szCost\r\n\r\n\t\tiTargetTeam = pTargetPlayer.getTeam()\r\n\t\tiPlayerEPs = self.getPlayerEPs(iActivePlayer, iTargetPlayer)\r\n\t\tif (EspionageOpt.isEnabled()):\r\n\t\t\tiPossibleColor = EspionageOpt.getPossibleMissionColor()\r\n\t\t\tiCloseColor = EspionageOpt.getCloseMissionColor()\r\n\t\t\tiClosePercent = EspionageOpt.getCloseMissionPercent()\r\n\t\telse:\r\n\t\t\tiPossibleColor = -1\r\n\t\t\tiCloseColor = -1\r\n\t\t\tiClosePercent = -1\r\n\r\n\t\tpPlot = None\r\n\t\tszCityName= \"\"\r\n\t\tbHideCost = False\r\n\t\tif (iCity != -1\r\n\t\tand pMission.isTargetsCity()):\r\n\t\t\tpActiveCity = gc.getPlayer(iTargetPlayer).getCity(iCity)\r\n\t\t\tpPlot = pActiveCity.plot()\r\n\t\t\tszCityName = pActiveCity.getName()\r\n\t\t\tszCityName = self.getCityNameText(pActiveCity, iActivePlayer, iTargetPlayer)\r\n\t\t\tif not pActiveCity.isRevealed(pActivePlayer.getTeam(), false):\r\n\t\t\t\tbHideCost = True\r\n\r\n\t\tif not bHideCost:\r\n\t\t\tiCost = pActivePlayer.getEspionageMissionCost(iMission, iTargetPlayer, pPlot, -1)\r\n\t\telse:\r\n\t\t\tiCost = 0\r\n\r\n\t\tif (self.CityMissionToggle == CITYMISSION_CITY # secondary list is mission names\r\n\t\tor (pMission.isPassive()\r\n\t\tand not pMission.isTargetsCity())):\r\n\t\t\tszTechText = \"\"\r\n\t\t\tif (pMission.getTechPrereq() != -1):\r\n\t\t\t\tszTechText = \" (%s)\" %(gc.getTechInfo(pMission.getTechPrereq()).getDescription())\r\n\r\n\t\t\tszText = pMission.getDescription() + szTechText\r\n\t\telse: # secondary list is city names\r\n\t\t\tszText = szCityName\r\n\r\n\t\tif iCost > 0:\r\n\t\t\tszCost = unicode(str(iCost))\r\n\t\t\tif (EspionageOpt.isEnabled()):\r\n\t\t\t\tif (iPossibleColor >= 0 and iPlayerEPs >= iCost):\r\n\t\t\t\t\tszCost = localText.changeTextColor(szCost, 
iPossibleColor)\r\n\t\t\t\t\tszText = localText.changeTextColor(szText, iPossibleColor)\r\n\t\t\t\telif (iCloseColor >= 0 and iPlayerEPs >= (iCost * float(100 - iClosePercent) / 100)):\r\n\t\t\t\t\tszCost = localText.changeTextColor(szCost, iCloseColor)\r\n\t\t\t\t\tszText = localText.changeTextColor(szText, iCloseColor)\r\n\r\n\t\tif (pMission.getTechPrereq() != -1):\r\n\t\t\tpTeam = gc.getTeam(pActivePlayer.getTeam())\r\n\t\t\tif (not pTeam.isHasTech(pMission.getTechPrereq())):\r\n\t\t\t\tszText = u\"%s\" %(szText)\r\n\t\t\t\treturn szText, szCost\r\n\r\n\t\treturn szText, szCost\r\n\r\n\r\n\r\n\tdef drawSpyvSpyTab(self):\r\n\t\tscreen = self.getScreen()\r\n\r\n#\t\tBugUtil.debug(\"CvEspionage Advisor: drawSpyvSpyTab\")\r\n\r\n\t\tpActivePlayer = gc.getPlayer(self.iActivePlayer)\r\n\t\tpActiveTeam = gc.getTeam(pActivePlayer.getTeam())\r\n\r\n\t\tself.aiKnownPlayers = []\r\n\t\tself.aiUnknownPlayers = []\r\n\r\n\t\t# add current player\r\n\t\tself.aiKnownPlayers.append(self.iActivePlayer)\r\n\t\tself.iNumEntries = 1\r\n\r\n\t\t# add known players\r\n\t\tfor iLoop in range(gc.getMAX_PLAYERS()):\r\n\t\t\tpPlayer = gc.getPlayer(iLoop)\r\n\t\t\tif (self.iActivePlayer != iLoop\r\n\t\t\tand not pPlayer.isBarbarian()):\r\n\t\t\t\tif (pPlayer.isAlive()):\r\n\t\t\t\t\tif (pActiveTeam.isHasMet(pPlayer.getTeam())):\r\n\t\t\t\t\t\tself.aiKnownPlayers.append(iLoop)\r\n\t\t\t\t\t\tself.iNumEntries = self.iNumEntries + 1\r\n\r\n\t\t# fill out to 16 possible players\r\n\t\twhile(self.iNumEntries < 16):\r\n\t\t\tself.iNumEntries = self.iNumEntries + 1\r\n\t\t\tself.aiUnknownPlayers.append(self.iNumEntries)\r\n\r\n\t\tself.drawSpyvSpyTabConstants()\r\n\r\n\t\t# add background panel\r\n\t\tself.szSvSPaneWidget = self.getNextWidgetName()\r\n\t\tscreen.addPanel(self.szSvSPaneWidget, \"\", \"\", true, true,\r\n\t\t\tself.X_SvS_PANE, self.Y_SvS_PANE, self.W_SvS_PANE, self.H_SvS_PANE, PanelStyles.PANEL_STYLE_MAIN)\r\n\r\n\t\t# add scrolling panel\r\n\t\tself.szSvSScrollPanel = self.getNextWidgetName()\r\n\t\tscreen.addPanel(self.szSvSScrollPanel, \"\", \"\", true, true,\r\n\t\t\tself.X_SvS_SCROLL, self.Y_SvS_SCROLL, self.W_SvS_SCROLL, self.H_SvS_SCROLL, PanelStyles.PANEL_STYLE_EMPTY)\r\n\r\n\t\t# add left column leader panel\r\n\t\tself.szLeftLeaderPanel = self.getNextWidgetName()\r\n\t\tscreen.addPanel(self.szLeftLeaderPanel, \"\", \"\", true, true,\r\n\t\t\tself.X_SvS_LEFT_LEADER, self.Y_SvS_LEFT_LEADER, self.W_SvS_LEFT_LEADER, self.H_SvS_LEFT_LEADER, PanelStyles.PANEL_STYLE_STANDARD)\r\n\r\n\t\t# add top column leader panel\r\n\t\tself.szTopLeaderPanel = self.getNextWidgetName()\r\n\t\tscreen.addPanel(self.szTopLeaderPanel, \"\", \"\", true, true,\r\n\t\t\tself.X_SvS_TOP_LEADER, self.Y_SvS_TOP_LEADER, self.W_SvS_TOP_LEADER, self.H_SvS_TOP_LEADER, PanelStyles.PANEL_STYLE_STANDARD)\r\n\r\n\t\tiRow = 0\r\n\t\tfor iRowPlayerID in self.aiKnownPlayers:\r\n\t\t\tiRow += 1\r\n\r\n\t\t\tsLeaderButton = gc.getLeaderHeadInfo(gc.getPlayer(iRowPlayerID).getLeaderType()).getButton()\r\n\r\n\t\t\t# left leader icon\r\n\t\t\tscreen.setImageButton(self.getNextWidgetName(), sLeaderButton,\r\n\t\t\t\t\t\t\t\t self.X_SvS_LEFT_LEADER + 2, self.Y_SvS_LEFT_LEADER + (iRow - 1) * self.H_SvS_CELL + 3, self.SvS_IconSize, self.SvS_IconSize, WidgetTypes.WIDGET_LEADERHEAD, iRowPlayerID, -1)\r\n\r\n\t\t\t# show total EP spending over the last turn\r\n\t\t\tiSpending = SpyUtil.getSpending(iRowPlayerID, self.iActivePlayer)\r\n\t\t\tszText = self.formatSpending(iSpending)\r\n\t\t\tiX = self.X_SvS_LEFT_LEADER + self.W_SvS_LEFT_LEADER - 
10\r\n\t\t\tiY = self.Y_SvS_LEFT_LEADER + (iRow - 1) * self.H_SvS_CELL + self.H_SvS_CELL / 2 - 6\r\n\t\t\tscreen.setLabel(self.getNextWidgetName(), \"\", szText, CvUtil.FONT_RIGHT_JUSTIFY, iX, iY, self.Z_CONTROLS, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 );\r\n\r\n\t\t\t# top leader icon\r\n\t\t\tscreen.setImageButton(self.getNextWidgetName(), sLeaderButton,\r\n\t\t\t\t\t\t\t\t  self.X_SvS_TOP_LEADER + (iRow - 1) * self.W_SvS_CELL + self.W_SvS_CELL / 2 - self.SvS_IconSize / 2, self.Y_SvS_TOP_LEADER + 3, self.SvS_IconSize, self.SvS_IconSize, WidgetTypes.WIDGET_LEADERHEAD, iRowPlayerID, -1)\r\n\r\n\t\t\t# add a horizontal panel to make the table easier to read\r\n\t\t\tif iRow % 2 == 0: # every even number\r\n\t\t\t\tiY = self.Y_SvS_LEFT_LEADER + (iRow - 1) * self.H_SvS_CELL\r\n\t\t\t\tscreen.addPanel(self.getNextWidgetName(), \"\", \"\", true, true,\r\n\t\t\t\t\tself.X_SvS_TOP_LEADER, iY, self.W_SvS_TOP_LEADER, self.H_SvS_TOP_LEADER, PanelStyles.PANEL_STYLE_STANDARD)\r\n\r\n\t\t\tiCol = 0\r\n\t\t\tfor iColPlayerID in self.aiKnownPlayers:\r\n\t\t\t\tiCol += 1\r\n\r\n\t\t\t\tpActivePlayer = gc.getPlayer(self.iActivePlayer)\r\n\t\t\t\tpActiveTeamID = pActivePlayer.getTeam()\r\n\t\t\t\tpActiveTeam = gc.getTeam(pActiveTeamID)\r\n\r\n\t\t\t\tpColPlayer = gc.getPlayer(iColPlayerID)\r\n\t\t\t\tpColTeamID = pColPlayer.getTeam()\r\n\t\t\t\tpColTeam = gc.getTeam(pColTeamID)\r\n\r\n\t\t\t\tpRowPlayer = gc.getPlayer(iRowPlayerID)\r\n\t\t\t\tpRowTeamID = pRowPlayer.getTeam()\r\n\t\t\t\tpRowTeam = gc.getTeam(pRowTeamID)\r\n\r\n\t\t\t\tif pColTeamID == pRowTeamID: # row and col player are on the same team\r\n\t\t\t\t\tcontinue\r\n\r\n\t\t\t\t# check that everyone has met - show the EP spending\r\n\t\t\t\tif (pActiveTeam.isHasMet(pColPlayer.getTeam())\r\n\t\t\t\tand pActiveTeam.isHasMet(pRowPlayer.getTeam())\r\n\t\t\t\tand pColTeam.isHasMet(pRowPlayer.getTeam())):\r\n\t\t\t\t\t# EPs per turn\r\n\t\t\t\t\tiSpending = SpyUtil.getDifferenceByPlayer(iRowPlayerID, iColPlayerID)\r\n\t\t\t\t\tszText = self.formatSpending(iSpending)\r\n\t\t\t\t\tiX = self.X_SvS_TOP_LEADER + (iCol - 1) * self.W_SvS_CELL + self.W_SvS_CELL / 2\r\n\t\t\t\t\tiY = self.Y_SvS_LEFT_LEADER + (iRow - 1) * self.H_SvS_CELL + self.H_SvS_CELL / 2 - 6\r\n\t\t\t\t\tscreen.setLabel(self.getNextWidgetName(), \"\", szText, CvUtil.FONT_CENTER_JUSTIFY, iX, iY, self.Z_CONTROLS, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1 );\r\n\r\n\t\treturn\r\n\r\n\r\n\r\n\r\n\tdef drawSpyvSpyTabConstants(self):\r\n\t\t# don't skip this segment as some of the constants are dynamic :)\r\n\r\n\t\t# skip this if we have already done it\r\n#\t\tif self.drawSpyvSpyTabConstantsDone == 1:\r\n#\t\t\treturn\r\n\t\tself.drawSpyvSpyTabConstantsDone = 1\r\n\r\n\t\tself.X_SvS_PANE = 5\r\n\t\tself.Y_SvS_PANE = 45\r\n\t\tself.W_SvS_PANE = self.W_SCREEN - 2 * self.X_SvS_PANE\r\n\t\tself.H_SvS_PANE = self.H_SCREEN - 2 * self.Y_SvS_PANE\r\n\r\n#\t\tself.H_SCREEN = 768\r\n\r\n\t\tself.X_SvS_SCROLL = self.X_SvS_PANE + 10\r\n\t\tself.Y_SvS_SCROLL = self.Y_SvS_PANE + 10\r\n\t\tself.W_SvS_SCROLL = self.W_SvS_PANE - 20\r\n\t\tself.H_SvS_SCROLL = self.H_SvS_PANE - 20\r\n\r\n\t\tiNumPlayers = len(self.aiKnownPlayers)\r\n\t\tif iNumPlayers > 12:\r\n\t\t\tself.W_SvS_CELL = 50\r\n\t\t\tself.H_SvS_CELL = 35\r\n\t\t\tself.SvS_IconSize = 32\r\n\t\telif iNumPlayers >= 9:\r\n\t\t\tself.W_SvS_CELL = 70\r\n\t\t\tself.H_SvS_CELL = 50\r\n\t\t\tself.SvS_IconSize = 48\r\n\t\telse:\r\n\t\t\tself.W_SvS_CELL = 100\r\n\t\t\tself.H_SvS_CELL = 70\r\n\t\t\tself.SvS_IconSize = 
64\r\n\r\n\t\tself.X_SvS_LEFT_LEADER = self.X_SvS_SCROLL\r\n\t\tself.Y_SvS_LEFT_LEADER = self.Y_SvS_SCROLL + self.H_SvS_CELL\r\n\t\tself.W_SvS_LEFT_LEADER = 2 * self.W_SvS_CELL\r\n\t\tself.H_SvS_LEFT_LEADER = self.H_SvS_CELL * len(self.aiKnownPlayers)\r\n\r\n\t\tself.X_SvS_TOP_LEADER = self.X_SvS_SCROLL + self.W_SvS_LEFT_LEADER\r\n\t\tself.Y_SvS_TOP_LEADER = self.Y_SvS_SCROLL\r\n\t\tself.W_SvS_TOP_LEADER = self.W_SvS_CELL * len(self.aiKnownPlayers)\r\n\t\tself.H_SvS_TOP_LEADER = self.H_SvS_CELL\r\n\r\n\tdef formatSpending(self, iSpending):\r\n\t\tif iSpending == 0:\r\n\t\t\treturn u\"-\"\r\n\t\telif iSpending > 0:\r\n\t\t\treturn localText.changeTextColor(u\"+%i\" %(iSpending), gc.getInfoTypeForString(\"COLOR_GREEN\"))\r\n\t\telse:\r\n\t\t\treturn localText.changeTextColor(u\"%i\" %(iSpending), gc.getInfoTypeForString(\"COLOR_YELLOW\"))\r\n\r\n\r\n\t# returns a unique ID for a widget in this screen\r\n\tdef getNextWidgetName(self):\r\n\t\tszName = self.WIDGET_ID + str(self.nWidgetCount)\r\n\t\tself.nWidgetCount += 1\r\n\t\treturn szName\r\n\r\n\tdef deleteAllWidgets(self):\r\n\t\tscreen = self.getScreen()\r\n\t\ti = self.nWidgetCount - 1\r\n\t\twhile (i >= 0):\r\n\t\t\tself.nWidgetCount = i\r\n\t\t\tscreen.deleteWidget(self.getNextWidgetName())\r\n\t\t\ti -= 1\r\n\r\n\t\tself.nWidgetCount = 0\r\n\r\n\t# Will handle the input for this screen...\r\n\tdef handleInput (self, inputClass):\r\n\t\t'Calls function mapped in EspionageAdvisorInputMap'\r\n\r\n\t\tscreen = self.getScreen()\r\n\t\tpActivePlayer = gc.getPlayer(self.iActivePlayer)\r\n\t\ticFunctionName = inputClass.getFunctionName()\r\n\r\n\t\t##### Debug Dropdown #####\r\n\t\tif (CyGame().isDebugMode()):\r\n\t\t\tif (icFunctionName == self.DEBUG_DROPDOWN_ID):\r\n\t\t\t\tiIndex = screen.getSelectedPullDownID(self.DEBUG_DROPDOWN_ID)\r\n\t\t\t\tself.iActivePlayer = screen.getPullDownData(self.DEBUG_DROPDOWN_ID, iIndex)\r\n\t\t\t\tself.drawContents()\r\n\t\t\t\tCyInterface().setDirty(InterfaceDirtyBits.Espionage_Advisor_DIRTY_BIT, True)\r\n\r\n\t\tif (inputClass.getNotifyCode() == NotifyCode.NOTIFY_CLICKED):\r\n\t\t\tif self.EPScreen.getActiveTab() == self.MissionsTabWidget:\r\n\t\t\t\tif ((\"%s%d\" %(icFunctionName, inputClass.getID()) == self.szMissionsTitleText\r\n\t\t\t\tor \"%s%d\" %(icFunctionName, inputClass.getID()) == self.szCitiesTitleText)\r\n\t\t\t\tand EspionageOpt.isEnabled()):\r\n\t\t\t\t\tif self.CityMissionToggle == CITYMISSION_MISSION:\r\n\t\t\t\t\t\tself.CityMissionToggle = CITYMISSION_CITY\r\n\t\t\t\t\t\tself.drawContents()\r\n\t\t\t\t\t\treturn 0\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tself.CityMissionToggle = CITYMISSION_MISSION\r\n\t\t\t\t\t\tself.drawContents()\r\n\t\t\t\t\t\treturn 0\r\n\t\t\t\tif \"%s%d\" %(icFunctionName, inputClass.getID()) == self.ShowAISpendingWidget: # toggle show AI spending\r\n#\t\t\t\t\tBugUtil.debug(\"CvEspionage Advisor: toggle show AI spending\")\r\n\t\t\t\t\tself.bShowAISpending = not self.bShowAISpending\r\n\t\t\t\t\tself.drawContents()\r\n\t\t\t\t\treturn 0\r\n\r\n\t\t\tif (icFunctionName == self.MissionsTabWidget):\r\n#\t\t\t\tBugUtil.debug(\"CvEspionage Advisor: Change to Mission Tab\")\r\n\t\t\t\tself.EPScreen.updateTabStatus(self.MissionsTabWidget)\r\n\t\t\t\tself.EPScreenTab = 1\r\n\t\t\t\tself.drawContents()\r\n\t\t\t\treturn 0\r\n\t\t\telif (icFunctionName == self.SpyvSpyTabWidget):\r\n#\t\t\t\tBugUtil.debug(\"CvEspionage Advisor: Change to Spy v Spy Tab\")\r\n\t\t\t\tself.EPScreen.updateTabStatus(self.SpyvSpyTabWidget)\r\n\t\t\t\tself.EPScreenTab = 
2\r\n\t\t\t\tself.drawContents()\r\n\t\t\t\treturn 0\r\n\r\n\t\tif (self.iTargetPlayer != -1):\r\n\t\t\t##### Player Images #####\r\n\t\t\tif (inputClass.getData1() == self.iLeaderImagesID):\r\n\t\t\t\tself.iTargetPlayer = inputClass.getData2()\r\n\r\n\t\t\t\t# Loop through all images\r\n\t\t\t\tfor iPlayerID in self.aiKnownPlayers:\r\n\t\t\t\t\tszName = \"LeaderImage%d\" %(iPlayerID)\r\n\t\t\t\t\tszName = self.LeaderImageWidgets[iPlayerID]\r\n\t\t\t\t\tif (self.iTargetPlayer == iPlayerID):\r\n\t\t\t\t\t\tscreen.setState(szName, true)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tscreen.setState(szName, false)\r\n\r\n\t\t\t\t\tself.iActiveCityID = -1\r\n\r\n\t\t\t\tCyInterface().setDirty(InterfaceDirtyBits.Espionage_Advisor_DIRTY_BIT, True)\r\n\t\t\t\treturn 0\r\n\r\n\t\t\t##### City Listbox #####\r\n\t\t\tif (\"%s%d\" %(icFunctionName, inputClass.getID()) == self.CityListBoxWidget):\r\n\t\t\t\tif self.CityMissionToggle == CITYMISSION_CITY:\r\n\t\t\t\t\tiCityID = inputClass.getData1()\r\n\t\t\t\t\tself.iActiveCityID = iCityID\r\n\t\t\t\t\tCyInterface().setDirty(InterfaceDirtyBits.Espionage_Advisor_DIRTY_BIT, True)\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.iActiveMissionID = inputClass.getData1()\r\n\t\t\t\t\tCyInterface().setDirty(InterfaceDirtyBits.Espionage_Advisor_DIRTY_BIT, True)\r\n\t\t\t\treturn 0\r\n\r\n\t\t\t# EP spending weight adjustments\r\n\t\t\t##### Increase Button #####\r\n\t\t\tif (inputClass.getData1() == self.iIncreaseButtonID):\r\n\t\t\t\tiPlayerID = inputClass.getData2()\r\n\t\t\t\tiTargetTeam = gc.getPlayer(iPlayerID).getTeam()\r\n\r\n\t\t\t\tCyMessageControl().sendEspionageSpendingWeightChange(iTargetTeam, 1)\r\n\t\t\t\tCyInterface().setDirty(InterfaceDirtyBits.Espionage_Advisor_DIRTY_BIT, True)\r\n\t\t\t\treturn 0\r\n\r\n\t\t\t##### Decrease Button #####\r\n\t\t\telif (inputClass.getData1() == self.iDecreaseButtonID):\r\n\t\t\t\tiPlayerID = inputClass.getData2()\r\n\t\t\t\tiTargetTeam = gc.getPlayer(iPlayerID).getTeam()\r\n\r\n\t\t\t\tif (pActivePlayer.getEspionageSpendingWeightAgainstTeam(iTargetTeam) > 0):\t# Can't reduce weight below 0\r\n\t\t\t\t\tCyMessageControl().sendEspionageSpendingWeightChange(iTargetTeam, -1)\r\n\t\t\t\t\tCyInterface().setDirty(InterfaceDirtyBits.Espionage_Advisor_DIRTY_BIT, True)\r\n\t\t\t\treturn 0\r\n\r\n\t\treturn 0\r\n\r\n\tdef update(self, fDelta):\r\n\t\tif (CyInterface().isDirty(InterfaceDirtyBits.Espionage_Advisor_DIRTY_BIT) == True):\r\n\t\t\tCyInterface().setDirty(InterfaceDirtyBits.Espionage_Advisor_DIRTY_BIT, False)\r\n\t\t\tself.EPScreen.refreshActiveTab()\r\n\t\treturn\r\n","sub_path":"Assets/Python/Screens/CvEspionageAdvisor.py","file_name":"CvEspionageAdvisor.py","file_ext":"py","file_size_in_byte":51667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"121262754","text":"from django.db import models\n\nfrom atracoes.models import Atracao\nfrom comentarios.models import Comentario\nfrom avaliacoes.models import Avaliacao\nfrom enderecos.models import Endereco\n\n\nclass PontoTuristico(models.Model):\n nome = models.CharField(max_length=150)\n descricao = models.TextField(verbose_name='Descrição')\n status = models.BooleanField(verbose_name='Ativo', default=False)\n atracoes = models.ManyToManyField(Atracao)\n comentarios = models.ManyToManyField(Comentario)\n avaliacoes = models.ManyToManyField(Avaliacao)\n endereco = models.ForeignKey(\n Endereco, on_delete=models.CASCADE, null=True, blank=True\n )\n\n def __str__(self):\n return str(self.nome)\n\n class Meta:\n verbose_name = 'Ponto 
Turístico'\n        verbose_name_plural = 'Pontos Turísticos'\n","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"270308634","text":"import argparse\n\nimport gym\nimport supersuit as ss\nimport torch\nimport torch.nn.functional as F\n# pip install git+https://github.com/Rohan138/marl-baselines3\nfrom marl_baselines3 import IndependentPPO\nfrom stable_baselines3.common.torch_layers import BaseFeaturesExtractor\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom torch import nn\n\nfrom social_dilemmas.envs.pettingzoo_env import parallel_env\n\ndevice = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(\"MARL-Baselines3 PPO with Independent Learning\")\n    parser.add_argument(\n        \"--env-name\",\n        type=str,\n        default=\"harvest\",\n        choices=[\"harvest\", \"cleanup\"],\n        help=\"The SSD environment to use\",\n    )\n    parser.add_argument(\n        \"--num-agents\",\n        type=int,\n        default=5,\n        help=\"The number of agents\",\n    )\n    parser.add_argument(\n        \"--rollout-len\",\n        type=int,\n        default=1000,\n        help=\"length of training rollouts AND length at which env is reset\",\n    )\n    parser.add_argument(\n        \"--total-timesteps\",\n        type=int,\n        default=5e8,\n        help=\"Number of environment timesteps\",\n    )\n    parser.add_argument(\n        \"--use-collective-reward\",\n        action=\"store_true\",  # a boolean flag; 'type=bool' would parse any non-empty string as True\n        default=False,\n        help=\"Give each agent the collective reward across all agents\",\n    )\n    parser.add_argument(\n        \"--inequity-averse-reward\",\n        action=\"store_true\",\n        default=False,\n        help=\"Use inequity averse rewards from 'Inequity aversion \\\n            improves cooperation in intertemporal social dilemmas'\",\n    )\n    parser.add_argument(\n        \"--alpha\",\n        type=float,\n        default=5,\n        help=\"Advantageous inequity aversion factor\",\n    )\n    parser.add_argument(\n        \"--beta\",\n        type=float,\n        default=0.05,\n        help=\"Disadvantageous inequity aversion factor\",\n    )\n    args = parser.parse_args()\n    return args\n\n\n# Use this with lambda wrapper returning observations only\nclass CustomCNN(BaseFeaturesExtractor):\n    \"\"\"\n    :param observation_space: (gym.Space)\n    :param features_dim: (int) Number of features extracted.\n    This corresponds to the number of units in the last layer.\n    \"\"\"\n\n    def __init__(\n        self,\n        observation_space: gym.spaces.Box,\n        features_dim=128,\n        view_len=7,\n        num_frames=6,\n        fcnet_hiddens=[1024, 128],\n    ):\n        super(CustomCNN, self).__init__(observation_space, features_dim)\n        # We assume CxHxW images (channels first)\n        # Re-ordering will be done by preprocessing or a wrapper\n\n        flat_out = num_frames * 6 * (view_len * 2 - 1) ** 2\n        self.conv = nn.Conv2d(\n            in_channels=num_frames * 3,  # Input: (3 * num_frames) x 15 x 15 for view_len=7\n            out_channels=num_frames * 6,  # Output: (6 * num_frames) x 13 x 13\n            kernel_size=3,\n            stride=1,\n            padding=\"valid\",\n        )\n        self.fc1 = nn.Linear(in_features=flat_out, out_features=fcnet_hiddens[0])\n        self.fc2 = nn.Linear(in_features=fcnet_hiddens[0], out_features=fcnet_hiddens[1])\n\n    def forward(self, observations) -> torch.Tensor:\n        # Convert to tensor, rescale to [0, 1], and convert from B x H x W x C to B x C x H x W\n        observations = observations.permute(0, 3, 1, 2)\n        features = torch.flatten(F.relu(self.conv(observations)), start_dim=1)\n        features = F.relu(self.fc1(features))\n        features = F.relu(self.fc2(features))\n        return features\n\n\ndef main(args):\n    # Config\n    env_name = args.env_name\n    num_agents = args.num_agents
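\n    # example invocation of this script (a sketch; flag names and defaults per parse_args above):\n    #   python run_scripts/sb3_independent.py --env-name harvest --num-agents 5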
\n    rollout_len = args.rollout_len\n    total_timesteps = args.total_timesteps\n    use_collective_reward = args.use_collective_reward\n    inequity_averse_reward = args.inequity_averse_reward\n    alpha = args.alpha\n    beta = args.beta\n\n    # Training\n    num_cpus = 4  # number of cpus\n    num_envs = 12  # number of parallel multi-agent environments\n    num_frames = 6  # number of frames to stack together; use >4 to avoid automatic VecTransposeImage\n    features_dim = (\n        128  # output layer of cnn extractor AND shared layer for policy and value functions\n    )\n    fcnet_hiddens = [1024, 128]  # Two hidden layers for cnn extractor\n    ent_coef = 0.001  # entropy coefficient in loss\n    batch_size = rollout_len * num_envs // 2  # This is from the rllib baseline implementation\n    lr = 0.0001\n    n_epochs = 30\n    gae_lambda = 1.0\n    gamma = 0.99\n    target_kl = 0.01\n    grad_clip = 40\n    verbose = 3\n\n    env = parallel_env(\n        max_cycles=rollout_len,\n        env=env_name,\n        num_agents=num_agents,\n        use_collective_reward=use_collective_reward,\n        inequity_averse_reward=inequity_averse_reward,\n        alpha=alpha,\n        beta=beta,\n    )\n    env = ss.observation_lambda_v0(env, lambda x, _: x[\"curr_obs\"], lambda s: s[\"curr_obs\"])\n    env = ss.frame_stack_v1(env, num_frames)\n    env = ss.pettingzoo_env_to_vec_env_v1(env)\n    env = ss.concat_vec_envs_v1(\n        env, num_vec_envs=num_envs, num_cpus=num_cpus, base_class=\"stable_baselines3\"\n    )\n    env = VecMonitor(env)\n\n    policy_kwargs = dict(\n        features_extractor_class=CustomCNN,\n        features_extractor_kwargs=dict(\n            features_dim=features_dim, num_frames=num_frames, fcnet_hiddens=fcnet_hiddens\n        ),\n        net_arch=[features_dim],\n    )\n\n    tensorboard_log = \"./results/sb3/cleanup_ppo_independent\"  # note: log dir name is fixed regardless of --env-name\n\n    model = IndependentPPO(\n        \"CnnPolicy\",\n        num_agents=num_agents,\n        env=env,\n        learning_rate=lr,\n        n_steps=rollout_len,\n        batch_size=batch_size,\n        n_epochs=n_epochs,\n        gamma=gamma,\n        gae_lambda=gae_lambda,\n        ent_coef=ent_coef,\n        max_grad_norm=grad_clip,\n        target_kl=target_kl,\n        policy_kwargs=policy_kwargs,\n        tensorboard_log=tensorboard_log,\n        verbose=verbose,\n    )\n    model.learn(total_timesteps=total_timesteps)\n\n    logdir = model.logger.dir\n    model.save(logdir)\n    del model\n    model = IndependentPPO.load(  # noqa: F841\n        logdir, \"CnnPolicy\", num_agents, env, rollout_len, policy_kwargs, tensorboard_log, verbose\n    )\n\n\nif __name__ == \"__main__\":\n    args = parse_args()\n    main(args)\n","sub_path":"run_scripts/sb3_independent.py","file_name":"sb3_independent.py","file_ext":"py","file_size_in_byte":6281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"394554548","text":"\n\nimport tensorflow as tf\nimport random\nimport numpy as np\nimport pandas as pd\nfrom tensorflow.keras import layers\nimport gensim\nimport scipy.stats as st\nfrom sklearn.metrics import *\nfrom tensorflow.keras.models import Model\nimport matplotlib.pyplot as plt\nimport sklearn\nfrom tensorflow.keras.preprocessing import *\nfrom collections import defaultdict\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\n\n\n# In[5]:\n\n#https://github.com/wwbp/empathic_reactions/blob/master/modeling/main/crossvalidation/experiment.py\ndef correlation(true, pred):\n    pred = np.array(pred).flatten()\n    result = st.pearsonr(np.array(true),pred)\n    return result[0]\n\ndef getMetrics(trueLabels, predictedLabels):\n    \"\"\"Takes as input true labels and predicted labels and computes all regression metrics\"\"\"\n    MSE = 
sklearn.metrics.mean_squared_error(trueLabels, predictedLabels, squared = True)\n MAE = sklearn.metrics.mean_absolute_error(trueLabels, predictedLabels)\n MAPE = sklearn.metrics.mean_absolute_percentage_error(trueLabels, predictedLabels)\n RMSE = sklearn.metrics.mean_squared_error(trueLabels, predictedLabels, squared = False)\n PearsonR = correlation(true = trueLabels,\n pred = predictedLabels)\n \n return MSE, MAE, MAPE, RMSE, PearsonR\n\n# In[18]:\n\n\ndef splitRowIntoWords(row, length):\n \"\"\"Takes a variable length text input and convert it into a list of words with length equal to 'length' in the function parameter\"\"\"\n words = tf.keras.preprocessing.text.text_to_word_sequence(row, filters=' !#$%&()*+,-./:;<=>?@[\\\\]^_{|}~\\t\\n\"\\'', lower=True, split=\" \")\n \n # If length is less than required length, add zeros\n while len(words) < length:\n words.append(0)\n \n # If greater, remove stuff at the end\n if len(words) >= length:\n words = words[:length]\n \n return words\n\n\n# In[63]:\n\n\ndef buildAndTrainModel(model, learningRate, batchSize, epochs, trainingData, validationData, testingData, trainingLabels, validationLabels, testingLabels, MODEL_NAME, isPrintModel=True):\n \"\"\"Take the model and model parameters, build and train the model\"\"\"\n \n # Build and compile model\n # To use other optimizers, refer to: https://keras.io/optimizers/\n # Please do not change the loss function\n \n optimizer = tf.keras.optimizers.Adam(lr=learningRate)\n model.compile(optimizer=optimizer,\n loss=tf.keras.losses.MeanSquaredError())\n \n if isPrintModel:\n print(model.summary())\n \n\n \n for epoch in range(0, epochs):\n model.fit(trainingData, trainingLabels,\n epochs=1,\n verbose=0,\n batch_size=batchSize,\n shuffle=False)\n \n # Evaluate model\n trainLoss = model.evaluate(trainingData, trainingLabels, verbose=False)\n valLoss = model.evaluate(validationData, validationLabels, verbose=False)\n #model.save('Results/StructuredBinary/{}/epoch_{}'.format(filename,epoch))\n \n ## get metrics\n predictions = model.predict(testingData)\n MSE, MAE, MAPE, RMSE, PR = getMetrics(testingLabels,predictions)\n \n MeanSquaredError.append(MSE)\n RootMeanSquaredError.append(RMSE)\n MeanAbsoluteError.append(MAE)\n MeanAbsolutePercentageError.append(MAPE)\n PearsonR.append(PR)\n ValMSE.append(valLoss)\n Epoch.append(epoch)\n \n if valLoss <= min(ValMSE):\n max_predictions = predictions\n \n \n \n return MeanSquaredError, RootMeanSquaredError, MeanAbsoluteError, MeanAbsolutePercentageError, ValMSE, PearsonR, Epoch, max_predictions\n\n\ndef createWordCNN(trainFeatures, validationFeatures, testFeatures, numConvLayers, vocabularyWords, embeddingsDimensionality, numFilters, kernel, isPreTrainedEmbeddings): \n \"\"\"Create a word cnn\"\"\"\n \n ## create basic cnn model\n wordInput = layers.Input(shape=trainFeatures.shape[1:], dtype='float32')\n \n ## word convolutional neural network\n if isPreTrainedEmbeddings == False:\n # Create embeddings using keras built in function.\n wordCNN = layers.Embedding(input_dim=vocabularyWords + 1, \n output_dim=embeddingsDimensionality, \n input_length=len(trainFeatures[0]))(wordInput)\n \n # Add CNN layers equal to numConvLayers\n for i in range(numConvLayers):\n wordCNN = layers.Conv1D(numFilters, kernel, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.001))(wordCNN)\n wordCNN = layers.Dropout(0.5)(wordCNN)\n else:\n \n # Here, we are using pre-trained embeddings. 
Therefore, we don't need the layers.Embedding lookup here.\n        wordCNN = layers.Conv1D(numFilters, kernel, activation='relu', input_shape=trainFeatures.shape[1:], kernel_regularizer=tf.keras.regularizers.l2(0.001))(wordInput)\n        wordCNN = layers.Dropout(0.5)(wordCNN)\n        for i in range(numConvLayers - 1):\n            wordCNN = layers.Conv1D(numFilters, kernel, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.001))(wordCNN)\n            wordCNN = layers.Dropout(0.5)(wordCNN)\n    \n    # GlobalMaxPooling is a good function to use for pooling operations, let's keep it like this\n    wordCNN = layers.GlobalMaxPooling1D()(wordCNN)\n    wordCNN = layers.Dropout(0.5)(wordCNN)\n    \n    # You can change the number of nodes in the dense layer. Right now, it's set to 64.\n    denseLayer = layers.Dense(64)(wordCNN)\n    \n    return denseLayer, wordInput \n\ndef concatenateModels(modelDenseLayers):\n    \"\"\"Get a list of dense layers and concatenate them together\"\"\"\n    concatenatedModel = layers.concatenate(modelDenseLayers)\n    concatenatedModel = layers.Dense(64, activation = 'relu')(concatenatedModel)\n    # You can add more layers here after the concatenation of models, e.g. single/multiple dense layers after the concatenation layer to give the model more power\n    return concatenatedModel  \n\n\n# In[35]:\n\n\ndef attachOutputLayerToModel(lastDenseLayer, modelInputs):\n    \"\"\"Take as input a dense layer and attach an output layer\"\"\"\n    output = layers.Dense(1, activation='sigmoid')(lastDenseLayer)  # sigmoid assumes the regression targets are scaled to [0, 1]\n    model = Model(inputs=modelInputs, outputs=output)\n    return model\n\n\n\n# In[31]:\n\n\ndef loadDataForWordsWithPreTrainedEmbeddings(trainText, validationText, testText, EMBEDDING_PATH):\n    \"\"\"This function takes as input three text files and a pre-trained word embedding file and returns arrays containing word embeddings for each word in the text. 
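Words that are missing from the embedding vocabulary are mapped to all-zero vectors.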
These arrays can be used \n    directly in a keras model without the use of a keras.layers.Embedding layer.\"\"\"\n    \n    # Load embeddings\n    #embeddingsData = pd.DataFrame(pd.read_csv(EMBEDDING_PATH, \" \",engine='python')).values.tolist()\n    #embeddingsDataDictionary = dict([(item[0], item[1:len(item)-1]) for item in embeddingsData]) # create dictionary of key=word, value=word embedding from the embedding file\n    #EMBEDDING_SIZE = int(len(embeddingsDataDictionary[random.choice(list(embeddingsDataDictionary.keys()))]))\n    \n    EMBEDDING_SIZE = 300\n    embeddingsDataDictionary = {}\n    \n    # note: a plain split() can mis-parse the few embedding entries whose tokens contain spaces (as in glove.840B)\n    with open(EMBEDDING_PATH, 'r') as f:\n        for line in f:\n            values = line.split()\n            word = values[0]\n            vector = np.asarray(values[1:], 'float32')\n            embeddingsDataDictionary[word] = vector\n    \n    ## choose a maximum sentence length\n    meanLength = np.mean([len(item.split(\" \")) for item in trainText])\n    MAX_SENTENCE_LENGTH = int(meanLength + 10) # we let a sentence run 10 words longer than the mean sentence length.\n    \n    ## convert train, validation, and test text into lists of word embeddings\n    trainTextWords = [splitRowIntoWords(row, MAX_SENTENCE_LENGTH) for row in trainText]\n    trainWordFeatures = []\n    for row in trainTextWords:\n        rowEmbeddings = [embeddingsDataDictionary[word] if word in embeddingsDataDictionary else [0]*EMBEDDING_SIZE for word in row]\n        trainWordFeatures.append(rowEmbeddings)\n\n    validationTextWords = [splitRowIntoWords(row, MAX_SENTENCE_LENGTH) for row in validationText]\n    validationWordFeatures = []\n    for row in validationTextWords:\n        rowEmbeddings = [embeddingsDataDictionary[word] if word in embeddingsDataDictionary else [0]*EMBEDDING_SIZE for word in row]\n        validationWordFeatures.append(rowEmbeddings)\n\n    testTextWords = [splitRowIntoWords(row, MAX_SENTENCE_LENGTH) for row in testText]\n    testWordFeatures = []\n    for row in testTextWords:\n        rowEmbeddings = [embeddingsDataDictionary[word] if word in embeddingsDataDictionary else [0]*EMBEDDING_SIZE for word in row]\n        testWordFeatures.append(rowEmbeddings)\n    \n    return np.array(trainWordFeatures), np.array(validationWordFeatures), np.array(testWordFeatures), None\n\n\n\n# In[32]:\nfiles = ['TrustPhys_','SubjectiveLit_','Anxiety_','Numeracy_']\n\ncv = ['1','2','3','4','5']\n\n# In[69]:\n\n\nfor filename in files:\n    \n    for i in cv:\n        \n        MeanSquaredError = []\n        MeanAbsoluteError = []\n        MeanAbsolutePercentageError = []\n        RootMeanSquaredError = []\n        PearsonR = []\n        Epoch = []\n        ValMSE = []\n        \n        string_train = 'ContinuousCV/{}/{}train.txt'.format(i, filename)\n        string_test = 'ContinuousCV/{}/{}test.txt'.format(i, filename)\n        string_val = 'ContinuousCV/{}/{}val.txt'.format(i, filename)\n        data_train = pd.read_csv(string_train, header = None, sep = '\\t',encoding='ISO-8859-1').dropna()\n        data_test = pd.read_csv(string_test, header = None, sep = '\\t',encoding='ISO-8859-1').dropna()\n        data_val = pd.read_csv(string_val, header = None, sep = '\\t',encoding='ISO-8859-1').dropna()\n        \n        binary_mapper = {-1: 0}  # leftover from the binary-label variant of this script; unused below\n        \n        xtrain = data_train[1]\n        ytrain = data_train[0]\n        \n        xtest = data_test[1]\n        ytest = data_test[0]\n        \n        xval = data_val[1]\n        yval = data_val[0]\n        \n        \n        # Create input feature arrays\n        ##################################################### You can set the embedding path to REPRESENTATION EMBEDDINGS too which you can find in \"RepresentationEmbeddings\" folder ################################\n        EMBEDDING_PATH = \"glove.840B.300d.txt\"\n        \n        VocabSize = None\n        trainFeatures, validationFeatures, testFeatures, WORDS_TO_KEEP = 
loadDataForWordsWithPreTrainedEmbeddings(xtrain,xval,xtest, EMBEDDING_PATH)\n \n # Build WordCNN model\n FILTERS_SIZE = 256\n EMBEDDINGS_DIMENSIONALITY = 300 # don't need this now\n KERNEL_SIZE1 = 1\n KERNEL_SIZE2 = 2\n KERNEL_SIZE3 = 3\n NUM_CNN_LAYERS = 1\n \n wordCNNDenseLayer1, wordCNNInput1 = createWordCNN(trainFeatures, validationFeatures, testFeatures, NUM_CNN_LAYERS, WORDS_TO_KEEP, EMBEDDINGS_DIMENSIONALITY, FILTERS_SIZE, KERNEL_SIZE1, isPreTrainedEmbeddings=True)\n \n wordCNNDenseLayer2, wordCNNInput2 = createWordCNN(trainFeatures, validationFeatures, testFeatures, NUM_CNN_LAYERS, WORDS_TO_KEEP, EMBEDDINGS_DIMENSIONALITY, FILTERS_SIZE, KERNEL_SIZE2, isPreTrainedEmbeddings=True)\n \n wordCNNDenseLayer3, wordCNNInput3 = createWordCNN(trainFeatures, validationFeatures, testFeatures, NUM_CNN_LAYERS, WORDS_TO_KEEP, EMBEDDINGS_DIMENSIONALITY, FILTERS_SIZE, KERNEL_SIZE3, isPreTrainedEmbeddings=True)\n \n concatenatedDenseLayer = concatenateModels([wordCNNDenseLayer1,wordCNNDenseLayer2,wordCNNDenseLayer3])\n \n # Attach the output layer with the model\n wordCNNModel = attachOutputLayerToModel(concatenatedDenseLayer, [wordCNNInput1,wordCNNInput2,wordCNNInput3])\n \n # Train model\n LEARNING_RATE = 0.0001\n BATCH_SIZE = 32\n EPOCHS = 50\n MeanSquaredError, RootMeanSquaredError, MeanAbsoluteError, MeanAbsolutePercentageError, ValMSE, PearsonR, Epochs, pred = buildAndTrainModel(wordCNNModel, LEARNING_RATE, BATCH_SIZE, EPOCHS,[trainFeatures,trainFeatures,trainFeatures], [validationFeatures,validationFeatures,validationFeatures], [testFeatures,testFeatures,testFeatures], ytrain, yval, ytest, \"WordCNNWithoutPretrainedEmbeddings\")\n \n \n results = {\n 'Epochs': Epochs,\n 'Mean_Squared_Error': MeanSquaredError,\n 'Root_Mean_Squared_Error': RootMeanSquaredError,\n 'Mean_Absolute_Error': MeanAbsoluteError,\n 'Mean_Absolute_Percentage_Error': MeanAbsolutePercentageError,\n 'PearsonR': PearsonR,\n 'Val_Mean_Squared_Error': ValMSE\n }\n \n predictions_dictionary = {\n 'sentence': np.array(xtest).flatten(),\n 'pred': np.array(pred).flatten()\n }\n \n # results_df = pd.DataFrame.from_dict(results)\n # results_string = 'Results/WordCNN/ConResults/{}_{}results.csv'.format(i, filename)\n # results_df.to_csv(results_string, index = False)\n \n # predictions_df = pd.DataFrame.from_dict(predictions_dictionary)\n # predictions_df.to_csv('Results/WordCNN/ConPredictions/{}_{}_Conpredictions.csv'.format(i, filename), index=False)\n ","sub_path":"Code/ContinuousCNN.py","file_name":"ContinuousCNN.py","file_ext":"py","file_size_in_byte":13355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"618539753","text":"import os, sys, time, json, subprocess, random, re, logging, traceback, yaml\r\n\r\nfrom models.campaign import Campaign\r\n#You have to draw the line somewhere.\r\nfrom termcolor import colored as coloured\r\n\r\ndef updateCampaign(Session,campName):\r\n\r\n try:\r\n campaign = Session.query(Campaign).filter(Campaign.name.like(campName)).first()\r\n if (campaign is None):\r\n print(coloured(\"No campaign of name \"+campName+\" found. 
Currently defined campaigns are: \\n\",\"red\"))\r\n for c in Session.query(Campaign.name).all():\r\n print(c[0])\r\n sys.exit(1)\r\n except Exception as e:\r\n logging.error(traceback.format_exc())\r\n Session.rollback()\r\n sys.exit(1)\r\n\r\n return campaign.updateJobs(Session)","sub_path":"source/campaignUpdater.py","file_name":"campaignUpdater.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"549642506","text":"# dictionary tut\ndict = {\"key\" : \"val\", \"jey2\":\"val\"}\ndict[\"key\"] # prints out \"val\" associated with key\n\n#dictionaries are non sequential so printing a dict would give different results every time it is done\n\n#dictionary problem\n\nques = \"Enter Customer ? \"\nques2 = \"Customer Name: \"\ndict_list = []\nwhile (True):\n ans = input(ques)\n if ans == 'y':\n name = input(ques2)\n name = name.strip() # strip() returns a new string, so the result must be kept\n nameList = name.split()\n newDict = {\"fname\" : nameList[0], \"lname\" : nameList[1]}\n dict_list.append(newDict)\n else:\n break\nfor dict in dict_list:\n print(dict[\"fname\"] + \" \" + dict[\"lname\"])\n\n# Done\n\n\n","sub_path":"src/recursion.py","file_name":"recursion.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"198212452","text":"import numpy as np\n\nclass asianOptionBinomialTree:\n\n    def __init__(self, num_steps, volatility, time_period, oneOverRho, interest_rate, reop_rate = 0.01):\n self.num_steps = num_steps\n self.volatility = volatility\n self.time_period = time_period\n self.oneOverRho = oneOverRho\n self.interest = np.array(interest_rate) - reop_rate\n self.discount_factor = np.exp(-1 * self.interest * self.time_period)\n self.half_len_grid = self.num_steps * self.oneOverRho\n\n self.averagePriceTree = np.zeros(2 * self.num_steps * oneOverRho + 1)\n self.assetPriceTree = np.zeros((self.num_steps + 1, self.num_steps + 1))\n self.optionPriceTree = np.zeros((self.num_steps + 1, 2 * self.num_steps * self.oneOverRho + 1))\n\n def forwardInduction(self, is_call):\n self.up_factor = np.exp(self.volatility * np.sqrt(self.time_period))\n\n for i in range(self.num_steps + 1):\n lower_bound = -i\n for j in range(i + 1):\n self.assetPriceTree[i, j] = self.init_price * (self.up_factor ** lower_bound)\n lower_bound += 2\n \n for j in range(2 * self.num_steps * self.oneOverRho + 1):\n jump = j - self.half_len_grid\n self.averagePriceTree[j] = self.init_price * (self.up_factor ** (jump / self.oneOverRho))\n for s in range(self.num_steps + 1):\n for k in range(2 * self.num_steps * self.oneOverRho + 1):\n if is_call:\n self.optionPriceTree[s, k] = max(self.averagePriceTree[k] - self.strike, 0)\n self.std_payoff = np.std(self.averagePriceTree - self.strike) # a stray double minus here previously turned this subtraction into an addition\n else:\n self.optionPriceTree[s, k] = max(self.strike - self.averagePriceTree[k], 0)\n self.std_payoff = np.std(self.strike - self.averagePriceTree)\n\n def grid(self, n, k, j, plus): \n numerator = np.zeros((len(j), len(k)))\n denominator = self.volatility * np.sqrt(self.time_period) / self.oneOverRho\n for jj in j:\n numerator[jj] = (n + 1) * self.up_factor ** (k / self.oneOverRho) + self.up_factor ** (jj + plus)\n numerator[jj] = np.log(numerator[jj] / (n + 2))\n\n return numerator / denominator\n\n def backwardInduction(self):\n delta_y = self.volatility * np.sqrt(self.time_period)/ self.oneOverRho\n proba_up = (1 / self.discount_factor - 1 / self.up_factor) / (self.up_factor - 1 / self.up_factor)\n# 
print(np.round(self.optionPriceTree))\n for n in reversed(range(self.num_steps)):\n k_idx = np.array([k for k in range(- n * self.oneOverRho, n * self.oneOverRho + 1)])\n j_idx = np.array([j for j in range(n + 1)])\n k_up = self.grid(n, k_idx, j_idx, 1)\n k_down = self.grid(n, k_idx, j_idx, -1)\n \n j_idx_ext = np.repeat(j_idx[:, np.newaxis], len(k_idx), axis=1)\n\n k_up_floor = np.maximum(np.floor(k_up + self.half_len_grid).astype(int), 0)\n k_up_ceil = np.minimum(k_up_floor + 1, self.half_len_grid * 2)\n\n # average_price_up = ((n + 1) * self.averagePriceTree[n, k] + self.assetPriceTree[n + 1, i + 1]) / (n + 2)\n average_price_up = self.init_price * self.up_factor ** (k_up / self.oneOverRho)\n factor_interpolation_up = np.log(average_price_up / self.averagePriceTree[k_up_floor]) / delta_y\n\n option_price_up = factor_interpolation_up[0:] * self.optionPriceTree[j_idx_ext[0:], k_up_ceil[0:]] + \\\n (1 - factor_interpolation_up[0:]) * self.optionPriceTree[j_idx_ext[0:], k_up_floor[0:]]\n \n k_down_floor = np.maximum(np.floor(k_down + self.half_len_grid).astype(int), 0)\n k_down_ceil = np.minimum(k_down_floor + 1, self.half_len_grid * 2)\n\n # average_price_down = ((n + 1) * self.averagePriceTree[n, k] + self.assetPriceTree[n + 1, i - 1]) / (n + 2)\n average_price_down = self.init_price * self.up_factor ** (k_down / self.oneOverRho)\n factor_interpolation_down = np.log(average_price_down / self.averagePriceTree[k_down_floor]) / delta_y\n\n # assert self.averagePriceTree[n + 1, k_down_floor] != 0\n\n option_price_down = factor_interpolation_down[:(n + 1)] * self.optionPriceTree[j_idx_ext[:(n + 1)], k_down_ceil[:(n + 1)]] + \\\n (1 - factor_interpolation_down[:(n + 1)]) * self.optionPriceTree[j_idx_ext[:(n + 1)], k_down_floor[:(n + 1)]]\n\n self.optionPriceTree[j_idx_ext[:(n + 1)], k_idx + self.half_len_grid] = proba_up[n] * option_price_up + (1 - proba_up[n]) * option_price_down\n self.optionPriceTree[j_idx_ext[:(n + 1)], k_idx + self.half_len_grid] *= self.discount_factor[n]\n# print(np.round(self.optionPriceTree[j_idx_ext[:(n + 1)], k_idx + self.half_len_grid]))\n \n\n def getOptionPrice(self, init_price, strike, is_call=True):\n self.init_price = init_price\n self.strike = strike\n self.is_call =is_call\n self.forwardInduction(self.is_call)\n self.backwardInduction()\n return self.optionPriceTree[0, self.half_len_grid], self.std_payoff\n\n\n\n","sub_path":"Codes/binomialTreePricer.py","file_name":"binomialTreePricer.py","file_ext":"py","file_size_in_byte":5178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"491085429","text":"import unittest\nimport os\nimport tempfile\nimport shutil\nimport sqlalchemy\n\nimport pyrob.common\n\n\nclass TestCommon(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n pyrob.common.DB.test_db_init()\n\n def test_create_ipact_stg_schema_tables(self):\n \"\"\"Create the target schema tables.\n \"\"\"\n # Given a set of schema tables to build\n pyrob.common.create_schema_tables()\n\n # when I source the database tables\n inspector = sqlalchemy.inspect(pyrob.common.DB.engine)\n received = inspector.get_table_names(schema='IPACT_STG')\n\n # then I should receive a list of tables\n msg = 'List of schema tables not provided'\n self.assertTrue(list(received), msg)\n\n # Clean up.\n pyrob.common.drop_schema_tables()\n\n def test_create_ipact_stg_schema_tables_targeted_namespace(self):\n \"\"\"Create the target schema tables: targeted_namespace.\n \"\"\"\n # Given a set of schema tables to build\n 
schema_pkg = 'pyrob.schema.ipact_stg.elms'\n pyrob.common.create_schema_tables(schema_pkg)\n\n # when I source the database tables\n inspector = sqlalchemy.inspect(pyrob.common.DB.engine)\n received = inspector.get_table_names(schema='IPACT_STG')\n\n # then I should receive a list of tables\n expected = [\n 'T_ROB_ELMS_ENH_ADDRESS',\n 'T_ROB_ELMS_ENH_LOCATION'\n ]\n msg = 'List of schema tables not provided'\n self.assertListEqual(sorted(received), sorted(expected), msg)\n\n # Clean up.\n pyrob.common.drop_schema_tables(schema_pkg)\n\n def test_create_ipact_sys_schema_tables(self):\n \"\"\"Create the target schema tables.\n \"\"\"\n # Given a set of schema tables to build\n pyrob.common.create_schema_tables()\n\n # when I source the database tables\n inspector = sqlalchemy.inspect(pyrob.common.DB.engine)\n received = inspector.get_table_names(schema='IPACT_SYS')\n\n # then I should receive a list of tables\n expected = [\n 'T_DL_GROUP',\n 'T_DL_OBJECT_STATUS',\n 'T_DL_OBJECT_STATUS_LOG',\n 'T_DL_PROCESS_LOG',\n 'T_DL_PROCESS_STAGE',\n 'T_DL_PROCESS_STATUS',\n 'T_IPC_OBJECT',\n ]\n msg = 'List of schema tables not provided'\n self.assertListEqual(sorted(received), sorted(expected), msg)\n\n # Clean up.\n pyrob.common.drop_schema_tables()\n\n def test_create_source_schema_tables(self):\n \"\"\"Create the source schema tables.\n \"\"\"\n # Given a set of schema tables to build\n pyrob.common.create_source_schema_tables()\n\n # when I source the database tables\n inspector = sqlalchemy.inspect(pyrob.common.DB.engine_s)\n received = inspector.get_table_names(schema='STARGUS')\n\n # then I should receive a list of tables\n expected = [\n 'CM_ATTR',\n 'CM_NONRESPONDER_7DAY',\n 'CMTS_DOWNSTREAM_CONFIG',\n 'CMTS_UPSTREAM_CONFIG',\n 'CMTS_UPSTREAM_CONFIG_IUC',\n 'CM_HOUR_STATS',\n 'CM_HOUR_STATS_UP_EQ',\n 'CM_HOUR_UPSTREAM_STATS',\n 'CM_HOUR_DOWNSTREAM_STATS',\n 'CMTS_HOUR_DOWNSTREAM_STATS',\n 'CMTS_NSI_STATISTICS',\n 'CMTS_HOUR_CPU_STATS',\n 'CMTS_HOUR_UPSTREAM_STATS',\n 'DOWNSTREAM_SG_CHANNEL_STATS',\n 'DOWNSTREAM_SG_CMSG_STATS',\n 'DOWNSTREAM_SG_STATS',\n 'UPSTREAM_SG_CHANNEL_STATS',\n 'UPSTREAM_SG_CMSG_STATS',\n 'UPSTREAM_SG_STATS',\n 'DOWNSTREAM_INTERFACE_FN_STATS',\n 'UPSTREAM_INTERFACE_FN_STATS',\n ]\n msg = 'List of source schema tables not provided'\n self.assertListEqual(sorted(received), sorted(expected), msg)\n\n # Clean up.\n pyrob.common.drop_source_schema_tables()\n\n def test_create_elms_source_schema_tables(self):\n \"\"\"Create the ELMS source schema tables.\n \"\"\"\n # Given a set of schema tables to build\n pyrob.common.create_source_schema_tables()\n\n # when I source the database tables\n inspector = sqlalchemy.inspect(pyrob.common.DB.engine_s)\n received = inspector.get_table_names(schema='ELMS_UDS')\n\n # then I should receive a list of tables\n expected = [\n 'ENHANCED_ADDRESS_DATA',\n 'ENHANCED_LOCATION_DATA',\n ]\n msg = 'List of ELMS source schema tables not provided'\n self.assertListEqual(sorted(received), sorted(expected), msg)\n\n # Clean up.\n pyrob.common.drop_source_schema_tables()\n\n def test_get_schema_columns(self):\n \"\"\"Get list of columns associated with schema.\n \"\"\"\n # Given a schema name\n schema_package = 'pyrob.schema.dslam'\n\n # when I source all column tables associated with the schema\n received = pyrob.common.get_schema_columns(schema_package)\n\n # then I should receive a list of columns\n msg = 'Valid schema column search did not return results'\n self.assertTrue(len(received), msg)\n\n def test_gen_digest_none_token(self):\n 
\"\"\"Generate digest: None token.\n \"\"\"\n # Given an invalid digest token\n token = None\n\n # when I try to generate a digest\n received = pyrob.common.gen_digest(token)\n\n # then I should receive None\n msg = 'Digest generation error: None value'\n self.assertIsNone(received, msg)\n\n def test_gen_digest_numeric_token(self):\n \"\"\"Generate digest: numeric token.\n \"\"\"\n # Given a numeric digest token\n token = 1234\n\n # when I try to generate a digest\n received = pyrob.common.gen_digest(token)\n\n # then I should receive a valid digest\n msg = 'Digest generation error: non-string value'\n expected = '81dc9bdb'\n self.assertEqual(expected, received, msg)\n\n def test_gen_digest(self):\n \"\"\"Generate digest: valid values.\n \"\"\"\n # Given a valid digest token\n token = 'ossrc_enb_pm_essp.json'\n\n # when I try to generate a digest\n received = pyrob.common.gen_digest(token)\n\n # then I should receive a valid digest\n expected = '97a8036b'\n msg = 'Digest generation error: valid value'\n self.assertEqual(received, expected, msg)\n\n def test_create_digest_dir(self):\n \"\"\"Create a digest-based directory.\n \"\"\"\n # Given a valid digest token\n token = 'ossrc_enb_pm_essp.json'\n\n # when I try to generate a digest\n received = pyrob.common.gen_digest_path(token)\n\n # then I should receive a valid digest\n expected = ['97', '97a8', '97a803', '97a8036b']\n msg = 'Digest directory path list error'\n self.assertListEqual(received, expected, msg)\n\n def test_create_digest_dir_depth_two(self):\n \"\"\"Create a digest-based directory: depth two.\n \"\"\"\n # Given a valid digest token\n token = 'ossrc_enb_pm_essp.json'\n\n # when I try to generate a digest with directory depth 2\n received = pyrob.common.gen_digest_path(token, dir_depth=2)\n\n # then I should receive a valid digest\n expected = ['97', '97a8']\n msg = 'Digest directory path list error'\n self.assertListEqual(received, expected, msg)\n\n def test_create_digest_dir_depth_three(self):\n \"\"\"Create a digest-based directory: depth three.\n \"\"\"\n # Given a valid digest token\n token = 'ossrc_enb_pm_essp.json'\n\n # when I try to generate a digest with directory depth 3\n received = pyrob.common.gen_digest_path(token, dir_depth=3)\n\n # then I should receive a valid digest\n expected = ['97', '97a8', '97a803']\n msg = 'Digest directory path list error'\n self.assertListEqual(received, expected, msg)\n\n def test_create_tarball(self):\n \"\"\"Create tarball of files.\n \"\"\"\n # Given a source eNodeB source directory location\n test_dir = os.path.join('pyrob',\n 'loads',\n 'fixed_wireless',\n 'tests',\n 'files',\n 'source',\n 'eNodeB')\n source_dir_obj = tempfile.TemporaryDirectory()\n source_dir = os.path.join(source_dir_obj.name, 'test')\n shutil.copytree(test_dir, source_dir)\n\n # and a file filter\n filter = '*_SubNetwork*SubNetwork*eNodeB*_statsfile.xml'\n\n # and a target directory\n target_dir_obj = tempfile.TemporaryDirectory()\n target_dir = target_dir_obj.name\n\n # when I try to tarball the file contents\n received = pyrob.common.create_tarball(source_dir,\n filter,\n target_dir)\n\n # Then the archive and delete count should match\n expected = (2, 2)\n msg = 'Archive/delete file count mismatch'\n self.assertTupleEqual(received, expected, msg)\n\n @classmethod\n def tearDownClass(cls):\n 
pyrob.common.DB.db_close()\n","sub_path":"pyrob/tests/test_common.py","file_name":"test_common.py","file_ext":"py","file_size_in_byte":9255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"579761508","text":"from timeit import default_timer as timer\nimport numpy as np\nfrom all.experiments.writer import ExperimentWriter\nfrom all.experiments.experiment import Experiment\n\nclass LunarLanderExperiment(Experiment):\n    '''An Experiment object for training and testing agents that interact with one environment at a time.'''\n def __init__(\n self,\n agent,\n env,\n logdir='runs',\n quiet=False,\n render=False,\n write_loss=True\n ):\n super().__init__(self._make_writer(logdir, agent.__name__, env.name, write_loss), quiet)\n self._agent = agent(env, self._writer)\n self._env = env\n self._render = render\n self._frame = 1\n self._episode = 1\n\n if render:\n self._env.render(mode=\"human\")\n\n @property\n def frame(self):\n return self._frame\n\n @property\n def episode(self):\n return self._episode\n\n def train(self, frames=np.inf, episodes=np.inf):\n episode_rewards = []\n episode_outcomes = []\n episode_times = []\n\n step = 0\n while not self._done(frames, episodes):\n rewards, outcomes, times = self._run_training_episode()\n episode_rewards.append(rewards)\n episode_outcomes.append(outcomes)\n episode_times.append(times)\n step += 1\n if step % 100 == 0:\n self._log_100_performance(episode_rewards, episode_outcomes, episode_times)\n\n def _log_100_performance(self, episode_rewards, episode_outcomes, episode_times):\n mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)\n mean_100ep_succ = round(np.mean([1 if x==100 else 0 for x in episode_outcomes[-101:-1]]), 1)\n mean_100ep_crash = round(np.mean([1 if x==-100 else 0 for x in episode_outcomes[-101:-1]]), 1)\n sum_100ep_time = int(np.sum(episode_times[-101:-1]))\n num_episodes = len(episode_rewards)\n \n print(\"----------------------------------------------------------\")\n print(\"episodes\", num_episodes)\n print(\"mean 100 episode reward\", mean_100ep_reward)\n print(\"mean 100 episode succ\", mean_100ep_succ)\n print(\"mean 100 episode crash\", mean_100ep_crash)\n print(\"total 100 episode time (s)\", sum_100ep_time)\n print(\"----------------------------------------------------------\")\n\n # def test(self, episodes=100):\n # episode_rewards = []\n # episode_outcomes = []\n # episode_times = []\n # for episode in range(episodes):\n # rewards, outcomes, times = self._run_test_episode()\n # episode_rewards.append(rewards)\n # episode_outcomes.append(outcomes)\n # episode_times.append(times)\n # self._log_test_episode(episode, rewards)\n # self._log_test(episode_rewards)\n # self._log_100_performance(episode_rewards, episode_outcomes, episode_times)\n # return episode_rewards\n\n def test(self, episodes=100, policy = None):\n episode_rewards = []\n episode_outcomes = []\n episode_times = []\n for episode in range(episodes):\n rewards, outcomes, times = self._run_test_episode(policy)\n episode_rewards.append(rewards)\n episode_outcomes.append(outcomes)\n episode_times.append(times)\n self._log_test_episode(episode, rewards)\n self._log_test(episode_rewards)\n self._log_100_performance(episode_rewards, episode_outcomes, episode_times)\n return episode_rewards\n\n def _run_training_episode(self):\n # initialize timer\n start_time = timer()\n start_frame = self._frame\n\n # initialize the episode\n self._env.reset()\n state = self._env.state\n action = self._agent.act(state)\n 
returns = 0\n\n # loop until the episode is finished\n while not state['done']:\n if self._render:\n self._env.render()\n state = self._env.step(action)\n action = self._agent.act(state)\n returns += state['reward']\n self._frame += 1\n\n # stop the timer\n end_time = timer()\n fps = (self._frame - start_frame) / (end_time - start_time)\n\n # log the results\n self._log_training_episode(returns, fps)\n\n # update experiment state\n self._episode += 1\n\n return returns, state['reward'], end_time - start_time\n\n # def _run_test_episode(self, policy = self._agent.eval):\n # # initialize timer\n # start_time = timer()\n\n # # initialize the episode\n # self._env.reset()\n # state = self._env.state\n # action = self._agent.eval(state)\n # returns = 0\n\n # # loop until the episode is finished\n # while not state['done']:\n # if self._render:\n # self._env.render()\n # state = self._env.step(action)\n # action = self._agent.eval(state)\n # returns += state['reward']\n\n # # stop the timer\n # end_time = timer()\n\n # return returns, state['reward'], end_time - start_time\n\n\n def _run_test_episode(self, policy = None):\n if not policy:\n # use the default policy\n policy = self._agent.eval\n\n # initialize timer\n start_time = timer()\n\n # initialize the episode\n self._env.reset()\n state = self._env.state\n action = policy(state)\n returns = 0\n\n # loop until the episode is finished\n while not state['done']:\n if self._render:\n self._env.render()\n state = self._env.step(action)\n action = policy(state)\n returns += state['reward']\n\n # stop the timer\n end_time = timer()\n\n return returns, state['reward'], end_time - start_time\n\n def _done(self, frames, episodes):\n return self._frame > frames or self._episode > episodes\n\n def _make_writer(self, logdir, agent_name, env_name, write_loss):\n return ExperimentWriter(self, agent_name, env_name, loss=write_loss, logdir=logdir)\n\n def show(self, policy = None):\n render = self._render\n self._render = True\n self._run_test_episode(policy)\n self._env.close()\n self._render = render\n","sub_path":"src/utils/lunar_lander_experiment.py","file_name":"lunar_lander_experiment.py","file_ext":"py","file_size_in_byte":6326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"504072365","text":"# create template repo for github classroom from\n# nbgrader directory\n\nimport os\nimport fnmatch\nimport argparse\n\ndef get_notebooks(nbgrader_dir, assignment):\n # get the list of notebooks for this assignment\n # assumes assignments have been released (i.e. 
are in release dir)\n print(\"Getting notebooks\")\n release_dir = nbgrader_dir + '/release/' + assignment\n notebooks = []\n for file in os.listdir(release_dir):\n if fnmatch.fnmatch(file, '*.ipynb'):\n print(file)\n notebooks.append(file)\n print(\"Found {} notebooks\".format(len(notebooks)))\n return notebooks\n\ndef create_readme():\n # create a stub of a readme file for the template repo\n print(\"Creating readme\")\n\ndef init_template(repo_name):\n # create a new directory for this assignment and initialize as git repo\n try:\n os.mkdir(repo_name)\n \n print(\"Initializing git repo\")\n except FileExistsError as fee:\n print(\"directory {} already exists\".format(repo_name))\n\ndef push_to_github(template_dir):\n # push the repo to the github classroom\n print(\"pushing to github repo\")\n\nif __name__ == '__main__':\n # argument parsing\n parser = argparse.ArgumentParser()\n parser.add_argument('nbgrader_dir', help='Top level nbgrader directory')\n parser.add_argument('assignment', help='Assignment name, e.g., \"2019-01-31-stability\" or \"hw1-rootfinding\"')\n parser.add_argument('--org_name', help='name of GitHub organization')\n parser.add_argument('--repo_name', help='desired name of github repo')\n args = parser.parse_args()\n\n notebooks = get_notebooks(args.nbgrader_dir, args.assignment)\n init_template(args.repo_name)\n","sub_path":"scripts/make-template-repo.py","file_name":"make-template-repo.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"213933654","text":"import random\nfrom agent import Agent\nfrom config import Direction\nfrom util import cyclic, Counter\nimport util\n\n\nclass QLearningAgent(Agent):\n\n def __init__(self, alpha, gamma, random_rate):\n self.alpha = alpha # learning rate\n self.gamma = gamma # discount factor\n self.random_rate = random_rate\n # self.actions_space = [Direction.LEFT, Direction.RIGHT, Direction.UP, Direction.DOWN]\n self.actions_space = [0, 1, 2, 3]\n\n self.values = Counter() # Q(s, a)\n\n self.current_state = 0\n self.new_state = 0\n self.fruit_relative_position_before = []\n self.fruit_relative_position_after = []\n\n self.fruit_position = 0\n self.current_fruit_position = 0\n\n self.counter = 0\n\n def update_fruit_location(self, new_location):\n self.fruit_position = new_location\n\n def reward(self, move, reward, after_hit=True):\n if after_hit:\n self.update(self.current_state, self.find_action(move), self.new_state, reward)\n\n else:\n\n # if self.fruit_relative_position_after[0] <= self.fruit_relative_position_after[0] and \\\n # self.fruit_relative_position_after[1] <= self.fruit_relative_position_before[1]:\n # self.update(self.current_state, self.find_action(move), self.new_state, 1)\n\n # if sum(self.fruit_relative_position_after) <= sum(self.fruit_relative_position_before):\n # self.update(self.current_state, self.find_action(move), self.new_state, 2)\n # else:\n self.update(self.current_state, self.find_action(move), self.new_state, -10)\n\n def update_current_state(self, board):\n\n self.current_state = self.get_current_state(board)\n self.current_fruit_position = board.fruit_location\n self.counter += 1\n\n fruit_x = board.fruit_location[0]\n fruit_y = board.fruit_location[1]\n snake_x_before = board.snake[0][0]\n snake_y_before = board.snake[0][1]\n\n fruit_relative_x_before = abs(fruit_x - snake_x_before)\n fruit_relative_y_before = abs(fruit_y - snake_y_before)\n\n self.fruit_relative_position_before = 
[fruit_relative_x_before, fruit_relative_y_before]\n\n def update_new_state(self, board):\n\n self.new_state = self.get_current_state(board)\n fruit_x = board.fruit_location[0]\n fruit_y = board.fruit_location[1]\n snake_x_before = board.snake[0][0]\n snake_y_before = board.snake[0][1]\n\n fruit_relative_x_after = abs(fruit_x - snake_x_before)\n fruit_relative_y_after = abs(fruit_y - snake_y_before)\n\n self.fruit_relative_position_after = [fruit_relative_x_after, fruit_relative_y_after]\n\n def get_current_state_(self, board):\n \"\"\"\n DIDNT USE... Check the 3 blocks around the head of the snake and relative positions of fruit and tail to head\n :param board:\n :return:\n \"\"\"\n # 3 places around head\n snake = board.snake\n snake_x = snake[0][0]\n snake_y = snake[0][1]\n fruit_position = board.fruit_location\n board_size = board.board_size\n fruit_x = fruit_position[0]\n fruit_y = fruit_position[1]\n\n direction = board.next_move\n\n left = right = up = 0\n # print(cyclic(5, board_size))\n\n if direction == Direction.LEFT:\n left = (cyclic(snake_x + 1, board_size), snake_y)\n up = (snake_x, cyclic(snake_y - 1, board_size))\n right = (cyclic(snake_x - 1, board_size), snake_y)\n\n elif direction == Direction.RIGHT:\n left = (cyclic(snake_x - 1, board_size), snake_y)\n up = (snake_x, cyclic(snake_y + 1, board_size))\n right = (cyclic(snake_x + 1, board_size), snake_y)\n\n elif direction == Direction.UP:\n left = (snake_x, cyclic(snake_y - 1, board_size))\n up = (cyclic(snake_x - 1, board_size), snake_y)\n right = (snake_x, cyclic(snake_y + 1, board_size))\n\n elif direction == Direction.DOWN:\n left = (snake_x, cyclic(snake_y + 1, board_size))\n up = (cyclic(snake_x + 1, board_size), snake_y)\n right = (snake_x, cyclic(snake_y - 1, board_size))\n\n if left in snake or left in board.obstacles:\n state_name = '1'\n elif left == fruit_position:\n state_name = '2'\n else:\n state_name = '0'\n\n if up in snake or up in board.obstacles:\n state_name += '1'\n elif up == fruit_position:\n state_name += '2'\n else:\n state_name += '0'\n\n if right in snake or right in board.obstacles:\n state_name += '1'\n elif right == fruit_position:\n state_name += '2'\n else:\n state_name += '0'\n\n state_name += str(abs(fruit_x - snake_x)) + str(abs(fruit_y - snake_y))\n\n state_name += str(abs(snake[len(snake) - 1][0] - snake_x)) + str(abs(snake[len(snake) - 1][1] - snake_y))\n\n return state_name\n\n def get_current_state(self, board):\n\n snake = board.snake\n snake_x = snake[0][0]\n snake_y = snake[0][1]\n fruit_position = board.fruit_location\n board_size = board.board_size\n\n fruit_x = fruit_position[0]\n fruit_y = fruit_position[1]\n\n left = 1 if (snake_x, cyclic(snake_y - 1, board_size)) in snake or \\\n (snake_x, cyclic(snake_y - 1, board_size)) in board.obstacles else 0\n right = 1 if (snake_x, cyclic(snake_y + 1, board_size)) in snake or \\\n (snake_x, cyclic(snake_y + 1, board_size)) in board.obstacles else 0\n up = 1 if (cyclic(snake_x - 1, board_size), snake_y) in snake or \\\n (cyclic(snake_x - 1, board_size), snake_y) in board.obstacles else 0\n down = 1 if (cyclic(snake_x + 1, board_size), snake_y) in snake or \\\n (cyclic(snake_x + 1, board_size), snake_y) in board.obstacles else 0\n\n left = 2 if (snake_x, cyclic(snake_y - 1, board_size)) == fruit_position else left\n right = 2 if (snake_x, cyclic(snake_y + 1, board_size)) == fruit_position else right\n up = 2 if (cyclic(snake_x - 1, board_size), snake_y) == fruit_position else up\n down = 2 if (cyclic(snake_x + 1, board_size), snake_y) 
== fruit_position else down\n\n state_name = str(left) + str(up) + str(right) + str(down)\n\n fruit_relative_x = fruit_x - snake_x\n fruit_relative_y = fruit_y - snake_y\n\n if fruit_relative_x < 0 and fruit_relative_y < 0:\n state_name += '10000000'\n\n elif fruit_relative_x < 0 and fruit_relative_y == 0:\n state_name += '01000000'\n\n elif fruit_relative_x < 0 and fruit_relative_y > 0:\n state_name += '00100000'\n\n elif fruit_relative_x == 0 and fruit_relative_y > 0:\n state_name += '00010000'\n\n elif fruit_relative_x > 0 and fruit_relative_y > 0:\n state_name += '00001000'\n\n elif fruit_relative_x > 0 and fruit_relative_y == 0:\n state_name += '00000100'\n\n elif fruit_relative_x > 0 and fruit_relative_y < 0:\n state_name += '00000010'\n\n elif fruit_relative_x == 0 and fruit_relative_y < 0:\n state_name += '00000001'\n\n return state_name\n\n def next_move(self, board):\n current_state = self.get_current_state(board)\n\n action = self.getAction(current_state)\n\n while True:\n if action == 0 and board.next_move != Direction.RIGHT:\n return Direction.LEFT\n elif action == 1 and board.next_move != Direction.LEFT:\n return Direction.RIGHT\n elif action == 2 and board.next_move != Direction.DOWN:\n return Direction.UP\n elif action == 3 and board.next_move != Direction.UP:\n return Direction.DOWN\n\n action = self.getAction(current_state)\n\n def find_action(self, action):\n if action == Direction.LEFT:\n return 0\n elif action == Direction.RIGHT:\n return 1\n elif action == Direction.UP:\n return 2\n else:\n return 3\n\n def getLegalActions(self, state):\n \"\"\"\n Get the actions available for a given\n state. This is what you should use to\n obtain legal actions for a state\n \"\"\"\n return self.actions_space\n\n def update(self, state, action, nextState, reward):\n \"\"\"\n The parent class calls this to observe a\n state = action => nextState and reward transition.\n You should do your Q-Value update here\n\n NOTE: You should never call this function,\n it will be called on your behalf\n \"\"\"\n self.values[(state, action)] += self.alpha * (\n reward + self.gamma * self.getValue(nextState) - self.values[(state, action)])\n\n def getQValue(self, state, action):\n \"\"\"\n Returns Q(state,action)\n Should return 0.0 if we never seen\n a state or (state,action) tuple\n \"\"\"\n return self.values[(state, action)]\n\n def getValue(self, state):\n \"\"\"\n Returns max_action Q(state,action)\n where the max is over legal actions. Note that if\n there are no legal actions, which is the case at the\n terminal state, you should return a value of 0.0.\n \"\"\"\n actions = self.getLegalActions(state)\n if not actions:\n return 0\n return max([self.getQValue(state, a) for a in actions])\n\n def getPolicy(self, state):\n \"\"\"\n Compute the best action to take in a state. Note that if there\n are no legal actions, which is the case at the terminal state,\n you should return None.\n \"\"\"\n val = self.getValue(state)\n actions = [a for a in self.getLegalActions(state) if self.getQValue(state, a) == val]\n if not actions:\n return None\n return random.choice(actions)\n\n def getAction(self, state):\n \"\"\"\n Compute the action to take in the current state. With\n probability self.epsilon, we should take a random action and\n take the best policy action otherwise. 
Note that if there are\n no legal actions, which is the case at the terminal state, you\n should choose None as the action.\n\n HINT: You might want to use util.flipCoin(prob)\n HINT: To pick randomly from a list, use random.choice(list)\n \"\"\"\n # Pick Action\n legal_actions = self.getLegalActions(state)\n action = None\n if self.counter > 1000 and self.fruit_position == self.current_fruit_position:\n # print(str(self.counter) + ' random................................................')\n action = random.choice(legal_actions)\n self.counter = 0\n\n if util.flipCoin(self.random_rate):\n if legal_actions:\n action = random.choice(legal_actions)\n else:\n action = self.getPolicy(state)\n return action\n\n def write_qtable(self, path='qtable.txt'):\n f = open(path, 'w')\n for k, v in self.values.items():\n f.write(str(k[0]) + ':' + str(k[1]) + ':' + str(v) + '\\n')\n f.close()\n\n def read_qtable(self, path='qtable.txt'):\n f = open(path, 'r')\n line = f.readline()\n while line:\n # print(line)\n line = line.strip('\\n')\n line = line.split(':')\n # print(line)\n self.values[(line[0], float(line[1]))] = float(line[2])\n line = f.readline()\n # print(self.values)\n","sub_path":"qLearning.py","file_name":"qLearning.py","file_ext":"py","file_size_in_byte":11734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"34396913","text":"\nimport matplotlib\nimport numpy as np\nmatplotlib.use(\"Qt5Agg\")\nimport matplotlib.pyplot as plt\n\nnp.random.seed(1)\n\ndef create_spiral(num_per_class=100, dims=2, plot=False):\n\n\tX = np.zeros((num_per_class * 1, dims))\n\ty = np.zeros(num_per_class * 1, dtype='uint8')\n\n\tfor j in range(1):\n\t\tix = range(num_per_class * j, num_per_class * (j+1))\n\t\tr = np.linspace(0.0,1,num_per_class)\n\t\tt = np.linspace(j * 4,(j+1) * 4,num_per_class) + np.random.randn(num_per_class)*0.2\n\t\tX[ix] = np.c_[r * np.sin(t), r * np.cos(t)]\n\t\ty[ix] = j\n\n\tif plot:\n\t\tplt.figure(figsize=(8, 8))\n\t\tplt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.Spectral)\n\t\tplt.axis(\"off\")\n\t\tplt.savefig(\"damn.png\")\n\n\treturn X, y\n\n\ndef create_sine_save(N=100, r=(-1, 1), plot=False, save_name=None):\n\n\tassert len(r) == 2\n\n\tX = np.linspace(*r, N).reshape(N, 1)\n\ty = -np.sin(X).reshape(N, 1)\n\n\tif plot or save_name != None:\n\t\tplt.figure(figsize=(8, 8))\n\t\tplt.scatter(X, y, c=y, s=40, cmap=plt.cm.Spectral)\n\n\t\tif save_name:\n\t\t\tplt.savefig(f\"{save_name}.png\")\n\t\tif plot:\n\t\t\tplt.show()\n\n\treturn X / np.max(r), y\n\n\nif __name__ == \"__main__\":\n\n\tnum_per_class = 100 # number of points per class\n\tdims = 2 # dimensionality\n\tK = 3 # number of classes\n\n\tX, y = create_spiral()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"numpyflow/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"282984764","text":"word='racecar'\n\nn=len(word)\ns=\"true\"\nfor i in range(int(len(word)/2)):\n if word[i]!=word[n-1-i]:\n s=\"false\"\n break\nif s==\"true\":\n print(f\"{word} is palindrome\")\nelse:\n print(f\"{word} is not a palindrome\")\n# list and reverse\n\nword_to_list=list(word)\nword_to_list.reverse()\nif list(word)==word_to_list:\n print(f\"{word} is a palindrome\")\nelse:\n print(f\"it is not a 
palindrome\")","sub_path":"demo_day4/task_5.py","file_name":"task_5.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"148593819","text":"import random\nnumeros=[1,2,3,4,5]\nnum_certo=random.choice(numeros)\n\nx=int(input('Adivinhe qual número de 1 a 5 o computador escolheu.\\n'))\n\nif x==num_certo:\n print ('Parabéns, você acertou!')\nelse:\n print('Não foi dessa vez')\nprint (f'O número escolhido foi {num_certo}')\n\n","sub_path":"EXERCÍCIOS_CURSO-EM-VIDEO/FUNDAMENTOS/exercícios_CONDIÇÕES_certo_ou_errado.py","file_name":"exercícios_CONDIÇÕES_certo_ou_errado.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"238478637","text":"# -*- coding=utf-8 -*-\n\n# ==============\n# Jomegle Config\n# ==============\n\n# Base name (ideally under 8 chars)\nirc_nick = 'Jomegle'\n\n# Address of your IRC network\nirc_host = 'irc.rizon.net'\n\n# Port for IRC connections\nirc_port = 6667\n\n# Autojoined channel\nirc_channel = '#Jomegle'\n\n# Delay between server connects (secs)\nirc_delay = 40\n\n# Number of concurrent connections.\n# This should be below the permitted threshold of your IRC network.\n# Protip: subtract one for every other IRC client that shares your IP address\n# to avoid getting K-line'd\nclients = 2\n\n# used to denote a command\ncommand_prefix = '/'\n\n# Subdomains linked to Omegle instances\n# (subdomain.omegle.com)\nomegle_subdomains = ['bajor', 'cardassia', 'chatserv', 'promenade']\n\n# Time in seconds before disconnecting from an unresponsive Omegle user\n# (Only affects setup stage. How you treat idlers is up to you.)\nomegle_timeout = 25\n\n# Require Omeglers to answer a trivial question prior to entering the channel\n# Gets questions from http://textcaptcha.com/\nuse_textcaptcha = False\n\n# How many attempts to grant Omeglers at entering the text captcha\nmax_textcaptcha_attempts = 2\n\n# Bypass manual nick selection\nassign_random_nick = False\n\n# Require Omeglers to confirm their nick choice\nconfirm_nick = True\n\n# How many attempts Omeglers get to choose their nick\nmax_nick_attempts = 4\n\n# Check with NickServ to prevent Omeglers from choosing a reserved nick\n# Should work on every network with a NickServ that responds to 'INFO'\nuse_nickserv = True\n\n# Message of the Day\nmotd = ('\\nType %ccommands for a list of available commands.\\n'\n 'Need help? Read the FAQ at http://bth8.com/jomegle' % command_prefix)\n\n# Show channel topic\nshow_topic = True\n\n# Use deCAPTCHA to delegate Omegle's CAPTCHAs to a web-based interface.\n# (separate component)\n# It's possible to run the script without it, but throttle the number of\n# clients down to 1 or 2 for compensation.\nuse_decaptcha = False\n\n# deCAPTCHA service url\ndecaptcha_url = 'http://HOST:PORT'\n\n# deCAPTCHA authentication\ndecaptcha_api_key = '*******************************'\n\n# Initial message (one per line)\ngreetings = \"\"\"\nA challenger appears!\nDeploying surprise in 3... 2... 1...\nHail, traveler. You have had a long and arduous Omegle journey. Allow us to offer you some rest and a hot meal...\nHello, and welcome to the Jomegle Communications Platform.\nIt is pitch black. You are likely to be eaten by a grue.\nKnock, knock!\nWelcome to Jomegle!\nWelcome to the social.\nYo dawg, I heard you like chatting, so we put a chat in your chat so you can talk while you talk.\nYou are in a cave. 
There are crystals everywhere.\nYou've reached Omegle 2.0. A service of Web 3.0.\nYou've stumbled into a nerd trap.\n\"\"\"\n\nrot13 = lambda s: s.decode('rot-13')\n\n# Wordfilters. Regular expressions are accepted.\nwordfilters = {\n rot13(r'shpx'): 'gently caress',\n rot13(r'fuvg'): 'poo poo',\n rot13(r'phag'): 'vajayjay',\n rot13(r'pbpx'): 'bu-gawk',\n rot13(r'qvpx'): 'straw',\n rot13(r'ovgpu'): 'bish',\n rot13(r'avttre'): 'chocolate man',\n rot13(r'avttref'): 'chocolate men',\n rot13(r'fhpx zl (qvpx|pbpx)'): 'join me in a riveting game of chess',\n r'\\b' + rot13(r'(shpxre|nffubyr|onfgneq|fba bs n ovgpu)'): 'swell guy',\n r'\\b' + rot13(r'snt(tbg)?'): 'pony princess',\n r'gay': 'lively',\n r'\\basl\\b': 'basil',\n r'\\ba[\\\\/,.]s[\\\\/,.]l\\b': 'ants/seals/lions',\n r'\\bcyber\\b': 'join me in a riveting game of checkers',\n}\n\nuse_wordfilters = True\n\ndel rot13\n","sub_path":"jomegle/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"599250162","text":"from collections import defaultdict\n\nli = []\ntry:\n with open('input.txt', 'r') as file:\n for line in file:\n li.append(line.rstrip())\nexcept:\n print('something went wrong with file opening :(')\n\nn, m = map(int, li[0].split())\n\ngroupA = li[1 : n+1]\n#print(len(groupA),groupA[-1])\ngroupB = li[n+1 :]\n#print(len(groupB),groupB[0])\n\nres = defaultdict(list)\nfor i in range(len(groupB)):\n if groupB[i] not in groupA:\n res[i].append(str(-1))\n else:\n for j in range(len(groupA)):\n if groupB[i] == groupA[j]:\n res[i].append(str(j + 1))\n\n\nwith open('output.txt', 'w') as file:\n for v in res.values():\n file.write(' '.join(v) + '\\n')\n\n","sub_path":"hacker rank default dict/defaultdict.py","file_name":"defaultdict.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"616908554","text":"ret = [(x,y) for x in (1,3,5)\n for y in (4,3,5)\n if x!=y]\nprint (ret)\n\nmatrix = [(1, 2, 3, 5),\n (2, 4, 8, 10),\n (4, 6, 8, 15)]\ntransposition = [[row[i] for row in matrix]\n for i in range(len(matrix[0]))]\nprint(transposition)","sub_path":"project 1/shad/innerExpressions.py","file_name":"innerExpressions.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"157303734","text":"def index_pair(mot):\n ch = ''\n for i in mot:\n if mot.index(i) % 2 == 0:\n ch = ch + i\n print(ch)\n\n\nmot = input(\"saisir um mot : \")\nindex_pair(mot)\n\n#Autre methode\nmot = input(\"saisir um autre mot : \")\nprint(mot[0: len(mot): 2])\n\n\n\ns = \"Python est un langage de programmation. 
Python est orienté objet\"\n\n# Transformation de la chaine s en une liste\nL = s.split()\n\n# Récupération du nombre d'élément de la liste L\nnombreMots = len(L)\nprint(\"Le nombre de mot de la chaine s est : \", nombreMots)","sub_path":"afficher.caractères.indice.pair.py","file_name":"afficher.caractères.indice.pair.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"493317125","text":"# -*- coding: utf-8 -*-\r\n# 用来格式化12306存储的火车站信息,包括火车站名、火车站代号等信息\r\n# 火车站信息来源于https://kyfw.12306.cn/otn/resources/js/framework/station_name.js\r\n# 此js文件存储于外部文件station.txt中\r\n# 格式化后格式如:bjb 北京北 VAP beijingbei bjb 0\r\nimport re\r\n\r\nf = open('data\\stations.txt', 'r').read()\r\noutput = open('data\\stations.tsv', 'w+');\r\nr = re.findall('@([a-z]*)\\|(.*?)\\|([A-Z]*)\\|([a-z]*)\\|([a-z]*)\\|([0-9]*)', f)\r\n#for s in r:\r\n # for b in s:\r\n # print b,\r\n# output.write(b),\r\n# output.write('\\t')\r\n# output.write('\\n')\r\n#output.close()\r\n\r\n\r\noutputCode = open('data\\stationsCode.tsv', 'w+');\r\nfor s in r:\r\n outputCode.write(s[-1])\r\n outputCode.write('\\n')\r\n","sub_path":"python/formatStation.py","file_name":"formatStation.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"530314609","text":"from gatemate import db, exceptions\nfrom gatemate import constants as const\nimport re\n\n\nclass Trip(db.Model):\n __tablename__ = 'trips'\n\n detail = {\n 'uri': '/trips/',\n 'accepts': {'GET', 'PUT'}\n }\n\n collection = {\n 'uri': '/trips/',\n 'accepts': {'GET', 'POST'}\n }\n\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n name = db.Column(db.String(const.MAX_LENGTH_OTHER), nullable=True)\n id_user = db.Column(db.Integer, db.ForeignKey('users.id'))\n\n user = db.relationship('User', backref=db.backref('trips', lazy='dynamic'))\n\n def __init__(self, user):\n self.user = user\n\n def get_uri(self):\n split = re.split('<(\\w+):(\\w+)>', self.detail['uri'])\n endpoint = split[0]\n type = split[1]\n attribute = split[2]\n\n return const.BASE_ADDRESS + endpoint + str(self.__getattribute__(attribute))\n\n def from_dict(self, data):\n try:\n self.name = data['name']\n except Exception:\n raise exceptions.MalformedDataException('Required field is missing.')\n\n return True\n\n def to_dict(self):\n legs = []\n for leg in self.legs.all():\n legs.append(leg.to_dict())\n return {\n 'id': self.id,\n 'name': self.name,\n 'id_user': self.id_user,\n 'legs': legs,\n 'uri': self.get_uri()\n }\n\n def __repr__(self):\n return '(%s)' % (self.name, self.user.email)","sub_path":"gatemate/models/trip.py","file_name":"trip.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"158185105","text":"import discord\nimport sqlite3\nfrom discord.ext import commands\nimport string\n\n\nclass UserCommands(commands.Cog):\n def __init__(self, bot: commands.Bot):\n self.bot = bot\n self.connection = sqlite3.connect(\"./data.s3db\")\n self.cursor = self.connection.cursor()\n\n @commands.command()\n async def stats(self, ctx: commands.Context, server):\n self.cursor.execute(f\"SELECT * FROM gtaservers WHERE name = '{server}'\")\n server = self.cursor.fetchone()\n print(server)\n if not server:\n embed = discord.Embed(title=\"Erreur\", description=\"Ce nom de serveur n'a pas été trouvé dans la \"\n \"base de données\")\n await 
ctx.send(embed=embed)\n else:\n embed = discord.Embed(title=f\"{server[0]}\", description=f\"Données de ce serveur\")\n embed.add_field(name=\"Note moyenne\", value=f\"{server[1]}\")\n embed.add_field(name=\"Nombre d'avis\", value=f\"{server[2]}\")\n self.cursor.execute(f\"SELECT comments FROM comments WHERE name = '{server[0]}' \"\n f\"ORDER BY n_comment DESC LIMIT 3\")\n last_3_comm = self.cursor.fetchall()\n embed.add_field(name=\"Les trois derniers commentaires laissés sur ce serveur:\",\n value=f\"{last_3_comm[0][0]}\\n{last_3_comm[1][0]}\\n{last_3_comm[2][0]}\", inline=False)\n await ctx.send(embed=embed)\n\n @commands.command()\n async def note(self, ctx: commands.Context, server=None, rating=None, *, commentary: str = None):\n if not server or not rating:\n embed = discord.Embed(title=\"Erreur\", description=\"Il manque un argument à votre commande.\")\n embed.add_field(name=\"Syntaxe de la commande:\", value=\".note [serveur] [note sur 5] \"\n \"[commentaire (optionnel)]\")\n\n elif rating not in string.digits:\n embed = discord.Embed(title=\"Erreur\", description=\"Votre note est incorrecte. Veuillez entrer un nombre\")\n embed.add_field(name=\"Syntaxe de la commande:\", value=\".note [serveur] [note sur 5] \"\n \"[commentaire (optionnel)]\")\n\n elif int(rating) <= 0 or int(rating) > 5:\n embed = discord.Embed(title=\"Erreur\", description=\"Votre note est incorrecte. \"\n \"Veuillez entrer une note entre 0 et 5\")\n embed.add_field(name=\"Syntaxe de la commande:\", value=\".note [serveur] [note sur 5] \"\n \"[commentaire (optionnel)]\")\n\n else:\n self.cursor.execute(f\"SELECT * FROM gtaservers WHERE name = '{server}'\")\n db_server = self.cursor.fetchone()\n if db_server:\n name: str = db_server[0]\n name = name.replace(\"'\", \" \") # replace() returns a new string; the result was previously discarded\n server_rating = db_server[1]\n n_ratings = db_server[2]\n server_rating = server_rating * n_ratings + int(rating)\n server_rating = round(server_rating / (n_ratings + 1), 2)\n query = f\"UPDATE gtaservers SET medium_rating = {server_rating}, n_ratings = {n_ratings + 1}\" \\\n f\" WHERE name = '{name}'\"\n self.cursor.execute(query)\n embed = discord.Embed(title=f\"Vous avez ajouté la note {rating} au serveur {name}\",\n description=\"Merci pour votre participation\")\n\n if commentary:\n commentary = commentary.replace(\"'\", \" \") # keep the sanitized string\n self.cursor.execute(f\"SELECT n_comment FROM comments WHERE name = '{name}' \"\n f\"ORDER BY n_comment DESC LIMIT 1\")\n n_comm = self.cursor.fetchone()\n self.cursor.execute(f\"INSERT INTO comments (name, comments, n_comment) VALUES \"\n f\"('{name}', '{commentary}', {n_comm[0] + 1})\")\n embed.add_field(name=\"Commentaire ajouté:\", value=f\"{commentary}\")\n\n else:\n embed = discord.Embed(title=f\"Vous avez ajouté la note {rating} au serveur \"\n f\"{server} pour la première fois\",\n description=\"Merci pour votre participation\")\n self.cursor.execute(f\"INSERT INTO gtaservers (name, medium_rating, n_ratings) VALUES \"\n f\"('{server}', {rating}, 1)\")\n\n self.connection.commit()\n await ctx.send(embed=embed)\n","sub_path":"utilis/commands/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":4709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"527142911","text":"import numpy as np\nimport freenect\nimport pygame as pygame\nimport time\n\npygame.init()\n\t\nclass MaskCentroidManager():\n \"\"\"Consumes kinect images and provides centroids of mass\"\"\"\n\n def __init__(self, crop, base):\n self.croprect = pygame.Rect(480 - crop * 2, 640 - crop * 2, crop, 
crop)\n self.lastimage = pygame.Surface((1, 1))\n self.base = base\n\t\t\t\t\n def getCentroid(self, image):\n mask = pygame.mask.from_threshold(self._get_surf(image),\n (0, 0, 0), (230, 230, 230, 255))\n return mask.centroid()\n \n def _get_surf(self, depthImage):\n depthImage = depthImage - self.base # subtract the baseline depth map; the bare expression previously discarded its result\n sface = pygame.Surface((480, 640))\n arr = pygame.surfarray.pixels_green(sface)\n arr[:] = depthImage[:]\n arr = pygame.surfarray.pixels_red(sface)\n arr[:] = depthImage[:]\n arr = pygame.surfarray.pixels_blue(sface)\n arr[:] = depthImage[:]\n del(arr)\n sface = pygame.transform.chop(sface, self.croprect)\n image = pygame.transform.rotate(sface, -90)\n self.lastimage = image\n return image\n\t\t\t\t\ndef getDepthMap():\t\n depth, timestamp = freenect.sync_get_depth()\n \n np.clip(depth, 0, 2 ** 10 - 1, depth)\n depth >>= 2\n depth = depth.astype(np.uint8)\n \n return depth\n\nif(__name__ == \"__main__\"):\n initMap = getDepthMap()\n centroidManager = MaskCentroidManager(20, getDepthMap())\n\n while 1:\n time.sleep(0.1)\n for e in pygame.event.get():\n if e.type == pygame.QUIT:\n break\n\n display = pygame.display.set_mode((640, 480))\n\n draw = centroidManager.lastimage\t\n pygame.draw.circle(draw, pygame.Color(255, 0, 0, 255), centroidManager.getCentroid(getDepthMap()), 10)\n display.blit(draw, (0, 0))\n\n pygame.display.update()\n print(\"Rendering\")\n\n","sub_path":"Engine/vec2d/centroid_manager.py","file_name":"centroid_manager.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"385729576","text":"# A seguinte sequência de números 0 1 1 2 3 5 8 13 21... é conhecida como série\n# de Fibonacci. Nessa sequência, cada número, depois dos 2 primeiros, é igual à soma\n# dos 2 anteriores. 
Escreva um algoritmo que leia um inteiro N (N < 46) e mostre\n# os N primeiros números dessa série.\n#\n# Entrada\n# O arquivo de entrada contém um valor inteiro N (0 < N < 46).\n#\n# Saída\n# Os valores devem ser mostrados na mesma linha, separados por um espaço em branco.\n# Não deve haver espaço após o último valor.\n#\n# Exemplo de entrada Exemplo de saída\n# 5 0 1 1 2 3\n\n\nn = int(input())\n\ni = 0\n\nt = []\n\nwhile i < n:\n\n if i == 0 or i == 1:\n t.append(i)\n\n if i > 1:\n aux = t[i - 2] + t[i - 1]\n\n t.append(aux)\n\n i = i + 1\n\nfor j in range(0, n):\n t[j] = str(t[j])\n\nt = ' '.join(t)\n\nprint(t)","sub_path":"Bootcamp.py","file_name":"Bootcamp.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"596776123","text":"#!/usr/bin/python3\r\n#-*-encoding='utf-8'-*-\r\nfrom urllib.parse import quote\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.support.wait import WebDriverWait\r\nfrom selenium.common.exceptions import NoSuchElementException\r\nfrom pyquery import PyQuery as pq\r\nimport pymongo\r\nkeyword='电脑'\r\nchrome_options=webdriver.ChromeOptions()\r\nchrome_options.add_argument('--headless')\r\nchrome_options.add_argument('--no-sandbox')\r\nbrowser=webdriver.Chrome(chrome_options=chrome_options)\r\nwait=WebDriverWait(browser,10)\r\nclient = pymongo.MongoClient(host='localhost', port=27017)\r\ndb = client['product']\r\ncollection = db['computer']\r\nurl = 'https://s.taobao.com/search?q=' + quote(keyword)\r\ndef get_page(page):\r\n try:\r\n browser.get(url)\r\n if page > 1:\r\n input=wait.until(EC.presence_of_element_located((By.CSS_SELECTOR,'#mainsrp-pager .m-page .input')))\r\n input.clear()\r\n input.send_keys(page)\r\n submit=wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR,'#mainsrp-pager div.form>span.btn.J_Submit'))) #同一个节点span.btn.J_Submit,CSS选择器要紧挨一起,而不是span .btn.J_Submit\r\n submit.click()\r\n wait.until(EC.text_to_be_present_in_element((By.CSS_SELECTOR,'#mainsrp-pager li.item.active > span'),str(page))) #某个节点文本包含某文字\r\n wait.until(EC.presence_of_element_located((By.CSS_SELECTOR,'.m-itemlist .grid .item')))\r\n get_products()\r\n except NoSuchElementException:\r\n get_page(page)\r\n print('Not Found')\r\ndef get_products():\r\n html=browser.page_source\r\n doc=pq(html)\r\n items=doc('.m-itemlist .grid .items .item').items() #items()构造生成器\r\n for item in items:\r\n product={\r\n 'name':item.find('.title').text().replace('\\n',''),\r\n 'image':'http:'+item.find('.pic .img').attr('data-src'),\r\n 'price':item.find('.price').text().replace('\\n',''),\r\n 'deal':item.find('.deal-cnt').text(),\r\n 'shop':item.find('.shopname').text(),\r\n 'location':item.find('.location').text()\r\n }\r\n print(product)\r\n save_to_mongo(product)\r\n\r\ndef save_to_mongo(product):\r\n try:\r\n if collection.insert(product):\r\n print('success')\r\n except:\r\n print('failed')\r\n\r\nif __name__=='__main__':\r\n for i in range(1,10):\r\n get_page(i)\r\n","sub_path":"spider/Selenium爬取淘宝商品.py","file_name":"Selenium爬取淘宝商品.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"453482486","text":"# coding=utf-8\n\"\"\"update lingala\n\nRevision ID: 53ad7f23e30d\nRevises: 44c466f1f365\nCreate Date: 2015-07-09 14:59:20.977488\n\n\"\"\"\nfrom __future__ import 
unicode_literals\n\n# revision identifiers, used by Alembic.\nrevision = '53ad7f23e30d'\ndown_revision = '44c466f1f365'\n\nimport os\nimport datetime\n\nfrom alembic import op\nimport sqlalchemy as sa\n\nfrom clld.util import jsonload\nfrom clld.db.migration import Connection\nfrom clld.db.models.common import Sentence, ValueSentence, ValueSet, Value, Language\nfrom clld.lib.dsv import reader\n\nimport apics\n\n\ndef data_file(fname):\n return os.path.join(os.path.dirname(apics.__file__), '..', 'data', fname)\n\n\ndef upgrade():\n conn = Connection(op.get_bind())\n example_map = {}\n\n sid = 204\n for example in jsonload(data_file('lingala_examples.json')):\n sid += 1\n kw = {\n 'id': '60-%s' % sid,\n 'language_pk': conn.pk(Language, '60'),\n 'name': example['Text'],\n 'description': example['Translation'],\n 'gloss': '\\t'.join(example['Gloss'].split()),\n 'analyzed': '\\t'.join(example['Text'].split()),\n 'type': example['Type'].strip().lower(),\n 'jsondata': {'sort': int(example['Order_number']), 'alt_translation': None}\n }\n example_map[example['Example_number']] = conn.insert(Sentence, **kw)\n\n for ve in jsonload(data_file('lingala_value_examples.json')):\n vspk = conn.pk(ValueSet, '60-%s' % ve['Features::Feature_number'])\n vpk = conn.pk(Value, vspk, attr='valueset_pk')\n conn.insert(\n ValueSentence, value_pk=vpk, sentence_pk=example_map[ve['Example_number']])\n\n for i, comment in enumerate(reader(data_file('lingala_valueset_comments.tab'), dicts=True)):\n vspk = conn.pk(ValueSet, '60-%s' % comment['Features::Feature_number'])\n comment['Comments_on_value_assignment'] = comment['Comments_on_value_assignment'].replace('\\x0b', '\\n')\n conn.update(\n ValueSet,\n {\n 'description': comment['Comments_on_value_assignment'],\n 'markup_description': None,\n },\n pk=vspk)\n\n\ndef downgrade():\n pass\n","sub_path":"migrations/versions/53ad7f23e30d_update_lingala.py","file_name":"53ad7f23e30d_update_lingala.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"511634100","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os\nfrom setuptools import setup\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\nsetup(\n name=\"Trove Integration Tests\",\n version=\"0.0.9.9\",\n author='OpenStack',\n description=\"Runs integration tests on Ridley.\",\n license='Apache',\n py_modules=[],\n packages=['tests'],\n scripts=[]\n)\n","sub_path":"integration/tests/integration/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"629705775","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 30 22:59:35 2016\n\n@author: JohannesMHeinrich\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.figure import Figure\n\nfrom PIL import Image\n\n\ndef trap_image():\n \n fig_trap_image = plt.figure()\n \n fig_trap_image.patch.set_facecolor('white')\n fig_trap_image.patch.set_alpha(0)\n \n ax = fig_trap_image.add_subplot(111)\n \n ax.plot(range(10))\n \n ax.patch.set_alpha(0)\n \n ax.axis('off')\n \n plt.tight_layout(pad=0.01, w_pad=0.01, h_pad=0.01)\n \n \n img = Image.open('pic_trapy2.png')\n ax.imshow(img,zorder=0)\n\n \n return fig_trap_image","sub_path":"XCon_01_image_trap3.py","file_name":"XCon_01_image_trap3.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"103931093","text":"import dash\r\nfrom dash.dependencies import Input, Output\r\nimport dash_table\r\nimport dash_html_components as html\r\nfrom sqlalchemy import create_engine\r\nimport pymysql\r\nimport pandas as pd\r\nimport dash_core_components as dcc\r\n\r\n# Connect to MYSQL\r\ndb_connection_str = 'mysql+pymysql://root:\"password\"@localhost/craigslist'\r\ndb_connection = create_engine(db_connection_str)\r\n\r\ndf = pd.read_sql('SELECT * FROM books_mags', con=db_connection).drop(['id'], axis=1)\r\ndf[' index'] = range(1, len(df) + 1)\r\n\r\napp = dash.Dash(__name__)\r\nserver = app.server\r\nPAGE_SIZE = 5\r\napp.layout = html.Div([\r\n dash_table.DataTable(\r\n id='datatable-filtering-fe',\r\n columns=[\r\n {\"name\": i, \"id\": i, \"deletable\": True} for i in df.columns\r\n ],\r\n style_cell_conditional=[\r\n {'if': {'column_id': 'titles'},\r\n 'width': '30%'}],\r\n data=df.to_dict('records'),\r\n filter_action=\"native\",\r\n page_size=PAGE_SIZE\r\n ),\r\n html.Div(id='datatable-filter-container')\r\n])\r\n\r\n\r\n@app.callback(\r\n Output('datatable-filter-container', \"children\"),\r\n [Input('datatable-filtering-fe', \"data\")])\r\ndef update_graph(rows):\r\n if rows is None:\r\n dff = df\r\n else:\r\n dff = pd.DataFrame(rows)\r\n\r\n return html.Div()\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run_server(debug=True)\r\n\r\n \r\n\r\n#References \r\n#https://dash.plot.ly/datatable/callbacks\r\n#https://dash.plot.ly/datatable/filtering\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"453125621","text":"#! 
/usr/bin/env python\n# coding=utf-8\n# Copyright (c) 2019 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport os\n\nimport pandas as pd\nfrom ludwig.datasets.base_dataset import DEFAULT_CACHE_LOCATION, BaseDataset\nfrom ludwig.datasets.mixins.kaggle import KaggleDownloadMixin\nfrom ludwig.datasets.mixins.load import CSVLoadMixin\nfrom ludwig.utils.fs_utils import makedirs, rename\n\n\ndef load(cache_dir=DEFAULT_CACHE_LOCATION, split=False, kaggle_username=None,\n kaggle_key=None):\n dataset = Titanic(\n cache_dir=cache_dir,\n kaggle_username=kaggle_username,\n kaggle_key=kaggle_key\n )\n return dataset.load(split=split)\n\n\nclass Titanic(CSVLoadMixin, KaggleDownloadMixin, BaseDataset):\n \"\"\"The Titanic dataset.\n\n This pulls in an array of mixins for different types of functionality\n which belongs in the workflow for ingesting and transforming\n training data into a destination dataframe that can\n be loaded by Ludwig's training API.\n \"\"\"\n\n def __init__(\n self,\n cache_dir=DEFAULT_CACHE_LOCATION,\n kaggle_username=None,\n kaggle_key=None\n ):\n self.kaggle_username = kaggle_username\n self.kaggle_key = kaggle_key\n self.is_kaggle_competition = True\n super().__init__(dataset_name='titanic', cache_dir=cache_dir)\n\n def process_downloaded_dataset(self):\n \"\"\"The final method where we create a concatenated CSV file\n with both training and test data\"\"\"\n train_file = self.config[\"split_filenames\"][\"train_file\"]\n test_file = self.config[\"split_filenames\"][\"test_file\"]\n\n train_df = pd.read_csv(os.path.join(self.raw_dataset_path, train_file))\n test_df = pd.read_csv(os.path.join(self.raw_dataset_path, test_file))\n\n train_df[\"split\"] = 0\n test_df[\"split\"] = 2\n\n df = pd.concat([train_df, test_df])\n\n makedirs(self.processed_temp_path, exist_ok=True)\n df.to_csv(os.path.join(self.processed_temp_path, self.csv_filename),\n index=False)\n rename(self.processed_temp_path, self.processed_dataset_path)\n","sub_path":"ludwig/datasets/titanic/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} {"seq_id":"654434562","text":"#!/usr/bin/env python3\n''' The daemon that runs the Python Kalman filters for velocity and heading. 
'''\n\nfrom numpy import array, cos, sin, radians\nimport numpy as np\nimport quat\n\nfrom settings import dt\n\nimport sys\nimport shm\nimport time\n\nfrom auv_python_helpers.angles import abs_heading_sub_degrees\nfrom shm.watchers import watcher\nfrom threading import Thread\nfrom conf.vehicle import sensors, VEHICLE\nfrom functools import reduce\n\nrec_get_attr = lambda s: reduce(lambda acc, e: getattr(acc, e), s.split('.'), shm)\n\n# thruster_array allows access to thruster values\nthrusters = ['port', 'starboard', 'sway_fore', 'sway_aft']\n\n#### Heading Shared Variables\nhdg_input_var = rec_get_attr(sensors[\"heading\"])\nhdg_out_var = shm.kalman.heading\nhdg_cumulative_out_var = shm.kalman.heading_cumulative\nrate_var = rec_get_attr(sensors[\"ratez\"])\nrate_var_imu = rec_get_attr(sensors[\"heading_rate\"])\nrate_out_var = shm.kalman.heading_rate\n\nheading_sensor = shm.kalman.sensor\n\npitch_var = rec_get_attr(sensors[\"pitch\"])\n# HIM pitch velocity is negative!\npitch_rate_var = rec_get_attr(sensors[\"pitch_rate\"])\npitch_out_var = shm.kalman.pitch\npitch_rate_out_var = shm.kalman.pitch_rate\n\nroll_var = rec_get_attr(sensors[\"roll\"])\nroll_rate_var = rec_get_attr(sensors[\"roll_rate\"])\nroll_out_var = shm.kalman.roll\nroll_rate_out_var = shm.kalman.roll_rate\n\ngx4_q0 = rec_get_attr(sensors[\"quaternion\"]).q0\ngx4_q1 = rec_get_attr(sensors[\"quaternion\"]).q1\ngx4_q2 = rec_get_attr(sensors[\"quaternion\"]).q2\ngx4_q3 = rec_get_attr(sensors[\"quaternion\"]).q3\n\n#### Velocity Shared Variables\nx_vel_in = rec_get_attr(sensors[\"velx\"])\ny_vel_in = rec_get_attr(sensors[\"vely\"])\n# XXX Fragile.\ndvl_velocity = \"dvl\" in sensors[\"vely\"]\n\n# NB: swapped for testing, correct?\nx_acc_in = rec_get_attr(sensors[\"accelx\"])\ny_acc_in = rec_get_attr(sensors[\"accely\"])\ndepth_in = rec_get_attr(sensors[\"depth\"])\ndepth_offset = rec_get_attr(sensors[\"depth_offset\"])\n\n\nx_vel_out = shm.kalman.velx\ny_vel_out = shm.kalman.vely\n\nx_acc_out = shm.kalman.accelx\ny_acc_out = shm.kalman.accely\ndepth_out = shm.kalman.depth\ndepth_rate_out = shm.kalman.depth_rate\nforward_out = shm.kalman.forward\nsway_out = shm.kalman.sway\nnorth_out = shm.kalman.north\neast_out = shm.kalman.east\n\n# DVL Beam vars\nbeam_vars = [shm.dvl.low_amp_1,\n shm.dvl.low_amp_2,\n shm.dvl.low_amp_3,\n shm.dvl.low_amp_4,\n shm.dvl.low_correlation_1,\n shm.dvl.low_correlation_2,\n shm.dvl.low_correlation_3,\n shm.dvl.low_correlation_4]\n\nwrench = shm.control_internal_wrench\n\ndef CalibrateHeadingRate(var):\n vals = []\n for i in range(10):\n vals.append(var.get())\n time.sleep(0.02)\n return sum(vals)/len(vals)\nrate_offset = CalibrateHeadingRate(rate_var)\nrate_offset_imu = CalibrateHeadingRate(rate_var_imu)\n\nfrom kalman_unscented import UnscentedKalmanFilter\n\ndef fx(x, dt):\n q_initial = x[:4]\n disp_quat = quat.ypr_to_quat([vel*dt for vel in x[4:]])\n q_final = quat.add_quat(q_initial, disp_quat)\n x[0] = q_final[0]\n x[1] = q_final[1]\n x[2] = q_final[2]\n x[3] = q_final[3]\n return x\n\ndef hx(x):\n return x\n\norientation_filter = UnscentedKalmanFilter(7, fx, 7, hx, dt, .1)\norientation_filter.x_hat = np.array([gx4_q0.get(), gx4_q1.get(), gx4_q2.get(), gx4_q3.get(), 0, 0, 0])\norientation_filter.P *= .5\norientation_filter.R = np.array([[90, 0, 0, 0, 0, 0, 0],\n [0, 90, 0, 0, 0, 0, 0],\n [0, 0, 90, 0, 0, 0, 0],\n [0, 0, 0, 90, 0, 0, 0],\n [0, 0, 0, 0, .5, 0, 0],\n [0, 0, 0, 0, 0, .7, 0],\n [0, 0, 0, 0, 0, 0, .05]])\n\nfrom kalman_position import PositionFilter\nkalman_xHat = array([[ 
-1*x_vel_in.get(),\n # x_acc_in.get(),\n y_vel_in.get(),\n 0,\n # y_acc_in.get(),\n 0,\n 0,\n 0,\n depth_in.get() - depth_offset.get(),\n #depth_in.get() - 8.64,\n 0]]).reshape(8,1)\n# Pass in ftarray, shared memory handle to controller\nkalman_position = PositionFilter(kalman_xHat)\n\n\nwatchers = dict()\nfor var in [hdg_input_var, rate_var, rate_var_imu, pitch_var, pitch_rate_var, roll_var, roll_rate_var, gx4_q0, gx4_q1, gx4_q2, gx4_q3]:\n watchers[var] = watcher()\n group = eval(var.__module__)\n watchers[var].watch(group)\ndef get(var):\n #if watchers[var].has_changed():\n # return var.get()\n #else:\n # return None\n return var.get()\n\nstart = time.time()\niteration = 0\nwhile True:\n while (iteration*dt < (time.time() - start)):\n\n yaw_rate = get(rate_var_imu)\n pitch_rate = get(pitch_rate_var)\n roll_rate = get(roll_rate_var)\n yaw_rate_kal = get(rate_var_imu)*np.pi/180\n pitch_rate_kal = get(pitch_rate_var)*np.pi/180\n roll_rate_kal = get(roll_rate_var)*np.pi/180\n\n # Bugs arise due to quaternion aliasing, so we choose the quaternion\n # closest to the actual state\n actual_quat = [get(gx4_q0), get(gx4_q1), get(gx4_q2), get(gx4_q3)]\n negated_quat = [-i for i in actual_quat]\n kalman_quat = orientation_filter.x_hat[:4]\n\n actual_delta = [kalman_quat[i] - actual_quat[i] for i in range(4)]\n negated_delta = [kalman_quat[i] - negated_quat[i] for i in range(4)]\n\n quat_in = actual_quat\n if np.linalg.norm(actual_delta) > np.linalg.norm(negated_delta):\n quat_in = negated_quat\n\n orientation_filter.predict()\n orientation_filter.update(quat_in + [yaw_rate_kal, pitch_rate_kal, roll_rate_kal])\n\n # [q0, q1, q2, q3, yawrate, pitchrate, rollrate]\n data = orientation_filter.x_hat\n ypr = quat.quat_to_ypr(data[:4])\n\n outputs = shm.kalman.get()\n keys = ['q0', 'q1', 'q2', 'q3', 'heading_rate', 'pitch_rate', 'roll_rate']\n output = dict(zip(keys, data))\n outputs.update(**output)\n outputs.heading_rate *= 180/np.pi\n outputs.pitch_rate *= 180/np.pi\n outputs.roll_rate *= 180/np.pi\n outputs.update(**{'heading': ypr[0]*180/np.pi%360, 'pitch': ypr[1]*180/np.pi, 'roll': ypr[2]*180/np.pi})\n\n outputs.heading_cumulative = outputs.heading\n shm.kalman.set(outputs)\n\n\n ## Read Inputs\n #Data relative to the sub\n x_vel = -1*x_vel_in.get()\n x_acc = 0 # x_acc_in.get()\n y_vel = -1*y_vel_in.get()\n y_acc = 0 # y_acc_in.get()\n\n # When the DVL is tracking the surface the y velocity is reversed.\n # This is not ideal... 
what happens when it is not exactly inverted?\n if dvl_velocity and \\\n bool(abs_heading_sub_degrees(outputs.roll, 180) < 90) ^ \\\n bool(abs_heading_sub_degrees(outputs.pitch, 180) < 90):\n y_vel = -y_vel\n\n #depth = depth_in.get() - depth_offset.get()\n depth = depth_in.get() - 8.64\n #depth = 2.5 - shm.dvl.savg_altitude.get() \n # Compensate for gravitational acceleration\n grav_x = sin( radians(outputs.pitch) )*9.8 # XXX: CHRIS DOES NOT LIKE (small angle approx??)\n grav_y = -sin( radians(outputs.roll) )*9.8\n gx4_grav_y = np.tan(radians(outputs.pitch))*np.sqrt(shm.gx4.accelx.get()**2 + shm.gx4.accelz.get()**2)\n gx4_grav_x = -1*np.tan(radians(outputs.roll))*shm.gx4.accelz.get()\n him_grav_y = np.tan(radians(outputs.pitch))*np.sqrt(shm.him.x_accel.get()**2 + shm.him.z_accel.get()**2)\n him_grav_x = -1*np.tan(radians(outputs.roll))*shm.him.z_accel.get()\n x_acc = x_acc - grav_x\n y_acc = y_acc - grav_y\n x_acc, y_acc = [0, 0] # temporary\n\n\n #Check whether the DVL beams are good\n beams_good = sum( [not var.get() for var in beam_vars] ) >= 2\n\n #beams_good = all( [not var.get() for var in beam_vars] )\n #And if not, disable them\n if not beams_good:\n active_measurements = array([0,1,0,1,1]).reshape((5,1))\n else:\n active_measurements = None\n\n # XXX Experimental.\n #active_measurements = array([1,0,1,0,1]).reshape((5,1))\n\n soft_kill = shm.switches.soft_kill.get()\n\n curr_thrusters = dict((t,(1-soft_kill)*shm.motor_desires.__getattribute__(t).get()) for t in thrusters)\n u = array((wrench.f_x.get(), wrench.f_y.get(), \\\n wrench.f_z.get(), wrench.t_x.get(), \\\n wrench.t_y.get(), wrench.t_z.get()))\n\n\n\n ## Update\n \n outputs.update(**kalman_position.update(outputs.heading, x_vel, x_acc, y_vel, y_acc, depth, u, active_measurements, curr_thrusters, outputs.pitch, outputs.roll))\n \n # This really shouldn't be necessary when kalman has a u term (which it does)\n if not beams_good and VEHICLE is \"thor\":\n outputs.velx = 0\n outputs.vely = 0\n\n ## Write outputs as group, notify only once\n shm.kalman.set(outputs)\n\n iteration += 1\n\n time.sleep(dt/5.)\n\n#@ kalman.heading.updating = shm.kalman.heading.get() != delayed(0.5, 'shm.kalman.heading.get()')\n#@ kalman.heading.valid = 0 <= shm.kalman.heading.get() < 360\n#@ kalman.velx.updating = shm.kalman.velx.get() != delayed(0.5, 'shm.kalman.velx.get()')\n","sub_path":"sensors/kalman/auv-kalmand.py","file_name":"auv-kalmand.py","file_ext":"py","file_size_in_byte":9140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"199604931","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Mar 21 19:49:39 2020\r\n\r\n@author: anish\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport cv2\r\n\r\ncapture=cv2.VideoCapture(0)\r\n\r\nwhile True:\r\n ret,frame=capture.read()\r\n gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\r\n invert=cv2.bitwise_not(frame)\r\n cv2.imshow(\"normal\",frame)\r\n cv2.imshow(\"gray\",gray)\r\n cv2.imshow(\"NEGTIVE\",invert)\r\n if cv2.waitKey(20) & 0xFF== ord('q'):\r\n break\r\n \r\n \r\n ","sub_path":"opencv/Video Effects.py","file_name":"Video Effects.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"217932062","text":"from __future__ import print_function, division, absolute_import\n\nfrom operator import add, sub\nfrom time import sleep\n\nimport pytest\npytest.importorskip('bokeh')\nimport sys\nfrom toolz import first\nfrom tornado import gen\nfrom 
tornado.httpclient import AsyncHTTPClient\n\nfrom distributed.client import _wait\nfrom distributed.metrics import time\nfrom distributed.utils_test import gen_cluster, inc, dec, slowinc\nfrom distributed.bokeh.worker import Counters, BokehWorker\nfrom distributed.bokeh.scheduler import (BokehScheduler, StateTable,\n SystemMonitor, Occupancy, StealingTimeSeries, StealingEvents, Events,\n TaskStream, TaskProgress, MemoryUse, CurrentLoad, ProcessingHistogram,\n NBytesHistogram, WorkerTable)\n\nfrom distributed.bokeh import scheduler\n\nscheduler.PROFILING = False\n\n\n@pytest.mark.skipif(sys.version_info[0] == 2,\n reason='https://github.com/bokeh/bokeh/issues/5494')\n@gen_cluster(client=True,\n scheduler_kwargs={'services': {('bokeh', 0): BokehScheduler}})\ndef test_simple(c, s, a, b):\n assert isinstance(s.services['bokeh'], BokehScheduler)\n\n future = c.submit(sleep, 1)\n yield gen.sleep(0.1)\n\n http_client = AsyncHTTPClient()\n for suffix in ['system', 'counters', 'workers', 'status', 'tasks', 'stealing']:\n response = yield http_client.fetch('http://localhost:%d/%s'\n % (s.services['bokeh'].port, suffix))\n assert 'bokeh' in response.body.decode().lower()\n\n\n@gen_cluster(client=True, worker_kwargs=dict(services={'bokeh': BokehWorker}))\ndef test_basic(c, s, a, b):\n for component in [SystemMonitor, StateTable, Occupancy, StealingTimeSeries]:\n ss = component(s)\n\n ss.update()\n data = ss.source.data\n assert len(first(data.values()))\n if component is Occupancy:\n assert all(addr.startswith('127.0.0.1:')\n for addr in data['bokeh_address'])\n\n\n@gen_cluster(client=True)\ndef test_counters(c, s, a, b):\n pytest.importorskip('crick')\n while 'tick-duration' not in s.digests:\n yield gen.sleep(0.01)\n ss = Counters(s)\n\n ss.update()\n yield gen.sleep(0.1)\n ss.update()\n\n start = time()\n while not len(ss.digest_sources['tick-duration'][0].data['x']):\n yield gen.sleep(1)\n assert time() < start + 5\n\n\n@gen_cluster(client=True)\ndef test_stealing_events(c, s, a, b):\n se = StealingEvents(s)\n\n futures = c.map(slowinc, range(100), delay=0.1, workers=a.address,\n allow_other_workers=True)\n\n while not b.task_state: # will steal soon\n yield gen.sleep(0.01)\n\n se.update()\n\n assert len(first(se.source.data.values()))\n\n\n@gen_cluster(client=True)\ndef test_events(c, s, a, b):\n e = Events(s, 'all')\n\n futures = c.map(slowinc, range(100), delay=0.1, workers=a.address,\n allow_other_workers=True)\n\n while not b.task_state:\n yield gen.sleep(0.01)\n\n e.update()\n d = dict(e.source.data)\n assert sum(a == 'add-worker' for a in d['action']) == 2\n\n\n@gen_cluster(client=True)\ndef test_task_stream(c, s, a, b):\n ts = TaskStream(s)\n\n futures = c.map(slowinc, range(10), delay=0.001)\n\n yield _wait(futures)\n\n ts.update()\n d = dict(ts.source.data)\n\n assert all(len(L) == 10 for L in d.values())\n assert min(d['start']) == 0 # zero based\n\n ts.update()\n d = dict(ts.source.data)\n assert all(len(L) == 10 for L in d.values())\n\n total = c.submit(sum, futures)\n yield _wait(total)\n\n ts.update()\n d = dict(ts.source.data)\n assert len(set(map(len, d.values()))) == 1\n\n\n@gen_cluster(client=True)\ndef test_task_stream_n_rectangles(c, s, a, b):\n ts = TaskStream(s, n_rectangles=10)\n futures = c.map(slowinc, range(10), delay=0.001)\n yield _wait(futures)\n ts.update()\n\n assert len(ts.source.data['start']) == 10\n\n\n@gen_cluster(client=True)\ndef test_task_stream_second_plugin(c, s, a, b):\n ts = TaskStream(s, n_rectangles=10, clear_interval=10)\n ts.update()\n futures = 
c.map(inc, range(10))\n yield _wait(futures)\n ts.update()\n\n ts2 = TaskStream(s, n_rectangles=5, clear_interval=10)\n ts2.update()\n\n\n\n@gen_cluster(client=True)\ndef test_task_stream_clear_interval(c, s, a, b):\n ts = TaskStream(s, clear_interval=100)\n\n yield _wait(c.map(inc, range(10)))\n ts.update()\n yield gen.sleep(0.010)\n yield _wait(c.map(dec, range(10)))\n ts.update()\n\n assert len(ts.source.data['start']) == 20\n\n yield gen.sleep(0.150)\n yield _wait(c.map(inc, range(10, 20)))\n ts.update()\n\n assert len(ts.source.data['start']) == 10\n\n\n@gen_cluster(client=True)\ndef test_TaskProgress(c, s, a, b):\n tp = TaskProgress(s)\n\n futures = c.map(slowinc, range(10), delay=0.001)\n yield _wait(futures)\n\n tp.update()\n d = dict(tp.source.data)\n assert all(len(L) == 1 for L in d.values())\n assert d['name'] == ['slowinc']\n\n futures2 = c.map(dec, range(5))\n yield _wait(futures2)\n\n tp.update()\n d = dict(tp.source.data)\n assert all(len(L) == 2 for L in d.values())\n assert d['name'] == ['slowinc', 'dec']\n\n del futures, futures2\n\n while s.task_state:\n yield gen.sleep(0.01)\n\n tp.update()\n assert not tp.source.data['all']\n\n\n@gen_cluster(client=True)\ndef test_TaskProgress_empty(c, s, a, b):\n tp = TaskProgress(s)\n tp.update()\n\n futures = [c.submit(inc, i, key='f-' + 'a' * i) for i in range(20)]\n yield _wait(futures)\n tp.update()\n\n del futures\n while s.tasks:\n yield gen.sleep(0.01)\n tp.update()\n\n assert not any(len(v) for v in tp.source.data.values())\n\n\n@gen_cluster(client=True)\ndef test_MemoryUse(c, s, a, b):\n mu = MemoryUse(s)\n\n futures = c.map(slowinc, range(10), delay=0.001)\n yield _wait(futures)\n\n mu.update()\n d = dict(mu.source.data)\n assert all(len(L) == 1 for L in d.values())\n assert d['name'] == ['slowinc']\n\n\n@gen_cluster(client=True)\ndef test_CurrentLoad(c, s, a, b):\n cl = CurrentLoad(s)\n\n futures = c.map(slowinc, range(10), delay=0.001)\n yield _wait(futures)\n\n cl.update()\n d = dict(cl.source.data)\n\n assert all(len(L) == 2 for L in d.values())\n assert all(d['nbytes'])\n\n\n@gen_cluster(client=True)\ndef test_ProcessingHistogram(c, s, a, b):\n ph = ProcessingHistogram(s)\n ph.update()\n assert (ph.source.data['top'] != 0).sum() == 1\n\n futures = c.map(slowinc, range(10), delay=0.050)\n yield gen.sleep(0.100)\n\n ph.update()\n assert ph.source.data['right'][-1] > 2\n\n\n@gen_cluster(client=True)\ndef test_NBytesHistogram(c, s, a, b):\n nh = NBytesHistogram(s)\n nh.update()\n assert (nh.source.data['top'] != 0).sum() == 1\n\n futures = c.map(inc, range(10))\n yield _wait(futures)\n\n nh.update()\n assert nh.source.data['right'][-1] > 5 * 20\n\n\n@gen_cluster(client=True)\ndef test_WorkerTable(c, s, a, b):\n wt = WorkerTable(s)\n wt.update()\n assert all(wt.source.data.values())\n assert all(len(v) == 1 for v in wt.source.data.values())\n","sub_path":"python/anaconda/lib/python2.7/site-packages/distributed/bokeh/tests/test_scheduler_bokeh.py","file_name":"test_scheduler_bokeh.py","file_ext":"py","file_size_in_byte":7000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"46976299","text":"#import necessary libraries\nimport csv\nfrom collections import defaultdict\n\n# data dictionary to store information of each person after reading csv\ndata = defaultdict(list)\n\n# open csv file and start reading\nwith open('Sample_Excel_Sheet.csv',encoding=\"ISO-8859-1\") as csvDataFile:\n csvReader = csv.reader(csvDataFile)\n # skip header row\n next(csvReader, None)\n for row 
in csvReader:\n # store information only if value of \"Need to be Reviewed?\" column is yes\n if row[8].strip().lower() == \"yes\":\n fname = row[6].strip().split()[0]\n email = row[6].strip().split()[2]\n data[fname+\"_\"+email].append(row[7].strip())\n\n# print all the information\nfor person in data:\n movies = data[person]\n info = person.split(\"_\")\n firstname = info[0]\n email = info[1]\n print(email)\n print(\"Hi {0}\".format(firstname))\n print(\"\\r\\nPlease may you review the following movies:\\r\\n\")\n print(\"\\r\\n\".join(movies))\n print(\"\\r\\nThanks\\r\\n\")\n print(\"==\"*25)\n","sub_path":"CSV-Reader.py","file_name":"CSV-Reader.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"331552026","text":" # Routines used in Spectral Cube Building\nfrom __future__ import absolute_import, print_function\n\nimport sys\nimport numpy as np\nimport math\nfrom .. import datamodels\nfrom ..assign_wcs import nirspec\nfrom ..datamodels import dqflags\nfrom . import cube\nfrom . import coord\nfrom gwcs import wcstools\n#________________________________________________________________________________\n\ndef MakePointCloudMIRI(self, input_model,\n x, y, file_no, \n Cube,\n c1_offset, c2_offset):\n \"\"\"\n\n Short Summary\n -------------\n map x,y to Point cloud in final coordinate system (xi,eta of cube) \n\n Parameters\n ----------\n x,y list of x and y values to map\n input_model: slope image\n file_no: the index on the files that are used to construct the Cube\n Cube: holds the basic information on the Cube (including wcs of Cube)\n v2v32radec: temporary (until information is contained in assign_wcs) \n holds the information to do the transformation from v2-v3 to ra-dec\n c1_offset, c2_offset: dither offsets for each file (default = 0)\n provided by the user\n\n Returns\n -------\n coordinates of x,y in point cloud\n\n\n \"\"\"\n#________________________________________________________________________________\n det2ab_transform = input_model.meta.wcs.get_transform('detector','alpha_beta')\n detector2v23 = input_model.meta.wcs.get_transform('detector', 'v2v3')\n v23toworld = input_model.meta.wcs.get_transform(\"v2v3\",\"world\")\n worldtov23 = input_model.meta.wcs.get_transform(\"world\",\"v2v3\")\n\n alpha, beta, wave = det2ab_transform(x, y)\n v2, v3, lam = detector2v23(x, y)\n flux_all = input_model.data[y, x]\n error_all = input_model.err[y, x]\n dq_all = input_model.dq[y,x]\n#________________________________________________________________________________\n# in the slice gaps the v2,v3 and lam are NaN values. 
Select the valid slice pixels\n# based on if v2,v3,lam are finite \n valid1 = np.isfinite(v2) \n valid2 = np.isfinite(v3)\n valid3 = np.isfinite(lam) \n valid4 = np.isfinite(flux_all)\n\n valid = valid1 & valid2 & valid3 &valid4\n#________________________________________________________________________________\n# using the DQFlags from the input_image find pixels that should be excluded \n# from the cube mapping \n all_flags = (dqflags.pixel['DO_NOT_USE'] + dqflags.pixel['DROPOUT'] + \n dqflags.pixel['NON_SCIENCE'] +\n dqflags.pixel['DEAD'] + dqflags.pixel['HOT'] + \n dqflags.pixel['RC'] + dqflags.pixel['NONLINEAR'])\n\n # find the location of all the values to reject in cube building \n good_data = np.where((np.bitwise_and(dq_all, all_flags)==0) & (valid == True))\n\n # good data holds the location of pixels we want to map to cube \n flux = flux_all[good_data]\n error = error_all[good_data]\n alpha = alpha[good_data]\n beta = beta[good_data]\n xpix = x[good_data] # only used for testing\n ypix = y[good_data] # only used for testing\n\n# ia = 0 \n# for f in flux:\n# if(f < -10):\n# print('very low',flux[ia],xpix[ia],ypix[ia])\n# ia = ia + 1\n if(self.coord_system == 'alpha-beta'):\n coord1 = alpha\n coord2 = beta\n else:\n v2_use = v2[good_data] #arc mins\n v3_use = v3[good_data] #arc mins\n lam_use = lam[good_data]\n\n ra,dec,wave = v23toworld(v2_use,v3_use,lam_use)\n\n ra = ra - c1_offset/3600.0\n dec = dec - c2_offset/3600.0\n xi,eta = coord.radec2std(Cube.Crval1, Cube.Crval2,ra,dec) # xi,eta in arc seconds\n coord1 = xi\n coord2 = eta\n \n# index_xy = np.nonzero( (xpix == 470) & (ypix ==1))[0]\n# print('wavelength',lam_use[index_xy])\n# mm = (lam_use[index_xy])\n# print('Wavelength at 470,1',mm)\n# n= len(lam)\n# for ii in range(0, n - 1):\n# if(lam[ii] < mm):\n# mnew = lam[ii]\n# print(mnew,mm,xpix[ii],ypix[ii],dq_all[ii])\n \n\n ifile = np.zeros(flux.shape, dtype='int') + int(file_no)\n\n # get in form of 8 columns of data - shove the information in an array.\n # xpix,ypix used for testing\n cloud = np.asarray([coord1, coord2, wave, alpha, beta, flux, error, ifile, xpix, ypix])\n# cloud = np.asarray([coord1, coord2, wave, alpha, beta, flux, error, ifile])\n\n return cloud\n\n#________________________________________________________________________________\n\ndef MakePointCloudNIRSPEC(self, input_model,\n file_no,\n islice,\n Cube,\n c1_offset, c2_offset):\n \"\"\"\n\n Short Summary\n -------------\n For NIRSPEC IFU map x,y to Point cloud in final coordinate system (xi,eta of cube) \n\n Parameters\n ----------\n input_model: slope image\n file_no: the index on the files that are used to construct the Cube\n Cube: holds the basic information on the Cube (including wcs of Cube)\n v2v32radec: temporary (until information is contained in assign_wcs) \n holds the information to do the transformation from v2-v3 to ra-dec\n c1_offset, c2_offset: dither offsets for each file (default = 0)\n provided by the user\n\n Returns\n -------\n each valid detector mapped to in point cloud\n\n\n \"\"\"\n#________________________________________________________________________________\n\n slice_wcs = nirspec.nrs_wcs_set_input(input_model, islice)\n yrange = slice_wcs.domain[1]['lower'],slice_wcs.domain[1]['upper']\n xrange = slice_wcs.domain[0]['lower'],slice_wcs.domain[0]['upper']\n x,y = wcstools.grid_from_domain(slice_wcs.domain)\n ra, dec, lam = slice_wcs(x, y) # return v2,v3 are in degrees\n# print('ra',ra.shape,ra[20,0:20])\n# print('x,yrange for slice',xrange,yrange,islice)\n# 
print('x',x.shape,x[20,0:20])\n# print('y',y.shape,y[20,0:20])\n\n flux_all = input_model.data[y, x]\n error_all = input_model.err[y, x]\n dq_all = input_model.dq[y,x]\n#________________________________________________________________________________\n# Slices are curved on detector. A slice region is grabbed by corner regions so\n# the region returned may include pixels not valid for the slice \n valid1 = np.isfinite(ra) \n valid2 = np.isfinite(dec)\n valid3 = np.isfinite(lam) \n valid4 = np.isfinite(flux_all)\n\n valid = valid1 & valid2 & valid3 &valid4\n# print('valid',valid.shape,valid[20,0:20])\n#________________________________________________________________________________\n# using the DQFlags from the input_image find pixels that should be excluded \n# from the cube mapping \n all_flags = (dqflags.pixel['DO_NOT_USE'] + dqflags.pixel['DROPOUT'] + \n dqflags.pixel['NON_SCIENCE'] +\n dqflags.pixel['DEAD'] + dqflags.pixel['HOT'] + \n dqflags.pixel['RC'] + dqflags.pixel['NONLINEAR'])\n\n # find the location of all the values to reject in cube building \n good_data = np.where((np.bitwise_and(dq_all, all_flags)==0) & (valid == True))\n\n # good data holds the location of pixels we want to map to cube \n flux = flux_all[good_data]\n error = error_all[good_data]\n alpha = flux*0\n beta = flux*0\n xpix = x[good_data] # only used for testing\n ypix = y[good_data] # only used for testing\n\n ra_use = ra[good_data] #arc mins\n dec_use = dec[good_data] #arc mins\n wave = lam[good_data]\n# print('ra use',ra_use[0:10])\n# print('dec use',dec_use[0:10])\n# print('ra use shape',ra_use.shape)\n\n ra_use = ra_use - c1_offset/3600.0\n dec_use = dec_use - c2_offset/3600.0\n xi,eta = coord.radec2std(Cube.Crval1, Cube.Crval2,ra_use,dec_use) # xi,eta in arc seconds\n coord1 = xi\n coord2 = eta\n\n ifile = np.zeros(flux.shape, dtype='int') + int(file_no)\n\n # stuff the point cloud arrays for this configuration into cloud \n # Point cloud will eventually contain all the cloud values\n # xpix,ypix used for testing\n cloud = np.asarray([coord1, coord2, wave, alpha, beta, flux, error, ifile, xpix, ypix])\n\n return cloud\n#______________________________________________________________________\n\ndef FindROI(self, Cube, spaxel, PointCloud):\n\n \"\"\"\n Short Summary\n -------------\n using the point cloud, loop over the spaxels and find the point cloud members that\n fall within the ROI of the spaxel center. \n\n\n For MIRI the weighting of the Cloud points is based on the distance in the local\n MRS alpha-beta plane. Each cloud point has an associated alpha-beta coordinate\n The spaxel centers have xi,eta & V2,V3 so we need to know the channel and band \n information and transform the V2,V3 coordinates back to alpha-beta\n\n Parameters\n ----------\n Cube: holds basic Cube information\n spaxel: a class that holds information on each spaxel in the cube. 
\n PointCloud: array of point cloud members\n\n Returns\n -------\n location of x,y in Point Cloud as well as mapping of spaxel to each overlapping \n PointCloud member\n\n\n \"\"\"\n#________________________________________________________________________________\n nxc = len(Cube.xcoord)\n nzc = len(Cube.zcoord)\n nyc = len(Cube.ycoord)\n\n nplane = Cube.naxis1 * Cube.naxis2\n lower_limit = 0.01\n\n iprint = 0\n nn = len(PointCloud[0])\n\n self.log.info('number of elements in PT %i',nn)\n\n# loop over each point cloud member - might want to change this to looping\n# over spaxels but for now just keep it point cloud elements because it\n# is easy to find ROI members because the cube spaxel values are regularly spaced\n# and we can search over the vector of each axis of the cube rather than the entire \n# point cloud\n\n iprint = 0 \n#________________________________________________________________________________\n for ipt in range(0, nn - 1):\n\n coord1 = PointCloud[0, ipt] # Point cloud xi \n coord2 = PointCloud[1, ipt] # Point cloud eta\n wave = PointCloud[2,ipt] # Point cloud wavelength \n fluxdet = PointCloud[5,ipt]\n xdet = PointCloud[8,ipt]\n ydet = PointCloud[9,ipt]\n ifiledet = PointCloud[7,ipt]\n\n\n if(Cube.instrument == 'MIRI'):\n if(self.weighting == 'miripsf'):\n alpha = PointCloud[3, ipt] \n beta = PointCloud[4, ipt] \n\n ifile = int(PointCloud[7, ipt])\n a = Cube.a_wave[ifile]\n c = Cube.c_wave[ifile]\n wa = Cube.a_weight[ifile]\n wc = Cube.c_weight[ifile]\n weights = FindNormalizationWeights(wave, a, c, wa, wc)\n weight_alpha = weights[0]\n weight_beta = weights[1]\n weight_wave = weights[2]\n\n # transform Cube Spaxel centers to alpha,beta system\n # of point cloud member (only do this transformation for MIRI) \n # for MIRI weighting parameters are based on distance in alpha-beta coord system\n # transform the cube coordinate values to alpha and beta values \n # xi,eta -> ra,dec\n # ra-dec -> v2,v3 \n # v2,v3 -> local alpha,beta\n\n v2ab_transform = Cube.transform_v23toab[ifile]\n worldtov23 = Cube.transform_worldtov23[ifile]\n#________________________________________________________________________________\n if(self.coord_system == 'alpha-beta'):\n coord1 = alpha\n coord2 = beta\n#________________________________________________________________________________ \n # Coord1 and Coord2 are in the coordinate system of the cube.\n # using the Cube regularly spaced arrays - Cube.zcoord, xcoord,ycoord\n # find the spaxels that fall within the ROI of the point cloud member\n\n # find values within the ROI\n\n indexz = np.where(abs(Cube.zcoord - wave) <= self.roiw)\n indexx = np.where(abs(Cube.xcoord - coord1) <= self.roi1)\n indexy = np.where(abs(Cube.ycoord - coord2) <= self.roi2)\n\n zlam = Cube.zcoord[indexz]\n xi = Cube.xcoord[indexx] # Cube values for xi vector axis \n eta = Cube.ycoord[indexy] # Cube values for eta vector axis\n#________________________________________________________________________________\n# loop over the points in the ROI\n for iz, zz in enumerate(indexz[0]):\n istart = zz * nplane\n for iy, yy in enumerate(indexy[0]):\n for ix, xx in enumerate(indexx[0]):\n\n#________________________________________________________________________________\n# NIRSPEC instrument\n # for NIRSPEC find distance between PT and Spaxel Center \n # in xi,eta coordinate system\n if(Cube.instrument == 'NIRSPEC'):\n d1 = (xi[ix] - coord1)/Cube.Cdelt1\n d2 = (eta[iy] - coord2)/Cube.Cdelt2\n d3 = (zlam[iz] - wave)/Cube.Cdelt3\n weight_distance = math.sqrt(d1*d1 + d2*d2 + d3*d3) \n weight_distance = 
math.pow(weight_distance,self.weight_power)\n#________________________________________________________________________________\n# MIRI instrument\n elif(Cube.instrument == 'MIRI'):\n \n # weighting - standard - distance based on xi,eta distance\n if(self.weighting =='standard'):\n d1 = (xi[ix] - coord1)/Cube.Cdelt1\n d2 = (eta[iy] - coord2)/Cube.Cdelt2\n d3 = (zlam[iz] - wave)/Cube.Cdelt3\n weight_distance_abs = math.sqrt(d1*d1 + d2*d2 + d3*d3)\n weight_distance = math.pow(weight_distance_abs,self.weight_power)\n\n # For MIRI the distance between PT and Spaxel Center is\n # in the alpha - beta cooridate system\n elif(self.weighting =='miripsf'):\n\n ra_spaxel,dec_spaxel=coord.std2radec(Cube.Crval1,\n Cube.Crval2,\n xi[ix],eta[iy])\n\n \n v2_spaxel,v3_spaxel,zl = worldtov23(ra_spaxel,dec_spaxel,zlam[iz])\n\n alpha_spaxel,beta_spaxel,wave_spaxel = v2ab_transform(v2_spaxel,\n v3_spaxel,\n zlam[iz]) \n alpha_distance =alpha-alpha_spaxel\n beta_distance = beta-beta_spaxel\n wave_distance = abs(wave-wave_spaxel)\n\n xn = alpha_distance/weight_alpha\n yn = beta_distance/weight_beta\n wn = wave_distance/weight_wave\n \n # only included the spatial dimensions\n weight_distance = math.sqrt(xn*xn + yn*yn + wn*wn) \n weight_distance = math.pow(weight_distance,self.weight_power)\n#________________________________________________________________________________\n# We have found the weight_distance based on instrument type\n\n if(self.debug_pixel == 1 and self.xdebug == xx and self.ydebug == yy and\n self.zdebug == zz ):\n self.log.info('For spaxel %i %i %i, detector x,y,flux %i %i %f %i %f',self.xdebug+1,self.ydebug+1,\n self.zdebug+1,xdet,ydet,fluxdet,ifiledet,weight_distance)\n\n\n if(weight_distance < lower_limit): weight_distance = lower_limit\n weight_distance = 1.0 / weight_distance\n\n cube_index = istart + yy * Cube.naxis1 + xx\n spaxel[cube_index].ipointcloud.append(ipt)\n spaxel[cube_index].pointcloud_weight.append(weight_distance)\n\n\n# iprint = iprint + 1\n# if(iprint == 900000):\n# print('on point element',ipt)\n# iprint = 0 \n# print(ipt,ix,iy,iz,weight_distance)\n\n#_______________________________________________________________________\ndef FindWaveWeights(channel, subchannel):\n \"\"\"\n Short Summary\n -------------\n Get the wavelength normalization weights that we will use to normalize wavelengths.\n\n Parameters\n ----------\n channel- channel for point\n subchannel- subchannel for point\n\n Returns\n -------\n normalized weighting for wavelength for this channel, subchannel\n\n \"\"\"\n\n if(channel == '1'):\n if(subchannel == 'SHORT'):\n a = 3050.0\n c = 3340.0\n wa = 4.91\n wc = 5.79\n elif(subchannel == 'MEDIUM'):\n a = 2920.0\n c = 3400.0\n wa = 5.6\n wc = 6.62\n elif(subchannel == 'LONG'):\n a = 2800.0\n c = 3220.0\n wa = 6.46\n wc = 7.63\n\n\n if(channel == '2'):\n if(subchannel == 'SHORT'):\n a = 2700.0\n c = 2800.0\n wa = 7.55\n wc = 8.91\n elif(subchannel == 'MEDIUM'):\n a = 2600.0\n c = 2880.0\n wa = 8.71\n wc = 10.34\n elif(subchannel == 'LONG'):\n a = 2590.0\n c = 3000.0\n wa = 9.89\n wc = 11.71\n\n if(channel == '3'):\n if(subchannel == 'SHORT'):\n a = 2390.0\n c = 2650.0\n wa = 11.50\n wc = 13.59\n elif(subchannel == 'MEDIUM'):\n a = 1600.0\n c = 2400.0\n wa = 13.19\n wc = 15.58\n elif(subchannel == 'LONG'):\n a = 1850.0\n c = 2550.0\n wa = 15.40\n wc = 18.14\n\n if(channel == '4'):\n if(subchannel == 'SHORT'):\n a = 1320.0\n c = 1720.0\n wa = 17.88\n wc = 21.34\n elif(subchannel == 'MEDIUM'):\n a = 1550.0\n c = 1600.0\n wa = 20.69\n wc = 24.68\n elif(subchannel == 
'LONG'):\n a = 1450.0\n c = 1200.0\n wa = 23.83\n wc = 28.43\n return a, c, wa, wc\n\n#_______________________________________________________________________\n\n\n\ndef FindNormalizationWeights(wavelength, a, c, wa, wc):\n \"\"\"\n Short Summary\n -------------\n We need to normalize the weighting of each point cloud member in the spaxel. The\n normalization of the weighting is determined from the width of the PSF as well as\n the wavelength resolution\n\n Parameters\n ----------\n wavelength- wavelength of point\n a, c, wa, wc- weight constants for the point's channel/subchannel (see FindWaveWeights)\n\n Returns\n -------\n normalized weighting for 3 dimensions\n\n \"\"\"\n alpha_weight = 1.0\n beta_weight = 1.0\n lambda_weight = 1.0\n\n beta_weight = 0.31 * (wavelength / 8.0)\n\n\n if(wavelength < 8.0):\n alpha_weight = 0.31\n else:\n alpha_weight = beta_weight\n\n\n # linear interpolation\n\n if (wavelength >= wa and wavelength <= wc):\n b = a + (c - a) * (wavelength - wa) / (wc - wa)\n\n elif (wavelength < wa):\n b = a\n else:\n b = c\n\n\n lambda_weight = wavelength / b\n\n weight = [alpha_weight, beta_weight, lambda_weight]\n return weight\n\n","sub_path":"jwst/cube_build/CubeCloud.py","file_name":"CubeCloud.py","file_ext":"py","file_size_in_byte":19527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} {"seq_id":"58800574","text":"\n'''\n Channel file:\n Line 0: Channel name\n Line 1: Number of subscribers\n Lines 2 - N: List of videos\n'''\n\n'''\n This function will allow you to open a file and read its contents line by line\n returns a list of strings, each string is a line in the file\n'''\ndef read_file_as_list(filename):\n\n # Open the file\n file = open(filename, \"r\")\n # Retrieve contents of file as lines; note that each line will end with a newline character, will strip later\n lines_from_file = file.readlines()\n\n lines = []\n for line in lines_from_file:\n lines.append(line.strip())\n\n file.close()\n return lines\n\n'''\n This function takes in a channel name\n returns the channel information: name, subscribers, videos\n'''\ndef get_channel_from_file(channel):\n\n # Name of the file containing the channel data\n filename = channel + \".txt\"\n channel_list = read_file_as_list(filename)\n\n channel_name = channel_list[0]\n channel_subscribers = int(channel_list[1])\n channel_videos = []\n\n # Videos exist\n if len(channel_list) > 2:\n channel_videos = channel_list[2:]\n \n return channel_name, channel_subscribers, channel_videos\n\ndef get_channel_name(channel):\n\n channel_name, channel_subscribers, channel_videos = get_channel_from_file(channel)\n return channel_name\n\ndef get_channel_subscriber_number(channel):\n\n channel_name, channel_subscribers, channel_videos = get_channel_from_file(channel)\n return channel_subscribers\n\ndef get_channel_videos(channel):\n \n channel_name, channel_subscribers, channel_videos = get_channel_from_file(channel)\n return channel_videos\n\n'''\n This function creates a new text file from a channel's data\n nothing is returned\n'''\ndef create_channel_name(channel_name):\n\n # Create a new text file for the channel\n filename = channel_name + \".txt\"\n file = open(filename, \"x\")\n\n # Write the channel name and the number of subscribers into the file\n file.write(channel_name + \"\\n\")\n file.write(\"0\")\n file.close()\n\ndef create_channel_subs(channel_name, channel_subscribers):\n\n # Create a new text file for the channel\n filename = channel_name + \".txt\"\n file = open(filename, \"x\")\n\n # Write the channel name and the number of subscribers into the 
file\n file.write(channel_name + \"\\n\")\n file.write(str(channel_subscribers) + \"\\n\")\n file.close()\n\ndef create_channel(channel_name, channel_subscribers, channel_videos):\n\n # Create a new text file for the channel\n filename = channel_name + \".txt\"\n file = open(filename, \"x\")\n\n # Write the channel name and the number of subscribers into the file\n file.write(channel_name + \"\\n\")\n file.write(str(channel_subscribers) + \"\\n\")\n\n for video in channel_videos:\n file.write(video + \"\\n\")\n\n file.close()\n\n'''\n This function updates an existing channel file\n nothing is returned\n'''\ndef update_existing_channel(channel_name, channel_subscribers, channel_videos):\n\n # Name of text file for the existing channel\n filename = channel_name + \".txt\"\n file = open(filename, \"w\")\n\n # Erase the contents in the current file; we will update the file by rewriting it\n file.truncate(0)\n file.close()\n\n # Same code below as creating a new channel file! Write the channel name and the number of subscribers into the file\n file = open(filename, \"w\")\n file.write(channel_name + \"\\n\")\n file.write(str(channel_subscribers) + \"\\n\")\n\n for video in channel_videos:\n file.write(video + \"\\n\")\n\n file.close()\n\ndef subscribe_to_channel(channel_name):\n\n channel_name, channel_subscribers, channel_videos = get_channel_from_file(channel_name)\n channel_subscribers += 1\n update_existing_channel(channel_name, channel_subscribers, channel_videos)","sub_path":"lib/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":3810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"584667645","text":"# BuildTarget: images/groupFirst.png\n\nimport os\n\nimport Gaffer\nimport GafferScene\n\nimport GafferUI\n\nscriptWindow = GafferUI.ScriptWindow.acquire( script )\n\nscript[\"fileName\"].setValue( os.path.abspath( \"scripts/groupFirst.gfr\" ) )\nscript.load()\ngraph = scriptWindow.getLayout().editors( GafferUI.GraphEditor )[0]\ngraph.frame( script.children( Gaffer.Node ) )\nGafferUI.WidgetAlgo.grab( widget = graph, imagePath = \"images/groupFirst.png\" )\n\nscript[\"fileName\"].setValue( os.path.abspath( \"scripts/groupSecond.gfr\" ) )\nscript.load()\ngraph.frame( script.children( Gaffer.Node ) )\nGafferUI.WidgetAlgo.grab( widget = graph, imagePath = \"images/groupSecond.png\" )\n\nscript[\"StandardOptions\"] = GafferScene.StandardOptions( \"StandardOptions\" )\nscript[\"StandardOptions\"][\"options\"][\"performanceMonitor\"][\"value\"].setValue( True )\nscript[\"StandardOptions\"][\"options\"][\"performanceMonitor\"][\"enabled\"].setValue( True )\neditor = GafferUI.NodeEditor.acquire( script[\"StandardOptions\"], floating=True )\nGafferUI.PlugValueWidget.acquire( script[\"StandardOptions\"][\"options\"][\"performanceMonitor\"] )\nGafferUI.WidgetAlgo.grab( widget = editor, imagePath = \"images/performanceMonitor.png\" )\n","sub_path":"doc/source/Tutorials/ManagingComplexity/screengrab.py","file_name":"screengrab.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"353181721","text":"import cv2,time,pandas\r\nfrom datetime import datetime\r\nvideo=cv2.VideoCapture(0)\r\nwhile 1:\r\n check,frame=video.read()\r\n gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\r\n cv2.imshow('Capturing',gray)\r\n key=cv2.waitKey(1)\r\n if key==ord('q'):\r\n 
break\r\nvideo.release()\r\ncv2.destroyAllWindows()\r\n","sub_path":"VideoCapturing.py","file_name":"VideoCapturing.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"398709134","text":"\nimport shutil\nimport os\n\ntrain_file = '/notebooks/caffe-oxford102/train.txt'\nval_file = '/notebooks/caffe-oxford102/valid.txt'\nvgg_train_dir = '/notebooks/vgg/train/'\nvgg_val_dir = '/notebooks/vgg/val/'\n\n\ndef vgg_dataset(src_file, target_dir):\n lines = open(file=src_file, encoding='utf-8').readlines()\n for line in lines:\n jpg_file, label = line.split()\n filename = os.path.basename(jpg_file)\n dst_dir = os.path.join(target_dir, label)\n if not os.path.isdir(dst_dir):\n os.makedirs(dst_dir)\n dst_path = os.path.join(dst_dir, filename)\n if not os.path.isfile(dst_path):\n shutil.copy(jpg_file, dst_path)\n\n\nvgg_dataset(train_file, vgg_train_dir)\nvgg_dataset(val_file, vgg_val_dir)","sub_path":"vgg_data.py","file_name":"vgg_data.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"186716383","text":"#!/usr/bin/env python3\n# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\n\nimport unittest\nfrom unittest.mock import MagicMock\n\nfrom benchpress.lib.analysis import analyze\nfrom benchpress.lib.metrics import Metrics\n\n\ndef metrics(results):\n \"\"\"Create a mock HistoryEntry from the results\"\"\"\n mock = MagicMock()\n mock.metrics.performance_metrics.return_value = results\n return mock\n\n\nclass TestAnalysis(unittest.TestCase):\n def setUp(self):\n self.job = MagicMock()\n self.history = MagicMock()\n self.history.load_historical_results.return_value = [\n metrics({'a': 1, 'b': 2, 'c': 3}),\n metrics({'a': 2, 'b': 1, 'c': 2}),\n metrics({'a': 1, 'b': 3, 'c': 4}),\n metrics({'a': 2, 'b': 2, 'c': 3}),\n metrics({'a': 1, 'b': 1, 'c': 1}),\n metrics({'a': 2, 'b': 2, 'c': 5}),\n ]\n\n def test_no_thresholds(self):\n \"\"\"No thresholds -> no anomalies\"\"\"\n self.job.tolerances = {}\n current = {'a': 1, 'b': 2, 'c': 3}\n anomalies = analyze(current, self.job, self.history)\n self.assertEqual(0, len(anomalies))\n\n def test_no_anomalies(self):\n \"\"\"No anomalous metric\"\"\"\n self.job.tolerances = {'a': 1.0, 'b': 1.0, 'c': 1.0}\n current = {'a': 2, 'b': 2, 'c': 3}\n anomalies = analyze(current, self.job, self.history)\n self.assertEqual(0, len(anomalies))\n\n def test_one_anomaly(self):\n \"\"\"One anomalous metric\"\"\"\n self.job.tolerances = {'a': 1.0, 'b': 1.0, 'c': 1.0}\n current = {'a': 4, 'b': 2, 'c': 3}\n anomalies = analyze(current, self.job, self.history)\n expected = [('a', 4, 0.0, 3.0)]\n self.assertCountEqual(expected, anomalies)\n\n def test_nested_names(self):\n \"\"\"Nested tolerance dicts work\"\"\"\n self.job.tolerances = {'a': {'b': 1}}\n # Job constructor uses Metrics#flatten for tolerances as well\n self.job.tolerances = Metrics.flatten(self.job.tolerances)\n self.history.load_historical_results.return_value = [\n metrics({'a.b': 1})\n ]\n current = {'a.b': 4}\n anomalies = analyze(current, self.job, self.history)\n expected = [('a.b', 4, 0.0, 2.0)]\n self.assertCountEqual(expected, anomalies)\n\n\nif __name__ == '__main__':\n 
unittest.main()\n","sub_path":"benchpress/tests/test_analysis.py","file_name":"test_analysis.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"535940025","text":"# Configuration for Celery\nimport os\n\nCELERY_TASK_SERIALIZER = 'json'\nCELERY_IMPORTS = ('tasks')\nCELERY_IGNORE_RESULT = False\nBROKER_HOST = \"127.0.0.1\" #IP address of the server running RabbitMQ and Celery\nBROKER_PORT = 5672\nBROKER_URL=os.environ.get('CLOUDAMQP_URL', 'amqp://')\nCELERY_RESULT_BACKEND = \"amqp\"\nCELERY_IMPORTS=(\"tasks\",)\n\nfrom celery.schedules import crontab\n \nCELERYBEAT_SCHEDULE = {\n 'every-minute': {\n 'task': 'tasks.process_transit_request',\n 'schedule': crontab(minute='*/1'),\n },\n}\n","sub_path":"celeryconfig.py","file_name":"celeryconfig.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"624924783","text":"class Node() :\r\n def __init__ (self) :\r\n self.data = None\r\n self.link = None\r\n\r\nnode1 = Node()\r\nnode1.data = \"a\"\r\nnode1.link = node1\r\n\r\nnode2 = Node()\r\nnode2.data = \"b\"\r\nnode1.link = node2\r\nnode2.link = node1\r\n\r\nnode3 = Node()\r\nnode3.data = \"c\"\r\nnode2.link = node3\r\nnode3.link = node1\r\n\r\nnode4 = Node()\r\nnode4.data = \"d\"\r\nnode3.link = node4\r\nnode4.link = node1\r\n\r\nnode5 = Node()\r\nnode5.data = \"e\"\r\nnode4.link = node5\r\nnode5.link = node1\r\n\r\ncurrent = node1\r\nprint(current.data, end = ' ')\r\nwhile current.link != node1:\r\n current = current.link\r\n print(current.data, end= ' ')","sub_path":"PycharmProjects/pythonProject/원형 연결 리스트/데이터가 5개인 원형 연결 리스트 생성.py","file_name":"데이터가 5개인 원형 연결 리스트 생성.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"303382098","text":"from django.conf.urls.defaults import patterns, include, url\nfrom django.contrib.gis import admin\n\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\n# Uncomment the next two lines to enable the admin:\n#from django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'SisConLog.views.home', name='home'),\n # url(r'^SisConLog/', include('SisConLog.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n url(r'^login/', include('login.urls')),\n (r'^$','principal.views.index'),\n (r'^index/$','principal.views.index'),\n (r'^query/$', 'req_features.views.request_for_html'),\n (r'^json/$', 'req_features.views.request_json'),\n (r'^json2/$', 'req_features.views.request_json2'),\n (r'^basic/$', 'req_features.views.request_basic'),\n (r'^search/$', 'denominacoes.views.search_denominacao'),\n (r'^search_historico/$', 'denominacoes.views.search_denominacao_historico'),\n (r'^denominacao/$','denominacoes.views.denominacao_edit'),\n #(r'^denominacao/fail$','denominacoes.views.denominacao_edit_fail'),\n)+ static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n\n","sub_path":"SisConLog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"374692901","text":"# Front 
matter\n##############\nimport os\nfrom os import fdopen, remove\nfrom tempfile import mkstemp\nfrom shutil import move\nimport glob\nimport re\nimport time\nimport pandas as pd\nimport numpy as np\nfrom scipy import constants\nfrom scipy.optimize import curve_fit, fsolve\nfrom scipy.interpolate import interp1d\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import AutoMinorLocator\nfrom matplotlib import gridspec\nfrom scipy.interpolate import spline\nimport math\nimport seaborn as sns\n\nmatplotlib.rc('xtick', labelsize=16) \nmatplotlib.rc('ytick', labelsize=16) \n\nrc = {'lines.linewidth': 1, \n 'axes.labelsize': 20, \n 'axes.titlesize': 20,\n 'legend.fontsize': 26,\n 'xtick.direction': u'in',\n 'ytick.direction': u'in'}\nsns.set_style('ticks', rc=rc)\n\nstart_time = time.time()\n\n\n# Define input values\n#####################\n\nV0_dict = dict()\ndV0_dict = dict()\nM_dict = dict()\n\n# Fe V0 from Dewaele et al. 2006\nV0_dict['Fe'] = 22.428\ndV0_dict['Fe'] = 0.098\nM_dict['Fe'] = 56.942\n\n# Fe0.91Ni0.09 V0 from our EOS study\nV0_dict['FeNi'] = 22.505\ndV0_dict['FeNi'] = 0.042\nM_dict['FeNi'] = 57.100\n\n# Fe0.8Ni0.1Si0.1 V0 from our EOS study\nV0_dict['FeNiSi'] = 22.952\ndV0_dict['FeNiSi'] = 0.072\nM_dict['FeNiSi'] = 54.100\n\nFe_input_filename = 'Fe/Results/input_values.csv'\nFeNi_input_filename = 'FeNi/Results/input_values.csv'\nFeNiSi_input_filename = 'FeNiSi/Results/input_values.csv'\n\nFe_phox_filename = '../050_phox_Fe_man/Results/phox_valsFromPDOS.csv'\nFeNi_phox_filename = '../060_phox_FeNi_man/Results/phox_valsFromPDOS.csv'\nFeNiSi_phox_filename = '../070_phox_FeNiSi_man/Results/phox_valsFromPDOS.csv'\n\n\n# Functions\n###########\n\ndef calcGruneisen(V,dV,V0,dV0,gamma0,dgamma0,q):\n\tgamma = gamma0*(V/V0)**q\n\tdgamma_dgamma0 = (V/V0)**q\n\tdgamma_dV = (gamma0*q/V)*(V/V0)**q\n\tdgamma_dV0 = -(gamma0*q/V0)*(V/V0)**q\n\tdgamma = np.sqrt( (dgamma_dgamma0*dgamma0)**2 + (dgamma_dV*dV)**2 + (dgamma_dV*dV0)**2 )\n\treturn gamma, dgamma\n\ndef calcPvib(V,dV,gammavib,dgammavib,Uvib,dUvib,Cvib,dCvib,Cel):\n\tPvib = (Cvib*gammavib/(Cvib+Cel))*(Uvib/V)\n\tdP_dCvib = Cel*gammavib*Uvib/((Cvib+Cel)**2*V)\n\tdP_dgamma = (Cvib/(Cvib+Cel))*(Uvib/V)\n\tdP_dU = (Cvib*gammavib/(Cvib+Cel))*(1/V)\n\tdP_dV = -(Cvib*gammavib/(Cvib+Cel))*(Uvib/V**2)\n\tdPvib = np.sqrt( (dP_dCvib*dCvib)**2 + (dP_dgamma*dgammavib)**2 +\n\t\t(dP_dU*dUvib)**2 + (dP_dV*dV)**2)\n\treturn Pvib, dPvib\n\ndef calcCel(V,V0,T,M):\n\t# From Fei et al. 
2016\n\tbeta0 = 0.07 # in J/(kg K^2)\n\tk = 1.34\n\tNA = 6.022141*10**23 # mol^(-1)\n\tkB = 1.38065*10**(-23) # J/K\n\tbeta0 = beta0*M/(10**3*kB*NA) # in kB/(atom K)\n\tCel = beta0*(V/V0)**k*T # in kB/atom\n\treturn Cel\n\n\n# Combine input and phox datasets\n#################################\n\ninput_dict = dict()\n\ninput_df = pd.read_csv(Fe_input_filename)\nphox_df = pd.read_csv(Fe_phox_filename)\n# Only use hcp phases\ninput_df = input_df[input_df['Phase']=='hcp']\ninput_df = input_df[['Folder','Index','Phase','V','dV','P','dP']]\n# Combine input and phox dataframes\ninput_df = input_df.merge(phox_df,on='Index')\ninput_dict['Fe'] = input_df\n\ninput_df = pd.read_csv(FeNi_input_filename)\nphox_df = pd.read_csv(FeNi_phox_filename)\n# Only use hcp phases\ninput_df = input_df[input_df['Phase']=='hcp']\ninput_df = input_df[['Folder','Index','Phase','V','dV','P','dP']]\n# Combine input and phox dataframes\ninput_df = input_df.merge(phox_df,on='Index')\ninput_dict['FeNi'] = input_df\n\ninput_df = pd.read_csv(FeNiSi_input_filename)\nphox_df = pd.read_csv(FeNiSi_phox_filename)\n# Only use hcp phases\ninput_df = input_df[input_df['Phase']=='hcp']\ninput_df = input_df[['Folder','Index','Phase','V','dV','P','dP']]\n# Combine input and phox dataframes\ninput_df = input_df.merge(phox_df,on='Index')\ninput_dict['FeNiSi'] = input_df\n\n\n# Calculate Gruneisen parameter at each volume: Fe\n##################################################\n\nT = 300\n\ninput_df = input_dict['Fe']\nV0 = V0_dict['Fe']\ndV0 = dV0_dict['Fe']\nM = M_dict['Fe']\nq = 1\ngammavib0 = 2.04\ndgammavib0 = 0.1\ngammaD0 = 1.74\ndgammaD0 = 0.1\n\nresults_df = input_df.copy()\nresults_df = results_df[['Folder','Index','Phase','V','dV','P','dP','KE','dKE',\n\t'Cvib','dCvib']]\n\nresults_df['gamma_vib'], results_df['dgamma_vib'] = calcGruneisen(\n\tresults_df['V'],results_df['dV'],V0,dV0,gammavib0,dgammavib0,q)\nresults_df['gamma_D'], results_df['dgamma_D'] = calcGruneisen(\n\tresults_df['V'],results_df['dV'],V0,dV0,gammaD0,dgammaD0,q)\n\n# Calculate Pvib at each volume\n\nCel = calcCel(results_df['V'],V0,T,M)\nresults_df['Pvib'],results_df['dPvib'] = calcPvib(results_df['V'],results_df['dV'],\n\tresults_df['gamma_vib'], results_df['dgamma_vib'],\n\t2*results_df['KE'],2*results_df['dKE'],results_df['Cvib'],results_df['dCvib'],\n\tCel)\nprint(results_df)\n\nresults_df = results_df.round({'gamma_vib':2,'dgamma_vib':2,'gamma_D':2,'dgamma_D':2,\n\t'Pvib':2,'dPvib':2})\nresults_df.to_csv('Fe/Results/GruneisenResults.csv',index=False)\n\n\n\n# Calculate Gruneisen parameter at each volume: FeNi\n####################################################\n\ninput_df = input_dict['FeNi']\nV0 = V0_dict['FeNi']\ndV0 = dV0_dict['FeNi']\nM = M_dict['FeNi']\nq = 1\ngammavib0 = 2.07\ndgammavib0 = 0.1\ngammaD0 = 1.69\ndgammaD0 = 0.1\n\nresults_df = input_df.copy()\nresults_df = results_df[['Folder','Index','Phase','V','dV','P','dP','KE','dKE',\n\t'Cvib','dCvib']]\n\nresults_df['gamma_vib'], results_df['dgamma_vib'] = calcGruneisen(\n\tresults_df['V'],results_df['dV'],V0,dV0,gammavib0,dgammavib0,q)\nresults_df['gamma_D'], results_df['dgamma_D'] = calcGruneisen(\n\tresults_df['V'],results_df['dV'],V0,dV0,gammaD0,dgammaD0,q)\n\n# Calculate Pvib at each volume\n\nCel = calcCel(results_df['V'],V0,T,M)\nresults_df['Pvib'],results_df['dPvib'] = calcPvib(results_df['V'],results_df['dV'],\n\tresults_df['gamma_vib'], results_df['dgamma_vib'],\n\t2*results_df['KE'],2*results_df['dKE'],results_df['Cvib'],results_df['dCvib'],\n\tCel)\nprint(results_df)\n\nresults_df = 
results_df.round({'gamma_vib':2,'dgamma_vib':2,'gamma_D':2,'dgamma_D':2,\n\t'Pvib':2,'dPvib':2})\nresults_df.to_csv('FeNi/Results/GruneisenResults.csv',index=False)\n\n\n\n# Calculate Gruneisen parameter at each volume: FeNiSi\n######################################################\n\ninput_df = input_dict['FeNiSi']\nV0 = V0_dict['FeNiSi']\ndV0 = dV0_dict['FeNiSi']\nM = M_dict['FeNiSi']\nq = 1\ngammavib0 = 2.03\ndgammavib0 = 0.1\ngammaD0 = 1.82\ndgammaD0 = 0.1\n\nresults_df = input_df.copy()\nresults_df = results_df[['Folder','Index','Phase','V','dV','P','dP','KE','dKE',\n\t'Cvib','dCvib']]\n\nresults_df['gamma_vib'], results_df['dgamma_vib'] = calcGruneisen(\n\tresults_df['V'],results_df['dV'],V0,dV0,gammavib0,dgammavib0,q)\nresults_df['gamma_D'], results_df['dgamma_D'] = calcGruneisen(\n\tresults_df['V'],results_df['dV'],V0,dV0,gammaD0,dgammaD0,q)\n\n# Calculate Pvib at each volume\n\nCel = calcCel(results_df['V'],V0,T,M)\nresults_df['Pvib'],results_df['dPvib'] = calcPvib(results_df['V'],results_df['dV'],\n\tresults_df['gamma_vib'], results_df['dgamma_vib'],\n\t2*results_df['KE'],2*results_df['dKE'],results_df['Cvib'],results_df['dCvib'],\n\tCel)\nprint(results_df)\n\nresults_df = results_df.round({'gamma_vib':2,'dgamma_vib':2,'gamma_D':2,'dgamma_D':2,\n\t'Pvib':2,'dPvib':2})\nresults_df.to_csv('FeNiSi/Results/GruneisenResults.csv',index=False)\n\n\n","sub_path":"120_GruneisenParam/detGruneisenParam.py","file_name":"detGruneisenParam.py","file_ext":"py","file_size_in_byte":7156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"323761450","text":"\"\"\"\nAnimethemes ap model.\nCopied from the flowchart the dev posted in discord.\n\"\"\"\nfrom typing import Literal, TypedDict, List, Union\nfrom .literals import UrlLike, DateLike, Types, Season, ImageFacet\n\nclass AnimeThemeDict(TypedDict):\n id: int\n created_at: DateLike\n updated_at: DateLike\n links: TypedDict('links',{'show':UrlLike})\n\nclass AnimeThemeSynonym(AnimeThemeDict):\n text: str\n\nclass AnimeThemeArtist(AnimeThemeDict):\n name: str\n slug: str\n as_: str\n\nclass AnimeThemeSong(AnimeThemeDict):\n title: str\n artists: List[AnimeThemeArtist]\n\nclass AnimeThemeVideo(AnimeThemeDict):\n basename: str\n filename: str\n path: str\n size: int\n resolution: int\n nc: bool\n subbed: bool\n lyrics: bool\n uncen: bool\n source: str\n overlap: str\n link: UrlLike\n\nclass AnimeThemeEntry(AnimeThemeDict):\n version: Union[int,Literal['']]\n episodes: str\n nsfw: bool\n spoiler: bool\n notes: str\n videos: List[AnimeThemeVideo]\n\nclass AnimeThemeTheme(AnimeThemeDict):\n type: Types\n sequence: str\n group: str\n slug: str\n song: AnimeThemeSong\n entries: List[AnimeThemeEntry]\n\nclass AnimeThemeSerie(AnimeThemeDict):\n name: str\n slug: str\n\nclass AnimeThemeResource(AnimeThemeDict):\n link: UrlLike\n external_id: int\n site: str\n as_: str\n\nclass AnimeThemeImage(AnimeThemeDict):\n path: str\n facet: ImageFacet\n link: UrlLike\n\nclass AnimeThemeAnime(AnimeThemeDict):\n name: str\n slug: str\n year: int\n season: Season\n synopsis: str\n synonyms: List[AnimeThemeSynonym]\n themes: List[AnimeThemeTheme]\n series: List[AnimeThemeSerie]\n resources: List[AnimeThemeResource]\n images: List[AnimeThemeImage]\n\nif __name__ == \"__main__\":\n from ..parsers.animethemes import fetch_animethemes\n from ..parsers.myanimelist import get_mal\n animelist = fetch_animethemes([ get_mal('sadru')[0] ])\n 
print(animelist[0]['themes'][0]['entries'][0]['videos'][0]['links']['show'])","sub_path":"animethemes_dl/models/animethemes.py","file_name":"animethemes.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"347192239","text":"import csv\nimport re\nfrom datetime import datetime\n\nfrom pymongo import MongoClient\n\n\ndef read_data(csv_file, db):\n    \"\"\"\n    Load data from a CSV file into the database\n    \"\"\"\n    with open(csv_file, encoding='utf8') as csvfile:\n        # read the data file and write the rows into the collection\n        list_row = []\n        reader = csv.DictReader(csvfile)\n        for row in reader:\n            row = dict(row)\n            row['Цена'] = int(row['Цена'])\n            row['Дата'] += '.2019'\n            row['Дата'] = datetime.strptime(row['Дата'], '%d.%m.%Y')\n            list_row.append(row)\n        db.insert_many(list_row)\n\n\ndef find_cheapest(db):\n    \"\"\"\n    Find the cheapest tickets\n    Documentation: https://docs.mongodb.com/manual/reference/operator/aggregation/sort/\n    \"\"\"\n    return list(db.find().sort(\"Цена\", 1))\n\n\ndef find_by_name(name, db):\n    \"\"\"\n    Find tickets by performer name (including by substring)\n    and return them sorted by ascending price\n    \"\"\"\n    name = re.escape(name)\n    regex = re.compile(name)\n    return list(db.find({'Исполнитель': regex}).sort(\"Цена\", 1))\n\n\ndef find_by_date(date_1, date_2, db):\n    date_1 = datetime_convert(date_1)\n    date_2 = datetime_convert(date_2)\n    return list(db.find({'Дата': {'$gt': date_1, '$lt': date_2}}))\n\n\ndef datetime_convert(date):\n    return datetime.strptime(date, '%d.%m.%Y')\n\n\nif __name__ == '__main__':\n    client = MongoClient()\n    mydb = client.homework\n    mydb_ticket = mydb.ticket\n    # read_data('artists.csv', mydb_ticket)\n    # print(find_cheapest(mydb_ticket))\n    # print(find_by_name('в', mydb_ticket))\n    print(find_by_date(\"22.4.2019\", \"22.11.2019\", mydb_ticket))\n","sub_path":"2.4.DB.Mongo.ORM/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"351725567","text":"# -*- coding: utf-8 -*-\nimport requests\nfrom flask import jsonify\nfrom flask_restful import Resource, reqparse\n\n\nparser = reqparse.RequestParser()\nparser.add_argument(name='entity_name',type=str)\nparser.add_argument(name='start_time',type=str)\nparser.add_argument(name='end_time',type=str)\n\nclass GetDistance(Resource):\n    def post(self):\n        # distance between the vehicle's start point and end point\n        parse = parser.parse_args()\n        entity_name = parse.get('entity_name')\n        start_time = parse.get('start_time')\n        end_time = parse.get('end_time')\n\n        url = 'http://yingyan.baidu.com/api/v3/track/getdistance?ak=GPccFSSW7vYUNcSpoKCzzGNsRxNGGyf1&service_id=208883&entity_name={}&start_time={}&end_time={}&is_processed=1&process_option=need_denoise=1,need_mapmatch=0,radius_threshold=0,transport_mode=driving&supplement_mode=driving'.format(entity_name,start_time,end_time)\n\n        response = requests.get(url)\n        resData = response.json()\n        return jsonify(resData)","sub_path":"test/GetDistanceApi.py","file_name":"GetDistanceApi.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"56300165","text":"# making a data loader same as that of mnist in torchvision.datasets\r\n\r\nimport os\r\nimport torch\r\nimport random\r\nfrom PIL import Image\r\nfrom torchvision.transforms import ToTensor\r\nfrom torch.utils.data import Dataset, DataLoader\r\n\r\ndevice = 'cuda'\r\nimagenet_mean = 
torch.FloatTensor([0.485, 0.456, 0.406]).unsqueeze(1).unsqueeze(2)\r\nimagenet_std = torch.FloatTensor([0.229, 0.224, 0.225]).unsqueeze(1).unsqueeze(2)\r\nimagenet_mean_cuda = torch.FloatTensor([0.485, 0.456, 0.406]).to(device).unsqueeze(0).unsqueeze(2).unsqueeze(3)\r\nimagenet_std_cuda = torch.FloatTensor([0.229, 0.224, 0.225]).to(device).unsqueeze(0).unsqueeze(2).unsqueeze(3)\r\n\r\n\r\nclass LoadDataGan(Dataset):\r\n def __init__(self, root, crop_size, scaling_factor):\r\n self.crop_size = int(crop_size)\r\n self.scaling_factor = int(scaling_factor)\r\n self.files = []\r\n for img in os.listdir(root):\r\n self.files.append(os.path.join(root, img))\r\n\r\n def __getitem__(self, item):\r\n img = Image.open(self.files[item], mode='r')\r\n img = img.convert('RGB')\r\n\r\n left = random.randint(1, img.width - self.crop_size)\r\n top = random.randint(1, img.height - self.crop_size)\r\n right = left + self.crop_size\r\n bottom = top + self.crop_size\r\n hr_img = img.crop((left, top, right, bottom))\r\n\r\n # Downsize this crop to obtain a low-resolution version of it\r\n lr_width = int(hr_img.width / self.scaling_factor)\r\n lr_height = int(hr_img.height / self.scaling_factor)\r\n lr_img = hr_img.resize((lr_width, lr_height), Image.BICUBIC)\r\n\r\n # Normalize the images after converting into tensors\r\n transform = ToTensor()\r\n lr_img = transform(lr_img)\r\n if lr_img.ndimension() == 3:\r\n lr_img = (lr_img - imagenet_mean) / imagenet_std\r\n elif lr_img.ndimension() == 4:\r\n lr_img = (lr_img - imagenet_mean_cuda) / imagenet_std_cuda\r\n\r\n hr_img = transform(hr_img)\r\n if hr_img.ndimension() == 3:\r\n hr_img = (hr_img - imagenet_mean) / imagenet_std\r\n elif hr_img.ndimension() == 4:\r\n hr_img = (hr_img - imagenet_mean_cuda) / imagenet_std_cuda\r\n\r\n return lr_img, hr_img\r\n\r\n def __len__(self):\r\n return len(self.files)\r\n\r\n\r\ndef load_data_gan(root='', crop_size=96, scaling_factor=4, batch_size=16):\r\n data = LoadDataGan(root, crop_size=crop_size, scaling_factor=scaling_factor)\r\n loader = DataLoader(data, batch_size=batch_size, shuffle=True)\r\n return loader\r\n","sub_path":"load_data_gan.py","file_name":"load_data_gan.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"503615714","text":"import typing\nimport pandas as pd\nimport logging\nimport boto3\nfrom io import StringIO\n\nlogger = logging.getLogger(__name__)\n\n\ndef df_to_s3_csv(\n df: pd.DataFrame, bucket_name: str, file_name: str\n) -> typing.Dict[str, str]:\n try:\n logger.debug(f\"Storing dataframe to S3 as: {bucket_name}/{file_name}\")\n csv_buffer = StringIO()\n df.to_csv(csv_buffer)\n s3_resource = boto3.resource(\"s3\")\n response = s3_resource.Object(bucket_name, file_name).put(\n Body=csv_buffer.getvalue()\n )\n logger.debug(f\"S3 response: {response}\")\n logger.info(f\"Stored to S3 bucket: {bucket_name}, Object name: {file_name}\")\n\n return response\n except Exception as e:\n logger.error(e)\n logger.error(f\"Args: bucket_name={bucket_name}, file_name={file_name}\")\n raise StorageError(\" pyanalysis encountered an error \", e)\n\n\nclass StorageError(Exception):\n \"\"\"Generic exception for the pyanalysis module used to wrap\n exceptions generated by dependencies.\n \"\"\"\n\n def __init__(self, msg: str, original_exception: Exception):\n super(StorageError, self).__init__(f\"{msg}: {original_exception}\")\n self.original_exception = 
original_exception\n","sub_path":"pyanalysis/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"294628686","text":"from discord_webhook import DiscordWebhook\nimport time as mytime\nimport json\nimport os\nimport time\nimport requests\n#import redis \nimport argparse\nfrom datetime import datetime, time, timedelta\nfrom decimal import *\n#from web3.auto import Web3\n#from web3.auto import w3\n\n\n\ndef run_query(query):\n    request = requests.post('http://hasura.core.cloudchainsinc.com/v1/graphql', json={'query': query})\n    if request.status_code == 200:\n        return request.json()\n    else:\n        raise Exception(\"Query failed to run by returning code of {}. {}\".format(request.status_code, query))\n\n\n\ndef print_line(string_data):\n    # prints w/o new line\n    print(string_data, end=\"\\r\", flush=True)\n    return\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--debug', help='enable debug mode, no actual selling/buying will happen', action=\"store_true\")\nparser.add_argument('--token', help='token to trade on', default='LTC')\nparser.add_argument('--time', help='lookback window in seconds', default=86400)\nargs = parser.parse_args()\n\ndxendtime = int(datetime.now().timestamp())\ndxstarttime = dxendtime - (int(args.time)) # 24hours ago\n\nprint('time: {}'.format(dxstarttime))\nnewlistings = \"\"\"\nquery MyQuery {\n  uniswappair(limit: 100, order_by: {created_at: desc}) {\n    address\n    created_at\n    tokenObj0 {\n      name\n      symbol\n    }\n    tokenObj1 {\n      name\n      symbol\n    }\n  }\n}\n\"\"\"\ngq_tokenpairs = \"\"\"\nquery MyQuery {\n  uniswappair {\n    tokenObj0 {\n      address\n      symbol\n    }\n    tokenObj1 {\n      address\n      symbol\n    }\n    token0\n    token1\n    address\n    id\n  }\n}\n\"\"\"\n\n\n\ntokendict = {}\ntokenpairs = run_query(gq_tokenpairs)\ntokendata = tokenpairs['data']['uniswappair']\n\nfor x in tokendata:\n    tcontract = x['address']\n    t0symbol = x['tokenObj0']['symbol']\n    t0contract = x['tokenObj0']['address']\n    t1symbol = x['tokenObj1']['symbol']\n    t1contract = x['tokenObj1']['address']\n    # skip pairs where either side is an LP-token placeholder\n    if t0symbol == 'UNI-V2' or t1symbol == 'UNI-V2':\n        continue\n    else:\n        tokendict[t0contract] = t0symbol\n        tokendict[t1contract] = t1symbol\n        tokendict[tcontract] = '{}:{}'.format(t0symbol,t1symbol)\n        tokendict[x['id']] = '{}:{}'.format(t0symbol,t1symbol)\n\n\ndef cleanorder(orderamount, decimalsallowed):\n    return '{0:.{1}f}'.format(float(orderamount), int(decimalsallowed))\n\ndef myFunc(e):\n    return len(e)\n\ndef get_change(current, previous):\n    if current == previous:\n        return 0\n    try:\n        return (abs(current - previous) / previous) * 100.0\n    except ZeroDivisionError:\n        return float(0)\n\n#print(tokendict)\nlastlist = []\ntrackerlist = []\ntradecountdict = {}\n#print(rq)\nprint('start')\nrq = run_query(newlistings)\n\nfor z in rq['data']['uniswappair']:\n    print(z['address'])\n    print(z['tokenObj0'])\n    print(z['tokenObj1'])\n    address = z['address']\n    token0 = z['tokenObj0']\n    token1 = z['tokenObj1']\n    dexURL = 'https://www.dextools.io/app/uniswap/pair-explorer/{}'.format(address)\n    webhookurl = 'https://discord.com/api/webhooks/842886931117506561/PpqAGZGRPRFUQKDMxWQ1oCGEnWOUpBUy2fiZpr02DJX7ohVsACsE2SWufLLmMV91Hte2'\n    message = '{} {} {} {}'.format(address, token0, token1, dexURL)\n    webhook = DiscordWebhook(url=webhookurl, content=message)\n    response = webhook.execute()\n    \n\n    break","sub_path":"new_listings.py","file_name":"new_listings.py","file_ext":"py","file_size_in_byte":3258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"144650095","text":"# a = widget (specify its parent)\n# a.layout (pack or grid)\nfrom tkinter import *\n\ndef bmi():\n    global e1\n    global e2\n    global result\n    try:\n        height = float(e1.get())\n        weight = float(e2.get())\n        bmi = weight / (height / 100) ** 2\n        result[\"text\"] = bmi\n    except ValueError:\n        result[\"text\"] = \"你肯定又在亂打!!!\"\n\n\nwindow = Tk()\n# Frame: container layer\nf1 = Frame(window)\nf1.pack()\n# Label: text label (display only)\nl1 = Label(f1, text=\"輸入身高\")\nl1.pack()\n# Entry: single-line input\ne1 = Entry(f1)\ne1.pack()\n\nl2 = Label(f1)\nl2[\"text\"] = \"輸入體重\"\nl2.pack()\n\n# Entry: single-line input\ne2 = Entry(f1)\ne2.pack()\n\n# Button\nb1 = Button(f1, text=\"計算\", bg=\"red\", command=bmi)\nb1.pack(expand=True, fill=BOTH)\n\nresult = Label(f1)\nresult.pack()\n\nwindow.mainloop()","sub_path":"2_tk1.py","file_name":"2_tk1.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"113708395","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSC004 \n\nPlot Rs vs T for P and AP.\n\n@author: py07jtb\n\"\"\"\nimport numpy\nimport re\nimport Stoner.Analysis as Analysis\nimport matplotlib.pyplot as plt # pylab imports numpy namespace as well, use pyplot in scripts\nimport Stoner.PlotFormats as SPF\nimport Stoner.Plot as SP\nfrom Stoner.Folders import DataFolder\nfrom lmfit import minimize, Parameters, Parameter, report_fit\n\nclass workfile(Analysis.AnalyseFile,SP.PlotFile):\n    \"\"\"A class that combines AnalyseFile and PlotFile together\"\"\"\n    pass\n\ndef func(x):\n    return numpy.mean(x)\n\ndef quad(x,a,b,c,d):\n    return (a*x**4)+(b*x**2)+(c*x)+d\n \n\n\n### Read in Data ### \npattern = 'SC004_2_TDeltaRsvsT.txt'\nfolder = DataFolder('/Volumes/BATLEY/SC004/Transport/DeltaRvsT',pattern = pattern,type=workfile) \n \nRs = folder[0]\n\nRs.template=SPF.JTBPlotStyle\nRs.figure() # Creating new figures like this means we don't reuse windows from run to run\nf=plt.gcf()\nf.set_size_inches((5.5,3.75),forward=True) # Set for A4 - will make wrapper for this someday\n\n\nRs.title = ''\nRs.plot_xy(\"T\",\"DR\",label = None,linestyle='',marker='o',linewidth=2,markersize=5) \n\nRs.ylabel=r\"$\\Delta R_{s}$ (mV/A)\"\nRs.xlabel=r\"T (K)\"\nplt.legend(loc='best')\nplt.tight_layout()\n \n \n \n \n \n","sub_path":"Devices/SC004/SC004_DeltaRs_vs_T_single_sep.py","file_name":"SC004_DeltaRs_vs_T_single_sep.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"319826809","text":"#!/usr/bin/env python\nimport boto3\nimport json\nimport sys\nimport requests\nimport jmespath\nimport os\nimport http.client\n\ndef get_auth_param(parameter_store_name):\n\t\"\"\" Gets auth token for collibra from parameter store \"\"\"\n\tresponse = client.get_parameters(\n\t\tNames=[\n\t\t\tparameter_store_name,\n\t ],\n\t WithDecryption=True\n\t)\n\n\treturn jmespath.search('Parameters[0].Value', response)\n\ndef upload_file_to_collibra(auth, file_name):\n\t\"\"\" With auth token, uploads file to Collibra\n\t\t\n\t\tReturns:\n\t\t\tcollibra file_id for uploaded file\n\t\"\"\"\n\twith open(file_name, 'rb') as f:\n\t\theaders = {\n\t\t\t'Authorization': auth\n\t\t}\n\t\tfiles = {}\n\t\tfiles[file_name] = f\n\t\tr = requests.post('https://byu.collibra.com/rest/1.0/file/as_text', headers=headers, 
files=files)\n\t\tresponse = r.json()\n\t\treturn response['files']['file']\n\ndef associate_file_with_workflow(auth, file_name, file_id):\n\t\"\"\" Associates Collibra file with workflow\"\"\"\n\tconn = http.client.HTTPSConnection(\"byu.collibra.com\")\n\n\tpayload = \"file=\"+file_id+\"&fileName=\" + file_name\n\theaders = {\n\t 'authorization': auth,\n\t 'content-type': \"application/x-www-form-urlencoded\"\n\t}\n\n\tconn.request(\"POST\", \"/rest/1.0/workflow\", payload, headers)\n\n\tres = conn.getresponse()\n\tdata = res.read()\n\n\tprint(data.decode(\"utf-8\"))\n\treturn data.decode(\"utf-8\")\n\nclient = boto3.client('ssm', region_name='us-west-2')\nauth = get_auth_param(\"collibra-bpmn.prd.authentication\")\nfiles_path = \"BPMN/src/main/java\"\nos.chdir(files_path)\nfor file in os.listdir('.'):\n\tprint(file)\n\tfile_id = upload_file_to_collibra(auth, file)\n\tprint(file_id)\n\tfile_assoc = associate_file_with_workflow(auth, file, file_id)\n\tprint(file_assoc)","sub_path":"uploadFiles.py","file_name":"uploadFiles.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"36675294","text":"# -*- coding: utf-8 -*-\n\nfrom web_game.lib.functions import *\n\ntemplate_addsessioninfo=True\n\nclass TemplaceContext():\n \n def __init__(self):\n self.LCID = \"\"\n self.template = \"\"\n \n self.data = {}\n\n def AssignValue(self, key, value):\n self.data[key] = value\n \n def Parse(self, key):\n self.data[key] = True\n \n# Return an initialized template\ndef GetTemplate(request, name):\n result = TemplaceContext()\n '''\n result.TrimWhiteSpaces = true\n \n on error resume next\n err.clear\n result.Load Server.MapPath(\"templates\\\" & Session.LCID & \"\\\" & name & \".html\")\n if err.number <> 0 then result.Load Server.MapPath(\"templates\\\" & name & \".html\")\n on error goto 0\n '''\n result.template = name + \".html\"\n\n if template_addsessioninfo:\n # set LCID to the current session LCID\n result.LCID = request.session.get(\"LCID\")\n result.AssignValue(\"LCID\", request.session.get(\"LCID\"))\n result.AssignValue(\"sessionid\", request.session.get(\"SessionID\"))\n\n result.AssignValue(\"PATH_IMAGES\", \"/assets/\")\n result.AssignValue(\"PATH_TEMPLATE\", \"/game/templates\")\n\n return result","sub_path":"web_game/lib/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"507990814","text":"\n## Reference http://naelshiab.com/tutorial-send-email-python/\nimport smtplib\nimport time\nfrom datetime import date\nfrom email.MIMEMultipart import MIMEMultipart\nfrom email.MIMEText import MIMEText\nfrom email.MIMEBase import MIMEBase\nfrom email import encoders\n\nfromaddr = \"swatidhoke18@gmail.com\"\ntoaddr = \"swatidhoke@gmail.com\"\n\nmsg = MIMEMultipart()\n\nmsg['From'] = fromaddr\nmsg['To'] = toaddr\nmsg['cc'] = \"meghadhoke04@gmail.com\"\nmsg['Subject'] = \" %s Test Results\" % date.fromtimestamp(time.time())\n\nbody = \"Please find test results log file attached.\" \\\n \"\\nThanks, \" \\\n \"\\nSwati\"\n\nmsg.attach(MIMEText(body, 'plain'))\n#/Users/swatidhoke/Desktop\nfilename = \"test.log\"\nattachment = open(\"/Users/swatidhoke/PycharmProjects/Automation_Frameworks/UI_Selenium_framework/TestResults/test.log\", \"rb\")\n\npart = MIMEBase('application', 
'octet-stream')\npart.set_payload((attachment).read())\nencoders.encode_base64(part)\npart.add_header('Content-Disposition', \"attachment; filename= %s\" % filename)\n\nmsg.attach(part)\n\nserver = smtplib.SMTP('smtp.gmail.com', 587)\nserver.starttls()\nserver.login(fromaddr, \"ashalata11\")\ntext = msg.as_string()\nserver.sendmail(fromaddr, toaddr, text)\nserver.quit()\n\n","sub_path":"UI_Selenium_framework/Tests/sendmail.py","file_name":"sendmail.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"354537227","text":"import gym\nimport random\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.optimizers import Adam\n\n#TO DO:\n#Want to input time series data\n#Essentially convolution!\n#Must make sure disjoint games don't overlap in training\n\n##TIME SERIES###\n#Almost there\n#I need to follow the data flow and make sure training_data isn't compromised\n#Separating games for training purposes would be rad\n#Specifically frames between games might confuse AI\n#You can do it!\n\n\n#Amount of frames remembered by the AI\nMEM_SIZE = 3\nnum_epochs = 10\n\nenv = gym.make(\"MsPacman-ram-v0\")\n\n#Games generated during random initialization\ninitial_games = 100\n#number of epochs. Increases by 5 with each iteration\navg_scores = []\n\ndef training_data_initializer():\n global initial_games\n global avg_scores\n #will store the actions taken based on observations for the neural net\n env.reset()\n raw_data = []\n scores = []\n for game_index in range(initial_games):\n game_memory = []\n done = 0\n score = 0\n prev_obs = []\n counter = -1\n while not done:\n #do a random action and record the outputs\n action = random.randrange(0, env.action_space.n)\n observation, reward, done, info = env.step(action)\n score += reward\n #if we have had an observation before, append that one to our data set and the action that we took from that state\n if len(prev_obs) > 0:\n raw_data.append([prev_obs, action])\n #set prev_obs to our new state\n prev_obs = observation\n counter += 1\n env.reset()\n #append the final game score to all of the moves from that game (our metric)\n for x in range(0, counter):\n raw_data[len(raw_data)-1-x].append(score)\n scores.append(score)\n #tracking the amount of games gone by\n if game_index % 10 == 0:\n print(\"{} games simulated...\".format(game_index))\n #tracking average scorese\n avg_scores.append(sum(scores)/len(scores))\n print(avg_scores)\n\n return raw_data\n\n#construction of the model\ndef build_model(input_size):\n #note sure what this means (oops)\n model = Sequential()\n\n #some dense layers. last layer is linear. 
not sure how we might optimize this\n model.add(Dense(input_size, input_dim = input_size, activation='relu'))\n model.add(Dense(1024, activation='relu'))\n model.add(Dense(1024, activation='relu'))\n model.add(Dense(1024, activation='relu'))\n model.add(Dense(512, activation='relu'))\n model.add(Dense(1, activation='linear'))\n model.compile(loss='mse', optimizer=Adam())\n\n return model\n\n#the function which takes training data and feeds it to the model for backprop\ndef training_function(training_data, model):\n\n global num_epochs\n \n X = []\n Y = []\n #taking all the moves we've stored\n for moveID in range(2, len(training_data)):\n nextSeries = []\n #flatten three frames and their actions into the next sample\n for prevMove in range(0, MEM_SIZE):\n nextSeries.extend(training_data[moveID-prevMove][0].flatten())\n nextSeries.append(training_data[moveID-prevMove][1])\n #print(np.array(nextSeries))\n X.append(nextSeries)\n Y.append(training_data[moveID][2])\n model.fit(np.array(X), np.array(Y), epochs = 20, batch_size = 1000)\n\n num_epochs += 3\n \n return model\n\n#uses the model to guess which move will give the best total game score\ndef make_decision(observation, model):\n #it predicts the total game store for each possible action to take\n decisions = [0.0]*8\n for i in range(0, 8):\n #for each action, plug in our set of observations with an appended action\n nextDec = np.asarray([np.append(observation, i).flatten()])\n decisions[i] = model.predict(nextDec)\n #not sure why I was trying to pop this...\n #np.pop(observation)\n \n #adding some randomness\n if(random.randrange(0,10) >= 2):\n return np.argmax(decisions)\n else:\n decisions[np.argmax(decisions)] = 0.0\n return np.argmax(decisions)\n\n#this plays a game using the model and the make_decision call\ndef play_game(model, renderBool):\n global MEM_SIZE\n \n new_data = []\n done = 0\n score = 0\n prev_obs = []\n memory = []\n #why is this -1?\n counter = -1\n env.reset()\n\n while not done:\n #render every 10th game\n if renderBool%30 == 1:\n env.render()\n #act randomly if we don't have any info\n if len(new_data) < MEM_SIZE-1:\n action = random.randrange(0, env.action_space.n)\n #otherwise make a decision using the previous observations\n else:\n action = make_decision(memory, model)\n memory = memory[129:]\n\n observation, reward, done, info = env.step(action)\n\n score += reward\n\n #if this isn't our first frame, record the observation we used and the decision we made\n #also add the current state and action just taken to the memory\n if len(prev_obs) > 0:\n new_data.append([prev_obs, action])\n memory = np.append(memory, action)\n #if it is our first time, add to local memory our current state\n\n memory = np.concatenate((memory, observation))\n\n\n prev_obs = observation\n\n counter += 1\n\n for x in range(0, counter):\n new_data[len(new_data)-1-x].append(score)\n env.reset()\n return (score, new_data)\n \ndef train_generation(model, training_data):\n global avg_scores\n \n gen_data = []\n scores = []\n for i in range(0, 100):\n gameScore, gameData = play_game(model, i)\n scores.append(gameScore)\n gen_data.extend(gameData)\n print(\"{} games simulated...\".format(i+1))\n\n avg_scores.append(sum(scores)/len(scores))\n print(avg_scores)\n\n training_data.extend(gen_data)\n #print(training_data)\n model = training_function(training_data, model)\n print(avg_scores)\n\n return model, training_data\n\ntraining_data = training_data_initializer()\n\ninput_size = (len(training_data[0][0].flatten())+1)*MEM_SIZE\nprint(\"Input 
size: {}\".format(input_size)) \n#print(training_data[0][0])\n\n#Figure out where to put this\nmodel = build_model(input_size)\nmodel = training_function(training_data, model)\nfor x in range(0, 100):\n model, training_data = train_generation(model, training_data)\n #print(avg_scores)\n f = open(\"averageScoreLog\", \"w\")\n f.write(str(avg_scores[len(avg_scores)-1]) + \" \")\n","sub_path":"GymNet/gymTestNet/PacNet.py","file_name":"PacNet.py","file_ext":"py","file_size_in_byte":6583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"33037526","text":"# -*- coding:utf-8 -*-\n#请实现一个函数,将一个字符串中的空格替换成“%20”\n#例如,当字符串为We Are Happy.则经过替换之后的字符串\n#为We%20Are%20Happy。\nclass Solution:\n #s源字符串\n def replaceSpace(self, s):\n \"\"\"\n :param s:str\n :return: str\n \"\"\"\n res=s.replace(' ','%20')\n return res\n\nso=Solution()\ns='We are happy!'\nres=so.replaceSpace(s)\nprint(res)\n\t\t\n","sub_path":"myproject/offer/offer2/replace_string.py","file_name":"replace_string.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"22769343","text":"\"\"\"\"hostinfo_02\"\n\nRevision ID: 2d274cd8b63b\nRevises: 4a7737b9f329\nCreate Date: 2016-08-10 11:44:48.689839\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '2d274cd8b63b'\ndown_revision = '4a7737b9f329'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('hostinfo', sa.Column('disk_used', sa.String(length=64), nullable=True))\n op.add_column('hostinfo', sa.Column('mem_used', sa.String(length=64), nullable=True))\n op.add_column('hostinfo', sa.Column('running_vms', sa.Integer(), nullable=True))\n op.add_column('hostinfo', sa.Column('vms', sa.Integer(), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('hostinfo', 'vms')\n op.drop_column('hostinfo', 'running_vms')\n op.drop_column('hostinfo', 'mem_used')\n op.drop_column('hostinfo', 'disk_used')\n ### end Alembic commands ###\n","sub_path":"src/web/migrations/versions/20160810_2d274cd8b63b_hostinfo_02.py","file_name":"20160810_2d274cd8b63b_hostinfo_02.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"59081052","text":"from processing import *\nimport math, random, time\n\nclass Point:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n \n def distance(self, point2):\n return math.sqrt((point2.x-self.x)**2 + (point2.y-self.y)**2)\n \n def move(self, vector1):\n self.x += vector1.h\n self.y += vector1.v\n \n def makeVectorTo(self, point2):\n newVector = Vector(point2.x - self.x, point2.y - self.y)\n return newVector\n \n\nclass Vector:\n def __init__(self, h, v):\n self.h = h\n self.v = v\n \n def add(self, vector2):\n self.h += vector2.h\n self.v += vector2.v\n \n def subtract(self, vector2):\n self.h -= vector2.h\n self.v -= vector2.v\n \n def multiply(self, num):\n self.h *= num\n self.v *= num\n \n def divide(self, num):\n if num == 0:\n return \"error\"\n else:\n self.h = self.h/num\n self.v = self.v/num\n \n def length(self):\n return math.sqrt((self.h**2) + (self.v**2))\n \n def normalize(self):\n self.h = self.h/self.length()\n self.v = self.v/self.length()\n \n def random(self, length):\n self.h = random.random() * length\n self.v = math.sqrt(4-(self.h**2))\n\nclass Circle:\n def __init__(self):\n self.position = Point(random.randint(4, 796), random.randint(4, 596))\n self.vector = Vector(0, 0)\n self.vector.random(2)\n self.virus = 0\n self.hasTarget = False\n self.counter = 0\n \n def makeVirus(self):\n global strategy\n self.virus = 1\n if strategy == 2:\n self.vector = Vector(0, 0)\n if strategy == 3:\n self.vector = Vector(0, 0)\n self.vector.random(2)\n \n def draw(self):\n if self.virus == 0:\n fill(0, 255, 0)\n else:\n fill(255, 0, 0)\n ellipse(self.position.x, self.position.y, 7, 7)\n self.position.move(self.vector)\n if self.position.x <= 3.5 or self.position.x >= 796.5:\n self.vector.h *= -1\n if self.position.y <= 3.5 or self.position.y >= 596.5:\n self.vector.v *= -1\n def distance(self, circle):\n return self.position.distance(circle.position)\n\ncircles = []\nviruses = []\n\ndef updates():\n global circles, viruses\n updatedCircles = []\n updatedViruses = viruses\n for circle in circles:\n if circle.virus == 1:\n updatedViruses.append(circle)\n else:\n updatedCircles.append(circle)\n circles = updatedCircles;\n viruses = updatedViruses\n\n# Strategy 1: Completely Random\ndef strategy1():\n global circles, viruses\n for virus in viruses:\n for circle in circles:\n if virus.distance(circle) < 7:\n circle.makeVirus()\n\n# Strategy 2: Still unless within 50 pixels\ndef strategy2():\n global circles, viruses\n for virus in viruses:\n if virus.hasTarget == False:\n for circle in circles:\n if virus.distance(circle) <= 50:\n virus.hasTarget = True\n virus.target = circle\n else:\n if virus.target.virus == 1:\n virus.hasTarget = False\n virus.vector = Vector(0, 0)\n else:\n virus.vector = virus.position.makeVectorTo(virus.target.position)\n virus.vector.normalize()\n virus.vector.multiply(2)\n \n for circle in circles: \n if virus.distance(circle) < 7:\n circle.makeVirus()\n virus.hasTarget = False\n virus.vector = Vector(0, 0)\n\n# Strategy 3: Random unless within 50 pixels\ndef strategy3():\n global 
circles, viruses\n    for virus in viruses:\n        if virus.hasTarget == False:\n            for circle in circles:\n                if virus.distance(circle) <= 50:\n                    virus.hasTarget = True\n                    virus.target = circle\n        else:\n            if virus.target.virus == 1:\n                virus.hasTarget = False\n            else:\n                virus.vector = virus.position.makeVectorTo(virus.target.position)\n                virus.vector.normalize()\n                virus.vector.multiply(2)\n        \n        for circle in circles: \n            if virus.distance(circle) < 7:\n                circle.makeVirus()\n                virus.hasTarget = False\n\n# Strategy 4: Viruses repel each other \ndef strategy4():\n    global circles, viruses\n    for virus in viruses:\n        for circle in circles:\n            if virus.distance(circle) < 7:\n                circle.makeVirus()\n        for virus2 in viruses:\n            if virus2 != virus:\n                if virus.distance(virus2) < 14 and virus.counter == 0:\n                    virus.vector.multiply(-1)\n                    virus.counter = 20\n        if virus.counter > 0:\n            virus.counter -= 1\n\n# Strategy 5: Viruses form line(s)\ndef strategy5():\n    global circles, viruses\n    for virus in viruses:\n        for virus2 in viruses:\n            if virus2 != virus:\n                if virus.distance(virus2) < 14:\n                    virus2.vector = virus.vector\n                    virus2.position.x = virus.position.x\n                    virus2.position.y = virus.position.y + 14\n\n        for circle in circles:\n            if virus.distance(circle) < 7:\n                circle.makeVirus()\n    \ndef initCircles():\n    global circles, viruses, startTime\n    circles = []\n    viruses = []\n    for i in range(50):\n        circles.append(Circle())\n    viruses.append(Circle())\n    viruses[0].makeVirus()\n    startTime = time.perf_counter()\n    \ndef setup():\n    size(800,600)\n    initCircles()\n    \ndef draw():\n    global circles,trials, max_trials, startTime, strategy\n    if (trials < max_trials):\n        background(200)\n        for circle in circles:\n            circle.draw()\n        for virus in viruses:\n            virus.draw()\n\n\n        if strategy == 1:\n            strategy1()\n            if (len(circles) == 0):\n                trials += 1\n                elapsed = time.perf_counter() - startTime\n                a = open(\"data1.csv\", \"a\")\n                a.write(str(elapsed) + \"\\n\")\n                initCircles()\n        \n        elif strategy == 2:\n            strategy2()\n            elapsed = time.perf_counter() - startTime\n            if elapsed > 60:\n                initCircles()\n            else:\n                if (len(circles) == 0):\n                    trials += 1\n                    a = open(\"data2.csv\", \"a\")\n                    a.write(str(elapsed) + \"\\n\")\n                    initCircles()\n        \n        elif strategy == 3:\n            strategy3()\n            if (len(circles) == 0):\n                trials += 1\n                elapsed = time.perf_counter() - startTime\n                a = open(\"data3.csv\", \"a\")\n                a.write(str(elapsed) + \"\\n\")\n                initCircles()\n        elif strategy == 4:\n            strategy4()\n            if (len(circles) == 0):\n                trials += 1\n                elapsed = time.perf_counter() - startTime\n                a = open(\"data4.csv\", \"a\")\n                a.write(str(elapsed) + \"\\n\")\n                initCircles()\n        \n        elif strategy == 5:\n            strategy5()\n            if (len(circles) == 0):\n                trials += 1\n                elapsed = time.perf_counter() - startTime\n                a = open(\"data5.csv\", \"a\")\n                a.write(str(elapsed) + \"\\n\")\n                initCircles()\n        \n        updates()\n\nmax_trials = 100\ntrials = 0\n\n##### Change Strategy Number Below: ######\nstrategy = 4\nrun()\n","sub_path":"week-8/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"302438734","text":"# file name : selenium06.py\n# image crawling\n\nfrom selenium import webdriver\nimport urllib.request\nimport time\nimport os\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nprint(os.path.join(BASE_DIR,'abc','def','a.png'))\n\noptions = webdriver.ChromeOptions()\n\noptions.add_argument('headless') # do not display the browser window\noptions.add_argument(\"disable-gpu\") \noptions.add_argument(\"lang=ko_KR\") \noptions.add_argument('user-agent=Mozilla/5.0 (Macintosh; Intel Mac 
OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36') # user-agent \n\ndriver=webdriver.Chrome('./Crawling/chromedriver.exe',options=options)\nurl = 'http://ihongss.com/home/post?id=p_1579160264639'\ndriver.get(url)\ntime.sleep(2)\n#copy -> selector\nimg = driver.find_element_by_css_selector('#image_view_0')\nprint(img)\n#image_view_0 copy-selector\n\nfile1 = img.get_attribute(\"src\") #찾은 태그 중에서 src의 값\nurllib.request.urlretrieve(file1, \"./Download/a2.png\") #이렇게 하면 download에 a2.png로 저장되어야 \n\ndriver.close()\n\ndriver=webdriver.Chrome('./Crawling/chromedriver.exe',options=options)\nurl = 'https://www.monsterzym.com/'\ndriver.get(url)\ntime.sleep(2)\n#copy -> selector\nimg = driver.find_element_by_xpath('//*[@id=\"content\"]/div/div[3]/div[1]/a/img')\nprint(img)\n#image_view_0 copy-selector\n\nfile2 = img.get_attribute(\"src\") #찾은 태그 중에서 src의 값\nurllib.request.urlretrieve(file2, \"./Download/a3.png\") #이렇게 하면 download에 a2.png로 저장되어야 \n\ndriver.close()","sub_path":"Crawling/selenium06.py","file_name":"selenium06.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"247395984","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\n\nfrom alipay.aop.api.constant.ParamConstants import *\n\n\nclass MorphoCreateSource(object):\n\n def __init__(self):\n self._address = None\n self._id = None\n\n @property\n def address(self):\n return self._address\n\n @address.setter\n def address(self, value):\n self._address = value\n @property\n def id(self):\n return self._id\n\n @id.setter\n def id(self, value):\n self._id = value\n\n\n def to_alipay_dict(self):\n params = dict()\n if self.address:\n if hasattr(self.address, 'to_alipay_dict'):\n params['address'] = self.address.to_alipay_dict()\n else:\n params['address'] = self.address\n if self.id:\n if hasattr(self.id, 'to_alipay_dict'):\n params['id'] = self.id.to_alipay_dict()\n else:\n params['id'] = self.id\n return params\n\n @staticmethod\n def from_alipay_dict(d):\n if not d:\n return None\n o = MorphoCreateSource()\n if 'address' in d:\n o.address = d['address']\n if 'id' in d:\n o.id = d['id']\n return o\n\n\n","sub_path":"alipay/aop/api/domain/MorphoCreateSource.py","file_name":"MorphoCreateSource.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"150439911","text":"\"\"\"\r\nMany of the required functions have been provided for you. Your task is to complete the\r\ncalculateSD() function at the end of this file. The calculateSD() function will make use\r\nof all of the provided functions.\r\n\r\nYou will also need to write the countAdjectives(), countAdverbs(), getUniqueWords(), and \r\ncountWords() functions in this Python file.\r\n\r\n\"\"\"\r\n\r\nimport nltk\r\n\r\n\"\"\"\r\nImporting the necessary NLTK libraries and modules.\r\n\"\"\"\r\nfrom nltk.tag import StanfordPOSTagger\r\nfrom nltk.corpus import wordnet\r\nfrom nltk import pos_tag, word_tokenize\r\nfrom nltk.corpus import words\r\nfrom nltk.tokenize import word_tokenize\r\n\r\n\r\n\r\n\"\"\"\r\nTo identify the part-of-speech of the words retrieved from\r\nWord2vec, we used the conditional frequency feature of the NLTK module\r\nwhich returns a frequency-ordered list of the possible parts of speech associated\r\nwith all of the English words that are found in the Brown Corpus. 
Our sys-\r\ntem uses the Brown Corpus to generate the frequency-ordered list because\r\nof the fact that the words contained in the Brown Corpus are annotated with\r\npart-of-speech tags.\r\n\"\"\"\r\n\r\nwordtags = nltk.ConditionalFreqDist((w.lower(), t) \r\n for w, t in nltk.corpus.brown.tagged_words(tagset=\"universal\"))\r\n\r\n\r\ndef findPOS(word):\r\n \"\"\"\r\n This is a function that accepts a word as its parameter and returns the part-of-speech of the word.\r\n The function considers adjectives, adverbs and nouns.\r\n \"\"\"\r\n\t\r\n lisPOS = list(wordtags[word])\r\n if \"ADJ\" in lisPOS:\r\n return \"ADJECTIVE\"\r\n if \"ADV\" in lisPOS:\r\n return \"ADVERB\"\r\n if \"NN\" in lisPOS:\r\n return \"NOUN\"\r\n \r\n\r\ndef readFile(filename):\r\n \"\"\"\r\n This is a function that accepts a path to a file as its parameter, reads in and returns the file\r\n \"\"\"\r\n speechFile = open(filename, \"r\")\r\n speech = speechFile.read()\r\n speechFile.close()\r\n return speech\r\n\r\n\r\ndef getWords(text):\r\n \"\"\"\r\n This is a function that segments the words in a document\r\n \"\"\"\r\n text = text.replace(\"\\n\", \" \")\r\n text = text.replace(\"-\", \" \")\r\n while text.count(\" \") > 0 :\r\n text = text.replace(\" \", \" \")\r\n text = text.lower()\r\n cleanedUp = \"\"\r\n for char in text:\r\n if char.isalpha() or char == \" \":\r\n cleanedUp = cleanedUp + char\r\n return sorted(cleanedUp.split())\r\n\r\n\r\ndef prepareSemanticDifferential():\r\n \"\"\"\r\n This is a function that reads in the EPA values from the Osgood wordlist and stores the values in \r\n a Python dictionary.\r\n \"\"\"\r\n\t\r\n filename = (\"OsgoodOriginal.csv\") \r\n fileIn = open(filename, 'r')\r\n allData = {}\r\n line = fileIn.readline()\r\n while line != \"\":\r\n line = fileIn.readline().strip()\r\n if line != \"\":\r\n values = line.split(',')\r\n wordData = {}\r\n wordData['evaluation'] = float(values[1])\r\n wordData['activity'] = float(values[2])\r\n wordData['potency'] = float(values[3])\r\n allData[str(values[0])] = wordData\r\n fileIn.close()\r\n return allData\r\n\r\n\r\n#This is the function that calculate the EPA valuess\r\ndef calculateSD(filename):\r\n \"\"\"\r\n This is the function that you need to write. This function will calculate the evaluation, activity and \r\n potency levels of the text. You will need to use all of provided functions findPOS(), readFile(), \r\n getWords() and prepareSemanticDifferential() in your solution. 
\r\n \"\"\"\r\n #evaluationSum: store the running sum of evaluation scores.\r\n #activitySum: store the running sum of activity scores.\r\n #potencySum: store the running sum of potency scores.\r\n speech = readFile(filename)\r\n words = getWords(speech)\r\n evaluationSum = 0\r\n activitySum = 0\r\n potencySum = 0\r\n allData = prepareSemanticDifferential()\r\n for word in words:\r\n if findPOS(word) and findPOS(word) != \"NOUN\":\r\n if word in allData:\r\n evaluationSum += allData[word]['evaluation']\r\n activitySum += allData[word]['activity']\r\n potencySum += allData[word]['potency']\r\n\r\n print(\"Evaluation Score: \",evaluationSum)\r\n print(\"Activity Score: \",activitySum)\r\n print(\"Potency Score: \",potencySum)\r\n\r\n\r\n#This is the function that returns the number of adjectives in the text file.\r\ndef countAdjectives(filename):\r\n words = getWords(readFile(filename))\r\n countAdj = 0\r\n for word in words: \r\n if findPOS(word) == \"ADJECTIVE\":\r\n countAdj += 1\r\n print(\"The total number of adjective is: \",countAdj)\r\n\r\n\r\n#This is the function that returns the number of adverbs in the text file. \r\ndef countAdverbs(filename):\r\n words = getWords(readFile(filename))\r\n countAdv = 0\r\n for word in words:\r\n if findPOS(word) == \"ADVERB\":\r\n countAdv += 1\r\n print(\"The total number of adverbs is: \",countAdv)\r\n\r\n\r\n#This is the function that returns the subset of unique words from the text file.\r\ndef getUniqueWords(filename):\r\n words = getWords(readFile(filename))\r\n uniqueWords = []\r\n for word in words:\r\n if not word in uniqueWords:\r\n uniqueWords.append(word)\r\n return uniqueWords\r\n \r\n\r\n#This is the function that returns the count of each of the unique words\r\ndef countWords(filename):\r\n words = getWords(readFile(filename))\r\n uniqueWords = getUniqueWords(filename)\r\n result = {}\r\n for word in uniqueWords:\r\n result[word] = words.count(word)\r\n return result\r\n\r\n\r\ndef main():\r\n filename = \"a.txt\"\r\n calculateSD(filename)\r\n countAdjectives(filename)\r\n countAdverbs(filename)\r\n print(getUniqueWords(filename))\r\n print(countWords(filename))\r\n\r\nmain()","sub_path":"Yun Miao, 20043044, Assignment1.py","file_name":"Yun Miao, 20043044, Assignment1.py","file_ext":"py","file_size_in_byte":5675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"386212162","text":"# This source code is part of the Biotite package and is distributed\n# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further\n# information.\n\n__author__ = \"Patrick Kunzmann\"\n__all__ = [\"SequenceLogo\"]\n\nimport numpy as np\nfrom ...visualize import Visualizer\nfrom .colorschemes import get_color_scheme\n\nclass SequenceLogo(Visualizer):\n \"\"\"\n This class is used for creation of sequence logos. [1]_\n\n EXPERIMENTAL: Future API changes are probable.\n\n Parameters\n ----------\n alignment : Alignment\n The logo is created based on this alignment.\n width : float\n The width of the figure (pixels).\n height : float\n The height of the figure (pixels).\n \n References\n ----------\n \n .. 
[1] TD Schneider and RM Stephens,\n \"Sequence logos: a new way to display consensus sequences\"\n Nucleic Acids Res, 18, 6097-6100 (1990).\n \"\"\"\n \n def __init__(self, alignment, width, height):\n super().__init__()\n # Check if all sequences share the same alphabet\n sequences = alignment.sequences\n self._alphabet = sequences[0].get_alphabet()\n for seq in sequences:\n if seq.get_alphabet() != self._alphabet:\n raise ValueError(\"Alphabets of the sequences in the alignment \"\n \"are not equal\")\n \n trace = alignment.trace\n self._freq = np.zeros((len(trace), len(self._alphabet)))\n for i in range(trace.shape[0]):\n for j in range(trace.shape[1]):\n index = trace[i,j]\n if index != -1:\n code = sequences[j].code[index]\n self._freq[i, code] += 1\n self._freq = self._freq / np.sum(self._freq, axis=1)[:, np.newaxis]\n # 0 * log2(0) = 0 -> Convert NaN to 0\n no_zeros = self._freq != 0\n pre_entropies = np.zeros(self._freq.shape)\n pre_entropies[no_zeros] \\\n = self._freq[no_zeros] * np.log2(self._freq[no_zeros])\n ## 0 * log2(0) = 0 -> Convert NaN to 0\n #pre_entropies[np.isnan(pre_entropies)] = 0\n self._entropies = -np.sum(pre_entropies, axis=1)\n self._max_entropy = np.log2(len(self._alphabet))\n\n self._width = width\n self._height = height\n self._font = None\n self._colors = get_color_scheme(\"rainbow\", self._alphabet)\n \n def set_font(self, font):\n \"\"\"\n Set the font used for the symbols in the logo.\n\n Parameters\n ----------\n font : FontProperties\n The font that should be used for symbol rendering.\n \"\"\"\n self._font = font\n \n def set_color_scheme(self, scheme):\n \"\"\"\n Set the color scheme used for the logo.\n\n Parameters\n ----------\n scheme : str or list of (tuple or str)\n Either a valid color scheme name\n (e.g. 
``\"rainbow\"``, ``\"clustalx\"``, etc.)\n            or a list of `matplotlib` compatible colors.\n            The list length must be at least as long as the\n            length of the alphabet used by the sequences.\n        \"\"\"\n        if isinstance(scheme, str):\n            self._colors = get_color_scheme(scheme, self._alphabet)\n        else:\n            self._colors = scheme\n\n    def generate(self):\n        from matplotlib.patches import Rectangle\n        from matplotlib.text import Text\n        from matplotlib.patheffects import AbstractPathEffect\n\n        class ScaleEffect(AbstractPathEffect):\n            def __init__(self, scale_x, scale_y):\n                self._scale_x = scale_x\n                self._scale_y = scale_y\n\n            def draw_path(self, renderer, gc, tpath, affine, rgbFace=None):\n                affine = affine \\\n                    .identity() \\\n                    .scale(self._scale_x, self._scale_y) \\\n                    + affine\n                renderer.draw_path(gc, tpath, affine, rgbFace)\n\n        fig = self.create_figure(size=(self._width, self._height))\n        renderer = fig.canvas.get_renderer()\n        \n        symbol_width = self._width / len(self._entropies)\n        pos_heights = (1 - self._entropies/self._max_entropy) * self._height\n        symbols_heights = pos_heights[:, np.newaxis] * self._freq\n        index_order = np.argsort(symbols_heights, axis=1)\n        for i in range(symbols_heights.shape[0]):\n            index_order = np.argsort(symbols_heights)\n            start_height = 0\n            for j in index_order[i]:\n                height = symbols_heights[i,j]\n                if height > 0:\n                    symbol = self._alphabet.decode(j)\n                    text = Text(i*symbol_width, start_height, symbol,\n                                ha=\"left\", va=\"bottom\", color=self._colors[j],\n                                fontproperties=self._font, figure=fig)\n                    fig.texts.append(text)\n                    # Rescale symbols,\n                    # so that they fit the given width and height\n                    # Scale factor is desired size\n                    # divided by current size\n                    bounds = text.get_window_extent(renderer=renderer).bounds\n                    text.set_path_effects([\n                        ScaleEffect(symbol_width / bounds[2],\n                                    height / bounds[3])\n                    ])\n                    start_height += height\n        return fig","sub_path":"src/biotite/sequence/graphics/logo.py","file_name":"logo.py","file_ext":"py","file_size_in_byte":5411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"32991664","text":"class Solution(object):\n    def validPalindrome(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: bool\n        \"\"\"\n        count, l, r = 0, 0, len(s) - 1\n        while l <= r:\n            if s[l] != s[r]: \n                left, right = s[l:r], s[l+ 1:r + 1]\n                return left == left[::-1] or right == right[::-1] \n            l += 1\n            r -= 1\n        return True","sub_path":"Leetcode_FirstPass/Palindrome/680_validPalindromeII.py","file_name":"680_validPalindromeII.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"98214695","text":"#!python3\n\n# Capture all arguments\ndef multiply(*args):\n    total = 1\n\n    for arg in args:\n        total = total * arg\n\n    return total\n\n\nprint(multiply(1, 3, 5))\n\n\n#\ndef add(x, y):\n    return x + y\n\n\nnums = [3, 5]\nprint(add(*nums)) # Unpack entire list as arguments\n\n\n#\ndef add2(x, y):\n    return x + y\n\nnums = {'x': 15, 'y': 25} # x and y have the same names as the add function arguments\nprint(add2(**nums)) # Pass each key of the dict as a named argument\n\n\n# pass a named argument called 'operator'\n# *args gets all arguments, except the named arguments called operator\ndef apply(*args, operator): \n    if operator == \"*\":\n        return multiply(*args)\n    elif operator == \"+\":\n        return sum(args)\n    else:\n        return \"No valid operator provided to apply().\"\n\nprint('Apply: ', apply(1, 3, 6, 7, 
operator=\"*\"))\n","sub_path":"unpacking_arguments.py","file_name":"unpacking_arguments.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"597165848","text":"import Autodesk.Revit.DB as DB\nfrom Autodesk.Revit.DB import Transaction, Document\nfrom System.Collections.Generic import List\nimport rpw\n\ndoc = __revit__.ActiveUIDocument.Document\nuidoc = __revit__.ActiveUIDocument\n\nselect_file = rpw.ui.forms.select_file(extensions='All Files (*.*)|*.*', title='Select File', multiple=False, restore_directory=True)\n\npath_index = select_file.LastIndexOf(\"\\\\\"[:])\n\nfilepath = select_file[0:path_index]\n\nfilename = select_file[path_index + 4:]\n\nnavis_options = DB.NavisworksExportOptions()\n\nt = Transaction(doc, \"Export Current View to nwc\")\nt.Start()\n\ndoc.Export(filepath, filename, navis_options)\n\nt.Commit()\n","sub_path":"pyMapes.extension/_deprecated tools/Export Currnet View to nwc.pushbutton/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"275369763","text":"import jsonpickle as jsonpickle\n\nclass Persistencia():\n\n def save_json(cls,pregunta) :\n text_open=open(\"files/\" + '.json',mode = 'w')\n json_pre=jsonpickle.encode(pregunta)\n text_open.write(json_pre)\n text_open.close()\n\n @classmethod\n def load_json(cls,file_name) :\n text_open=open(\"files/\" + file_name,mode = 'r')\n json_pre=text_open.readline()\n pregunta=jsonpickle.decode(json_pre)\n text_open.close()\n return pregunta\n","sub_path":"Infraestructura/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"322263609","text":"#encoding: utf-8\n\"\"\"\n@Time : 2017/11/23\n@Author : author_name\n@Desc : 后台fio数据测试\n\"\"\"\nimport itertools\nimport subprocess\nimport os\nimport signal\nimport threading\nimport shlex, time\nimport sys\nimport json\nfrom lib.fio import testparams\nfrom django.core.cache import cache\nimport re\nimport queue\nimport time\nimport datetime\n#import pexpect\nimport platform\nfrom django.shortcuts import render\nfrom django.http import HttpResponse ,JsonResponse\n#return HttpResponse('Hello,World!')\nimport importlib\n\n#杀死其他线程\ndef fio_kill():\n try:\n os.killpg(fio_process.pid, signal.SIGTERM)\n fio_process_output, fio_process_error = fio_process.communicate()\n except:\n pass\n\ndef fio_parse_output():\n global exit_code\n log = open(\"test.txt\", \"a\")\n log.write(\"%s\\n\" % time.asctime(time.localtime(time.time())))\n temp = {'read_iops_total':0,\n 'read_bw_total':0,\n 'read_lat_peak':0,\n 'write_iops_total':0,\n 'write_bw_total':0,\n 'write_lat_peak':0\n }\n i=0\n thread_number = 0\n while fio_process.poll() == None:\n exit_code=None\n line = fio_process.stdout.readline()\n parsedline = line.split(\";\")\n\n if len(parsedline) >= 130:\n if thread_number < numjobs:\n read_iops_all[thread_number].append(int(parsedline[7]))\n read_bw_all[thread_number].append(int(parsedline[6]))\n write_iops_all[thread_number].append(int(parsedline[48]))\n write_bw_all[thread_number].append(int(parsedline[47]))\n read_lat_mean_all[thread_number].append(\n float(parsedline[15])/1000)\n read_lat_max_all[thread_number].append(\n float(parsedline[14])/1000)\n write_lat_mean_all[thread_number].append(\n float(parsedline[56])/1000)\n 
write_lat_max_all[thread_number].append(\n float(parsedline[55])/1000)\n thread_number += 1\n\n if thread_number == numjobs:\n if numjobs >= 1:\n tmp1 = []\n for x in range(0, numjobs):\n tmp1.append(read_iops_all[x][-1])\n tmp2 = []\n for x in range(0, numjobs):\n tmp2.append(read_bw_all[x][-1])\n tmp3 = []\n for x in range(0, numjobs):\n tmp3.append(write_iops_all[x][-1])\n tmp4 = []\n for x in range(0, numjobs):\n tmp4.append(write_bw_all[x][-1])\n tmp5 = []\n for x in range(0, numjobs):\n tmp5.append(read_lat_max_all[x][-1])\n tmp6 = []\n for x in range(0, numjobs):\n tmp6.append(write_lat_max_all[x][-1])\n read_iops_total.append(sum(tmp1))\n read_bw_total.append(sum(tmp2))\n read_lat_peak.append(max(tmp5))\n write_iops_total.append(sum(tmp3))\n write_bw_total.append(sum(tmp4))\n write_lat_peak.append(max(tmp6))\n dict2 = {'read_iops_total':read_iops_total[i],\n 'read_bw_total':read_bw_total[i],\n 'read_lat_peak':read_lat_peak[i],\n 'write_iops_total':write_iops_total[i],\n 'write_bw_total':write_bw_total[i],\n 'write_lat_peak':write_lat_peak[i]\n }\n json_dict2 = json.dumps(dict2)\n log.write(\"%s\\n\" % (json_dict2))\n log.flush()\n i += 1\n for n in dict2:\n if temp[n] == 0:\n temp[n] = dict2[n]\n temp[n] = temp[n]*s + dict2[n]*(1-s)\n dict2[n] = temp[n]\n q.put(dict2)\n\n # yield dict2\n thread_number = 0\n else:\n exit_code=fio_process.poll()\n else:\n exit_code=fio_process.poll()\n log.close()\n\n\ndef fio_queue_get():\n if not q.empty():\n a = q.get()\n # json_dict3 = json.dumps(a)\n return a\n\n# def fio_res():\n# while 1:\n# time.sleep(1)\n# print(fio_queue_get())\n\n\ndef fio_test(bs, rw, numjobs_, runtime, filename, weights=0.5):\n global parsing_thread, fio_process, timer, read_iops_all, read_bw_all, write_iops_all, write_bw_all, read_lat_mean_all, read_lat_max_all, write_lat_mean_all,\\\n write_lat_max_all, read_iops_total, read_bw_total, write_iops_total, write_bw_total, read_lat_peak, write_lat_peak, numjobs, flag, q, s\n q = queue.Queue()\n if re.match(r\"^(\\d+)(k|K|m|M)$\",bs) is None:\n return \"bs illegality\"\n if re.match('(read|write|rw|randread|randwrite|randrw)',rw) is None:\n return \"rw illegality\"\n if type(numjobs_) is not int:\n return \"numjobs illegality\"\n if type(runtime) is not int:\n return \"runtime illegality\"\n DISK = testparams.getalldiskname()\n flag = 1\n for i in DISK:\n if i != filename:\n pass\n else:\n flag = 0\n if flag:\n return \"filename illegality\"\n if weights >= 1 or weights < 0:\n return \"weights illegality\"\n numjobs = numjobs_\n s = weights\n fio_command = '''fio --minimal --eta=0\n --status-interval=1\n --name=%s-sequential-%s\n --ioengine=libaio\n --direct=1\n --rw=%s\n --iodepth=128\n --numjobs=%d\n --buffered=0\n --size=100%%\n --runtime=%d\n --time_based\n --randrepeat=0\n --norandommap\n --refill_buffers\n --filename=/dev/%s\n --bs=%s\n --thread''' % (bs,rw,rw,numjobs,runtime,filename,bs)\n # print(fio_command)\n args = shlex.split(fio_command)\n fio_process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True,preexec_fn=os.setpgrp)\n read_iops_all = [[] for i in itertools.repeat(None, numjobs)]\n read_bw_all = [[] for i in itertools.repeat(None, numjobs)]\n write_iops_all = [[] for i in itertools.repeat(None, numjobs)]\n write_bw_all = [[] for i in itertools.repeat(None, numjobs)]\n read_lat_mean_all = [[] for i in itertools.repeat(None, numjobs)]\n read_lat_max_all = [[] for i in itertools.repeat(None, numjobs)]\n write_lat_mean_all = [[] for i in itertools.repeat(None, 
numjobs)]\n    write_lat_max_all = [[] for i in itertools.repeat(None, numjobs)]\n    read_iops_total = []\n    read_bw_total = []\n    write_iops_total = []\n    write_bw_total = []\n    read_lat_peak = []\n    write_lat_peak = []\n    t1 = threading.Thread(target=fio_parse_output, args=())\n    # t2 = threading.Thread(target=fio_res, args=())\n    t1.start()\n    # t2.start()\n    t1.join()\n    # t2.join()\n    # print(fio_queue_get())\n\n\n\nif __name__ == '__main__':\n    bs ='128k'\n    rw ='rw'\n    numjobs_ = 1\n    runtime = 5\n    filename = 'sdd'\n    weights = 0.5\n    fio_test(bs,rw,numjobs_,runtime,filename,weights)\n    while True:\n        time.sleep(1)\n        print(fio_queue_get())\n\n\n\n\n\n","sub_path":"www/lib/fio/fiotest.py","file_name":"fiotest.py","file_ext":"py","file_size_in_byte":7539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"97388885","text":"from bs4 import BeautifulSoup as soup\r\nimport re\r\ncpfs = []\r\nwith open(input(\"enter the data file name: \")+'.txt','r') as arq:\r\n    arqui = arq.read().strip()\r\n    regex = re.findall('([0-9]{2}[\\\.]?[0-9]{3}[\\\.]?[0-9]{3}[\\/]?[0-9]{4}[-]?[0-9]{2})|([0-9]{3}[\\\.]?[0-9]{3}[\\\.]?[0-9]{3}[-]?[0-9]{2})',arqui)\r\n# each match is a (cnpj, cpf) tuple; keep whichever group actually matched\r\nfor dados in regex:\r\n    cpfs.append(dados[0] or dados[1])\r\n# write the collected numbers once, instead of re-writing the whole list for every match\r\nwith open('cpfs.txt', 'a') as cp:\r\n    for cpf in cpfs:\r\n        cp.write(cpf + '\\n')\r\n        print(cpf)\r\n","sub_path":"cpf extraction.py","file_name":"cpf extraction.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"136550345","text":"from django.test import TestCase\nfrom .models import Patient\nfrom datetime import date\n\n# Create your tests here.\nclass PatientTests(TestCase):\n    \"\"\"Patient model tests.\"\"\"\n\n    def test_str(self):\n        bday = date(1972, 1, 1)\n        patient = Patient(first_name='John', last_name='Smith', birthdate=bday)\n        # Test that __str__ returns the expected value\n        self.assertEquals(\n            str(patient),\n            'Smith John (1972-01-01)',\n        )\n\n        # Test that the property returns the expected value\n        self.assertEquals(\n            patient.full_name,\n            'SMITH John',\n        )\n\n        # Test that the property returns the expected value\n        #\n        # !!
 Warning: this value depends on the current date and needs to be updated manually\n        #\n        self.assertEquals(\n            patient.age,\n            44,\n        )\n\n","sub_path":"losteod/patient/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"151770409","text":"from apps.bankcontroller.views import (CreateMoneyCardView,\n                                       InfoListMoneyTransfer,\n                                       InfoListShopService, ServiceView)\nfrom apps.users.views import UserViewSet\nfrom rest_framework.routers import DefaultRouter\n\nrouter = DefaultRouter()\n\nrouter.register(r'users', UserViewSet, basename='users')\nrouter.register(r'services', ServiceView, basename='services')\nrouter.register(r'info_list_service', InfoListShopService,\n                basename='info-list-service')\nrouter.register(r'info_list_money_transfer',\n                InfoListMoneyTransfer, basename='info-list-money-transfer')\nrouter.register(r'create_money_card', CreateMoneyCardView,\n                basename='create-money-card')\n\nurlpatterns = [] + router.urls\n","sub_path":"apps/services/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"296279716","text":"from django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse\nfrom django.core.paginator import Paginator\nfrom django.db.models import Q\nimport random\n\nfrom .models import PaperInfo, ConferenceInfo\n\n# Pagination helper, adapted from https://blog.csdn.net/weixin_44951273/article/details/100889972?utm_medium=distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2-3.channel_param&depth_1-utm_source=distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2-3.channel_param\n\n\nclass MyPaginator(Paginator):  # subclass of Paginator\n    def __init__(self, object_list, per_page, show_count=2, orphans=0, allow_empty_first_page=True):  # show_count is how many page numbers to show before/after the current page (default 2)\n        super().__init__(object_list, per_page, orphans, allow_empty_first_page)  # inherit the parent's attributes and methods\n        self.show_count = show_count\n        self.has_previous_more = True  # whether there are more pages before the show_count window\n        self.has_next_more = True  # whether there are more pages after the show_count window\n    # override the page method\n\n    def page(self, number):\n        self.number = int(number)\n        # check whether more page numbers exist before the current page than show_count displays\n        if self.number <= self.show_count + 2:\n            self.has_previous_more = False\n            self.previous_range = range(1, self.number)\n        else:\n            self.previous_range = range(self.number - self.show_count, self.number)\n        # check whether more page numbers exist after the current page than show_count displays\n        if self.number >= self.num_pages - self.show_count - 1:\n            self.has_next_more = False\n            self.next_range = range(self.number + 1, self.num_pages + 1)\n        else:\n            self.next_range = range(self.number + 1, self.number + self.show_count + 1)\n        return super().page(number)\n\n\n# Create your views here.\n# Paper home page: a paper list plus a search box\ndef index(request):\n    papers_list = PaperInfo.objects.all()\n    # pick five papers at random; later this will be replaced by interest-based recommendations\n    papers = random.sample(list(papers_list), 5)\n    context = {\"papers\": papers}\n    return render(request, 'papers/index.html', context)\n\n# Paper detail page\n\n\ndef detail(request, paper_id):\n    paper = get_object_or_404(PaperInfo, pk=paper_id)\n    return render(request, 'papers/detail.html', {'paper': paper})\n    # return HttpResponse(\"The paper id is \" + paper_id)\n\n# Search page\n\n\ndef search(request, searchword, pindex):\n    q = request.GET.get('q')\n    error_msg = ''\n    if searchword and q is None:\n        q = searchword\n    searchword = q\n    papers_list = PaperInfo.objects.all()\n    paginator = MyPaginator(papers_list, 10)\n    pindex = int(pindex)\n    page =
 paginator.page(pindex)\n    context = {\"page\": page, \"paginator\": paginator, \"searchword\": searchword}\n    if not q:\n        error_msg = \"Please enter a keyword\"\n        context['error_msg'] = error_msg\n        return render(request, 'papers/result.html', context)\n    # search both the title and the abstract\n    papers_list = PaperInfo.objects.filter(Q(paper_title__icontains=q) | Q(abstract__icontains=q))\n    paginator = MyPaginator(papers_list, 10)\n    page = paginator.page(pindex)\n    context = {\"page\": page, \"paginator\": paginator, \"error_msg\": error_msg, \"searchword\": searchword}\n    return render(request, 'papers/result.html', context)\n","sub_path":"papers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"594324650","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.express as px\n# imports for pandas\nimport pandas as pd\n\n\napp = dash.Dash()\n\ndf = pd.read_excel('Clase1_Datos.xlsx',sheet_name = 'Linea')\n\ndf['Range High-Low'] = 100*(df['High']-df['Low'])/df['Low']\ndf['Range Open-Close'] = 100*(df['Close']-df['Open'])/df['Open']\nfig = px.scatter(df, x='Range High-Low', y='Range Open-Close',size='Volume')\n\napp.layout = html.Div([\n\thtml.H1(children = 'Mi Gráfico'),\n\tdcc.Graph(\n        id='graph',\n        figure=fig\n    )]\n\t)\n\nif __name__ == '__main__':\n\tapp.run_server(debug = True)","sub_path":"03. Data Analytics y Business Intelligence/DashTutorial/ejercicios/basico/ScatterPlot.py","file_name":"ScatterPlot.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"581770399","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\n\nimport pandas as pd\n\nfrom utils import RG_TEMPLATE, STORAGE_ACCOUNT_TEMPLATE\n\nusers = pd.read_json(\"users.json\", orient=\"records\")\nstorage_keys = json.loads(open(\"storage_keys.json\", \"r\").read())\n\nADMIN_STORAGE_ACCOUNT = STORAGE_ACCOUNT_TEMPLATE.format(\"admin\")\nCONTAINER = \"images\"\nPATTERN = \"ubuntugpu.vhd\"\n\nadmin_key = storage_keys[\"admin\"]\n\n# unbuffered (buffering=0) is not allowed for text-mode files in Python 3, so use default buffering\nwith open(\"azcopy.bat\", \"w\") as f:\n    for _, row in users.iterrows():\n        row = dict(row)\n        user = row[\"user\"]\n        user_account = STORAGE_ACCOUNT_TEMPLATE.format(user)\n        user_rg = RG_TEMPLATE.format(user)\n        user_key = storage_keys[user]\n        command = \\\n            \"\"\"md \"C:\\\\Users\\\\andrey\\\\Desktop\\\\AzureTemp\\\\{d}\"\\r\\n\\\nstart \"AzCopy {s} to {d}\" \"C:\\\\Program Files (x86)\\\\Microsoft SDKs\\\\Azure\\\\AzCopy\\\\AzCopy.exe\" \\\n    /Source:https://{s}.blob.core.windows.net/{cont} \\\n    /Dest:https://{d}.blob.core.windows.net/{cont} \\\n    /SourceKey:{sk} \\\n    /DestKey:{dk} \\\n    /Pattern:{p}\\\n    /V:\"C:\\\\Users\\\\andrey\\\\Desktop\\\\AzureTemp\\\\{d}-log.txt\"\\\n    /Z:\"C:\\\\Users\\\\andrey\\\\Desktop\\\\AzureTemp\\\\{d}\"\\r\\n\"\"\".format(\n            s=ADMIN_STORAGE_ACCOUNT,\n            d=user_account,\n            sk=admin_key,\n            dk=user_key,\n            p=PATTERN,\n            cont=CONTAINER\n        )\n        f.write(command)\n        print(user, \"done\")\n","sub_path":"azure/generate_azcopy_commands.py","file_name":"generate_azcopy_commands.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"369516074","text":"import os\nimport argparse\nimport _pickle as cPickle\nimport gc\nimport time\nimport json\nimport 
copy\n\nimport hist as hist2\nfrom coffea import processor\n\n# hists path\npath = \"/eos/uscms/store/user/docampoh/boostedhiggs/Aug31_UL/outfiles/\" \n\n# xsec paths\nxsec_path = \"/uscms/home/docampoh/nobackup/boostedhiggs/data/xsecs.json\" \n\ndef iter_flatten(iterable):\n \"\"\"flatten nested lists\"\"\"\n it = iter(iterable)\n for e in it:\n if isinstance(e, (list, tuple)):\n for f in iter_flatten(e):\n yield f\n else:\n yield e\n\ndef read_hists(sample):\n files = os.listdir(path)\n\n hww = {\n \"GluGluHToWWToLNuQQ\": [file for file in files if \"GluGluHToWWToLNuQQ\" in file]\n }\n tt = {\n \"TTToSemiLeptonic\": [file for file in files if \"TTToSemiLeptonic\" in file]\n }\n qcd = {\n \"QCD_HT300to500\": [file for file in files if \"QCD_HT300to500\" in file],\n \"QCD_HT500to700\": [file for file in files if \"QCD_HT500to700\" in file],\n \"QCD_HT700to1000\": [file for file in files if \"QCD_HT700to1000\" in file],\n \"QCD_HT1000to1500\": [file for file in files if \"QCD_HT1000to1500\" in file],\n \"QCD_HT1500to2000\": [file for file in files if \"QCD_HT1500to2000\" in file],\n \"QCD_HT2000toInf\": [file for file in files if \"QCD_HT2000toInf\" in file]\n }\n wjets = {\n \"WJetsToLNu_HT-200To400\": [file for file in files if \"WJetsToLNu_HT-200To400\" in file],\n \"WJetsToLNu_HT-400To600\": [file for file in files if \"WJetsToLNu_HT-400To600\" in file],\n \"WJetsToLNu_HT-600To800\": [file for file in files if \"WJetsToLNu_HT-600To800\" in file],\n \"WJetsToLNu_HT-800To1200\": [file for file in files if \"WJetsToLNu_HT-800To1200\" in file],\n \"WJetsToLNu_HT-1200To2500\": [file for file in files if \"WJetsToLNu_HT-1200To2500\" in file],\n }\n singleElectron = {\n \"SingleElectron\": [file for file in files if \"SingleElectron\" in file]\n }\n singleMuon = {\n \"SingleMuon\": [file for file in files if \"SingleMuon\" in file]\n }\n \n samples_dics = {\n \"hww\": hww,\n \"tt\": tt,\n \"qcd\": qcd,\n \"wjets\": wjets,\n \"electron\": singleElectron,\n \"muon\": singleMuon,\n }\n\n return samples_dics[sample]\n\n\n\ndef load_hists(sample_dic, histograms):\n \"\"\"load and accumulate histograms by sample\"\"\"\n \n hists = {key: [] for key in sample_dic}\n \n for key, hist in sample_dic.items():\n print(f\"processing {key} histograms\")\n for h in hist:\n with open(f\"{path}/{h}\", \"rb\") as f:\n # disable garbage collector\n gc.disable()\n \n # load and save histograms\n H = cPickle.load(f)\n k = [key for key in H]\n histos = {key:val for key, val in H[k[0]].items() if key in histograms}\n hists[key].append(histos)\n \n # enable garbage collector again\n gc.enable()\n\n for key in sample_dic:\n sample_dic[key] = processor.accumulate(hists[key])\n\n return sample_dic\n\n\ndef scale_hists(sample_dic, lumi):\n \"\"\"scale histograms to cross section\"\"\"\n\n with open(xsec_path) as f:\n xsecs = json.load(f)\n\n out = []\n for sample in sample_dic:\n \n hists = sample_dic[sample]\n sumw = sample_dic[sample][\"sumw\"]\n \n try:\n xsec = eval(xsecs[sample])\n except:\n xsec = xsecs[sample]\n\n weight = (xsec * lumi) / sumw\n hists = copy.deepcopy(hists)\n\n for h in hists.values():\n if isinstance(h, hist2.Hist):\n h *= weight\n \n out.append(hists)\n \n if len(out) == 1:\n return {sample.split(\"_\")[0]: out[0]}\n else:\n return {sample.split(\"_\")[0]: processor.accumulate(out)}\n\n\ndef main(args):\n histograms = [hist for hist in iter_flatten(args.histogram)]\n print(f\"histogram: {histograms[1:][0]}\")\n \n start_time = time.time()\n sample_dic = read_hists(args.sample)\n sample_dic = 
load_hists(sample_dic, histograms)\n if args.sample not in [\"electron\", \"muon\"]:\n output = scale_hists(sample_dic, args.lumi)\n else:\n output = sample_dic\n\n output_path = \"/uscms/home/docampoh/nobackup/boostedhiggs/hists\"\n output_name = \"\"\n for hist in histograms[1:]:\n output_name += \"_\" + hist\n\n os.system(f\"mkdir -p {output_path}/{args.histogram[-1][0]}\")\n with open(f\"{output_path}/{args.histogram[-1][0]}/{args.sample}{output_name}.pkl\", \"wb\") as f:\n cPickle.dump(output, f, protocol=-1)\n \n end_time = time.time()\n\n print(f\"{histograms[1:][0]} histograms processed in {end_time - start_time:.2f} seconds\")\n\n\nif __name__ == \"__main__\":\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\"--sample\", dest=\"sample\", type=str)\n parser.add_argument(\"--histogram\", dest=\"histogram\", nargs=\"+\", action=\"append\", default=[\"sumw\"], type=str)\n parser.add_argument(\"--lumi\", dest=\"lumi\", default=41500, type=float)\n args = parser.parse_args()\n\n main(args)\n","sub_path":"python/process_histograms.py","file_name":"process_histograms.py","file_ext":"py","file_size_in_byte":5098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"230971592","text":"#-*- coding: latin-1 -*-\n\nfrom pts.vzGrid import *\nfrom pyramid.view import view_config\nfrom pts.vzComponents import *\nfrom pts.vzSQL import mysql_db\nfrom pts.vzGrid import vzMultipleComponentGridActions\nfrom pts.login.views import jsonLogin\n\nclass UserGroupPermissions(vzOneToManyComponent):\n\n type = \"multiple\"\n\n name = \"permissions\"\n\n table_name = \"user_group_x_user_permission\"\n\n fields = {\"id\" : ['hidden', False,False, 'user_group_x_user_permission.user_group_id'],\n \"permission\" : [\"select\", True, False,\"user_permission.user_permission_id\"],}\n \n list_field = [\"id\" , \"permission\"]\n\n form_select_sql = {\"permission\":\"SELECT user_permission_id , CONCAT(user_permission_name, ' - ', user_permission_desc) as user_permission_name FROM user_permission ORDER BY user_permission_name\"}\n \n #select_related_sql = \"\"\"SELECT CONCAT(user_group_id ,'/',user_permission_id) , user_permission_id FROM user_group_x_user_permission WHERE user_group_id= %s\"\"\"\n\n select_related_sql = \"\"\"SELECT CONCAT(gp.user_group_id ,'/',gp.user_permission_id) , gp.user_permission_id FROM user_group_x_user_permission gp LEFT JOIN user_permission p ON p.user_permission_id = gp.user_permission_id WHERE gp.user_group_id = %s ORDER BY p.user_permission_name\"\"\"\n\n key_field = \"permission\"\n\n delete_sql = \"\"\"DELETE FROM user_group_x_user_permission WHERE user_group_id = %s AND user_permission_id = %s\"\"\"\n\n delete_all_sql = \"\"\"DELETE FROM user_group_x_user_permission WHERE user_group_id = %s\"\"\"\n\n select_one_sql = \"\"\"SELECT user_group_id FROM user_group_x_user_permission WHERE user_group_id = %s AND user_permission_id = %s\"\"\"\n\n order = 1\n\n father_field = \"user_group_id\"\n\n def _start_add(self):\n return \"INSERT IGNORE INTO %s (\"%self.table_name\n\n def add(self,params_dict,id=None):\n permission_id = super(UserGroupPermissions,self).add(params_dict,id)\n self.add_user_permission(params_dict)\n return permission_id\n\n def add_user_permission(self, group_id):\n list_user_id = mysql_db.execute(\"SELECT user_id FROM user_x_user_group WHERE user_group_id = %s\", group_id).fetchall()\n for i in list_user_id:\n user_id = i[0]\n mysql_db.execute(\"INSERT IGNORE INTO user_x_user_permission (user_id, 
user_permission_id) SELECT %s ,user_permission_id FROM user_group_x_user_permission WHERE user_group_id IN (SELECT user_group_id FROM user_x_user_group WHERE user_id = %s );\"%(user_id,user_id))\n\n\n    def update(self,params_dict):\n        sql = \"UPDATE %s SET \"%self.table_name\n        id_val = params_dict[\"id\"].split(\"/\")\n        id_group = id_val[0]\n        id_perm = id_val[1]\n        id_field = self.get_db_field(\"id\")\n        set_list = []\n        for param in params_dict:\n            if param in self.fields.keys():\n                field_type = self.get_field_type(param)\n                if field_type != \"hidden\":\n                    set_field = self.get_db_field(param)\n                    set_val = params_dict[param]\n                    if field_type != \"select\":\n                        set_val = '\"%s\"'%set_val\n                    set_list.append(\"%s=%s\"%(set_field,set_val))\n        if len(set_list) > 0:\n            sql = sql + self._print_list(set_list)\n        sql = sql + \" WHERE user_group_id=%s AND user_permission_id=%s \"%(id_group,id_perm)\n        mysql_db.execute(sql)\n        self.add_user_permission(params_dict[\"id\"])\n        return id_group\n\n    def delete(self,id):\n        id_val = id.split(\"/\")\n        id_group = id_val[0]\n        id_perm = id_val[1]\n\n        mysql_db.execute(self.delete_sql, id_group, id_perm)\n        delete = mysql_db.execute(self.select_one_sql, id_group, id_perm).fetchall()\n\n        if not delete:\n            list_user_id = mysql_db.execute(\"SELECT user_id FROM user_x_user_group WHERE user_id NOT IN (SELECT a.user_id FROM user_x_user_group a, user_x_user_group b WHERE a.user_group_id IN (SELECT user_group_id FROM user_group_x_user_permission WHERE user_permission_id = %s) AND b.user_group_id = %s AND a.user_id = b.user_id) AND user_group_id = %s\", id_perm, id_group, id_group).fetchall()\n            for i in list_user_id:\n                id_user = i[0]\n                mysql_db.execute(\"DELETE FROM user_x_user_permission WHERE user_id = %s AND user_permission_id = %s\", id_user, id_perm)\n\n            return {\"error\": [\"ok\", \"success\"]}\n        else:\n            return {\"error\": [\"failed\", \"error on delete\"]}\n\nclass UserGroup(vzMultipleComponent):\n\n    def __init__(self):\n        self.permissions = UserGroupPermissions()\n\n    table_name = \"user_group\"\n    \n    name =\"usergroup\"\n    \n    type=\"single\"\n    \n    fields = {\"id\":[\"hidden\",False,False,\"user_group.user_group_id\"],\n              \"description\":[\"text\",True,True,\"user_group.user_group_desc\"],}\n    \n    list_field = ['id',\"description\"]\n    \n    list_all_sql = \"\"\"SELECT * FROM user_group\"\"\"\n    \n    select_unique_id_sql = \"SELECT * FROM user_group WHERE user_group_id=%s \" \n    \n    list_search_sql = \"SELECT * FROM user_group WHERE user_group_desc LIKE %s\"\n    \n    count_sql = \"SELECT count(user_group_id) FROM user_group\"\n    \n    count_search_sql = \"SELECT count(user_group_id) FROM user_group WHERE user_group_desc LIKE %s\"\n\n    delete_sql = \"\"\"DELETE FROM user_group WHERE user_group_id=%s\"\"\"\n\n    field_order_sql = {\"description\" : \" ORDER BY user_group_desc\"}\n\n    #Requires:\n    #delete_sql -> SQL statement that deletes an element by id (marked with %s)\n    # def main_delete(self,id):\n    #     try:\n    #         st = mysql_db.execute(self.delete_sql,(id,id))\n    #         return st.rowcount\n    #     except:\n    #         return 0\n    \n\n#Returns json for the administrator.entity grid \n@view_config(route_name='admin.user.group.grid.list',renderer=\"json\")\n@jsonLogin\ndef entity_grid_list(request):\n    usergroup = UserGroup()\n    grid = vzMultipleComponentJsonGrid(model=usergroup)\n    params_copy = request.params.copy()\n    params_copy[\"order_field\"] = \"description\"\n    params_copy[\"order\"] = \"ASC\"\n    return grid.list_grid(params_copy)\n\n@view_config(route_name='admin.user.group.grid.del',renderer=\"json\")\n@jsonLogin\ndef 
entity_del(request):\n action = vzMultipleComponentGridActions( model = UserGroup())\n return action.delete(request)\n\n@view_config(route_name='admin.user.group.grid.save',renderer=\"json\")\n@jsonLogin\ndef entity_grid_save(request):\n model = UserGroup()\n action = vzMultipleComponentGridActions(model=model)\n save = action.save(request)\n usergroup_id = request.params[\"usergroup_id\"]\n\n # Delete permissions\n # cursor_users = mysql_db.execute(\"SELECT user_id FROM user_x_user_group WHERE user_group_id = %s\"%usergroup_id)\n # while(1):\n # if cursor_users != None: \n # row_user = cursor_users.fetchone()\n # if row_user == None: break\n # user_id = row_user[\"user_id\"]\n # delete_user_permissions = \"\"\"DELETE FROM user_x_user_permission \n # WHERE user_id = %s AND user_permission_id NOT IN \n # (SELECT user_permission_id FROM user_group_x_user_permission \n # WHERE user_group_id IN \n # (SELECT user_group_id FROM user_x_user_group WHERE user_id = %s))\"\"\"%(user_id, user_id)\n # mysql_db.execute(delete_user_permissions)\n return save\n\n@view_config(route_name='admin.user.group.row.id',renderer=\"json\")\n@jsonLogin\ndef entity_select_unique(request):\n usergroup = UserGroup()\n grid = vzMultipleComponentJsonGrid(model=usergroup)\n return grid.select_unique(request.params)\n \n\n@view_config(route_name='admin.user.group.select',renderer=\"json\")\n@jsonLogin\ndef entity_grid_select(request):\n usergroup = UserGroup()\n grid = vzMultipleComponentJsonGrid(model=usergroup)\n return grid.form_field_select(request.params)\n","sub_path":"administrator/user_group.py","file_name":"user_group.py","file_ext":"py","file_size_in_byte":8034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"536828454","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals\n\nfrom collections import defaultdict\n\nimport posixpath\nimport unittest\nimport responses\n\nfrom dockermap import DEFAULT_COREIMAGE, DEFAULT_BASEIMAGE\nfrom dockermap.map.config import ClientConfiguration, get_host_path\nfrom dockermap.map.container import ContainerMap\nfrom dockermap.map.input import ExecCommand, EXEC_POLICY_INITIAL, EXEC_POLICY_RESTART\nfrom dockermap.map.policy import CONFIG_FLAG_DEPENDENT\nfrom dockermap.map.policy.base import BasePolicy\nfrom dockermap.map.state import (INITIAL_START_TIME, STATE_RUNNING, STATE_PRESENT, STATE_ABSENT,\n STATE_FLAG_NONRECOVERABLE, STATE_FLAG_RESTARTING, STATE_FLAG_INITIAL,\n STATE_FLAG_OUTDATED)\nfrom dockermap.map.state.base import DependencyStateGenerator, DependentStateGenerator, SingleStateGenerator\nfrom dockermap.map.state.update import UpdateStateGenerator\n\nfrom tests import MAP_DATA_2, CLIENT_DATA_1\n\n\nURL_PREFIX = 'http+docker://localunixsocket/v{0}'.format(CLIENT_DATA_1['version'])\n\nP_STATE_INITIAL = 0\nP_STATE_RUNNING = 1\nP_STATE_RESTARTING = 2\nP_STATE_EXITED_0 = 3\nP_STATE_EXITED_127 = 4\nSTATE_RESULTS = {\n P_STATE_INITIAL: {\n 'Running': False,\n 'Restarting': False,\n 'ExitCode': 0,\n 'StartedAt': INITIAL_START_TIME,\n },\n P_STATE_RESTARTING: {\n 'Running': False,\n 'Restarting': True,\n 'ExitCode': 255,\n 'StartedAt': \"2016-02-05T20:14:04.655843958Z\",\n },\n P_STATE_RUNNING: {\n 'Running': True,\n 'Restarting': False,\n 'ExitCode': 0,\n 'StartedAt': \"2016-02-05T20:14:04.655843958Z\",\n },\n P_STATE_EXITED_0: {\n 'Running': False,\n 'Restarting': False,\n 'ExitCode': 0,\n 'StartedAt': \"2016-02-05T20:14:04.655843958Z\",\n },\n P_STATE_EXITED_127: {\n 'Running': False,\n 
'Restarting': False,\n 'ExitCode': -127,\n 'StartedAt': \"2016-02-05T20:14:04.655843958Z\",\n },\n}\n\n\ndef _container(config_name, p_state=P_STATE_RUNNING, instances=None, attached_volumes_valid=True,\n instance_volumes_valid=True, **kwargs):\n return config_name, p_state, instances, attached_volumes_valid, instance_volumes_valid, kwargs\n\n\ndef _add_container_list(rsps, container_names):\n results = [\n {'Id': '{0}'.format(c_id), 'Names': ['/{0}'.format(name)]}\n for c_id, name in container_names\n ]\n rsps.add('GET', '{0}/containers/json'.format(URL_PREFIX), content_type='application/json', json=results)\n\n\ndef _add_image_list(rsps, image_names):\n image_list = [\n {\n 'RepoTags': ['{0}:latest'.format(i_name), '{0}:1.0'.format(i_name)] if ':' not in i_name else [i_name],\n 'Id': '{0}'.format(i_id),\n }\n for i_id, i_name in image_names\n ]\n rsps.add('GET', '{0}/images/json'.format(URL_PREFIX), content_type='application/json', json=image_list)\n rsps.add('POST', '{0}/images/create'.format(URL_PREFIX), content_type='application/json')\n\n\ndef _get_container_mounts(container_map, c_config, config_name, instance_name, valid, is_attached=False):\n if valid:\n path_prefix = '/valid'\n else:\n path_prefix = '/invalid_{0}'.format(config_name)\n for a in c_config.attaches:\n c_path = container_map.volumes[a]\n yield {'Source': posixpath.join(path_prefix, 'attached', a), 'Destination': c_path, 'RW': True}\n if not is_attached:\n for vol, ro in c_config.binds:\n if isinstance(vol, tuple):\n c_path, h_r_path = vol\n h_path = get_host_path(container_map.host.root, h_r_path, instance_name)\n else:\n c_path = container_map.volumes[vol]\n h_path = container_map.host.get_path(vol, instance_name)\n yield {'Source': posixpath.join(path_prefix, h_path), 'Destination': c_path, 'RW': not ro}\n for s in c_config.shares:\n yield {'Source': posixpath.join(path_prefix, 'shared', s), 'Destination': s, 'RW': True}\n for vol, ro in c_config.uses:\n c, __, i = vol.partition('.')\n c_ref = container_map.get_existing(c)\n if i in c_ref.attaches:\n c_path = container_map.volumes[i]\n yield {'Source': posixpath.join(path_prefix, 'attached', i), 'Destination': c_path, 'RW': not ro}\n elif c_ref and (not i or i in c_ref.instances):\n for r_mount in _get_container_mounts(container_map, c_ref, c, i, valid):\n yield r_mount\n else:\n raise ValueError(\"Invalid uses declaration in {0}: {1}\".format(config_name, vol))\n\n\ndef _add_inspect(rsps, container_map, map_name, c_config, config_name, instance_name, state, valid, container_id,\n image_id, is_attached, **kwargs):\n if instance_name:\n container_name = '{0}.{1}.{2}'.format(map_name, config_name, instance_name)\n else:\n container_name = '{0}.{1}'.format(map_name, config_name)\n ports = defaultdict(list)\n if not is_attached:\n for ex in c_config.exposes:\n ex_port = '{0}/tcp'.format(ex.exposed_port)\n if ex.host_port:\n if ex.interface:\n ip = CLIENT_DATA_1['interfaces'][ex.interface]\n else:\n ip = '0.0.0.0'\n ports[ex_port].append({\n 'HostIp': ip,\n 'HostPort': '{0}'.format(ex.host_port)\n })\n else:\n ports[ex_port].extend(())\n results = {\n 'Id': '{0}'.format(container_id),\n 'Names': ['/{0}'.format(container_name)],\n 'State': STATE_RESULTS[state],\n 'Image': '{0}'.format(image_id),\n 'Mounts': list(_get_container_mounts(container_map, c_config, config_name, instance_name, valid, is_attached)),\n 'HostConfig': {'Links': [\n '/{0}.{1}:/{2}/{3}'.format(map_name, link.container, container_name, link.alias)\n for link in c_config.links\n ]},\n 'Config': {\n 
'Env': None,\n 'Cmd': [],\n 'Entrypoint': [],\n },\n 'NetworkSettings': {\n 'Ports': ports,\n },\n }\n exec_results = {\n 'Processes': [\n [cmd_i, cmd.user, cmd.cmd]\n for cmd_i, cmd in enumerate(c_config.exec_commands)\n ],\n }\n results.update(kwargs)\n rsps.add('GET', '{0}/containers/{1}/json'.format(URL_PREFIX, container_name),\n content_type='application/json',\n json=results)\n rsps.add('GET', '{0}/containers/{1}/json'.format(URL_PREFIX, container_id),\n content_type='application/json',\n json=results)\n rsps.add('GET', '{0}/containers/{1}/top'.format(URL_PREFIX, container_name),\n content_type='application/json',\n json=exec_results)\n rsps.add('GET', '{0}/containers/{1}/top'.format(URL_PREFIX, container_id),\n content_type='application/json',\n json=exec_results)\n return container_id, container_name\n\n\ndef _get_single_state(sg, map_name, config_name, instance=None):\n if instance:\n instances = [instance]\n else:\n instances = None\n states = [si\n for s in sg.get_states(map_name, config_name, instances)\n for si in s.instances]\n return states[0]\n\n\nclass TestPolicyStateGenerators(unittest.TestCase):\n def setUp(self):\n self.map_name = map_name = 'main'\n self.sample_map = sample_map = ContainerMap('main', MAP_DATA_2,\n use_attached_parent_name=True).get_extended_map()\n self.sample_map.repository = None\n self.sample_client_config = client_config = ClientConfiguration(**CLIENT_DATA_1)\n self.policy = BasePolicy({map_name: sample_map}, {'__default__': client_config})\n all_images = set(c_config.image or c_name for c_name, c_config in sample_map)\n all_images.add(DEFAULT_COREIMAGE)\n all_images.add(DEFAULT_BASEIMAGE)\n self.images = list(enumerate(all_images))\n\n def _setup_containers(self, rsps, containers_states):\n container_names = []\n _add_image_list(rsps, self.images)\n image_dict = {name: _id for _id, name in self.images}\n container_id = 0\n base_image_id = image_dict[DEFAULT_BASEIMAGE]\n for name, state, instances, attached_valid, instances_valid, kwargs in containers_states:\n c_config = self.sample_map.get_existing(name)\n for a in c_config.attaches:\n container_id += 1\n container_names.append(_add_inspect(rsps, self.sample_map, self.map_name, c_config, name, a,\n P_STATE_EXITED_0, attached_valid, container_id, base_image_id,\n True))\n image_id = image_dict[c_config.image or name]\n for i in instances or c_config.instances or [None]:\n container_id += 1\n container_names.append(_add_inspect(rsps, self.sample_map, self.map_name, c_config, name, i,\n state, instances_valid, container_id, image_id, False, **kwargs))\n _add_container_list(rsps, container_names)\n\n def test_dependency_states_running(self):\n with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps:\n self._setup_containers(rsps, [\n _container('redis'),\n _container('svc'),\n _container('server'),\n ])\n states = list(DependencyStateGenerator(self.policy, {}).get_states(self.map_name, 'server'))\n instance_base_states = [si.base_state\n for s in states\n for si in s.instances]\n attached_base_states = [si.base_state\n for s in states\n for si in s.attached]\n self.assertTrue(all(si == STATE_RUNNING\n for si in instance_base_states))\n self.assertTrue(all(si == STATE_PRESENT\n for si in attached_base_states))\n self.assertTrue(all(s.flags == CONFIG_FLAG_DEPENDENT\n for s in states\n if s.config != 'server'))\n\n def test_single_states_mixed(self):\n with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps:\n self._setup_containers(rsps, [\n _container('redis', 
P_STATE_EXITED_0, instances=['cache']),\n _container('redis', instances=['queue']),\n _container('svc', P_STATE_EXITED_127),\n _container('worker', P_STATE_RESTARTING),\n _container('worker_q2', P_STATE_INITIAL),\n ])\n sg = SingleStateGenerator(self.policy, {})\n cache_state = _get_single_state(sg, self.map_name, 'redis', 'cache')\n self.assertEqual(cache_state.base_state, STATE_PRESENT)\n queue_state = _get_single_state(sg, self.map_name, 'redis', 'queue')\n self.assertEqual(queue_state.base_state, STATE_RUNNING)\n svc_state = _get_single_state(sg, self.map_name, 'svc')\n self.assertEqual(svc_state.base_state, STATE_PRESENT)\n self.assertEqual(svc_state.flags & STATE_FLAG_NONRECOVERABLE, STATE_FLAG_NONRECOVERABLE)\n worker_state = _get_single_state(sg, self.map_name, 'worker')\n self.assertEqual(worker_state.flags & STATE_FLAG_RESTARTING, STATE_FLAG_RESTARTING)\n worker2_state = _get_single_state(sg, self.map_name, 'worker_q2')\n self.assertEqual(worker2_state.base_state, STATE_PRESENT)\n self.assertEqual(worker2_state.flags & STATE_FLAG_INITIAL, STATE_FLAG_INITIAL)\n server_states = _get_single_state(sg, self.map_name, 'server')\n self.assertEqual(server_states.base_state, STATE_ABSENT)\n\n def test_dependent_states(self):\n with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps:\n self._setup_containers(rsps, [\n _container('redis'),\n _container('server'),\n _container('worker'),\n _container('worker_q2'),\n ])\n states = list(DependentStateGenerator(self.policy, {}).get_states(self.map_name, 'redis'))\n instance_base_states = [si.base_state\n for s in states\n for si in s.instances]\n attached_base_states = [si.base_state\n for s in states\n for si in s.attached]\n self.assertTrue(all(si == STATE_RUNNING\n for si in instance_base_states))\n self.assertTrue(all(si == STATE_PRESENT\n for si in attached_base_states))\n self.assertTrue(all(s.flags == CONFIG_FLAG_DEPENDENT\n for s in states\n if s.config != 'redis'))\n\n def test_update_states_clean(self):\n with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps:\n self._setup_containers(rsps, [\n _container('redis'),\n _container('svc'),\n _container('server'),\n ])\n states = {s.config: s for s in UpdateStateGenerator(self.policy, {}).get_states(self.map_name, 'server')}\n server_states = states['server'].instances[0]\n self.assertEqual(server_states.base_state, STATE_RUNNING)\n self.assertEqual(server_states.flags, 0)\n redis_states = states['redis'].instances[0]\n self.assertEqual(redis_states.base_state, STATE_RUNNING)\n self.assertEqual(redis_states.flags, 0)\n\n def test_update_states_invalid_attached(self):\n with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps:\n self._setup_containers(rsps, [\n _container('redis', attached_volumes_valid=False),\n _container('svc'),\n _container('server'),\n ])\n states = {s.config: s for s in UpdateStateGenerator(self.policy, {}).get_states(self.map_name, 'server')}\n server_states = states['server'].instances[0]\n self.assertEqual(server_states.base_state, STATE_RUNNING)\n self.assertEqual(server_states.flags & STATE_FLAG_OUTDATED, STATE_FLAG_OUTDATED)\n redis_states = states['redis'].instances[0]\n self.assertEqual(redis_states.base_state, STATE_RUNNING)\n self.assertEqual(redis_states.flags & STATE_FLAG_OUTDATED, STATE_FLAG_OUTDATED)\n\n def test_update_states_invalid_dependent_instance(self):\n with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps:\n self._setup_containers(rsps, [\n _container('redis', 
instance_volumes_valid=False),\n _container('svc'),\n _container('server'),\n ])\n states = {s.config: s for s in UpdateStateGenerator(self.policy, {}).get_states(self.map_name, 'server')}\n server_states = states['server'].instances[0]\n self.assertEqual(server_states.base_state, STATE_RUNNING)\n self.assertEqual(server_states.flags & STATE_FLAG_OUTDATED, 0)\n redis_states = states['redis'].instances[0]\n self.assertEqual(redis_states.base_state, STATE_RUNNING)\n self.assertEqual(redis_states.flags & STATE_FLAG_OUTDATED, STATE_FLAG_OUTDATED)\n\n def test_update_states_invalid_dependent_instance_attached(self):\n with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps:\n self._setup_containers(rsps, [\n _container('redis'),\n _container('svc'),\n _container('server', attached_volumes_valid=False),\n ])\n states = {s.config: s for s in UpdateStateGenerator(self.policy, {}).get_states(self.map_name, 'server')}\n server_states = states['server'].instances[0]\n self.assertEqual(server_states.base_state, STATE_RUNNING)\n self.assertEqual(server_states.flags & STATE_FLAG_OUTDATED, STATE_FLAG_OUTDATED)\n redis_states = states['redis'].instances[0]\n self.assertEqual(redis_states.base_state, STATE_RUNNING)\n self.assertEqual(redis_states.flags & STATE_FLAG_OUTDATED, 0)\n\n def test_update_states_invalid_image(self):\n with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps:\n self._setup_containers(rsps, [\n _container('redis'),\n _container('svc'),\n _container('server', Image='invalid'),\n ])\n states = {s.config: s for s in UpdateStateGenerator(self.policy, {}).get_states(self.map_name, 'server')}\n server_states = states['server'].instances[0]\n self.assertEqual(server_states.base_state, STATE_RUNNING)\n self.assertEqual(server_states.flags & STATE_FLAG_OUTDATED, STATE_FLAG_OUTDATED)\n\n def test_update_states_invalid_network(self):\n with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps:\n self._setup_containers(rsps, [\n _container('redis'),\n _container('svc'),\n _container('server', NetworkSettings=dict(Ports={})),\n ])\n states = {s.config: s for s in UpdateStateGenerator(self.policy, {}).get_states(self.map_name, 'server')}\n server_states = states['server'].instances[0]\n self.assertEqual(server_states.base_state, STATE_RUNNING)\n self.assertEqual(server_states.flags & STATE_FLAG_OUTDATED, STATE_FLAG_OUTDATED)\n\n def test_update_states_updated_environment(self):\n with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps:\n self._setup_containers(rsps, [\n _container('redis'),\n _container('svc'),\n _container('server'),\n ])\n self.sample_map.containers['server'].create_options.update(environment=dict(Test='x'))\n states = {s.config: s for s in UpdateStateGenerator(self.policy, {}).get_states(self.map_name, 'server')}\n server_states = states['server'].instances[0]\n self.assertEqual(server_states.base_state, STATE_RUNNING)\n self.assertEqual(server_states.flags & STATE_FLAG_OUTDATED, STATE_FLAG_OUTDATED)\n\n def test_update_states_updated_command(self):\n with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps:\n self._setup_containers(rsps, [\n _container('redis'),\n _container('svc'),\n _container('server'),\n ])\n self.sample_map.containers['server'].create_options.update(command='/bin/true')\n states = {s.config: s for s in UpdateStateGenerator(self.policy, {}).get_states(self.map_name, 'server')}\n server_states = states['server'].instances[0]\n self.assertEqual(server_states.base_state, 
STATE_RUNNING)\n        self.assertEqual(server_states.flags & STATE_FLAG_OUTDATED, STATE_FLAG_OUTDATED)\n\n    def test_update_states_updated_exec(self):\n        with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps:\n            cmd1 = ExecCommand(2, '/bin/true', EXEC_POLICY_INITIAL)\n            cmd2 = ExecCommand(3, '/bin/true', EXEC_POLICY_INITIAL)\n            cmd3 = ExecCommand(4, '/bin/true', EXEC_POLICY_RESTART)\n            self.sample_map.containers['server'].exec_commands = [cmd1]\n            self._setup_containers(rsps, [\n                _container('redis'),\n                _container('svc'),\n                _container('server'),\n            ])\n            self.sample_map.containers['server'].exec_commands = [cmd1, cmd2, cmd3]\n            states = {s.config: s for s in UpdateStateGenerator(self.policy, {}).get_states(self.map_name, 'server')}\n            server_states = states['server'].instances[0]\n            self.assertEqual(server_states.base_state, STATE_RUNNING)\n            self.assertEqual(server_states.flags & STATE_FLAG_OUTDATED, 0)\n            self.assertDictEqual(server_states.extra_data, {'exec_commands': [\n                (cmd1, True),\n                (cmd2, False),\n                (cmd3, False),\n            ]})\n","sub_path":"tests/test_state.py","file_name":"test_state.py","file_ext":"py","file_size_in_byte":20662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"89833091","text":"#coding:utf-8\nimport datetime\nfrom ftplib import FTP, error_perm\nimport os\nimport os.path\nimport re,time\nimport shutil\n\n# from dateutil import parser\nclass MyFTP(FTP):\n    encoding = \"utf-8\"  # default encoding\n    pattern = re.compile(r'(\\d+)\\s+[a-zA-Z]{3}\\s+\\d{2}\\s+[\\d:]{4,}\\s(.*)$')\n\n    def getdirs(self, *args):\n        '''Adapted from the nlst() and dir() code; returns the detailed listing instead of printing it'''\n        cmd = 'LIST'\n        # func = None\n        # if args[-1:] and type(args[-1]) != type(''):\n        #     args, func = args[:-1], args[-1]\n        for arg in args:\n            cmd = cmd + (' ' + arg)\n        files = []\n\n        def filter(file):\n            #print(file)\n            if file.startswith('d'):\n                file = self.pattern.findall(file)[0]\n                files.append(file)\n\n        self.retrlines(cmd, filter)\n        return files\n\n    def getfiles(self, *args):\n        \"\"\"Return the file list with brief info\"\"\"\n        cmd = 'LIST'\n        for arg in args:\n            cmd = cmd + (' ' + arg)\n        files = []\n\n        def filter(file):\n            print(file)\n            if file.startswith('-'):\n                # print(file)\n                # r'(\\d+)\\s+[a-zA-Z]{3}\\s+\\d{2}\\s+[\\d:]{4,}\\s(.*)$'\n                match_file = self.pattern.findall(file)[0]\n                files.append(match_file)\n\n        self.retrlines(cmd, filter)\n        return files\n\n\nLOCAL_PATH = os.getcwd()\nprint(LOCAL_PATH)\nREMOTE_PATH = '/syncftp'\n\n\ndef list_dirs_files(dirpath):\n    dirs = []\n    files = []\n    for filename in os.listdir(dirpath):\n        path = dirpath + '/' + filename\n        if os.path.isfile(path):\n            files.append(filename)\n        else:\n            dirs.append(filename)\n    return dirs, files\n\n\ndef sync_ftp_files(ftp, rem_path):\n    r_dirs = ftp.getdirs(rem_path)\n    r_file_tuples = ftp.getfiles(rem_path)\n    r_files = [x[1] for x in r_file_tuples]\n    # print(r_dirs)\n    # print(r_files)\n\n    loc_path = rem_path.replace(REMOTE_PATH, LOCAL_PATH)\n    l_dirs, l_files = list_dirs_files(loc_path)\n    # print(l_dirs)\n    # print(l_files)\n\n    print('\\n----------------' + rem_path + '----------------')\n    # delete local files that are no longer on the FTP server\n    del_files = list(set(l_files).difference(set(r_files)))\n    for f in del_files:\n        dp = loc_path+'/'+f\n        print('deleting file: ' + dp)\n        os.remove(dp)\n\n    # download files that exist only on the FTP server\n    add_files = list(set(r_files).difference(set(l_files)))\n    for f in add_files:\n        fp = open(loc_path + '/' + f, 'wb')\n        bufsize = 1024\n        dp = rem_path+'/'+f\n        print('downloading file: ' + dp)\n        ftp.retrbinary('RETR ' + dp, fp.write, bufsize)\n        ftp.set_debuglevel(0)\n\n    # upload recently modified local files\n    cmp_files =
 list(set(l_files).intersection(set(r_files)))\n    for f in cmp_files:\n        rem_f = rem_path + '/' +f\n        # size_r = ftp.size(rem_f)\n        size_r = int([x[0] for x in r_file_tuples if x[1]==f][0])\n\n        loc_f = loc_path + '/' + f\n        size_l = os.path.getsize(loc_f)\n\n        if size_r!=size_l:\n            mtime_r = ftp.sendcmd(\"MDTM \"+rem_f)[4:].strip()\n            mtime_r = datetime.datetime.strptime(mtime_r, '%Y%m%d%H%M%S').timestamp()\n            mtime_l = os.path.getmtime(loc_f)\n\n            if mtime_l > mtime_r:\n                print(\"uploading\", f, size_l, mtime_l, \"=>\", size_r, mtime_r)\n                try:\n                    with open(loc_f, 'rb') as file:\n                        # upload, overwriting the remote file\n                        ftp.cwd(rem_path)\n                        ftp.storbinary('STOR %s' % os.path.basename(rem_f), file)\n                except Exception as err:\n                    print(err)\n                    result = False\n\n\n    # files = ftp.mlsd(f)\n    # print(files)\n    # for file in files:\n    #     name = file[0]\n    #     timestamp = file[1]['modify']\n    #     print(timestamp)\n    #     time = parser.parse(timestamp)\n    #     print(name + ' - ' + str(time))\n\n\n    # delete local directories that no longer exist on the FTP server\n    del_dirs = list(set(l_dirs).difference(set(r_dirs)))\n    for f in del_dirs:\n        dp = loc_path+'/'+f  # the directory is removed locally, so build a local path\n        print('removing directory: ' + dp)\n        shutil.rmtree(dp)\n\n    # create local directories for directories that exist only on the FTP server\n    add_dirs = list(set(r_dirs).difference(set(l_dirs)))\n    for f in add_dirs:\n        dp = rem_path+'/'+f\n        print('creating directory: ' + dp.replace(REMOTE_PATH, LOCAL_PATH))\n        os.mkdir(dp.replace(REMOTE_PATH, LOCAL_PATH))\n        sync_ftp_files(ftp, dp)\n\n    # recurse into directories that exist on both sides\n    for dp in r_dirs:\n        sync_ftp_files(ftp, rem_path + '/' + dp)\n\n\ndef test():\n    ftp = MyFTP()\n    ftp.connect(\"cddyys.ddns.net\", 94)  # connect\n    ftp.login(\"gjh\", \"hbdl9431\")\n\n    sync_ftp_files(ftp, REMOTE_PATH)\n\n    ftp.quit()  # quit\n\n\nif __name__ == '__main__':\n    test()\n","sub_path":"py-tools/main/syncftp.py","file_name":"syncftp.py","file_ext":"py","file_size_in_byte":4760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"260456940","text":"\r\n\r\nimport pandas as pd\r\npass_data = pd.read_excel(\"E:/360digiTMG assignment/Data Science/Ensemble technique/Datasets_ET/Ensemble_Password_Strength.xlsx\")\r\n\r\n#creating dummies\r\npass_data1 = pd.get_dummies(pass_data[['characters']])\r\npass_data.drop(pass_data[['characters']], axis=1, inplace=True)\r\npass_data = pd.concat([pass_data, pass_data1],axis=1)\r\n\r\n# Input and Output Split\r\npredictors = pass_data.loc[:, pass_data.columns!=\"characters_strength\"]\r\ntype(predictors)\r\n\r\ntarget = pass_data[\"characters_strength\"]\r\ntype(target)\r\n\r\n# Train Test partition of the data\r\nfrom sklearn.model_selection import train_test_split\r\nx_train, x_test, y_train, y_test = train_test_split(predictors, target, test_size = 0.2, random_state=0)\r\n\r\n# GridSearchCV\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.ensemble import RandomForestClassifier\r\n\r\nrf_clf_grid = RandomForestClassifier(n_estimators=500, n_jobs=1, random_state=42)\r\n\r\nparam_grid = {\"max_features\": [4, 5, 6, 7, 8, 9, 10], \"min_samples_split\": [2, 3, 10]}\r\n\r\ngrid_search = GridSearchCV(rf_clf_grid, param_grid, n_jobs = -1, cv = 5, scoring = 'accuracy')\r\n\r\ngrid_search.fit(x_train, y_train)\r\n\r\ngrid_search.best_params_\r\n\r\ncv_rf_clf_grid = grid_search.best_estimator_\r\n\r\nfrom sklearn.metrics import accuracy_score, confusion_matrix\r\n\r\nconfusion_matrix(y_test, cv_rf_clf_grid.predict(x_test))\r\naccuracy_score(y_test, cv_rf_clf_grid.predict(x_test))\r\n\r\n#Bagging technique\r\nfrom sklearn import tree\r\nclftree = tree.DecisionTreeClassifier()\r\nfrom sklearn.ensemble import BaggingClassifier\r\n\r\nbag_clf = BaggingClassifier(base_estimator = clftree, n_estimators = 500,\r\n                            bootstrap = True, n_jobs =
 1, random_state = 42)\r\n\r\nbag_clf.fit(x_train, y_train)\r\n\r\nfrom sklearn.metrics import accuracy_score, confusion_matrix\r\n\r\n# Evaluation on Testing Data\r\nconfusion_matrix(y_test, bag_clf.predict(x_test))\r\naccuracy_score(y_test, bag_clf.predict(x_test))\r\n\r\n# Evaluation on Training Data\r\nconfusion_matrix(y_train, bag_clf.predict(x_train))\r\naccuracy_score(y_train, bag_clf.predict(x_train))\r\n\r\n#AdaBoosting technique\r\nfrom sklearn.ensemble import AdaBoostClassifier\r\n\r\nada_clf = AdaBoostClassifier(learning_rate = 0.02, n_estimators = 5000)\r\n\r\nada_clf.fit(x_train, y_train)\r\n\r\nfrom sklearn.metrics import accuracy_score, confusion_matrix\r\n\r\n# Evaluation on Testing Data\r\nconfusion_matrix(y_test, ada_clf.predict(x_test))\r\naccuracy_score(y_test, ada_clf.predict(x_test))\r\n\r\n# Evaluation on Training Data\r\nconfusion_matrix(y_train, ada_clf.predict(x_train))\r\naccuracy_score(y_train, ada_clf.predict(x_train))\r\n\r\n#Gradient boosting\r\nfrom sklearn.ensemble import GradientBoostingClassifier\r\n\r\nboost_clf = GradientBoostingClassifier()\r\n\r\nboost_clf.fit(x_train, y_train)\r\n\r\nfrom sklearn.metrics import accuracy_score, confusion_matrix\r\n\r\n# Evaluation on Testing Data\r\nconfusion_matrix(y_test, boost_clf.predict(x_test))\r\naccuracy_score(y_test, boost_clf.predict(x_test))\r\n\r\n# Evaluation on Training Data\r\nconfusion_matrix(y_train, boost_clf.predict(x_train))\r\naccuracy_score(y_train, boost_clf.predict(x_train))\r\n\r\n# Hyperparameters\r\nboost_clf2 = GradientBoostingClassifier(learning_rate = 0.02, n_estimators = 1000, max_depth = 1)\r\nboost_clf2.fit(x_train, y_train)\r\n\r\nfrom sklearn.metrics import accuracy_score, confusion_matrix\r\n\r\n# Evaluation on Testing Data\r\nconfusion_matrix(y_test, boost_clf2.predict(x_test))\r\naccuracy_score(y_test, boost_clf2.predict(x_test))\r\n\r\n# Evaluation on Training Data\r\nconfusion_matrix(y_train, boost_clf2.predict(x_train))\r\naccuracy_score(y_train, boost_clf2.predict(x_train))\r\n\r\n#XGBoosting\r\nimport xgboost as xgb\r\n\r\n# the parameter name is max_depth; a misspelled keyword would be silently ignored\r\nxgb_clf = xgb.XGBClassifier(max_depth = 5, n_estimators = 10000, learning_rate = 0.3, n_jobs = -1)\r\n\r\nxgb_clf.fit(x_train, y_train)\r\n\r\nfrom sklearn.metrics import accuracy_score, confusion_matrix\r\n\r\n# Evaluation on Testing Data\r\nconfusion_matrix(y_test, xgb_clf.predict(x_test))\r\naccuracy_score(y_test, xgb_clf.predict(x_test))\r\n\r\n# Evaluation on Training Data\r\nconfusion_matrix(y_train, xgb_clf.predict(x_train))\r\naccuracy_score(y_train, xgb_clf.predict(x_train))\r\n\r\n#Voting technique\r\n# Import the required libraries\r\nfrom sklearn import linear_model, svm, neighbors, naive_bayes\r\nfrom sklearn.ensemble import VotingClassifier\r\nfrom sklearn.metrics import accuracy_score\r\n\r\n# Instantiate the learners (classifiers)\r\nlearner_1 = neighbors.KNeighborsClassifier(n_neighbors=5)\r\nlearner_2 = linear_model.Perceptron(tol=1e-2, random_state=0)\r\nlearner_3 = svm.SVC(gamma=0.001)\r\n\r\n# Instantiate the voting classifier\r\nvoting = VotingClassifier([('KNN', learner_1),\r\n                           ('Prc', learner_2),\r\n                           ('SVM', learner_3)])\r\n\r\n# Fit classifier with the training data\r\nvoting.fit(x_train, y_train)\r\n\r\n# Predict the most voted class\r\nhard_predictions = voting.predict(x_test)\r\n\r\n# Accuracy of hard voting\r\nprint('Hard Voting:', accuracy_score(y_test, hard_predictions))\r\n\r\n# Soft Voting\r\n# Instantiate the learners (classifiers)\r\nlearner_4 = neighbors.KNeighborsClassifier(n_neighbors = 5)\r\nlearner_5 =
 naive_bayes.GaussianNB()\r\nlearner_6 = svm.SVC(gamma = 0.001, probability = True)\r\n\r\n# Instantiate the voting classifier\r\nvoting = VotingClassifier([('KNN', learner_4),\r\n                           ('NB', learner_5),\r\n                           ('SVM', learner_6)],\r\n                            voting = 'soft')\r\n\r\n# Fit classifier with the training data\r\nvoting.fit(x_train, y_train)\r\nlearner_4.fit(x_train, y_train)\r\nlearner_5.fit(x_train, y_train)\r\nlearner_6.fit(x_train, y_train)\r\n\r\n# Predict the most probable class\r\nsoft_predictions = voting.predict(x_test)\r\n\r\n# Get the base learner predictions\r\npredictions_4 = learner_4.predict(x_test)\r\npredictions_5 = learner_5.predict(x_test)\r\npredictions_6 = learner_6.predict(x_test)\r\n\r\n# Accuracies of base learners\r\nprint('L4:', accuracy_score(y_test, predictions_4))\r\nprint('L5:', accuracy_score(y_test, predictions_5))\r\nprint('L6:', accuracy_score(y_test, predictions_6))\r\n\r\n# Accuracy of Soft voting\r\nprint('Soft Voting:', accuracy_score(y_test, soft_predictions))\r\n\r\n#stacking classification\r\n# Libraries and data loading\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.model_selection import KFold\r\nfrom sklearn import metrics\r\nimport numpy as np\r\n\r\nbase_learners = []\r\n\r\n# KNN classifier model\r\nknn = KNeighborsClassifier(n_neighbors=2)\r\nbase_learners.append(knn)\r\n\r\n# Decision Tree Classifier model\r\ndtr = DecisionTreeClassifier(max_depth=4, random_state=123456)\r\nbase_learners.append(dtr)\r\n\r\n# Multi Layered Perceptron classifier\r\nmlpc = MLPClassifier(hidden_layer_sizes =(100, ), solver='lbfgs', random_state=123456)\r\nbase_learners.append(mlpc)\r\n\r\n# Meta model using Logistic Regression\r\nmeta_learner = LogisticRegression(solver='lbfgs')\r\n\r\n# Create the training meta data\r\n\r\n# Create variables to store meta data and the targets\r\nmeta_data = np.zeros((len(base_learners), len(x_train)))\r\nmeta_targets = np.zeros(len(x_train))\r\n\r\n# Create the cross-validation folds\r\nKF = KFold(n_splits = 5)\r\nmeta_index = 0\r\nfor train_indices, test_indices in KF.split(x_train):\r\n    # Train each learner on the K-1 folds and create meta data for the Kth fold\r\n    # x_train/y_train are pandas objects, so the fold indices must be applied positionally with .iloc\r\n    for i in range(len(base_learners)):\r\n        learner = base_learners[i]\r\n\r\n        learner.fit(x_train.iloc[train_indices], y_train.iloc[train_indices])\r\n        predictions = learner.predict_proba(x_train.iloc[test_indices])[:,0]\r\n\r\n        meta_data[i][meta_index:meta_index+len(test_indices)] = predictions\r\n\r\n    meta_targets[meta_index:meta_index+len(test_indices)] = y_train.iloc[test_indices]\r\n    meta_index += len(test_indices)\r\n\r\n# Transpose the meta data to be fed into the meta learner\r\nmeta_data = meta_data.transpose()\r\n\r\n# Create the meta data for the test set and evaluate the base learners\r\ntest_meta_data = np.zeros((len(base_learners), len(x_test)))\r\nbase_acc = []\r\n\r\nfor i in range(len(base_learners)):\r\n    learner = base_learners[i]\r\n    learner.fit(x_train, y_train)\r\n    predictions = learner.predict_proba(x_test)[:,0]\r\n    test_meta_data[i] = predictions\r\n\r\n    acc = metrics.accuracy_score(y_test, learner.predict(x_test))\r\n    base_acc.append(acc)\r\ntest_meta_data = test_meta_data.transpose()\r\n\r\n# Fit the meta learner on the train set and evaluate it on the test set\r\nmeta_learner.fit(meta_data, meta_targets)\r\nensemble_predictions = meta_learner.predict(test_meta_data)\r\n\r\nacc = 
metrics.accuracy_score(y_test, ensemble_predictions)\r\n\r\n# Print the results\r\nfor i in range(len(base_learners)):\r\n learner = base_learners[i]\r\n\r\n print(f'{base_acc[i]:.2f} {learner.__class__.__name__}')\r\n \r\nprint(f'{acc:.2f} Ensemble')\r\n\r\n\r\n","sub_path":"password.py","file_name":"password.py","file_ext":"py","file_size_in_byte":8777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"392371743","text":"import sys\r\nfrom math import exp\r\nimport numpy as np\r\n\r\ndef theta(s):\r\n return 1.0/(1.0 + exp(-s))\r\n\r\nf = open('./hw3_train.dat')\r\ndata = []\r\ncounter = 0\r\nfor l in f:\r\n line = l.strip('\\n').split()\r\n data.append([1] + [float(num) for num in line])\r\n\r\nf.close()\r\nprint(data[0])\r\n\r\nw = np.matrix(np.zeros(21))\r\n\r\n\r\neta = 0.01\r\nT = 2000\r\nfor times in range(T):\r\n err_vec = np.matrix(np.zeros(21))\r\n for i in range(len(data)):\r\n #for i in range(1):\r\n Xn = np.matrix(data[i][0:21])\r\n Yn = np.matrix(data[i][-1])\r\n err_vec += theta(-Yn * w * np.transpose(Xn)) * (-Yn * Xn)\r\n\r\n w = w - eta * (err_vec / len(data))\r\n\r\nprint(w)\r\n\r\nf = open('./hw3_test.dat')\r\ndata = []\r\ncounter = 0\r\nfor l in f:\r\n data.append([])\r\n for num in l.strip().split(' '):\r\n data[counter].append(float(num))\r\n counter += 1\r\nf.close()\r\n\r\nerr_num = 0.0\r\nfor i in range(len(data)):\r\n Xn = np.matrix([1] + data[i][0:20])\r\n\r\n res = w * np.transpose(Xn) * data[i][-1]\r\n# print w * np.transpose(Xn)\r\n# print data[i][-1]\r\n if res < 0.0:\r\n err_num += 1\r\n\r\nprint(err_num / len(data))\r\n","sub_path":"ML/hw3/18/qq.py","file_name":"qq.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"368691822","text":"def unnest_list(list_):\n \"\"\"\n Unnest arbitrary nested list. 
Unlike itertools.chain, this can flatten multiple levels of nesting\n    Args:\n        list_ (List): list\n    Returns: the flattened list\n    \"\"\"\n    res = []\n    for i in list_:\n        if isinstance(i, list):\n            res += unnest_list(i)\n        else:\n            res += [i]\n    return res\n\n\nif __name__ == '__main__':\n    test_list = [[1, 2], [3, 4], 5, 6, [7, [8, 9]], [[[10]]], 11]\n\n    print(test_list)\n    print(unnest_list(test_list))\n","sub_path":"python/unnest_list.py","file_name":"unnest_list.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"546012450","text":"# Class based views below\n\nfrom snippets.models import Snippet\nfrom snippets.serializers import SnippetSerializer, UserSerializer\nfrom rest_framework import generics, permissions\n\nfrom django.contrib.auth.models import User\n\nclass SnippetList(generics.ListCreateAPIView):\n    queryset = Snippet.objects.all()\n    serializer_class = SnippetSerializer\n    # Restrict write access to authenticated users\n    permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n\n    # Associate the created snippet with the requesting user\n    def perform_create(self, serializer):\n        serializer.save(owner=self.request.user)\n\nclass SnippetDetail(generics.RetrieveUpdateDestroyAPIView):\n    queryset = Snippet.objects.all()\n    serializer_class = SnippetSerializer\n    permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n\nclass UserList(generics.ListAPIView):\n    queryset = User.objects.all()\n    serializer_class = UserSerializer\n\nclass UserDetail(generics.RetrieveAPIView):\n    queryset = User.objects.all()\n    serializer_class = UserSerializer\n","sub_path":"tutorial/snippets/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"290264719","text":"employees = []\nempl_position = {}\nempl_salary = {}\nempl_age = {}\n\nwhile True:\n    value = input()\n    if value == 'filter base':\n        break\n    else:\n        employees.append(value)\n\nfor i in employees:\n    i = i.split(' -> ')\n    try:\n        v = float(i[1])\n        if v % 1 == 0.0:\n            empl_age[i[0]] = int(v)\n        else:\n            empl_salary[i[0]] = v\n    except ValueError:\n        empl_position[i[0]] = i[1]\n\nstatus = input()\n\nif status == 'Position':\n    for j in empl_position:\n        print('Name: ' + str(j))\n        print('Position: ' + str(empl_position[j]))\n        print('====================')\nelif status == 'Salary':\n    for j in sorted(empl_salary, reverse=True):\n        print('Name: ' + str(j))\n        print('Salary: ' + str(empl_salary[j]))\n        print('====================')\nelse:\n    for j in sorted(empl_age, reverse=True):\n        print('Name: ' + str(j))\n        print('Age: ' + str(empl_age[j]))\n        print('====================')\n\n","sub_path":"08. Filter Base.py","file_name":"08. 
Filter Base.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"176763016","text":"#!/usr/bin/env python3\n\nimport subprocess\n\n\noutfile = open('/dev/null', 'w')\n\nfor enemy in ['BaselineHero', 'BaselineHero2', 'BaselineHero3']:\n print(enemy)\n for trial in range(3):\n # with open(\"params.py\", \"w\") as f:\n # f.write(\"RETREAT_PERCENT = %f\\n\" % pct)\n # f.write(\"HERO_REQUIRED_ADVANTAGE = %d\\n\" % advantage)\n\n # print(\"trying advantage = %d, retreat pct = %f\" % (advantage, pct))\n # print(\"Fighting enemy '%s'\" % enemy)\n retcode = subprocess.call(['python2', 'runherocompetition.py', 'MyHero', enemy], stdout=outfile, stderr=outfile)\n # print(retcode)\n if retcode == 0:\n print(\"W\",end=\"\")\n else:\n print(\"L\",end=\"\")\n","sub_path":"hw6/runner2.py","file_name":"runner2.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"407041417","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 22 17:15:59 2017\n\n@author: iarey\n\"\"\"\nimport numba as nb\nimport numpy as np\n\nimport particles_1D as particles\nimport fields_1D as fields\nimport init_1D as init\n\nfrom simulation_parameters_1D import dx, NC, NX, ND, qm_ratios, freq_res, orbit_res, E_nodes, disable_waves\n\n@nb.njit()\ndef cross_product(A, B, C):\n '''\n Vector (cross) product between two vectors, A and B of same dimensions.\n\n INPUT:\n A, B -- 3D vectors (ndarrays)\n\n OUTPUT:\n C -- The resultant cross product with same dimensions as input vectors\n '''\n C[:, 0] += A[:, 1] * B[:, 2]\n C[:, 1] += A[:, 2] * B[:, 0]\n C[:, 2] += A[:, 0] * B[:, 1]\n \n C[:, 0] -= A[:, 2] * B[:, 1]\n C[:, 1] -= A[:, 0] * B[:, 2]\n C[:, 2] -= A[:, 1] * B[:, 0]\n return\n\n\n@nb.njit()\ndef interpolate_edges_to_center(B, interp, zero_boundaries=False):\n ''' \n Used for interpolating values on the B-grid to the E-grid (for E-field calculation)\n with a 3D array (e.g. B). Second derivative y2 is calculated on the B-grid, with\n forwards/backwards difference used for endpoints.\n \n interp has one more gridpoint than required just because of the array used. 
interp[-1]\n should remain zero.\n \n This might be able to be done without the intermediate y2 array since the interpolated\n points don't require previous point values.\n '''\n y2 = np.zeros(B.shape, dtype=nb.float64)\n interp *= 0.\n \n # Calculate second derivative\n for jj in range(1, B.shape[1]):\n \n # Interior B-nodes, Centered difference\n for ii in range(1, NC):\n y2[ii, jj] = B[ii + 1, jj] - 2*B[ii, jj] + B[ii - 1, jj]\n \n # Edge B-nodes, Forwards/Backwards difference\n if zero_boundaries == True:\n y2[0 , jj] = 0.\n y2[NC, jj] = 0.\n else:\n y2[0, jj] = 2*B[0 , jj] - 5*B[1 , jj] + 4*B[2 , jj] - B[3 , jj]\n y2[NC, jj] = 2*B[NC, jj] - 5*B[NC - 1, jj] + 4*B[NC - 2, jj] - B[NC - 3, jj]\n \n # Do spline interpolation: E[ii] is bracketed by B[ii], B[ii + 1]\n for jj in range(1, B.shape[1]):\n for ii in range(NC):\n interp[ii, jj] = 0.5 * (B[ii, jj] + B[ii + 1, jj] + (1/6) * (y2[ii, jj] + y2[ii + 1, jj]))\n \n # Add B0x to interpolated array\n for ii in range(NC):\n interp[ii, 0] = fields.eval_B0x(E_nodes[ii])\n return\n\n\n@nb.njit()\ndef check_timestep(pos, vel, B, E, q_dens, Ie, W_elec, Ib, W_mag, B_center, Ep, Bp, v_prime, S, T, temp_N,\\\n qq, DT, max_inc, part_save_iter, field_save_iter, idx, B_damping_array, E_damping_array):\n '''\n Evaluates all the things that could cause a violation of the timestep:\n - Magnetic field dispersion (switchable in param file since this can be tiny)\n - Gyromotion resolution\n - Ion velocity (Don't cross more than half a cell in a timestep)\n - Electric field acceleration\n \n When a violating condition found, velocity is advanced by 0.5DT (since this happens\n at the top of a loop anyway). The assumption is that the timestep isn't violated by\n enough to cause instant instability (each criteria should have a little give), which \n should be valid except under extreme instability. The timestep is then halved and all\n time-dependent counters and quantities are doubled. Velocity is then retarded back\n half a timestep to de-sync back into a leapfrog scheme.\n '''\n interpolate_edges_to_center(B, B_center)\n B_magnitude = np.sqrt(B_center[ND:ND+NX+1, 0] ** 2 +\n B_center[ND:ND+NX+1, 1] ** 2 +\n B_center[ND:ND+NX+1, 2] ** 2)\n gyfreq = qm_ratios.max() * B_magnitude.max() \n ion_ts = orbit_res / gyfreq\n \n if E[:, 0].max() != 0:\n elecfreq = qm_ratios.max()*(np.abs(E[:, 0] / np.abs(vel).max()).max()) # Electron acceleration \"frequency\"\n Eacc_ts = freq_res / elecfreq \n else:\n Eacc_ts = ion_ts\n\n vel_ts = 0.60 * dx / np.abs(vel[0, :]).max() # Timestep to satisfy CFL condition: Fastest particle doesn't traverse more than 'half' a cell in one time step\n DT_part = min(Eacc_ts, vel_ts, ion_ts) # Smallest of the allowable timesteps\n \n if DT_part < 0.9*DT:\n\n particles.velocity_update(pos, vel, Ie, W_elec, Ib, W_mag, idx, Ep, Bp, B, E, v_prime, S, T,temp_N,0.5*DT) # Re-sync vel/pos \n\n DT *= 0.5\n max_inc *= 2\n qq *= 2\n \n field_save_iter *= 2\n part_save_iter *= 2\n\n particles.velocity_update(pos, vel, Ie, W_elec, Ib, W_mag, idx, Ep, Bp, B, E, v_prime, S, T,temp_N,-0.5*DT) # De-sync vel/pos \n print('Timestep halved. 
Syncing particle velocity...')\n        init.set_damping_array(B_damping_array, E_damping_array, DT)\n\n    return qq, DT, max_inc, part_save_iter, field_save_iter, B_damping_array, E_damping_array\n\n\n#@nb.njit()\ndef main_loop(pos, vel, idx, Ie, W_elec, Ib, W_mag, Ep, Bp, v_prime, S, T,temp_N, \\\n              B, E_int, E_half, q_dens, q_dens_adv, Ji, ni, nu, \\\n              Ve, Te, Te0, temp3De, temp3Db, temp1D, old_particles, old_fields, flux, \\\n              B_damping_array, E_damping_array, qq, DT, max_inc, part_save_iter, field_save_iter):\n    '''\n    Main loop separated from __main__ function, since this is the actual computation bit.\n    Could probably be optimized further, but I wanted to njit() it.\n    The only reason everything's not njit() is because of the output functions.\n    \n    Future: Come up with some way to loop until next save point\n    \n    Thoughts: declare a variable steps_to_go. Output all time variables at return\n    to resync everything, and calculate steps to next stop.\n    If no saves, steps_to_go = max_inc\n    '''\n    \n\n    return qq, DT, max_inc, part_save_iter, field_save_iter\n","sub_path":"simulation_codes/_archived/PARTICLE_PUSHER_ONLY/auxilliary_1D.py","file_name":"auxilliary_1D.py","file_ext":"py","file_size_in_byte":6049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"438578376","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 26 11:29:00 2018\n\n@author: qg186002\n\nextract and merge one-day raw data ETL\n\"\"\"\n\n\nimport pymssql  # connect to sqlserver\nimport os\nimport csv\nimport datetime\n\n# Get the day before the current system date, in the form 2018-02-23\n# workpath ='C:/work/teradata/projects'\n# os.chdir(os.path.join(workpath,'sinopec2/data'))\n\n# now_time = datetime.datetime.now()\n# yes_time = now_time + datetime.timedelta(days=-21)\n# yes_time_nyr = yes_time.strftime('%Y-%m-%d')\n# Output path, named in the form oildetail_2018-02-23.csv\n#output_path = \"./oildetail_\"+yes_time_nyr+\".csv\"\n\nfor i in range(1, 32):\n    if i < 10:\n        yes_time_nyr = \"2017-12-0\"+str(i)\n    else:\n        yes_time_nyr = \"2017-12-\" + str(i)\n\n    print(\"day: \"+yes_time_nyr)\n    conn = pymssql.connect(host='10.182.5.217', user='tr', password='1qaz@WSX', database='OILCARD_DB')\n    cur = conn.cursor()\n    #cur.execute(\"SELECT top(100) nodeno,cardno,oilno,opetime,litter,amount FROM oildetail \"\n    #            \"WHERE Convert(varchar,opetime,23) = %s order by opetime desc\",yes_time_nyr)\n\n    cur.execute(\"select lefttable.*,cardinfor.compno, cardpsninfor.compname,cardpsninfor.identifyno,\"\n                \"cardpsninfor.knowledge,cardpsninfor.province,cardpsninfor.cardnum from\"\n                \"(select nodeno,cardno,oilno,opetime,litter,amount,balance from oildetail \"\n                \"where convert(varchar,opetime,23) = %s) as lefttable \" \n                \"left join cardinfor on lefttable.cardno = cardinfor.cardno \"\n                \"inner join cardpsninfor on cardinfor.compno = cardpsninfor.compno\",yes_time_nyr)\n\n    rows = cur.fetchall()\n\n    # merge and load all raw data only 2 min, 159367 data on 2018-02-05 #\n\n    output_path = \"./data/alldata_\"+yes_time_nyr+\".csv\"\n\n    with open(os.path.abspath(output_path), 'w+', newline='', encoding='utf-8') as tbl_output:\n        writer = csv.writer(tbl_output)\n        # write the csv header row\n        writer.writerow([i[0] for i in cur.description])\n        for row in rows:\n            writer.writerow(row)\n\n    tbl_output.close()\n    cur.close()\n    conn.close()\n","sub_path":"oil/MergeOneDaySQL.py","file_name":"MergeOneDaySQL.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"309826600","text":"import zmq\n\ncontext 
= zmq.Context()\nsocket = context.socket(zmq.REQ)\nsocket.connect(\"tcp://192.168.1.8:5555\")\n\nif __name__ == '__main__':\n    print('zmq client start....')\n    for i in range(1, 10):\n        socket.send_string(\"hello\")\n        message = socket.recv()\n        print('received reply message:{}'.format(message))","sub_path":"zmq_test/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"402907273","text":"#!/bin/env python\n#-*- coding: UTF-8 -*-\n\nimport mmhash\nimport datetime\nimport time\nfrom M2Crypto.EVP import Cipher\nimport base64\nimport json\nfrom sqlalchemy.ext.declarative import DeclarativeMeta\nfrom decimal import Decimal\n\nfrom common import mylog\nfrom common import conf\nfrom common import error_msg\n\nfrom handler.authorization.authorization_service import authorization_obj\n\n\ndef login_requested(func):\n    def _login_requested(*args, **kwargs):\n        self = args[0]\n        if check_token(self._user_id, self._token_id):\n            return func(*args, **kwargs)\n        else:\n            self.ret = 2\n            self.msg = error_msg.TOKEN_TIMEOUT_ERROR\n            return self.response()\n    return _login_requested\n\ndef check_right(right_id):\n    def _check_right(func):\n        def __check_right(*args, **kwargs):\n            self = args[0]\n            if authorization_obj.check_authorization(\n                self._db_session, self._user_id, right_id):\n                return func(*args, **kwargs)\n            else:\n                self.ret = 3\n                self.msg = error_msg.AUTHORIZATION_ERROR\n                return self.response()\n        return __check_right\n    return _check_right\n\ndef make_pwd(pwd):\n    return mmhash.get_unsigned_hash(pwd)\n\ndef check_pwd(old_pwd, raw_pwd):\n    pwd = make_pwd(raw_pwd)\n    if old_pwd == pwd:\n        return True\n    else:\n        return False\n\ndef create_token(user_id):\n    plain_token = (\"%s_%s_%s\" %\n            (int(time.time()),\n            user_id,\n            conf.SERVICE_NAME))\n    return Encrypt(plain_token)\n\ndef check_token(user_id, token):\n    if not user_id or not token:\n        return False\n    try:\n        plain_token = Decrypt(token)\n        plain_token_list = plain_token.split(\"_\")\n        create_time = plain_token_list[0]\n        plain_user_id = plain_token_list[1]\n        if int(time.time()) - int(create_time) > conf.TOKEN_TIMEOUT:\n            return False\n        if str(plain_user_id) == str(user_id):\n            return True\n        return False\n    except Exception as e:\n        return False\n\ndef Encrypt(data):\n    # 'Encrypt data with the aes_128_ecb algorithm'\n    # encryption operation\n    ENCRYPT_OP = 1\n    # initialization vector; unused by the aes_128_ecb algorithm\n    iv = '\\0' * 16\n    cipher = Cipher(alg='aes_128_ecb',\n            key=conf.PRIVATE_KEY,\n            iv=iv,\n            op=ENCRYPT_OP)\n    buf = cipher.update(data)\n    buf = buf + cipher.final()\n    del cipher\n    return base64.b64encode(buf)\n\ndef Decrypt(data):\n    # 'Decrypt data with the aes_128_ecb algorithm'\n    # decryption operation\n    DECRYPT_OP = 0\n    # initialization vector; unused by the aes_128_ecb algorithm\n    iv = '\\0' * 16\n    data = base64.b64decode(data)\n    cipher = Cipher(alg='aes_128_ecb',\n            key=conf.PRIVATE_KEY,\n            iv=iv,\n            op=DECRYPT_OP)\n    buf = cipher.update(data)\n    buf = buf + cipher.final()\n    del cipher\n    return buf \n\ndef to_obj(orm_obj, without_fields=[]):\n    if orm_obj == None:\n        return None\n    elif isinstance(orm_obj.__class__, DeclarativeMeta):\n        fields = {}\n        for field in [x for x in dir(orm_obj)\n                if not x.startswith('_') and x != 'metadata' and x not in without_fields]:\n            data = orm_obj.__getattribute__(field)\n            if isinstance(data, datetime.datetime):\n                fields[field] = data.strftime(\"%Y-%m-%d %H:%M:%S\")\n            elif isinstance(data, datetime.date):\n                fields[field] = data.strftime(\"%Y-%m-%d %H:%M:%S\")\n            elif isinstance(data, datetime.timedelta):\n                fields[field] = ((datetime.datetime.min + data).time().\n                 
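# editor's note (added comment, not in the source): adding a timedelta to\n                 # datetime.min is a trick to render the delta as a clock time; note that\n                 # %Y-%m-%d on the resulting time object always prints 1900-01-01.\n                 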
strftime(\"%Y-%m-%d %H:%M:%S\"))\n elif isinstance(data, Decimal):\n fields[field] = float(data)\n else:\n fields[field] = data\n return fields\n elif isinstance(orm_obj, list):\n obj_list = list()\n for i in orm_obj:\n obj_list.append(to_obj(i, without_fields))\n return obj_list\n","sub_path":"wood-erp/common/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":4025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"351099899","text":"#!/usr/local/bin/python3\nimport glob\nimport os\nimport sys\n\nsnippet_prefix = \"____\"\n\nsnippet_template = \"\"\"snippet %s%s \"%s\"\n%s\nendsnippet\"\"\"\n\nreplace_str = \"{- %%MODULE%% -}\"\n\nsuffix_map = {\n \"haskell\" : \".hs\",\n \"cpp\" : \".cpp\",\n}\n\ndef convert_module( lang, module_name ):\n module_dir = os.path.join('.', module_name)\n test_template_path = os.path.join( module_dir, 'test.template' )\n if not os.path.exists( test_template_path ):\n return None\n\n func_pattern = '*' + suffix_map[lang]\n funcs = [func for func in glob.glob(os.path.join(module_dir,func_pattern))]\n\n primary_func = os.path.join( module_dir, module_name + suffix_map[lang] )\n try:\n i = funcs.index(primary_func)\n funcs[i], funcs[0] = funcs[0], funcs[i]\n except:\n pass\n\n res = []\n for func_path in funcs:\n func_name = os.path.basename(func_path).split('.')[0]\n content = ''.join( open(func_path).readlines() )[:-1]\n res.append( content )\n module_str = '\\n\\n'.join( res )\n\n test_template = ''.join(open(test_template_path).readlines())[:-1]\n return test_template.replace( replace_str, module_str )\n\ndef main():\n langs = [lang for lang in glob.glob('*') if os.path.isdir( lang ) ]\n\n root_dir = os.path.dirname( os.path.abspath(__file__) )\n for lang in langs:\n if not lang in suffix_map:\n continue\n\n lang_dir = os.path.join( root_dir, lang )\n os.chdir( lang_dir )\n\n for module in glob.glob('*'):\n if not os.path.isdir( module ):\n continue\n\n test_str = convert_module( lang, module )\n if test_str is None:\n continue\n\n output_file_name = 'test_' + module + suffix_map[lang]\n output_file_path = os.path.join( 'test', output_file_name )\n with open( output_file_path, 'w' ) as f:\n f.write(test_str)\n\n os.chdir( root_dir )\n pass\n\nif __name__ == '__main__':\n main()\n","sub_path":"snippets/generate_test.py","file_name":"generate_test.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"342938300","text":"import itchat,json\nfrom itchat.content import *\n@itchat.msg_register(itchat.content.NOTE,isGroupChat=True)\ndef reply_msg(msg):\n if(str(msg['Text']).find('加入了群聊'))>0:\n with open('msg.txt', 'r') as file_to_read:\n while True:\n text = file_to_read.read() # 读取txt文件内容,作为消息发送\n return text\n else:\n return None\nitchat.login()\nitchat.run()","sub_path":"ReturnMessage.py","file_name":"ReturnMessage.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"570951895","text":"import numpy as np\nimport time\nfrom matplotlib import pyplot as plt\n\nclass Smoother(object):\n def __init__(self,size):\n self.size = size\n self.l = np.zeros(size)\n self.idx = 0\n self.full = False\n\n def put(self, v):\n self.l[self.idx] = v\n self.idx += 1\n if self.idx >= self.size:\n self.full = True\n self.idx = 0\n\n def get(self):\n if self.full:\n return np.mean(self.l)\n else:\n return 
np.mean(self.l[:self.idx])\n\nif __name__ == \"__main__\":\n    smoother = Smoother(100)\n    ts = []\n    vs = []\n    ss = []\n\n    plt.ion()\n\n    ax = plt.axes()\n    ax.autoscale(enable=True,axis='both',tight=False)\n\n    g1 = plt.plot(ts,vs)[0]\n    g2 = plt.plot(ts,ss)[0]\n\n\n    start = time.time()\n\n    while True:\n        t = time.time() - start\n        v = np.random.normal()\n        smoother.put(v)\n        s = smoother.get()\n\n        ts.append(t)\n        vs.append(v)\n        ss.append(s)\n\n        g1.set_data(ts,vs)\n        g2.set_data(ts,ss)\n        ax.relim()\n        ax.autoscale_view(True,True,True)\n        plt.draw()\n        plt.pause(0.01)\n","sub_path":"controls/smoother.py","file_name":"smoother.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"175350706","text":"\n\nimport pymongo\n\nfrom pymongo import MongoClient\n\nclient = MongoClient('mongodb://localhost:27017')\n\n\ndb = client['ibm_test']\n\nrepos = db.repos\n\nrepo = {\n    'title': 'Python and MongoDB',\n    'content': 'PyMongo is fun, you guys',\n    'author': 'Howie'\n} \n\nresult = repos.insert_one(repo)\n\nprint('One Post: {0}'.format(result.inserted_id))\n\n\npost_1 = {\n    'title': 'Python and MongoDB',\n    'content': 'PyMongo is fun, you guys',\n    'author': 'Scott'\n}\npost_2 = {\n    'title': 'Virtual Environments',\n    'content': 'Use virtual environments, you guys',\n    'author': 'Scott'\n}\npost_3 = {\n    'title': 'Learning Python',\n    'content': 'Learn Python, it is easy',\n    'author': 'Bill'\n}\nnew_result = repos.insert_many([post_1, post_2, post_3])\nprint('Multiple posts: {0}'.format(new_result.inserted_ids))\n\n\nbills_post = repos.find_one({'author': 'Bill'})\nprint(bills_post)\n\nscotts_posts = repos.find({'author': 'Scott'})\nprint(scotts_posts)\n\n\nfor post in scotts_posts:\n    print(post)\n\n\n","sub_path":"express-ibm/test/dummy.py","file_name":"dummy.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"74465368","text":"from django.core.exceptions import FieldError\n\nfrom .constants import *\n\nimport django_mock_queries.query\n\n\ndef merge(first, second):\n    return first + list(set(second) - set(first))\n\n\ndef intersect(first, second):\n    return list(set(first).intersection(second))\n\n\n# noinspection PyProtectedMember\ndef find_field_names(obj):\n    field_names = set()\n    field_names.update(obj._meta._forward_fields_map.keys())\n    field_names.update(field.get_accessor_name()\n                       for field in obj._meta.fields_map.values())\n    for parent in obj._meta.parents.keys():\n        parent_fields = find_field_names(parent) or []\n        field_names.update(parent_fields)\n    return sorted(field_names)\n\n\ndef get_attribute(obj, attr, default=None):\n    result = obj\n    comparison = None\n    parts = attr.split('__')\n\n    for p in parts:\n        if p in COMPARISONS:\n            comparison = p\n        elif result is None:\n            break\n        else:\n            field_names = find_field_names(result)\n            if p != 'pk' and field_names and p not in field_names:\n                message = \"Cannot resolve keyword '{}' into field. 
Choices are {}.\".format(\n p,\n ', '.join(map(repr, map(str, field_names)))\n )\n raise FieldError(message)\n result = getattr(result, p, None)\n\n value = result if result is not None else default\n return value, comparison\n\n\ndef is_match(first, second, comparison=None):\n if isinstance(first, django_mock_queries.query.MockBase):\n return is_match_in_children(comparison, first, second)\n if (isinstance(first, (int, str)) and\n isinstance(second, django_mock_queries.query.MockBase)):\n second = convert_to_pks(second)\n if not comparison:\n return first == second\n return {\n COMPARISON_EXACT: lambda: first == second,\n COMPARISON_IEXACT: lambda: first.lower() == second.lower(),\n COMPARISON_CONTAINS: lambda: second in first,\n COMPARISON_ICONTAINS: lambda: second.lower() in first.lower(),\n COMPARISON_GT: lambda: first > second,\n COMPARISON_GTE: lambda: first >= second,\n COMPARISON_LT: lambda: first < second,\n COMPARISON_LTE: lambda: first <= second,\n COMPARISON_IN: lambda: first in second,\n COMPARISON_STARTSWITH: lambda: first.startswith(second),\n COMPARISON_ISTARTSWITH: lambda: first.lower().startswith(second.lower()),\n COMPARISON_ENDSWITH: lambda: first.endswith(second),\n COMPARISON_IENDSWITH: lambda: first.lower().endswith(second.lower()),\n COMPARISON_ISNULL: lambda: (first is None) == bool(second),\n }[comparison]()\n\n\ndef convert_to_pks(query):\n return [item.pk for item in query]\n\n\ndef is_match_in_children(comparison, first, second):\n return any(is_match(item, second, comparison)\n for item in first)\n\n\ndef matches(*source, **attrs):\n exclude = []\n for x in source:\n for attr_name, filter_value in attrs.items():\n attr_value, comparison = get_attribute(x, attr_name)\n if not is_match(attr_value, filter_value, comparison):\n exclude.append(x)\n break\n for x in source:\n if x not in exclude:\n yield x\n","sub_path":"django_mock_queries/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"8529271","text":"\"\"\"\nDeploys a set of pathogen build JSON data files to a remote location.\n\nnextstrain.org, or other instances of the Nextstrain web frontend (auspice),\nfetch the deployed JSON data files for display.\n \n \nDestinations\n------------\n\nCurrently only Amazon S3 buckets (s3://…) are supported as the remote\ndestination, but others can be added in the future.\n\nDestination URLs support optional path prefixes if you want your local\nfilenames to be prefixed on the remote destination. 
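(The key prefix is simply the path portion of the destination URL.) 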
For example:\n\n    nextstrain deploy s3://my-bucket/some/prefix/ auspice/zika*.json\n\nwill upload files named \"some/prefix/zika*.json\".\n \n \nAuthentication\n--------------\n\nCredentials for authentication should generally be provided by environment\nvariables specific to each destination type.\n\nS3\n--\n\n* AWS_ACCESS_KEY_ID\n* AWS_SECRET_ACCESS_KEY\n\nMore information at:\n\n    https://boto3.readthedocs.io/en/latest/guide/configuration.html#environment-variables\n\nA persistent credentials file, ~/.aws/credentials, is also supported:\n\n    https://boto3.readthedocs.io/en/latest/guide/configuration.html#shared-credentials-file\n \n\"\"\"\n\nfrom pathlib import Path\nfrom urllib.parse import urlparse\nfrom ..util import warn\nfrom ..deploy import s3\n\n\nSUPPORTED_SCHEMES = {\n    \"s3\": s3,\n}\n\n\ndef register_parser(subparser):\n    parser = subparser.add_parser(\"deploy\", help = \"Deploy pathogen build\")\n    parser.description = __doc__\n\n    # Destination\n    parser.add_argument(\n        \"destination\",\n        help    = \"Deploy destination as a URL, with optional key/path prefix\",\n        metavar = \"\")\n\n    # Files to deploy\n    parser.add_argument(\n        \"files\",\n        help    = \"JSON data files to deploy\",\n        metavar = \"\",\n        nargs   = \"+\")\n\n    return parser\n\n\ndef run(opts):\n    url = urlparse(opts.destination)\n\n    if url.scheme not in SUPPORTED_SCHEMES:\n        warn(\"Error: Unsupported destination scheme %s://\" % url.scheme)\n        warn(\"\")\n        warn(\"Supported schemes are: %s\" % \", \".join(SUPPORTED_SCHEMES))\n        return 1\n\n    deploy = SUPPORTED_SCHEMES[url.scheme]\n    files = [Path(f) for f in opts.files]\n\n    return deploy.run(url, files)\n","sub_path":"nextstrain/cli/command/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"238714103","text":"import csv\n\ndef transform(filepath,id):\n    result = {}\n    with open(filepath, encoding=\"utf8\") as csvfile:\n        csvreader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n        \n        # extracting field names from the first row to create the map\n        fields = next(csvreader)\n        \n        # id not found\n        if id not in fields:\n            print(\"Field %s not found in %s\" % (id,filepath))\n            return {}\n        \n        # finds the location\n        IDIndex = fields.index(id)\n        \n        # for each line, serialize it \n        for line in csvreader:\n            m = result[line[IDIndex]] = {}\n            for el,id in zip(line,range(len(line))):\n                m[fields[id]] = el\n        \n        return result\n    ","sub_path":"scripts/filter-chairsandsv-gathertown/csv2map.py","file_name":"csv2map.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"603448602","text":"# coding=utf-8\n\"\"\"\nDATE: 2021/8/9\nAUTHOR: TesterCC\n\"\"\"\n\n# capture packets (sniffer pkt); send packets (forward pkt)\n# ref: https://cloud.tencent.com/developer/article/1694737\n# https://blog.csdn.net/GFS_lele/article/details/105132287\n\nfrom scapy.all import *\n\nfrom scapy.all import *\nfrom scapy.layers.dot11 import Dot11Beacon\nfrom scapy.layers.inet import IP, TCP, UDP\n\ninterface = \"wlan0\"\nap_list = []\n\n\ndef info(pkt):\n    # if pkt.haslayer('Ethernet'):\n    #     print(\n    #         f\"[Eth] Ethernet Info: {pkt['Ethernet'].src} -> {pkt['Ethernet'].dst}, type is {pkt['Ethernet'].type} \")\n\n    # if pkt.haslayer('IP'):\n    #     print(\n    #         f\"[IP] IP Info: {pkt['IP'].src} -> {pkt['IP'].dst}, protocol is : {pkt['IP'].proto}, ttl is : {pkt['IP'].ttl}\")\n\n    if pkt.haslayer(Dot11Beacon):\n        if pkt.addr2 not in ap_list:\n        
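# editor's note (assumption, not in the source): Dot11Beacon frames are only\n        # visible when the interface is in monitor mode (e.g. enabled beforehand with\n        # something like 'airmon-ng start wlan0'); in managed mode this branch never fires.\n        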
ap_list.append(pkt.addr2)\n            print(f\"[802.11] SSID --> {pkt.info}, -- BSSID --> {pkt.addr2}\")\n            # SSID --> b'CTC', -- BSSID --> 00:c0:02:2c:a5:a2\n\n    if pkt.haslayer('TCP'):\n        # print(f\"[D] {pkt[TCP]}\")\n        src = pkt[IP].src\n        dst = pkt[IP].dst\n        src_port = pkt[TCP].sport\n        dst_port = pkt[TCP].dport\n        tcp_flag = pkt.getlayer(TCP).flags\n        print(f\"[TCP] src: {src}:{src_port}, dst: {dst}:{dst_port}, tcp flag: {tcp_flag}\")\n\n    # if pkt.haslayer('UDP'):\n    #     print(f\"[UDP] UDP Info: {pkt['UDP'].sport} -> {pkt['UDP'].dport}, udp data length: {pkt['UDP'].len}\")\n\n    # if pkt.haslayer('ARP'):\n    #     print(f\"[ARP] ARP Info: {pkt['ARP'].psrc} -> {pkt['ARP'].pdst}; [ARP] ARP hw Info: {pkt['ARP'].hwsrc} -> {pkt['ARP'].hwdst}\")\n\n    # print(pkt.show())\n    # pkt.display()  # this is the nicer, human-readable dump\n    # print(pkt.haslayer('Ether'))  # True or False\n\n\nsniff(iface=interface, prn=info, count=0)\n\n'''\n  count: number of packets to capture; 0 means unlimited\n  store: whether to keep captured packets or discard them; 1 keeps, 0 discards\n  offline: read packets from a pcap file instead of sniffing; defaults to None\n  prn: a function applied to every packet; whatever it returns is printed, e.g. prn = lambda x: x.summary() (packet.summary() returns summary information about the packet)\n  filter: a filter rule, using the filter syntax from wireshark\n  L2socket: use the given L2socket\n  timeout: stop sniffing after the given time; defaults to None\n  opened_socket: read from the given object using .recv()\n  stop_filter: a function that decides whether to stop sniffing once a given packet is captured, e.g. stop_filter = lambda x: x.haslayer(TCP)\n  iface: the interface to sniff on\n'''\n","sub_path":"wireless_parse/sniff_wifi.py","file_name":"sniff_wifi.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"233594154","text":"\"\"\"\nRead data from a text file\n\nVersion: 0.1\nAuthor: 骆昊\nDate: 2018-03-13\n\"\"\"\n\nimport time\n\n\ndef main():\n\n    # Read the whole file contents in one go\n    f = None\n    try:\n        f = open('致橡树.txt', 'r', encoding='utf-8')\n        print(f.read())\n    except FileNotFoundError:\n        print('Cannot open the specified file!')\n    except LookupError:\n        print('An unknown encoding was specified!')\n    except UnicodeDecodeError:\n        print('Decoding error while reading the file!')\n    finally:  # finally clause is used to close the file, which is necessary.\n        if f:\n            f.close()\n\n    print()  # separate the 2 copies\n\n    # Read line by line with a for-in loop\n    with open('致橡树.txt', mode='r') as f:\n        # Instead of closing the file object in a finally block to release the resource, you can use the context\n        # manager syntax: the with keyword scopes the file object to a context and releases the file automatically\n        # when the context is left.\n        for line in f:\n            print(line, end='')  # totally control the blank lines\n            time.sleep(0.1)\n            print()  # insert line spacing manually instead of the original ones\n    print()\n\n    # Read the file line by line into a list\n    with open('致橡树.txt') as f:\n        lines = f.readlines()\n        print(lines)\n    \n\nif __name__ == '__main__':\n    main()\n","sub_path":"Day01-15/code/Day11/file1_read_basics.py","file_name":"file1_read_basics.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"641555944","text":"from io import BytesIO\nfrom typing import Set\nimport pandas as pd\nfrom common.data_types import data_sets, data_set_enum_to_data_set_object, DigitalReceiptsEnum\nfrom common.logging_utils import log_and_slack\n\n\ndef get_data_object_from_data_file(data_file):\n    log_and_slack(\"in get_data_object_from_data_file(data_file)\")\n    if isinstance(data_file, tuple):\n        log_and_slack(\"in isinstance(data_file, tuple)\")\n        data_file = BytesIO(data_file[1])\n        data_file.seek(0)\n        log_and_slack(\"end isinstance(data_file, tuple)\")\n\n    df: pd.DataFrame = pd.read_csv(data_file)\n    log_and_slack(\"Columns are: \" + str(df.columns))\n    for data_set in data_sets:\n        required_cols: Set[str] = list_attributes_of_object(data_set).union(\n            list_attributes_of_object(DigitalReceiptsEnum))\n        file_cols: Set[str] = set(df.columns)\n        if 
required_cols.issubset(file_cols):\n return data_set_enum_to_data_set_object[data_set]\n raise ValueError(\"Data set can't be determined\")\n\n\ndef list_attributes_of_object(o):\n return set(e for e in o.__dict__ if not e.startswith('__'))\n","sub_path":"common/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"535885585","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.stats import norm\nimport matplotlib.colors as colors\n\nfrom matplotlib import rc\nfrom matplotlib import cm\n\n__author__ = 'ernesto'\n\n# if use latex or mathtext\nrc('text', usetex=False)\nrc('mathtext', fontset='cm')\n\n#####################################\n# PARAMETERS - This can be modified #\n#####################################\n\nR_dev = 100 # range deviation\nR_dev_prob = 0.99 # range deviation probability\nc = 3 * 10e8 # electromagnetic propagation velocity\n\n#####################\n# END OF PARAMETERS #\n#####################\n\nz_0 = norm.ppf((1 + R_dev_prob) / 2)\nst_dev_tau_0 = 2 * R_dev / (z_0 * c)\nst_dev_R = c * st_dev_tau_0 / 2\n\n\n# axis parameters\nxmin = -R_dev - R_dev / 2\nxmax = R_dev + R_dev / 2\nymax_value = norm.pdf(0, loc=0, scale=st_dev_R)\nymin = -0.15 * ymax_value\nymax = 1.2 * ymax_value\n\ndx = R_dev / 10\nxmin_ax = xmin - dx\nxmax_ax = xmax + dx\nymin_ax = ymin\nymax_ax = ymax\n\n# abscissa values\nx = np.linspace(xmin, xmax, 500)\n\n# normal distribution and density values in x\npdf = norm.pdf(x, loc=0, scale=st_dev_R)\nx_R = np.linspace(-R_dev, R_dev, 400)\nx_R_prob = norm.pdf(x_R, loc=0, scale=st_dev_R)\n\n###############\n# PLOTS #\n###############\n# colors from coolwarm\ncNorm = colors.Normalize(vmin=0, vmax=1)\nscalarMap = cm.ScalarMappable(norm=cNorm, cmap=cm.coolwarm)\ncol10 = scalarMap.to_rgba(0)\ncol11 = scalarMap.to_rgba(0.2)\ncol12 = scalarMap.to_rgba(0.4)\ncol20 = scalarMap.to_rgba(1)\ncol21 = scalarMap.to_rgba(0.85)\ncol22 = scalarMap.to_rgba(0.7)\n\nfontsize = 14\n# vertical tick margin\nvtm = -0.0015\n# horizontal tick margin\nhtm = 10\n\n\nfig = plt.figure(0, figsize=(5, 3), frameon=False)\n\nax = fig.add_subplot(111)\n\nplt.xlim(xmin_ax, xmax_ax)\nplt.ylim(ymin_ax, ymax_ax)\n\n# axis arrows\nplt.annotate(\"\", xytext=(xmin_ax, 0), xycoords='data', xy=(xmax_ax, 0), textcoords='data',\n arrowprops=dict(width=0.1, headwidth=5, headlength=8, facecolor='black', shrink=0.002))\nplt.annotate(\"\", xytext=(0, ymin_ax), xycoords='data', xy=(0, ymax_ax), textcoords='data',\n arrowprops=dict(width=0.1, headwidth=5, headlength=8, facecolor='black', shrink=0.002))\nplt.plot(x, pdf, color=col20)\n\n# area\nax.fill_between(x_R, 0, x_R_prob, color=col12)\n# area limits\nplt.plot(-R_dev * np.ones(2,), [0, x_R_prob[0]], color=col10)\nplt.plot(R_dev * np.ones(2,), [0, x_R_prob[0]], color=col10)\nplt.plot(x_R, x_R_prob, color=col10)\n\nplt.text(xmax_ax, vtm, '$\\hat{R}-R$', fontsize=fontsize, ha='center', va='baseline')\nplt.text(R_dev, vtm, '$100$', fontsize=fontsize, ha='center', va='baseline')\nplt.text(-R_dev, vtm, '$-100$', fontsize=fontsize, ha='center', va='baseline')\nplt.text(4, vtm, '$0$', fontsize=fontsize, ha='left', va='baseline')\nplt.text(8, ymax_ax, '$p(\\hat{R}-R)$', fontsize=fontsize, ha='left', va='center')\nplt.text(0, 0.004, 'Área\\n0.99', fontsize=fontsize, ha='center', va='center', color=col10,\n backgroundcolor=col12)\n\n\nplt.axis('off')\n\n# save as pdf image\nplt.savefig('problem_1_1.pdf', 
bbox_inches='tight')\nplt.show()\n","sub_path":"figuras/PycharmKayStatisticalReport/problem_1_1.py","file_name":"problem_1_1.py","file_ext":"py","file_size_in_byte":3033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"523217707","text":"\nfrom __future__ import print_function\nimport httplib2\nimport os\nfrom apiclient import discovery\nimport oauth2client\nfrom oauth2client import client\nfrom oauth2client import tools\nimport datetime\nimport json\nfrom tkinter import *\nfrom tkinter.ttk import *\n\ntry:\n import argparse\n flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\nexcept ImportError:\n flags = None\n\nSCOPES = 'https://www.googleapis.com/auth/calendar.readonly'\nCLIENT_SECRET_FILE = 'client_secret.json'\nAPPLICATION_NAME = 'Google Calendar API Python Quickstart'\n\nclass dataGui:\n def __init__(self):\n self.main = Tk()\n self.lstFrame = Frame(self.main)\n self.detailFrame = Frame(self.main)\n self.main.title(\"Employee roster\")\n self.main.minsize(450,400)\n self.main.geometry('1150x500')\n #self.main.maxsize(1200,700)\n self.style = Style(self.main) \n self.mTree = Treeview(self.main,\n selectmode='extended',\n columns=(0,1,2,3,4,5,6),\n style='c.Treeview',\n height=8)\n self.lstScrollY = Scrollbar(self.lstFrame,orient=VERTICAL)\n self.mListBox = Listbox(self.lstFrame,width=50,height=5,yscrollcommand=self.lstScrollY.set)\n self.lstScrollY.config(command=self.mListBox.yview)\n self.mListBox.bind('<>',self.LstSelChange)\n\n self.vLblInf = []\n for i in range(0,4):\n temp = StringVar()\n self.vLblInf.append(temp)\n\n self.lblSumm = Label(self.detailFrame,textvariable=self.vLblInf[0]).pack()\n self.lblStart = Label(self.detailFrame,textvariable=self.vLblInf[1]).pack()\n self.lblEnd = Label(self.detailFrame,textvariable=self.vLblInf[2]).pack()\n self.lblDesc = Label(self.detailFrame,textvariable=self.vLblInf[3]).pack()\n\n for i in range(0,4):\n self.vLblInf[i].set(\"HELLO!\")\n\n self.EventList = []\n self.NameTree()\n self.LoadData() \n \n self.mTree.pack(fill=X)\n self.mListBox.pack(side=LEFT)\n self.lstScrollY.pack(side=RIGHT,fill=Y)\n self.lstFrame.pack(side=LEFT,fill=X)\n self.detailFrame.pack(side=LEFT,fill=X)\n mainloop()\n\n def LstSelChange(self,evt):\n w = evt.widget\n idx = int(w.curselection()[0])\n \n tSumm = '\\tEvent Title: ' + self.EventList[idx]['name']\n strtTime = self.EventList[idx]['start']\n endTime = self.EventList[idx]['end']\n\n strtTime = strtTime['dateTime']\n strtTime = self.FormatTime(strtTime)\n endTime = endTime['dateTime']\n endTime = self.FormatTime(endTime)\n strtTime = '\\tStart time: ' + strtTime \n endTime = '\\tEnd time: ' + endTime\n \n tDesc = '\\tDescription: ' + self.EventList[idx]['desc']\n \n self.vLblInf[0].set(tSumm) \n self.vLblInf[1].set(strtTime)\n self.vLblInf[2].set(endTime)\n self.vLblInf[3].set(tDesc)\n \n def FormatTime(self,t):\n t = str(t).split('T')\n t = t[1][:5]\n hr = t[:2]\n hr = int(str(hr))\n min = t[2:]\n\n if hr > 12:\n hr-=12\n t = str(hr) + min + 'pm'\n else:\n t = str(hr) + min + 'am' \n\n return t\n \n\n def GetDay(self,dNum):\n\n if dNum == 0:\n return 'Mon'\n elif dNum == 1:\n return 'Tues'\n elif dNum == 2:\n return 'Wed'\n elif dNum == 3:\n return 'Thurs'\n elif dNum == 4:\n return 'Fri'\n elif dNum == 5:\n return 'Sat'\n else:\n return 'Sun'\n\n #function to return the date of a given weekday prior to a date received \n def PrevWeekday(self,d, weekday): \n days_ahead = weekday - d.weekday() \n d = d + 
datetime.timedelta(days_ahead)\n return d\n\n #function to name the headings of the treeview\n def NameTree(self):\n wDay = self.PrevWeekday(datetime.datetime.now(),0)\n for i in range(0,7):\n strDay = self.GetDay(wDay.weekday())\n print(strDay)\n dTemp = str(wDay).split(' ')\n ausDate = dTemp[0].split('-')\n dHeading = strDay + ' ' + ausDate[2] + '/' + ausDate[1] + '/' + ausDate[0][2:] \n self.mTree.heading(i,text=dHeading)\n self.mTree.column(i,anchor='nw')\n wDay = wDay + datetime.timedelta(1)\n \n #function to load event data from google calender\n def LoadData(self): \n credentials = self.get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http)\n\n #now = datetime.datetime.utcnow().isoformat() + 'Z'\n print('Getting events')\n pMon = self.PrevWeekday(datetime.datetime.now(),0)\n pMon = pMon + datetime.timedelta(-1)\n nMon = pMon + datetime.timedelta(7)\n #u03c5ks0a33pak81ffavm42qpo@group.calendar.google.com\n eventsResult = service.events().list(\n calendarId='primary',timeMin=pMon.isoformat()+'Z', timeMax=nMon.isoformat()+'Z',\n singleEvents=True, orderBy='startTime',showHiddenInvitations=True).execute()\n events = eventsResult.get('items', [])\n\n f = open('data.txt','w')\n json.dump(events,f,indent=4)\n f.close()\n \n empEvent = []\n emps = [] \n if not events:\n print('No upcoming events found.')\n return\n else:\n print('Found events')\n \n try:\n for event in events:\n #if event['organizer'].get('self') == True:\n print(event['summary'])\n\n try:\n tEvent = {'name':event['summary'],'start':event['start'],'end':event['end'],'desc':event['description']}\n except Exception:\n tEvent = {'name':event['summary'],'start':event['start'],'end':event['end'],'desc':'no description'}\n self.EventList.append(tEvent)\n\n \n atnd = event['attendees']\n for a in atnd:\n try:\n if not a['displayName'].title() in emps: \n emps.append(a['displayName'].title())\n print('\\t',a['displayName'])\n except Exception:\n if not a['email'] in emps: \n emps.append(a['email'])\n emps.sort()\n empEvent.append(emps) # List^2\n print('\\n\\n')\n \n for e in emps: \n evts = []\n for event in events:\n atnd = event['attendees']\n for a in atnd: \n if e == a['displayName'].title():\n eSum = []\n eSum.append(event['summary'])\n eSum.append(event['start'].get('dateTime'))\n evts.append(eSum) # List^3\n empEvent.append(evts)\n\n\n self.mTree.tag_configure('oddT',background='#EDEDED') #Create new background\n\n s = 0 # prep get max events for an employee in a day\n # loop through employees\n for i in range(1,len(emps)+1):\n print(emps[i-1])\n tDays = [0,0,0,0,0,0,0]\n if not i % 2: # Set odd rows with new bg\n self.mTree.insert('','end',str(emps[i-1]),text=str(emps[i-1]),tags=('oddT'))\n else:\n self.mTree.insert('','end',str(emps[i-1]),text=str(emps[i-1]))\n\n # loop through events for each employee\n for n in range(0,len(empEvent[i])):\n print('\\t',empEvent[i][n][0])\n eDate = empEvent[i][n][1].split('T') #split event date\n tVal = self.mTree.item(str(emps[i-1]))\n tItem = tVal['values'] #Attempt to append event on gui\n try:\n tItem = tItem[datetime.datetime.strptime(eDate[0],'%Y-%m-%d').weekday()]\n tItem = tItem + empEvent[i][n][0] + '\\n'\n self.mTree.set(str(emps[i-1]),\n datetime.datetime.strptime(eDate[0],'%Y-%m-%d').weekday(),\n tItem)\n except Exception: \n self.mTree.set(str(emps[i-1]),\n datetime.datetime.strptime(eDate[0],'%Y-%m-%d').weekday(),\n str(empEvent[i][n][0] + '\\n'))\n 
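# editor's note: tally events per weekday so the busiest day's count can\n                    # size the Treeview row height further below\n                    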
tDays[datetime.datetime.strptime(eDate[0],'%Y-%m-%d').weekday()] += 1\n for i in tDays:\n if i > s:\n s = i\n \n self.style.configure('c.Treeview',rowheight=s*16)\n\n for i in self.EventList:\n self.mListBox.insert(END,i['name'])\n\n except Exception:\n print(\"Uh oh!\") \n \n\n #function to validate google API\n def get_credentials(self): \n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n #credentials = None \n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES) \n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials\n \ngui = dataGui()\n","sub_path":"DESTINY PROGRAM/TreeViewTest.py","file_name":"TreeViewTest.py","file_ext":"py","file_size_in_byte":10303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"472000736","text":"import argparse\nimport matplotlib.pyplot as plt\nimport custom_style\nfrom custom_style import remove_chart_junk\nimport sys\nimport numpy as np\nimport math\nimport util\nimport config\n\nfrom util import parse_args\nparser = argparse.ArgumentParser(description='Plot suborams vs. latency.')\nargs = parse_args(parser)\nin_name, out_name = args.input, args.output\nprint((\"Out: %s\") % (out_name))\nprint((\"In: %s\") % (in_name))\nblock_sz = 160\n\nnum_blocks = 2000000 \nlatency_type = \"mean_latency\"\n'''\n# ./app osm insert-one 1 3000000\noblix_insert_latency = 0.021087882518768312\n#./app osm search 1 3000000 1\noblix_search_latency = 0.014064238071441651\n# ./app osm delete-one 1 3000000'\noblix_delete_latency = 0.0\noblix_latency = (oblix_insert_latency + (oblix_search_latency + oblix_delete_latency)) / 2.0\n'''\nTIME_MS = 1\nbaseline = util.parse_baseline(args.baseline)\noblix_latency = (baseline[\"oblix_latency\"][\"160\"][\"2000000\"] * 1000) / TIME_MS\noblix_latency = (0.001146628141*1000) / TIME_MS\nobladi_latency = baseline[\"obladi_latency\"] / TIME_MS\n\ndata = util.parseDataNew2(in_name)\nsuborams = util.getListOfVals(data, \"suborams\")\nlatencies = []\nlatencies_50 = []\nlatencies_75 = []\nlatencies_90 = []\nlatencies_95 = []\noblix_latencies = []\nobladi_latencies = []\nfor suboram in suborams:\n latencies.append(util.getLatencyForSuboramAndDataSize(data, suboram, num_blocks, latency_type) / TIME_MS)\n latencies_50.append(util.getLatencyForSuboramAndDataSize(data, suboram, num_blocks, \"50_latency\") / TIME_MS)\n latencies_75.append(util.getLatencyForSuboramAndDataSize(data, suboram, num_blocks, \"75_latency\") / TIME_MS)\n latencies_90.append(util.getLatencyForSuboramAndDataSize(data, suboram, num_blocks, \"90_latency\") / TIME_MS)\n latencies_95.append(util.getLatencyForSuboramAndDataSize(data, suboram, num_blocks, \"95_latency\") / TIME_MS)\n oblix_latencies.append(oblix_latency)\n obladi_latencies.append(obladi_latency)\n\nprint(suborams)\nprint(latencies)\n\nfig = plt.figure(figsize = (8,8))\nax = fig.add_subplot(111)\nax.plot(suborams, latencies, color=config.dumbo_color, label=\"Snoopy\", marker=config.dumbo_marker)\n#ax.plot(suborams, 
latencies_50, color=colors[0], label=\"Dumbo (50th percentile)\")\n#ax.plot(suborams, latencies_75, color=colors[1], label=\"Dumbo (75th percentile)\")\n#ax.plot(suborams, latencies_90, color=colors[2], label=\"Dumbo (90th percentile)\")\n#ax.plot(suborams, latencies_95, color=colors[3], label=\"Dumbo (95th percentile)\")\nax.plot(suborams, oblix_latencies, color=config.oblix_color, label=\"Oblix (1 machine)\", linestyle=config.oblix_line)\nax.plot(suborams, obladi_latencies, color=config.obladi_color, label=\"Obladi (2 machines)\", linestyle=config.obladi_line)\nax.set_xlabel(\"SubORAMs\")\nax.set_ylabel(\"Average latency (ms)\")\n#ax.set_yticks(range(0,11, 2))\nax.spines['bottom'].set_position(\"zero\")\n#ax.set_yticks([20 * (2 ** 20), 40 * (2 ** 20), 60 * (2 ** 20), 80 * (2 ** 20), 100 * (2 ** 20)])\n#ax.set_yticklabels([\"20MB\", \"40MB\", \"60MB\", \"80MB\", \"100MB\"])\n#plt.legend()\n\n\nremove_chart_junk(plt,ax,grid=True)\nif args.title:\n ax.set_title(args.title)\nif args.large:\n plt.legend()\n custom_style.save_fig(fig, out_name, [2.5, 2], pad=0.3)\nelse:\n custom_style.save_fig(fig, out_name, [1.8, 1.5])\nplt.savefig(\"temp.pdf\")\n","sub_path":"scripts/fig/suborams_vs_latency.py","file_name":"suborams_vs_latency.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"126410213","text":"# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# Copyright (c) 2014, Nicolas P. Rougier. All rights reserved.\n# Distributed under the terms of the new BSD License.\n# -----------------------------------------------------------------------------\nimport numpy as np\nfrom numpy.testing import assert_array_equal, assert_allclose\nfrom nose.tools import assert_true, assert_equal, assert_raises\n\nfrom vispy import gloo\nfrom vispy.gloo import gl\nfrom vispy.app import Canvas\nfrom vispy.testing import requires_application\nfrom vispy.gloo import read_pixels\n\n\n@requires_application()\ndef test_wrappers():\n \"\"\"Test gloo wrappers\"\"\"\n with Canvas():\n gl.use_gl('desktop debug')\n gloo.clear('#112233') # make it so that there's something non-zero\n # check presets\n assert_raises(ValueError, gloo.set_state, preset='foo')\n for state in gloo.get_state_presets().keys():\n gloo.set_state(state)\n assert_raises(ValueError, gloo.set_blend_color, (0., 0.)) # bad color\n assert_raises(TypeError, gloo.set_hint, 1, 2) # need strs\n assert_raises(TypeError, gloo.get_parameter, 1) # need str\n # this doesn't exist in ES 2.0 namespace\n assert_raises(ValueError, gloo.set_hint, 'fog_hint', 'nicest')\n # test bad enum\n assert_raises(RuntimeError, gloo.set_line_width, -1)\n\n # check read_pixels\n x = gloo.read_pixels()\n assert_true(isinstance(x, np.ndarray))\n assert_true(isinstance(gloo.read_pixels((0, 0, 1, 1)), np.ndarray))\n assert_raises(ValueError, gloo.read_pixels, (0, 0, 1)) # bad port\n y = gloo.read_pixels(alpha=False, out_type=np.ubyte)\n assert_equal(y.shape, x.shape[:2] + (3,))\n assert_array_equal(x[..., :3], y)\n y = gloo.read_pixels(out_type='float')\n assert_allclose(x/255., y)\n\n # now let's (indirectly) check our set_* functions\n viewport = (0, 0, 1, 1)\n blend_color = (0., 0., 0.)\n _funs = dict(viewport=viewport, # checked\n hint=('generate_mipmap_hint', 'nicest'),\n depth_range=(1., 2.),\n front_face='cw', # checked\n cull_face='front',\n line_width=1.,\n polygon_offset=(1., 1.),\n blend_func=('zero', 'one'),\n 
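# editor's note: checked below via get_parameter('blend_color')\n                 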
blend_color=blend_color,\n blend_equation='func_add',\n scissor=(0, 0, 1, 1),\n stencil_func=('never', 1, 2, 'back'),\n stencil_mask=4,\n stencil_op=('zero', 'zero', 'zero', 'back'),\n depth_func='greater',\n depth_mask=True,\n color_mask=(True, True, True, True),\n sample_coverage=(0.5, True))\n gloo.set_state(**_funs)\n gloo.clear((1., 1., 1., 1.), 0.5, 1)\n gloo.flush()\n gloo.finish()\n # check some results\n assert_array_equal(gloo.get_parameter('viewport'), viewport)\n assert_equal(gloo.get_parameter('front_face'), gl.GL_CW)\n assert_equal(gloo.get_parameter('blend_color'), blend_color + (1,))\n\n\n@requires_application()\ndef test_read_pixels():\n \"\"\"Test read_pixels to ensure that the image is not flipped\"\"\"\n # Create vertices\n vPosition = np.array([[-1, 1], [0, 1], # For drawing a square to top left\n [-1, 0], [0, 0]], np.float32)\n\n VERT_SHADER = \"\"\" // simple vertex shader\n attribute vec2 a_position;\n void main (void) {\n gl_Position = vec4(a_position, 0., 1.0);\n }\n \"\"\"\n\n FRAG_SHADER = \"\"\" // simple fragment shader\n void main()\n {\n gl_FragColor = vec4(1,1,1,1);\n }\n \"\"\"\n\n with Canvas() as c:\n gloo.set_viewport(0, 0, *c.size)\n c._program = gloo.Program(VERT_SHADER, FRAG_SHADER)\n c._program['a_position'] = gloo.VertexBuffer(vPosition)\n gloo.clear(color='black')\n c._program.draw('triangle_strip')\n\n # Check if the return of read_pixels is the same as our drawing\n img = read_pixels(alpha=False)\n assert_equal(img.shape[:2], c.size[::-1])\n top_left = sum(img[0, 0])\n assert_true(top_left > 0) # Should be > 0 (255*4)\n # Sum of the pixels in top right + bottom left + bottom right corners\n corners = sum(img[0, -1] + img[-1, 0] + img[-1, -1])\n assert_true(corners == 0) # Should be all 0\n gloo.flush()\n gloo.finish()\n","sub_path":"vispy/gloo/tests/test_wrappers.py","file_name":"test_wrappers.py","file_ext":"py","file_size_in_byte":4568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"4522357","text":"from text_auto_complete import TextAutoComplete\nfrom tkinter import *\n\n# TODO : key release debounce\n# TODO : Handle delete key\n\n\nclass AutoCompleterUI:\n def __init__(self, words, tk_root, text):\n self.words = words\n self.auto_complete = TextAutoComplete(words)\n self.prefix = []\n self.text_pane = text\n self.listbox = Listbox(tk_root)\n self.list_visible = False\n\n self.listbox.bind('', self.__on_key_released)\n self.text_pane.bind('', self.__on_key_released)\n\n def __show(self):\n location = self.__get_last_character_coordinate()\n self.listbox.place(x=location[0], y=location[1], width=165, height=100)\n self.listbox.select_set(0)\n self.listbox.focus_set()\n self.list_visible = True\n\n def __hide_and_append_text(self, string=None):\n if string is not None:\n self.text_pane.insert('insert', string)\n\n self.listbox.place(x=0, y=0, width=0, height=0)\n self.text_pane.focus_set()\n self.list_visible = False\n\n def __on_listbox(self, event):\n if event.keysym == 'Return' and self.list_visible:\n value = self.listbox.get(self.listbox.curselection()[0])\n self.__hide_and_append_text(value[len(self.prefix):])\n elif str.isalnum(event.char):\n # Allow typing to continue, ignoring the auto complete suggestions\n self.__hide_and_append_text(event.char)\n\n def __on_key_released(self, event):\n if event.keysym == 'Down' and self.list_visible:\n return\n\n if event.keysym == 'Escape':\n self.__hide_and_append_text()\n return\n\n if self.list_visible:\n if event.char == ' ':\n 
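# editor's note: a space ends the current word, so type it through and dismiss the suggestion list\n                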
self.__hide_and_append_text(event.char)\n else:\n self.__on_listbox(event)\n\n if not str.isalnum(event.char):\n self.prefix = []\n return\n\n self.prefix.append(event.char)\n\n words = self.auto_complete.autocomplete(''.join(self.prefix))\n if words is None and self.list_visible:\n # hide\n self.__hide_and_append_text()\n index = 0\n self.listbox.delete(0, END)\n if words is not None:\n for w in words:\n if w == ' ':\n continue\n\n word = ''.join(self.prefix) + w\n self.listbox.insert(index, word)\n index += 1\n\n self.__show()\n\n\n def __get_last_character_coordinate(self):\n x_pos, y_pos, _, height = text_pane.bbox('insert')\n return x_pos, y_pos + height + 20\n\n\ndef read_autocomplete_words(filename):\n words = []\n\n with open(filename, \"r\") as f:\n for line in f:\n words.extend(line.split())\n\n return words\n\n\nif __name__ == '__main__':\n # creating root window\n root = Tk()\n root.title(\"Auto Completed Demo\")\n root.geometry('800x600')\n\n frame = Frame(root)\n frame.pack(pady=5)\n\n text_scroll = Scrollbar(frame)\n text_scroll.pack(side=RIGHT, fill=Y)\n text_pane = Text(frame, width=97, height=25, font=(\"Helvetica\", 16), undo=True, yscrollcommand=text_scroll.set, highlightthickness=0)\n text_pane.pack()\n\n text_scroll.config(command=text_pane.yview)\n\n auto_complete = AutoCompleterUI(read_autocomplete_words('keywords.txt'), root, text_pane)\n root.mainloop()\n\n","sub_path":"autocomplete/notepad.py","file_name":"notepad.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"333442802","text":"from tkinter import *\nthings = [{\"dictionaryItem\":\"value\"}, {\"anotherDict\":\"itsValue\"}, 3, \"foo\", [\"bar\", \"baz\"]]\nroot = Tk()\nf = Frame(root).pack()\nl = Listbox(root)\nb = Button(root, text = \"delete selection\", command = lambda: delete(l))\nb.pack()\nl.pack()\n\nfor i in range(5):\n l.insert(END, things[i])\n\ndef delete(listbox):\n\n global things\n # Delete from Listbox\n selection = l.curselection()\n l.delete(selection[0])\n # Delete from list that provided it\n value = eval(l.get(selection[0]))\n ind = things.index(value)\n del(things[ind])\n print(things)\n\nroot.mainloop()","sub_path":"Versiones/v5/Recursos/test/test3.py","file_name":"test3.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"144053565","text":"import copy\nwith open('input.txt') as f:\n sm = [[char for char in line.rstrip('\\n')] for line in f.readlines()]\n\nwhile True:\n nm = copy.deepcopy(sm)\n for y, line in enumerate(sm):\n for x, char in enumerate(line):\n lim_l = max(0, x-1)\n lim_r = min(x+2, len(line))\n lim_u = max(0,y-1)\n lim_d = min(y+2,len(sm)) \n\n surroundings = [letter for v in sm[lim_u:lim_d] for letter in v[lim_l:lim_r]]\n if char == 'L':\n if surroundings.count('#') == 0:\n nm[y][x] = '#'\n if char == '#':\n if surroundings.count('#')-1 >= 4:\n nm[y][x] = 'L'\n \n if all([sm[y][x] == nm[y][x] for y in range(len(sm)) for x in range(len(sm[y])) ]):\n break\n sm = nm\n\nprint(sum([char == '#' for line in sm for char in line]))","sub_path":"day11/day11_part1.py","file_name":"day11_part1.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"344078153","text":"import numpy as np\n# import matplotlib.pyplot as plt\nfrom scipy.io import loadmat, savemat\nfrom numpy.core.records import 
fromarrays\nfrom PIL import Image\nfrom itertools import compress\nimport os, pprint\nimport json\nfrom tqdm import tqdm\n\n\naicha_pred_dir = '/home/yuliang/code/multi-human-pose/predict/NMS'\n# aicha_gt = os.path.join(aicha_pred_dir, 'keypoint_validation_annotations_20170911.json')\n# aicha_gt = os.path.join(aicha_pred_dir, 'submit.json')\n# image_dir = '/home/yuliang/code/multi-human-pose/predict/data/images'\nindex = np.loadtxt('index.txt', delimiter=' ', dtype='S50,i4,i4')\npose_score = np.loadtxt('scores.txt', dtype='S50'+14*',d')\npose = np.loadtxt('pred.txt', dtype='S50'+28*',i4')\n\n# with open(aicha_gt, 'r') as f:\n# gt = json.load(f)\n\naicha_results = []\n\nvis = False\n\n\nfor idx, item in enumerate(tqdm(index)):\n \n image_id = item[0][:-4]\n human_num = item[2]-item[1]+1\n image_result = {}\n image_result['image_id'] = image_id\n image_result['keypoint_annotations'] = {}\n \n for human_id in xrange(1, human_num+1):\n image_result['keypoint_annotations']['human'+str(human_id)] = np.hstack((np.array([p for p in pose[item[1]+human_id-2] if type(p) is np.int32])\\\n .reshape(-1,2),np.ones((14,1)))).flatten().tolist()\n aicha_results.append(image_result)\n \n if vis:\n # prediction \n plt.figure(figsize=(20,10))\n plt.subplot(1,2,1)\n ax = plt.gca()\n plt.imshow(Image.open(os.path.join(image_dir, item[0])))\n for i in xrange(human_num):\n pose_ = np.array([p for p in pose[item[1]-1+i] if type(p) is np.int32]).reshape(-1,2)\n pose_score_ = np.array([p for p in pose_score[idx+i] if type(p) is np.float64]).reshape(-1,1)\n xmin = np.min(pose_[:,0])\n ymin = np.min(pose_[:,1])\n xmax = np.max(pose_[:,0])\n ymax = np.max(pose_[:,1])\n display_txt = '%.2f'%(np.mean(pose_score_))\n coords = (xmin, ymin), xmax-xmin+1, ymax-ymin+1\n ax.add_patch(plt.Rectangle(*coords, fill=False, edgecolor='yellow', linewidth=2))\n ax.text(xmin, ymin, display_txt, bbox={'facecolor':'red', 'alpha':0.5})\n \n # groundtruth\n plt.subplot(1,2,2)\n ax = plt.gca()\n for idx_gt in xrange(len(gt)):\n if gt[idx_gt]['image_id'] == image_id:\n human_num_ = len(gt[idx_gt]['keypoint_annotations'].keys())\n plt.imshow(Image.open(os.path.join(image_dir, item[0])))\n for i in xrange(1,human_num_+1):\n pose_ = np.array(gt[idx_gt]['keypoint_annotations']['human'+str(i)]).reshape(-1,3)\n xmin = np.min(pose_[:,0])\n ymin = np.min(pose_[:,1])\n xmax = np.max(pose_[:,0])\n ymax = np.max(pose_[:,1])\n coords = (xmin, ymin), xmax-xmin+1, ymax-ymin+1\n ax.add_patch(plt.Rectangle(*coords, fill=False, edgecolor='yellow', linewidth=2))\n \n# if idx == 5:\n# break\n\nfinal = json.dumps(aicha_results)\nwith open('submit-valid-0.4.json','w') as f:\n f.write(final)\n \n","sub_path":"predict/NMS/convert_txt_json.py","file_name":"convert_txt_json.py","file_ext":"py","file_size_in_byte":3242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"600840688","text":"from random import randrange\nfrom collections import deque\nfrom names import get_full_name\nfrom datetime import datetime\n\n\ndef combinations(n, k):\n total = 1\n for i in range(1, k + 1):\n total *= (n + 1 - i) / i\n return int(total)\n\n\nclass User:\n def __init__(self, name):\n self.name = name\n\n\nclass SocialGraph:\n def __init__(self):\n self.lastID = 0\n self.users = {}\n self.friendships = {}\n\n def addFriendship(self, userID, friendID):\n \"\"\"\n Creates a bi-directional friendship\n \"\"\"\n if userID == friendID:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friendID in self.friendships[userID] 
or userID in self.friendships[friendID]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[userID].add(friendID)\n self.friendships[friendID].add(userID)\n\n def addUser(self, name):\n \"\"\"\n Create a new user with a sequential integer ID\n \"\"\"\n self.lastID += 1 # automatically increment the ID to assign the new user\n self.users[self.lastID] = User(name)\n self.friendships[self.lastID] = set()\n\n def populateGraph(self, numUsers, avgFriendships):\n \"\"\"\n Takes a number of users and an average number of friendships\n as arguments\n\n Creates that number of users and a randomly distributed friendships\n between those users.\n\n The number of users must be greater than the average number of friendships.\n \"\"\"\n # Reset graph\n self.lastID = 0\n self.users = {}\n self.friendships = {}\n\n for _ in range(numUsers):\n self.addUser(get_full_name())\n\n ids = [id for id in self.users.keys()]\n\n num_friendships = (avgFriendships * numUsers) // 2\n possibilities = combinations(numUsers, 2) - 1\n picked = []\n\n while len(picked) < num_friendships:\n candidate = randrange(possibilities)\n drop = len(picked)\n\n for i in range(len(picked)):\n if candidate >= picked[i]:\n candidate += 1\n else:\n drop = i\n break\n\n picked.insert(drop, candidate)\n possibilities -= 1\n\n for pointer in picked:\n segment = len(ids) - 1\n while pointer >= segment:\n pointer = pointer - segment\n segment -= 1\n\n first_id_index = len(ids) - 1 - segment\n second_id_index = first_id_index + 1 + pointer\n\n self.addFriendship(ids[first_id_index], ids[second_id_index])\n\n def getAllSocialPaths(self, userID):\n \"\"\"\n Takes a user's userID as an argument\n\n Returns a dictionary containing every user in that user's\n extended network with the shortest friendship path between them.\n\n The key is the friend's ID and the value is the path.\n \"\"\"\n visited = {} # Note that this is a dictionary, not a set\n\n queue = deque()\n visited[userID] = []\n for friend_id in self.friendships[userID]:\n queue.appendleft((userID, friend_id))\n\n while queue:\n v1, v2 = queue.pop()\n if visited.get(v2) is None:\n # if not, add to visited dict, using tuple and visited dict to construct friendship path\n visited[v2] = visited[v1] + [v1]\n\n # if not, add all friends edges to queue in the form of a tuple (currentID, friendID)\n for friend_id in self.friendships[v2]:\n queue.appendleft((v2, friend_id))\n\n for key in visited:\n visited[key].append(key)\n\n return visited\n\n def stats(self):\n total_friends = 0\n\n avg_degs = 0\n\n for id in self.users:\n user = self.users[id]\n network = self.getAllSocialPaths(id)\n degs = 0\n for friend in network:\n degs += len(network[friend])\n avg_degs += degs / len(network)\n total_friends += len(network)\n\n # print(f'{user.name} has {len(network)} friends in extended network')\n\n print(\n f'Average user has {total_friends / len(self.users)} in extended network')\n print(\n f'Average user has average {avg_degs / len(self.users)} degrees of separation')\n\n\nif __name__ == '__main__':\n ns = []\n times = []\n for n in range(100, 500, 100):\n sg = SocialGraph()\n timestart = datetime.now().timestamp()\n sg.populateGraph(n, 4)\n ns.append(n)\n times.append(datetime.now().timestamp() - timestart)\n for n in range(1000, 3001, 1000):\n sg = SocialGraph()\n timestart = datetime.now().timestamp()\n sg.populateGraph(n, 4)\n ns.append(n)\n times.append(datetime.now().timestamp() - timestart)\n for n in range(3000, 10001, 3000):\n sg = SocialGraph()\n timestart = 
datetime.now().timestamp()\n sg.populateGraph(n, 4)\n ns.append(n)\n times.append(datetime.now().timestamp() - timestart)\n for n in ns:\n print(n)\n for time in times:\n print(time)\n\n # connections = sg.getAllSocialPaths(1)\n # print(connections)\n","sub_path":"projects/graph/social/social.py","file_name":"social.py","file_ext":"py","file_size_in_byte":5306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"566189691","text":"# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom args import parse_args\nimport tensorflow as tf\n\nimport sys\nsys.path.append(\"..\")\nfrom common import tensorflow_api_benchmark as tensorflow_api\n\nclass abs(tensorflow_api.TensorflowAPIBenchmarkBase):\n def build_graph(self, backward=False):\n data = tf.placeholder(name='data', shape=[10, 10, 100, 100], dtype=tf.float32)\n result = tf.abs(x=data)\n\n self.feed_list = [data]\n if backward:\n gradients = tf.gradients(result, [data])\n self.fetch_list = [result, gradients[0]]\n else:\n self.fetch_list = [result]\n\n\nif __name__ == '__main__':\n args = parse_args()\n obj = abs()\n obj.build_graph(backward=args.backward)\n obj.run(use_gpu=args.use_gpu,\n repeat=args.repeat,\n log_level=args.log_level,\n check_output=args.check_output,\n profile=args.profile)\n \n","sub_path":"api/tensorflow/abs.py","file_name":"abs.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"19024330","text":"# Used in cdfplayer\nimport os\nfrom scipy.io import netcdf\nimport numpy as np\nimport aug_sfutils as sf\nfrom aug_sfutils import sfhmod\nimport parse_nml, ctr2rz_sf, get_sf_grid, tr_read_ctr\n\nww = sf.WW()\n\n\ndef sfh_mod(cdf_file, nml=None, fsfh_in=None, fsfh_out=None):\n\n\n sfh = sfhmod.SFHMOD(fin=fsfh_in)\n\n runid = os.path.split(cdf_file)[1][:8]\n cv = netcdf.netcdf_file(cdf_file, 'r', mmap=False).variables\n sfh_d = {}\n\n sfo = sfh.sfo\n\n tbases = [ lbl for lbl in sfo.sfh.keys() if sfo.sfh[lbl].obj_type == 8 ]\n abases = [ lbl for lbl in sfo.sfh.keys() if sfo.sfh[lbl].obj_type == 13 ]\n\n for obj in tbases:\n nt_cdf = cv[obj].shape[0]\n sfh.modtimeall(obj, nt_cdf)\n\n for obj in abases:\n if obj in cv.keys():\n nx_cdf = cv[obj].shape[-1]\n sfh.modareaall(obj, nx_cdf)\n\n for obj in sfo.parsets:\n if nml is None:\n print('Parameter sets not read from any namelist')\n else: # Read values from namelist\n parset = sfo.getparset(obj)\n sfh_d[obj] = {}\n for pn, val in parset.items():\n if pn == 'runid':\n sfh_d[obj][pn] = [runid]\n else:\n sfh_d[obj][pn] = parse_nml.parsenml(nml, pn, fmt=val.data_format)\n\n for obj in sfo.objects:\n\n name_flg = False\n if obj in cv.keys():\n name_flg = True\n cdfobj = obj\n else:\n if len(obj) == 8:\n name_flg = False\n for cdfobj in cv.keys():\n if obj in cdfobj:\n print('CDF signal %s stored as shotfile %s' %(cdfobj, obj))\n name_flg = True\n break\n\n if name_flg:\n 
sfh_d[obj] = cv[cdfobj].data\n else:\n print('Signal %s not found in %s' %(obj, cdf_file))\n\n sfh.write(fout=fsfh_out)\n\n return sfh_d\n\n\ndef cdf2tre(runid, time=-1):\n\n homdir = os.getenv('HOME')\n os.system('mkdir -p %s/shotfiles/TRE' %homdir)\n source = '/afs/ipp/home/t/transp/pub/TRE00000.sfh.temp'\n sfhdir = '%s/tr_client/AUGD' %homdir\n nshot = int(runid[0:5])\n tail = runid[5:8]\n fcdf = '%s/%d/%s/%s.CDF' %(sfhdir, nshot, tail, runid)\n fsfh = '%s/TRE00000.sfh' %sfhdir\n\n cv = netcdf.netcdf_file(fcdf, 'r', mmap=False).variables\n\n# Parameters\n\n os.chdir(sfhdir)\n\n coords = ('RMAJM', 'XB', 'X')\n cdf2d = ['DVOL', 'DAREA', 'ELONG', 'TRFLX', 'BPOL', 'PLFLX', \\\n 'Q', 'PMHD_IN', 'PPLAS', 'CUR', 'PLCURTOT', 'GFUN', 'PLFMP']\n cdf1d = ['ASHAF', 'BETAT', 'LIO2', 'PCUR', 'BZ', 'BZXR', 'RAXIS', 'YAXIS']\n\n prof1 = ('Ri', 'Zj', 'Lpf', 'IpiPSI')\n prof2 = ('PFL', 'TFLx', 'Qpsi', 'FFP')\n specs = ('LPFx', 'PFxx', 'RPFx', 'zPFx')\n mixed = ('Vol', 'Area', 'Pres', 'Jpol')\n sig3d = ('PFM',) #'CDM')\n sig1d = ('ixti', 'time')\n switch = ('ikCAT',)\n\n ssqlist = ('Rsquad', 'zsquad', 'Rgeo', 'Zgeo', 'Rxpu', 'Zxpu', 'ahor', 'k', \\\n 'Rin', 'Raus', 'Zunt', 'Rzunt', 'Zoben', 'Rzoben', 'Rmag', 'Zmag', \\\n 'Vol', 'IVSF', 'Slunt', 'Srunt', 'fXP1fIL', 'fXP1fAL', 'XPfdif', \\\n 'lenH-1', 'lenH-2' ,'lenH-3', 'Rxpo', 'Zxpo', 'Slobn', 'Srobn', \\\n 'bpli2', 'betpol', 'li', 'Wmhd', 'Circumf', 'bver', 'q95/q0', \\\n 'Finstab', 'Fstab', 'Rax-Rgeo', 'fax-bnd', 'fbnd-ref', \\\n 'q0', 'q95', 'fbnd-f12', 'fbnd-f17', 'fbnd-f22', 'fbnd-f25', \\\n 'q25', 'q50', 'q75', 'Zgeri2b', 'Zgera2b', 'Zskewi2b', 'Zskewa2b', \\\n 'Rrrofi2b', 'Rroofa2b', 'lenH-4', 'delRoben', 'delRuntn', \\\n 'GapAbot', 'GamAmid', 'GapAtop', 'GapAmin', \\\n 'lenH-5', 'lenV-1', 'koben', 'kuntn', 'lenV-2', 'lenV-3', 'dRXP')\n\n#==========\n# Profiles\n#==========\n\n grid = {}\n for coord in coords:\n grid[coord] = cv[coord][0, :]\n\n nt = len(cv['TIME3'].data)\n n_xb = len(grid['XB'])\n n_xb1 = n_xb + 1\n n_x = len(grid['X'])\n n_ssq = len(ssqlist)\n\n if time == -1:\n time = cv['TIME3'][-1]\n\n# grid['XB'] = 0.025, 0.05, ...,1.0\n# grid['X'] = 0.0125,0.0375,...,0.9875\n\n gfun = cv['GFUN'].data.T\n Jpol = (-5.e4*gfun*cv['BZXR'].data).T\n xb_prof = {}\n psi_prof = {}\n ddpsi = {}\n\n for prof in mixed + prof2:\n xb_prof [prof] = np.zeros((nt, n_xb1), dtype=np.float32)\n psi_prof[prof] = np.zeros((nt, n_xb1), dtype=np.float32)\n ddpsi[prof] = np.zeros((nt, n_xb1), dtype=np.float32)\n\n xb_prof['Vol'] [:, 1:n_xb1] = 1.e-6*np.cumsum(cv['DVOL'] [:, :], axis=1)\n xb_prof['Area'][:, 1:n_xb1] = 1.e-4*np.cumsum(cv['DAREA'][:, :], axis=1)\n\n# Interpolate on X and XB grids, to have profiles and derivatives on XB grid\n\n xb_grid = np.append(0, grid['XB'])\n for jt in range(nt):\n xb_prof['PFL'] [jt, :] = np.append(0, -2*np.pi*cv['PLFLX'][jt, :])\n xb_prof['TFLx'][jt, :] = np.append(0, -cv['TRFLX'] [jt, :])\n xb_prof['Qpsi'][jt, :] = np.interp(xb_grid, grid['XB'], cv['Q'][jt, :])\n xb_prof['Jpol'][jt, :] = np.interp(xb_grid, grid['XB'], Jpol[jt, :])\n xb_prof['Pres'][jt, :] = np.interp(xb_grid, grid['X'] , cv['PMHD_IN'][jt, :])\n\n# Regular psi grid\n\n for jt in range(nt):\n pfl = xb_prof['PFL'][jt, :]\n psi_prof['PFL'][jt, :] = np.linspace(pfl[0], pfl[-1], n_xb1)\n dpsi = psi_prof['PFL'][:, 1] - psi_prof['PFL'][:, 0] # time dependent\n for lbl in ('TFLx', 'Qpsi') + mixed:\n for jt in range(nt):\n tmp = np.interp(psi_prof['PFL'][jt, ::-1], xb_prof['PFL'][jt, ::-1], xb_prof[lbl][jt, ::-1])\n psi_prof[lbl][jt, :] = tmp[::-1]\n# Derivatives 
d/dPsi\n if lbl in mixed:\n ddpsi[lbl][jt, :] = np.gradient(psi_prof[lbl][jt, :])/dpsi[jt]\n psi_prof['FFP'] = 4.e-14*psi_prof['Jpol']*ddpsi['Jpol']\n\n#==================\n# Psi(R, z), j(R, z)\n#==================\n\n eq = get_sf_grid.get_grid(runid)\n Rgrid = eq['Ri'][:, 0]\n zgrid = eq['Zj'][:, 0]\n nr = len(Rgrid)\n nz = len(zgrid)\n\n#================\n# shotfile output\n#================\n\n nshot = int(runid[:5])\n\n tre = {}\n tre['ixti'] = np.arange(nt) + 1\n tre['time'] = np.float32(cv['TIME3'].data)\n# R, z grids\n tre['Ri'] = np.repeat(np.float32(Rgrid), nt).reshape(nr, nt)\n tre['Zj'] = np.repeat(np.float32(zgrid), nt).reshape(nz, nt)\n# Lpf profiles, from plasma edge to magnetic axis\n tre['Lpf'] = np.repeat( np.int32(n_xb), nt ).reshape(1, nt)\n\n for lbl in prof2:\n tre[lbl] = psi_prof[lbl].T # (t, r) -> (r, t)\n tre[lbl] = tre[lbl][::-1, :] # [0]<-> separatrix\n\n# Profiles with d/dpsi mixed\n for lbl in mixed:\n tre[lbl] = np.zeros((2*n_xb1, nt), dtype=np.float32)\n for jt in range(nt):\n tre[lbl][:, jt] = np.array([psi_prof[lbl][jt, ::-1], ddpsi[lbl][jt, ::-1]]).T.ravel()\n\n tre['IpiPSI'] = np.float32(cv['PCUR'].data.reshape(1, nt))\n tre['ikCAT'] = np.repeat( np.int32(4), nt ).reshape(1, nt)\n\n# Matrices and separatrix\n tre['PFM'] = np.zeros((nr, nz, nt), dtype=np.float32)\n\n nthe_eqd = 101\n\n# Special points: axis, sepx, lim=sepx\n lpfx = 5\n tre['LPFx'] = np.repeat( np.int32(lpfx-1), nt ).reshape(1, nt)\n tre['PFxx'] = np.zeros((lpfx, nt), dtype=np.float32)\n tre['RPFx'] = np.zeros((lpfx, nt), dtype=np.float32)\n tre['zPFx'] = np.zeros((lpfx, nt), dtype=np.float32)\n\n# SSQ\n\n ssq_d = {}\n for lbl in ssqlist:\n ssq_d[lbl] = np.zeros(nt)\n\n fm = tr_read_ctr.TR_READ_CTR(fcdf, rho=1, nthe=nthe_eqd)\n r_sep = 0.01*fm.Rsurf\n z_sep = 0.01*fm.Zsurf\n j_xpoint = np.argmin(z_sep, axis=1)\n j_up = np.argmax(z_sep, axis=1)\n\n tre['zPFx'][1, :] = np.min(z_sep, axis=1) # z lower X-point\n ssq_d['Zoben'] = np.max(z_sep, axis=1)\n ssq_d['Rin'] = np.min(r_sep, axis=1)\n ssq_d['Raus'] = np.max(r_sep, axis=1)\n\n#-----------------\n# Bottle neck: PFM\n#-----------------\n\n for jt in range(nt):\n rz = ctr2rz_sf.CTR2RZ(fcdf, it=jt)\n tre['PFM'][:, :, jt] = rz.pfm # Pol. 
flux matrix\n tre['PFL'][:, jt] += rz.pf_shift\n tre['RPFx'][1, jt] = r_sep[jt, j_xpoint[jt]] # R lower X-point\n ssq_d['Rzoben'][jt] = r_sep[jt, j_up[jt]]\n for rho in (0, 0.25, 0.50, 0.75, 0.95):\n lbl = 'q%d' %int(100*rho)\n ssq_d[lbl][jt] = np.interp(rho, xb_grid**2, xb_prof['Qpsi'][jt])\n\n ssq_d['Rzunt'] = tre['RPFx'][1, :] \n ssq_d['Zunt'] = tre['zPFx'][1, :]\n\n# Magnetic axis\n tre['PFxx'][0, :] = tre['PFL'][-1, :]\n tre['RPFx'][0, :] = 0.01*cv['RAXIS'].data\n tre['zPFx'][0, :] = 0.01*cv['YAXIS'].data\n# X point, 2nd X-point\n tre['PFxx'][1, :] = tre['PFL'][0, :]\n tre['PFxx'][3, :] = tre['PFL'][0, :]\n tre['PFxx'][4, :] = tre['PFL'][0, :]\n\n# Parameters\n tre['PARMV'] = {}\n tre['PARMV']['M'] = np.int32(nr-1)\n tre['PARMV']['N'] = np.int32(nz-1)\n tre['PARMV']['NCL'] = np.int32(13)\n tre['PARMV']['NTIME'] = np.int32(nt)\n\n ssq_d['Rgeo'] = 0.01*(cv['RAXIS'].data - cv['ASHAF'].data)\n ssq_d['Rax-Rgeo'] = 0.01*cv['ASHAF'].data\n ssq_d['Zgeo'] = np.zeros(nt)\n ssq_d['Rmag'] = 0.01*cv['RAXIS'].data\n ssq_d['Zmag'] = 0.01*cv['YAXIS'].data\n ssq_d['betpol'] = cv['BETAT'].data\n ssq_d['li'] = cv['LIO2'].data\n ssq_d['Vol'] = psi_prof['Vol'][:, -1]\n ssq_d['k'] = cv['ELONG'][:, -1]\n\n# Create SSQ array in right sequence for shotfile writing\n ssq = np.zeros((n_ssq, nt), dtype=np.float32)\n for jssq, lbl in enumerate(ssqlist):\n ssq[jssq, :] = ssq_d[lbl]\n\n# Modify sfh\n\n sig_gr = sig3d + prof1 + prof2 + mixed + specs + switch\n\n sfh = sfhmod.SFHMOD(fin=source)\n sfh.modtime('time', nt)\n sfh.modtime('ixti', nt)\n for lbl in sig_gr:\n print('Sfh_Mod %s' %lbl)\n dims = tre[lbl].shape\n sfh.modindex(lbl, dims)\n sfh.write(fout=fsfh)\n\n# Write TRE shotfile\n\n exp = os.getenv('USER')\n\n diag = 'TRE'\n if ww.Open(exp, diag, nshot):\n for lbl in sig1d + sig_gr:\n status = ww.SetObject(lbl, tre[lbl])\n\n# Parameter Sets\n ps = 'PARMV'\n for pn in tre[ps].keys():\n val = tre[ps][pn]\n print('Writing %s value = %s' %(pn, val))\n status = ww.SetParameter(ps, pn, val)\n\n status = ww.SetParameter('RunDocu', 'runid', runid)\n try:\n status = ww.SetParameter('RunDocu', 'CLISTexp', eq['!exp'])\n except:\n print('No CLISTE-exp found to store in ParameterSet RunDocu')\n try:\n status = ww.SetParameter('RunDocu', 'CLISTdia', eq['!dia'])\n except:\n print('No CLISTE-diag found to store in ParameterSet RunDocu')\n try:\n cl_ed = np.int32(eq['!ed'])\n status = ww.SetParameter('RunDocu', 'CLISTed', cl_ed)\n except:\n print('No CLISTE-edition found to store in ParameterSet RunDocu')\n\n# Write SSQ, SSQnam\n status = ww.SetObject('SSQ', ssq)\n status = ww.SetObject('SSQnam', ssqlist)\n\n ww.Close()\n\n\ndef cdf2tra(runid):\n\n homdir = os.getenv('HOME')\n os.system('mkdir -p %s/shotfiles/TRA' %homdir)\n source = '/afs/ipp/home/t/transp/pub/TRA00000.sfh.temp'\n sfhdir = '%s/tr_client/AUGD' %homdir\n nshot = int(runid[0:5])\n tail = runid[5:8]\n fcdf = '%s/%d/%s/%s.CDF' %(sfhdir, nshot, tail, runid)\n fnml = '%sTR.DAT' %fcdf[: -4]\n fsfh = '%s/TRA00000.sfh' %sfhdir\n\n for fname in fcdf, fnml:\n if not os.path.isfile(fname):\n print('%s not found' %fname)\n return\n\n sfh_dic = sfh_mod(fcdf, nml=fnml, fsfh_in=source, fsfh_out=fsfh)\n sf.write_sf(nshot, sfh_dic, sfhdir, 'TRA', exp=os.getenv('USER'))\n\n\ndef cdf2nub(runid):\n\n homdir = os.getenv('HOME')\n os.system('mkdir -p %s/shotfiles/NUB' %homdir)\n source = '/afs/ipp/home/n/nubeam/pub/NUB00000.sfh.temp'\n sfhdir = '%s/nb_client/AUGD' %homdir\n nshot = int(runid[0:5])\n tail = runid[5:8]\n fcdf = '%s/%d/%s/%s.cdf' %(sfhdir, nshot, tail, runid)\n fnml 
= '%s/%d/%s.nml' %(sfhdir, nshot, runid)\n    fsfh = '%s/NUB00000.sfh' %sfhdir\n\n    sfh_dic = sfh_mod(fcdf, nml=fnml, fsfh_in=source, fsfh_out=fsfh)\n    sf.write_sf(nshot, sfh_dic, sfhdir, 'NUB', exp=os.getenv('USER'))\n\n\nif __name__ == \"__main__\":\n\n    import sys\n    if len(sys.argv) > 1:\n        runid = sys.argv[1]\n        cdf2tra(runid)\n","sub_path":"cdf2sf.py","file_name":"cdf2sf.py","file_ext":"py","file_size_in_byte":12293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"186474321","text":"# outputs dict \"deep\", keys: (\"HepG2\", \"minP\"), (\"K562\", \"minP\"), (\"HepG2\", \"SV40P\"), (\"K562\", \"SV40P\"), values chroms\n# chroms is a dict keys: chrom (string), values positions\n# positions is a dict keys: position (int), values deeplift score\n\nimport sys\nimport json\nimport numpy as np\nfrom dragonn import models\n\nbegin = int(sys.argv[1])\nend = int(sys.argv[2])\nout = open(sys.argv[3], 'w')\n\nmodel = models.SequenceDNN_Regression.load(\"model.arch.json\", \"model.weights.h5\")\n\nf = open(\"../../id_dict_gen/id_dict.txt\", 'r')\nid_to_seq = json.loads(f.readlines()[0])\nf.close()\n\nbase_to_row = {'A': 0, 'T': 1, 'C': 2, 'G': 3}\n\nfor name in list(id_to_seq.keys())[begin:end]:\n    sequence, coords = str(id_to_seq[name][0]), id_to_seq[name][1]\n    chrom, start, end = str(coords[0]), coords[1], coords[2]\n    for i in range(31):\n        model_input = np.zeros((1, 1, 4, 145))\n        subseq = sequence[5 * i : 145 + 5 * i]\n        for j in range(145):\n            # coordinates chrom, start + 5 * i + j\n            model_input[0][0][base_to_row[subseq[j]]][j] = 1\n        D = model.deeplift(model_input)\n        entry = []\n        for task in range(4):\n            for pos in range(145):\n                scores = D[task][0][0][:, pos]\n                if max(scores) != 0:\n                    entry += [max(scores)]\n                else:\n                    entry += [min(scores)]\n        out.write(','.join([chrom, str(start + 5 * i)] + list(map(str, entry))) + '\\n')\nout.close()\n","sub_path":"notebooks/interpretation/get_deeplift.py","file_name":"get_deeplift.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"487476862","text":"import numpy as np \n\nclusters = [[(0.1,0.5),(0.35,0.75)],[(0.28,1.35)],[(0,1.01)]]\ncluster_names = ['black', 'red', 'blue']\ndist_names = ['l_1', 'l_2', 'l_inf']\nsimilarity_names = ['min', 'max', 'centroid', 'average']\n\ndef l_1(a, b):\n\treturn np.absolute(a[0] - b[0]) + np.absolute(a[1] - b[1])\n\ndef l_2(a, b):\n\treturn ((a[0] - b[0])**2 + (a[1] - b[1])**2) ** 0.5\n\ndef l_inf(a,b):\n\treturn max([np.absolute(a[0] - b[0]), np.absolute(a[1] - b[1])])\n\ndef sim_min(c1, c2, dist):\n\tminimum = 1000\n\tfor a in c1:\n\t\tfor b in c2:\n\t\t\tif dist(a,b) < minimum:\n\t\t\t\tminimum = dist(a,b)\n\treturn minimum\n\ndef sim_max(c1, c2, dist):\n\tmaximum = -1000\n\tfor a in c1:\n\t\tfor b in c2:\n\t\t\tif dist(a,b) > maximum:\n\t\t\t\tmaximum = dist(a,b)\n\treturn maximum\n\ndef sim_centroid(c1, c2, dist):\n\treturn dist(np.mean(c1,axis=0), np.mean(c2,axis=0))\n\ndef sim_avg(c1, c2, dist):\n\tsumation = 0\n\tfor a in c1:\n\t\tfor b in c2:\n\t\t\tsumation += dist(a,b)\n\treturn sumation/len(c1)/len(c2)\n\nfor x, dist in enumerate([l_1, l_2, l_inf]):\n\tfor y, sim in enumerate([sim_min, sim_max, sim_centroid, sim_avg]):\n\t\tminimum = 1000\n\t\tcluster1, cluster2 = None, None\n\t\tfor i, c1 in enumerate(clusters):\n\t\t\tfor j, c2 in enumerate(clusters):\n\t\t\t\tif c1 != c2: # not efficient, but correct\n\t\t\t\t\tdistance = sim(c1,c2,dist)\n\t\t\t\t\tif distance 
< minimum:\n\t\t\t\t\t\tminimum = distance\n\t\t\t\t\t\tcluster1, cluster2 = i, j\n\n\t\tprint(\"distance function was {}. similarity function was {}...\".format(dist_names[x], similarity_names[y]))\n\t\tprint(\"clusters merged first are {} and {} with distance {}\".format(cluster_names[cluster1], cluster_names[cluster2], minimum))\n\t\tprint(\"\\n\")\n\n","sub_path":"T4/problem1_part2.py","file_name":"problem1_part2.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"149612376","text":"import requests\n\nfw_api_name_to_id = \"https://www.fuzzwork.co.uk/api/typeid2.php?typename=\"\nfw_api_blueprints = \"https://www.fuzzwork.co.uk/blueprint/api/blueprint.php?typeid=\"\n\nactivities = {'manufacturing': '1',\n 'ME' : '4',\n 'TE' : '5',\n 'Invention' : '8'}\n\n\ndef get_single_id(name: str) -> str:\n \"\"\"\n Gets an ID of a single entry from the fuzzwork.co.uk website, amazing!\n :param name: Can be 'Sabre', 'Sabre Blueprint', 'Caldari Shuttle'\n :return:\n \"\"\"\n if len(name.split(\",\")) >1:\n name = name[:-1*(len(name.split(\",\")[-1])+1)]\n return requests.get(fw_api_name_to_id + name).json()[0]['typeID']\n\ndef get_blueprint_details(id: str) -> dict:\n \"\"\"\n Gets the blueprint details\n :param id:\n :return:\n \"\"\"\n\n return requests.get(fw_api_blueprints + id).json()\n\n\ndef get_manufacturing_materials(bp_data_dump : dict) -> dict:\n \"\"\"\n Gets the list of stuff needed for a blueprint to make\n :param bp_data_dump:\n :return:\n \"\"\"\n if 'activityMaterials' not in bp_data_dump.keys():\n return {}\n\n return bp_data_dump['activityMaterials'][activities['manufacturing']]\n","sub_path":"fuzzworks.py","file_name":"fuzzworks.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"169814166","text":"import torch\nimport torch.nn as nn\nimport copy\n\nfrom .modules.transformer import Transformer\nfrom .modules.embedding import BERTEmbedding\nfrom .modules import utils\n\n\nclass BertConfig(object):\n \"\"\"Configuration for `BertModel`.\"\"\"\n\n def __init__(self, vocab_size,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n initializer_range=0.02):\n \"\"\"Constructs BertConfig.\n\n Args:\n vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler.\n hidden_dropout_prob: The dropout probability for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. 
Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The stdev of the truncated_normal_initializer for\n initializing all weight matrices.\n \"\"\"\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range\n\n\nclass BERT(nn.Module):\n \"\"\"\n BERT model : Bidirectional Encoder Representations from Transformers.\n \"\"\"\n def __init__(self, config):\n super(BERT, self).__init__()\n config = copy.deepcopy(config)\n\n self.embeddings = BERTEmbedding(\n vocab_size=config.vocab_size,\n embedding_size=config.hidden_size,\n use_token_type=True,\n token_type_vocab_size=config.type_vocab_size,\n use_position_embeddings=True,\n initializer_range=config.initializer_range,\n max_position_embeddings=config.max_position_embeddings,\n dropout_prob=config.hidden_dropout_prob)\n\n self.encoder = Transformer(\n hidden_size=config.hidden_size,\n num_hidden_layers=config.num_hidden_layers,\n num_attention_heads=config.num_attention_heads,\n intermediate_size=config.intermediate_size,\n intermediate_act_fn=utils.get_activation(config.hidden_act),\n hidden_dropout_prob=config.hidden_dropout_prob,\n attention_probs_dropout_prob=config.attention_probs_dropout_prob,\n initializer_range=config.initializer_range\n )\n\n def forward(self, input_ids, input_mask=None, token_type_ids=None):\n assert len(input_ids.shape) == 2\n batch_size, seq_length = input_ids.shape\n\n if input_mask is None:\n input_mask = torch.ones([batch_size, seq_length], dtype=torch.long)\n\n attention_mask = utils.create_attention_mask_from_input_mask(\n input_ids, input_mask)\n\n embedding_output = self.embeddings(input_ids)\n all_encoder_layers = self.encoder(embedding_output, attention_mask,\n do_return_all_layers=True)\n\n return all_encoder_layers\n","sub_path":"bert_pytorch/model/bert.py","file_name":"bert.py","file_ext":"py","file_size_in_byte":4379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"213356747","text":"import pandas as pd\nimport numpy as np\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import interp1d\nimport time\nfrom matplotlib.gridspec import GridSpec\nfrom datetime import timedelta\n\ndef initial(name_in):\n directory=\"data/\"\n data=pd.read_csv(str(directory+name_in+\"mod.csv\"), sep=',', header=0, engine='python')\n \n time_dd=np.array([datetime.strptime(x, '%Y-%m-%dT%H:%M:%S.000Z') for x in data[\"LASTRECORDED\"]])\n \n delt=np.array([float(x) for x in data['FRACTIONAL_PRICE_CHANGE']])\n \n #these are too big\n dd1=delt[delt<3]\n tt1=time_dd[delt<3]\n\n #these are zero\n dd=dd1[dd1!=0]\n tt=tt1[dd1!=0]\n \n #order by time\n x=np.array([time.mktime(x.timetuple()) for x in tt])\n aa=np.argsort(x)\n xt=x[aa]\n dt=dd[aa]\n\n #remove the first two points\n xt=xt[2:]\n dt=dt[2:]\n\n #convert back to datetime\n xx = np.array([datetime.fromtimestamp(x) for x in xt])\n \n #plotting setup\n plt.figure(0, figsize=(12, 8))\n gs1 = GridSpec(2, 3)\n 
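A minimal smoke test for the BERT module defined in this record, assuming the embedding and transformer helper modules import cleanly; the config sizes below are toy values chosen for speed, not the BERT defaults:
import torch

config = BertConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2,
                    num_attention_heads=4, intermediate_size=64)
model = BERT(config)
input_ids = torch.randint(0, 100, (2, 16))   # batch of 2, sequence length 16
layers = model(input_ids)                    # one tensor per encoder layer
print(layers[-1].shape)                      # expected: torch.Size([2, 16, 32])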
gs1.update(left=0.15, right=0.95, wspace=0.3, hspace=0.15)\n ax1 = plt.subplot(gs1[:,:])\n \n\n #these are negative change\n s1=plt.scatter(xx[dt<0], dt[dt<0], c='r', s = 20, lw = 0, alpha=0.7)\n\n #these are postive change\n s2=plt.scatter(xx[dt>0], dt[dt>0], c='b', s = 20, lw = 0, alpha=0.7)\n\n #plot the running average\n N=100\n g=np.convolve(dt, np.ones((N,))/N, mode='valid')\n lin,=plt.plot(xx[N/2:-(N/2-1)], g, 'g', linewidth=2, alpha=.7)\n \n plt.plot(xx, np.zeros(len(xx)), '--', color='gray')\n \n quarters=['1990-03-31', '1990-06-30', '1990-09-30', '1990-12-31']\n quarters_dt=np.array([datetime.strptime(x, '%Y-%m-%d') for x in quarters])\n for y in range(30):\n quarters_dt=quarters_dt + timedelta(days=366)\n for i in quarters_dt:\n plt.plot([i,i], [-1,1], color='gray')\n\n plt.ylabel('Fractional change')\n #plt.xlabel('Date')\n plt.ylim([-1,1])\n plt.xlim(xx[0]-timedelta(weeks=12), xx[-1]+timedelta(weeks=12))\n plt.title(str(name_in))\n plt.legend([s2,s1,lin], ['Increase in vendor price', 'Decrease in vendor price', 'running average (N='+str(N)+')'], loc=2) \n\n \n plt.show()\n plt.savefig(str(name_in+'.pdf'), format='pdf')\n\n\n\n","sub_path":"pipeline/semantics_utils/plot_data.py","file_name":"plot_data.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"101719105","text":"# -*- coding: utf-8 -*-\n\"\"\"\narchve.pyで取り込んだtweetsを分析する。\n・分析内容:\n 日にちごとの頻出単語トップXX\n XXはconfig_feature_words.pyで設定可能。\n \n・出力ファイル:\n - 特徴語データ: feature_words_YYYYMMDD-YYYYMMDD.json\n [{\"date\": 日付, \"tweet_count\":ツイート数, \"retweet_count\":リツイート数,\n \"posi_count\":ポジティブツイート数, \"nega_count\":ネガティブツイート数,\n \"feature_words\":[特徴語リスト]},...] ※dateでソート\n - 日別ツイートデータ: tweets_YYYYMMDD.json\n [{'created_datetime': 日時,'retweet_count':収集時点のリツイート数, \n 'id': ツイートのID(文字列形式:id_str), user.screen_name': ツイッターアカウント名, 'text':ツイート本文, 'media_urls'(option):画像URL, \n 'PrintID'(option):プリント予約番号, 'retweet'(option):1 ※リツイートの場合に固定で入る項目}, ...] 
※created_datetimeでソート\n - 特徴語データファイルリスト: filelist-feature_words.json: \n 特徴語データ出力先フォルダ内の特徴語データファイルのリスト(降順)。\"feature_words_\"で始まるファイルが対象。\n - ツイートファイルリスト: filelist-tweets.json\n 日別ツイートデータ出力先フォルダ内の日別ツイートデータファイルのリスト(降順)。\"tweets_\"ではじまるファイルが対象。\n - 日別ワードクラウド画像: wordcloud_YYYYMMDD.png\n\n \n@author: hitoshi\n\"\"\"\nfrom collections import defaultdict\nimport pymongo\nimport shared.datetime_extentions as dutil\nimport conf.config_feature_words as config\nimport datetime\nfrom pytz import timezone\nimport json\nimport os.path, sys\nfrom os.path import join, relpath\nfrom glob import glob\nfrom wordcloud import WordCloud\nimport shared.text_utility as util\n\n#形態素解析のライブラリ\nimport MeCab\n#TF-IDFフィルタのクラス\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nclient = pymongo.MongoClient(config.HOST, config.PORT)\ntweet_collection = client[config.DB_NAME][config.COLLECTION_NAME]\n\n#分析するデータの日数\nANALYZE_DAYS =config.ANALYZE_DAYS\n#抽出単語数\nEXTRACT_FEATURE_WORDS_MAX = config.EXTRACT_FEATURE_WORDS_MAX\n#TF-IDFパラメータ: 除外する合計出現回数\nTFIDF_EXCLUDE_APPEARANCE = config.TFIDF_EXCLUDE_APPEARANCE\n\ndef create_tweets_analyze_result_file(output_folder_path, start_date, end_date):\n \"\"\"\n ツイート分析(特徴語抽出)を実行し、結果をファイル(feature_words_YYYYMMDD-YYYYMMDD.json)に保存する。\n :param output_folder_path: 分析結果ファイルの保存先。\n :param start_datetime: 検索対象の開始時刻\n :param end_datetime: 検索対象の終了時刻\n \"\"\"\n str_end_date = format(end_date.strftime('%Y%m%d'))\n str_start_date = format(start_date.strftime('%Y%m%d'))\n file_path = output_folder_path + 'feature_words_' + str_start_date + '-' + str_end_date + '.json' \n\n #分析(特徴語抽出)を実行し、ファイルに保存する \n file = open(file_path,'w')\n condition = {'created_datetime': {'$gte': start_date, '$lte': end_date}}\n json.dump(_get_feature_words_from_tweets_text(condition,'%Y/%m/%d'),file)\n file.close()\n \ndef _get_feature_words_from_tweets_text(condition, date_format, extract_feature_words_max=EXTRACT_FEATURE_WORDS_MAX):\n \"\"\"\n 日付フォーマットに合致するつぶやきの頻出名詞をJSON形式で返す\n :param condition: 検索の絞り込み条件(Dictionary)\n :param date_format: 日付フォーマット、指定されたフォーマットごとにつぶやき数を計算する\n :return: JSON [{},...]\n \"\"\"\n \n tweets_count_dict = defaultdict(int) #集計時間単位(以下、わかりやすくするために「日別」とする)のtweet件数\n retweets_count_dict = defaultdict(int)\n nega_count_dict = defaultdict(int)\n posi_count_dict = defaultdict(int)\n nouns_dict = defaultdict(str) #「日別」のtweet textの名詞を連結した文字列\n words_dict = defaultdict(str)\n \n target_time_units =[] #date_formatで指定した年月日時文字列。例)date_format='%Y%m%d'の場合は'2016/06/01'のような日にちの配列になる。\n target_time_unit_nouns =[] #date_formatで指定した年月日時ごとのtweetに含まれる名詞を連結した文字列\n \n #tweetsの読み込み(mongoDBからのfind時のsortはメモリ不足でエラーになるため、ファイル出力前にこのプログラムでソートする)\n for tweet in tweet_collection.find(condition, {'_id': 1, 'created_datetime': 1,'retweeted_status': 1, 'text':1, 'negaposi':1}):\n str_date = dutil.date_to_japan_time(tweet['created_datetime']).strftime(date_format)\n \n #初めて処理する日付の場合はtarget_time_unitsに格納する\n if (str_date in target_time_units) == False :\n target_time_units.append(str_date)\n \n #その日の件数をカウントアップする\n tweets_count_dict[str_date] += 1 \n \n #そのツイートがretweetの場合はカウントアップする\n if 'retweeted_status' in tweet:\n retweets_count_dict[str_date] += 1\n \n #そのツイートがネガまはたポジ場合はカウントアップする\n if 'negaposi' in tweet:\n negaposi = tweet[\"negaposi\"]\n if negaposi == 1 : \n posi_count_dict[str_date] += 1\n elif negaposi == -1 :\n nega_count_dict[str_date] += 1\n \n #形態素解析で名詞を抽出して文字列として連結する\n nouns_dict[str_date] += \" \" + _split_text_only_noun(util.get_text_eliminated_some_pattern_words(tweet['text']))\n\n #日付リストをソート\n 
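The per-day noun strings built above are what feed scikit-learn's TfidfVectorizer further down. A standalone toy illustration of that step (the documents and the max_df value here are made up, one string per "day"):
from sklearn.feature_extraction.text import TfidfVectorizer

docs = ["print order photo", "print error paper", "photo album order"]
vec = TfidfVectorizer(use_idf=True, lowercase=False, max_df=2)
matrix = vec.fit_transform(docs).toarray()
terms = vec.get_feature_names()              # vocabulary in column order
row = matrix[0]                              # TF-IDF scores for the first "day"
top2 = [terms[i] for i in row.argsort()[-2:][::-1]]
print(top2)                                  # the 2 most characteristic words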
target_time_units.sort()\n \n #TF-IDF用にnouns_dictからtarget_time_unit_nounsへ格納する。\n for i in range(0, len(target_time_units)) :\n target_time_unit_nouns.append(nouns_dict[target_time_units[i]])\n \n # TF-IDF 計算\n # TFIDF_EXCLUDE_APPEARANCE日以上出現した単語は除外\n tfidf_vectorizer = TfidfVectorizer(\n use_idf=True,\n lowercase=False,\n max_df=TFIDF_EXCLUDE_APPEARANCE\n )\n tfidf_matrix = tfidf_vectorizer.fit_transform(target_time_unit_nouns)\n # index 順の単語のリスト\n terms = tfidf_vectorizer.get_feature_names()\n # TF-IDF 行列 (numpy の ndarray 形式)\n tfidfs = tfidf_matrix.toarray()\n \n # 結果の出力\n for i in range(0, len(target_time_units)) :\n words_dict[target_time_units[i]] = []\n for x in _extract_feature_words(terms, tfidfs, i, extract_feature_words_max):\n words_dict[target_time_units[i]].append(x)\n\n results_list =[]\n for i in range(0, len(target_time_units)) :\n result = {\"date\": target_time_units[i], \n \"tweets_count\": tweets_count_dict[target_time_units[i]], \"retweets_count\": retweets_count_dict[target_time_units[i]],\n \"posi_count\": posi_count_dict[target_time_units[i]], \"nega_count\": nega_count_dict[target_time_units[i]],\n \"feature_words\": words_dict[target_time_units[i]]}\n results_list.append(result)\n \n print(results_list)\n #dateで降順ソートする\n return sorted(results_list,key=lambda x:x[\"date\"],reverse=True)\n \n### MeCab による単語への分割関数 (名詞のみ残す)\ndef _split_text_only_noun(text):\n tagger = MeCab.Tagger()\n tagger.parse('')\n node = tagger.parseToNode(text)\n\n words = []\n while node:\n pos = node.feature.split(\",\")[0]\n if pos == \"名詞\":\n word = node.surface\n words.append(word)\n node = node.next\n return \" \".join(words)\n \n### TF-IDF の結果からi 番目のドキュメントの特徴的な上位 n 語を取り出す\ndef _extract_feature_words(terms, tfidfs, i, n):\n tfidf_array = tfidfs[i]\n top_n_idx = tfidf_array.argsort()[-n:][::-1]\n words = [terms[idx] for idx in top_n_idx]\n return words\n\ndef create_tweets_files(output_folder_path, start_date, end_date, days = ANALYZE_DAYS):\n \"\"\"\n 日別ツイートファイル(tweets_YYYYMMDD.json)を保存する。すでに存在していた場合、上書きする。\n :param output_folder_path: 日別ツイートファイルの保存先。\n :param start_datetime: 検索対象の開始時刻\n :param end_datetime: 検索対象の終了時刻\n \"\"\"\n for i in range(0, days) :\n\n date = start_date + datetime.timedelta(days=i)\n condition = {'created_datetime': {'$gte': date, '$lt': date + datetime.timedelta(days=1)}}\n\n str_date = format(date.strftime('%Y%m%d'))\n file_path = output_folder_path + 'tweets_' + str_date + '.json' \n \n file = open(file_path,'w')\n json.dump(_get_tweets_data(condition),file, indent=0)\n file.close()\n print(file_path)\n\ndef _get_tweets_data(condition):\n \"\"\"\n つぶやきの内容をMongoDBから取得するして必要な項目を抽出する\n :param condition: 検索の絞り込み条件(Dictionary)\n :return: JSON [{},...]\n \"\"\"\n\n #ツイートを取得しcreated_datetimeでソート\n tweets_tmp = []\n for tweet in tweet_collection.find(condition,{'created_datetime':1 ,'created_at': 1, \n 'retweet_count': 1, 'id_str': 1, 'user': 1, 'text': 1, 'entities':1,\n 'retweeted_status': 1, 'negaposi':1, 'hash_match':1, 'labels':1}):\n tweets_tmp.append(tweet)\n \n tweets = sorted(tweets_tmp,key=lambda x:x[\"created_datetime\"])\n\n date_format = '%Y/%m/%d %H:%M:%S' \n results_list = []\n for i in range(0,len(tweets)):\n tweet = tweets[i]\n #retweetの場合はフラグを立てる\n retweet_flag = False\n if 'retweeted_status' in tweet:\n retweet_flag = True\n\n #ツイート本文を比較する\n exist_flag = False\n for t in results_list :\n if(t['text']==tweet['text']):\n exist_flag = True\n break\n #すでにresult_listに含まれているツイートは処理しない\n if exist_flag == False:\n result = {'created_datetime': 
dutil.str_to_date_jp(tweet[\"created_at\"]).strftime(date_format),\n 'retweet_count': tweet['retweet_count'], 'id': tweet['id_str'],\n 'user.screen_name': tweet['user']['screen_name'], 'text': tweet['text']}\n \n #media_urlを持つtweetにはそのURLを保存する\n media_elements = tweet.get('entities').get('media')\n if media_elements != None:\n media_urls = []\n for media in media_elements:\n media_url = media.get('media_url')\n if media_url != None: media_urls.append(media_url)\n result['media_urls'] = \",\".join(media_urls)\n #result['media_urls'] = media_urls ##media_urlsが複数入っているツイートは見たことないが、複数入る前提でリストにしておくのが良さそう。\n \n label_elements = tweet.get('labels')\n if label_elements != None:\n result['labels'] = label_elements\n\n #プリント予約番号が抽出できたら保存する。\n printids = util.get_nps_printid(result['text'])\n if len(printids) > 0: result['PrintID'] = \",\".join(printids)\n \n #リツイートの場合は印をつける\n if retweet_flag == True: result['retweet'] = 1\n \n #ネガポジの要素を含む場合はその値を保存する\n negaposi = tweet.get('negaposi')\n if negaposi != None: result['negaposi'] = negaposi\n \n #一致した画像のラベルを保存する\n hash_match = tweet.get('hash_match')\n if hash_match != None: result['hash_match'] = hash_match\n \n results_list.append(result)\n\n #ツイートの作成日(created_datetime)で昇順ソートする\n return sorted(results_list,key=lambda x:x[\"created_datetime\"])\n\n\ndef create_word_cloud(output_folder_path, start_datetime, end_datetime):\n \"\"\"\n MongoDBに格納されているつぶやきから日別の特徴語を抽出し、ワードクラウドを生成する。\n :param output_folder_path: ワードクラウド画像ファイルの保存先。\n :param start_datetime: 検索対象の開始時刻\n :param end_datetime: 検索対象の終了時刻\n \"\"\"\n condition = {'created_datetime': {'$gte': start_datetime, '$lte': end_datetime}}\n feature_word_list = _get_feature_words_from_tweets_text(condition, '%Y/%m/%d', extract_feature_words_max=120)\n [_feature_word_to_word_cloud(output_folder_path, feature_word) for feature_word in feature_word_list]\n\ndef _feature_word_to_word_cloud(output_folder_path, feature_word):\n \"\"\"\n 特徴語からワードクラウドに変換する。\n outディレクトリ以下に日別の画像ファイルを出力する。\n :param output_folder_path: 出力先フォルダのパス\n :param feature_word: 特徴語\n \"\"\"\n file_name = 'wordcloud_' + feature_word['date'].replace('/', '') + '.png'\n file_path = os.path.abspath(os.path.join(output_folder_path, file_name))\n # 特徴語の出現頻度は、リストの順番をもとに機械的に設定する。\n size = len(feature_word['feature_words'])\n array_of_tuples = [(word, size - idx) for idx, word in enumerate(feature_word['feature_words'])]\n _save_word_cloud_img(array_of_tuples, file_path)\n\n\ndef _save_word_cloud_img(frequencies, file_path):\n \"\"\"\n ワードクラウドの画像ファイルを指定されたファイルパスに保存する。\n 参考:http://amueller.github.io/word_cloud/index.html\n :param frequencies: タブル(単語, 出現頻度)のリスト\n :param file_path: 画像ファイルのパス\n \"\"\"\n # 日本語フォントのパスが正しく設定されている必要がある。\n font_path = config.JAPANESE_FONT_PATH\n wc = WordCloud(background_color='white', max_font_size=320, font_path=font_path, width=900, height=500)\n wc.generate_from_frequencies(frequencies)\n wc.to_file(file_path)\n\ndef create_filelist(folder_path, target_filename_regexp, output_filename, count=7):\n \"\"\"\n 指定したフォルダパス内の正規表現にマッチするファイル名のリストファイルを作成する。\n :param folder_path: ファイル名を取得するフォルダのパス。また、リストファイルの保存先。\n :param target_filename_regexp: 取得するファイル名の正規表現。\n :param output_filename: リストファイルのファイル名\n \"\"\"\n files_list = [relpath(x, folder_path) for x in glob(join(folder_path, target_filename_regexp))]\n files_list.sort(reverse=True)\n files_list_out = files_list[:count]\n output_file_path = folder_path + output_filename \n\n output_file = open(output_file_path,'w')\n json.dump(files_list_out,output_file)\n output_file.close()\n\n## 
main\nif __name__ == '__main__':\n \n #出力先パスを指定 \n output_folder_path = config.OUTPUT_FOLDER_PATH\n print(\"[info] 出力先パス: \" + output_folder_path)\n\n d = datetime.datetime.now()\n \n if len(sys.argv) == 2 and sys.argv[1] == 'tweets':\n date = datetime.datetime(d.year,d.month,d.day,0,0,0,0,timezone('Asia/Tokyo'))\n start_date = date - datetime.timedelta(days=1)\n date = datetime.datetime(d.year,d.month,d.day,23,59,59,999999,timezone('Asia/Tokyo'))\n end_date = date - datetime.timedelta(days=1)\n\n #分析したツイートを日別ツイートファイルとして保存する(上書き)\n print(\"[info] 日別ツイートファイル作成開始\")\n create_tweets_files(output_folder_path, start_date, end_date, days = 1)\n print(\"[info] 処理完了\")\n\n else:\n date = datetime.datetime(d.year,d.month,d.day,0,0,0,0,timezone('Asia/Tokyo'))\n start_date = date - datetime.timedelta(days=ANALYZE_DAYS)\n date = datetime.datetime(d.year,d.month,d.day,23,59,59,999999,timezone('Asia/Tokyo'))\n end_date = date - datetime.timedelta(days=1)\n\n #ツイート分析結果をファイルに保存する\n print(\"[info]ツイート分析開始\")\n create_tweets_analyze_result_file(output_folder_path, start_date, end_date)\n #分析したツイートを日別ツイートファイルとして保存する(上書き)\n print(\"[info] 日別ツイートファイル作成開始\")\n create_tweets_files(output_folder_path, start_date, end_date)\n #分析したツイートの日別ワードクラウドを画像ファイルとして保存する(上書き)\n print(\"[info] 日別ワードクラウド作成開始\")\n create_word_cloud(output_folder_path,start_date, end_date)\n \n #出力先フォルダ内の特徴語ファイルと日別ツイートファイルのリストを作成する(上書き)\n print(\"[info] ファイルリストの作成開始\")\n create_filelist(output_folder_path, 'feature_words_*', 'filelist-feature_words.json')\n create_filelist(output_folder_path, 'tweets_*', 'filelist-tweets.json')\n \n print(\"[info] 処理完了\")\n","sub_path":"analyzer/feature_words_extractor.py","file_name":"feature_words_extractor.py","file_ext":"py","file_size_in_byte":17697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"444395668","text":"import pandas as pd\nimport plotly.express as px\n\n\ndef where_is_the_iss():\n url = 'http://api.open-notify.org/iss-now.json'\n df = pd.read_json(url)\n print(df)\n df['latitude'] = df.loc['latitude', 'iss_position']\n df['longitude'] = df.loc['longitude', 'iss_position']\n df.reset_index(inplace=True)\n print(df)\n df.drop(['index', 'message'], axis=1)\n fig = px.scatter_geo(df, lat='latitude', lon='longitude')\n fig.show()\n\n\nif __name__ == '__main__':\n where_is_the_iss()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"518972538","text":"from magma import wire, compile, EndCircuit\nfrom loam.boards.icestick import IceStick, Counter, TFF\n\nicestick = IceStick()\nicestick.Clock.on()\nicestick.D5.on()\n\nmain = icestick.main()\n\nclock = Counter(24)\n\ntff = TFF()\n\ntff(clock.COUT)\n\nwire(tff, main.D5)\n\nEndCircuit()\n","sub_path":"tests/test_mantle/ff/mothball/tff.py","file_name":"tff.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"295245504","text":"import numpy as np\nimport tensorflow as tf\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\n\ntf.set_random_seed(777)\n\n# 가장 작은 값을 0으로하고 0을 기준으로 정규화 (숫자가 크기 때문에)\ndef min_max_scaling(x):\n x_np = np.asarray(x)\n return (x_np - x_np.min())/(x_np.max() - x_np.min() + 1e-7)\n\ndef reverse_min_max_scaling(org_x, x):\n org_x_np = np.asarray(org_x)\n x_np = np.asarray(x)\n return (x_np * (org_x_np.max() - 
org_x_np.min() + 1e-7)) + org_x_np.min()\n\nseq_length = 21\ninput_dim = 5\noutput_dim =1\nhidden_dim = 10 # 각 셀의 (hidden)출력 크기\n\n# 데이터 불러오기\ndataframe = pd.read_csv('stock_data.csv', encoding='euc-kr')\nname = dataframe['종목명'][1]\ndel dataframe['날짜']\ndel dataframe['종목명']\n\n# 데이터 정규화\ndf = min_max_scaling(dataframe)\n\n# x : 입력 데이터 , y : 출력 데이터(종가)\nx = df\ny = x[:, [0]]\n\ndataX = []\ndataY = []\n\n#_x : i부터 seq_length 기간 동안의 입력 데이터, _y : seq_length 다음날의 출력 데이터(종가)\nfor i in range(0, len(y) - seq_length):\n _x = x[i: i + seq_length]\n _y = y[i + seq_length]\n dataX.append(_x)\n dataY.append(_y)\n\n# 학습 데이터 : 70% , 테스트 데이터 : 30%\ntrain_size = int(len(dataY) * 0.7)\ntest_size = len(dataY) - train_size\n\ntrainX, testX = np.array(dataX[0:train_size]), np.array(dataX[train_size: len(dataX)])\ntrainY, testY = np.array(dataY[0:train_size]), np.array(dataY[train_size: len(dataY)])\n\n# placeholder, 행렬의 차원[seq_length:input]\nX = tf.placeholder(tf.float32, [None, seq_length, input_dim])\nY = tf.placeholder(tf.float32, [None, 1])\n\ncell = tf.contrib.rnn.BasicLSTMCell(num_units=hidden_dim, state_is_tuple=True)\noutputs, _states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)\n# ***fully_connected(fc)\nY_pred = tf.contrib.layers.fully_connected(outputs[:, -1], output_dim, activation_fn = None) # Y의 예측 값\n\nloss = tf.reduce_sum(tf.square(Y_pred - Y))\noptimizer = tf.train.AdamOptimizer(0.01)\ntrain = optimizer.minimize(loss)\n\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\nfor i in range(2001):\n _, l = sess.run([train, loss], feed_dict={X:trainX, Y:trainY})\n if i % 100 == 0:\n print('epoch : {0}, loss : {1}'.format(i, l))\n\ntestPredict = sess.run(Y_pred, feed_dict={X:testX})\n\npath = 'c:Windows\\Fonts\\malgunbd.ttf'\nfontprop = fm.FontProperties(fname=path, size=18)\n\nplt.plot(testY,'skyblue')\nplt.plot(testPredict,'orange')\nplt.title('종목명 : ' + name, fontproperties=fontprop)\nplt.xlabel(\"Time Period\")\nplt.ylabel(\"Stock Price\") # 정규화 되어있는 주가\nplt.show()\n\nrecent_data = np.array([x[len(x) - seq_length:]])\ntestPredict = sess.run(Y_pred, feed_dict={X:recent_data})\ntestPredict = reverse_min_max_scaling(dataframe, testPredict) # 금액데이터 역정규화\nprint(\"Tomorrow's stock price\", testPredict[0]) # 예측한 주가를 출력","sub_path":"stock.py","file_name":"stock.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"633469052","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom qutip import *\nfrom Constants import *\nfrom math import *\nimport time\nimport datetime\nimport Constants as cons\nimport os\n\ndef H1_cos(t,*args):\n\treturn cos(omega * t)\n\nomega = 1.57\n\nR = 1/sqrt(2) * (sigmax() + sigmay())\n#R=1\n\na = R * sigmap() * R\n\nq1 = tensor(a, qeye(2))\nq2 = tensor(qeye(2),a)\n\nq1d = q1.dag()\nq2d = q2.dag()\n\nzero = R * basis(2,0)\none = R * basis(2,1)\n\nH0 = 0*q1*q1d\n\nH1_1 = 5 * ((q1 + q1d)) + 1* ((q2 + q2d)) + 5*(((q1 + q1d)) * ((q2 + q2d)))\n\nH = [H0,[H1_1,H1_cos]]\n\npsi0 = tensor(zero,zero)\n\ntlist = np.linspace(0,10,1000)\n\nsx1 = tensor(R * sigmax() * R,qeye(2))\nsx2 = tensor(qeye(2), R * sigmax() * R)\n\nsy1 = tensor(R * sigmay() * R,qeye(2))\nsy2 = tensor(qeye(2), R * sigmay() * R)\n\nsz1 = tensor(R * sigmaz() * R,qeye(2))\nsz2 = tensor(qeye(2), R * sigmaz() * R)\n\nc_ops = [0.01*q1,0.01*q2,0.02*sz1,0.02*sz2]\n\nresult = mesolve(H,psi0,tlist,c_ops,[sx1,sy1,sz1,sx2,sy2,sz2])\n\nfig, ax = plt.subplots(figsize=(12,6))\nax.plot(tlist, 
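A toy illustration of the windowing loop used in the stock script above, with seq_length=3 over a single scaled column (values are illustrative):
import numpy as np

y = np.arange(6, dtype=np.float32).reshape(-1, 1)   # stand-in for the scaled close column
seq_length = 3
dataX = [y[i:i + seq_length] for i in range(len(y) - seq_length)]
dataY = [y[i + seq_length] for i in range(len(y) - seq_length)]
# dataX[0] is [[0.], [1.], [2.]] and dataY[0] is [3.]:
# the network sees seq_length days of features and predicts the next close.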
np.real(result.expect[0]), 'r')\nax.plot(tlist, np.real(result.expect[1]), 'b')\nax.plot(tlist, -1 * np.real(result.expect[2]), 'g')\nax.legend((\"sx\", \"sy\", \"sz\"))\nax.set_xlabel('Time')\nax.set_ylabel('expectation value')\nfig.savefig('../Output/testing/gates1.png')\n#plt.show()#\n\nsphere=Bloch()\nsphere.add_points([result.expect[0],result.expect[1],-1 *result.expect[2]], meth='l')\nsphere.vector_color = ['r']\n#sphere.add_vectors([np.sin(theta), 0, np.cos(theta)])\nsphere.show()\ns = sphere.fig\ns.savefig('../Output/testing/bloch1.png')\n\nfig1, ax1 = plt.subplots(figsize=(12,6))\nax1.plot(tlist, np.real(result.expect[3]), 'r')\nax1.plot(tlist, np.real(result.expect[4]), 'b')\nax1.plot(tlist, -1 * np.real(result.expect[5]), 'g')\nax1.legend((\"sx\", \"sy\", \"sz\"))\nax1.set_xlabel('Time')\nax1.set_ylabel('expectation value')\nfig1.savefig('../Output/testing/gates2.png')\n#plt.show()\n\nsphere1=Bloch()\nsphere1.add_points([result.expect[3],result.expect[4],-1 *result.expect[5]], meth='l')\nsphere1.vector_color = ['r']\n#sphere.add_vectors([np.sin(theta), 0, np.cos(theta)])\nsphere1.show()\ns1 = sphere1.fig\ns1.savefig('../Output/testing/bloch2.png')\n#sphere1.show()","sub_path":"Legacy/src_Long/two_body_test.py","file_name":"two_body_test.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"18446674","text":"# GradientDescent.py\n\"\"\"\nAuthor: Arturo Toro\nStudent ID: 12-10796\nMajor: Computer Engineering - USB\n\"\"\"\nimport numpy as np\n\n# W(n+1) = W(n) - lr*dE(W)/dW\n# Gradient descent: Cauchy\n\ndef LMS(X, d, f, alfa):\n\t\"\"\" In online updating, one sample is presented at a time\n\tand the weights are updated.\n\t# Considerations: - Fixed learning rate.\n\t\t\t\t\t - The difference with Cauchy is that alfa is fixed.\n\t# Input: Data matrix (X),\n\t \t\t Vector of expected outputs (d),\n\t \t\t Activation function (f),\n\t \t\t Learning rate (alfa)\n\t\"\"\"\n\n\t# Vector initialization.\n\tYs = np.zeros(np.size(X,1))\t\t# Output vector.\n\terror = np.ones(np.size(X,1))\t# Error vector.\n\tWs = np.ones(np.size(X,1))\t\t# Weight vector.\n\n\tfor i in range(np.size(X,2)):\n\t\tYs[i] = f(X[i]*Ws) \t\t\t\t# Compute the response.\n\t\terror[i] = d[i] - Ys[i]\t\t\t# Response error.\n\n\t\t# For the update, move the weights toward the sample.\n\t\tWs = Ws + alfa*error[i]*X[i]\t# Online update.\n\n\t# Section for plotting the error and such.\t\n\t# Evaluate the stopping condition.\n","sub_path":"GradientDescent.py","file_name":"GradientDescent.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"378956051","text":"#!/usr/bin/env python3 \n#-*- coding: utf-8 -*- \n\n\"\"\"\nDefine a class that manages two lists of 5 student names and their grades. Display a menu of options that allows:\n1- Load students.\n2- List students.\n3- Show students with grades greater than or equal to 7.\n4- End the program.\n\"\"\"\n\nclass Alumnos:\n\n    def __init__(self):\n        self.nombres = []\n        self.notas = []\n        self.menu()\n\n\n    def menu(self):\n        opcion = 0\n        while opcion != 4:\n            print(\"1 - Load students\")\n            print(\"2 - List students\")\n            print(\"3 - List students with grades greater than or equal to 7\")\n            print(\"4 - End program\")\n            opcion = int(input(\"Enter your option\"))\n            if opcion==1:\n                self.cargar()\n            elif opcion==2:\n                self.listar()\n            elif opcion==3:\n                self.notas_altas()\n\n    \n    def cargar(self):\n        for x in range(5):\n            nom = input(\"Enter the name: \")\n            self.nombres.append(nom)\n            nota = int(input(\"Enter the grade obtained by {}\".format(nom)))\n            self.notas.append(nota)\n\n    \n    def listar(self):\n        print(\"Complete list of students\")\n        for x in range(5):\n            print(self.nombres[x], self.notas[x])\n        print(\"--------------------------\")\n\n\n    def notas_altas(self):\n        print(\"Students with grades greater than or equal to 7\")\n        for x in range(5):\n            if self.notas[x] >= 7:\n                print(self.nombres[x], self.notas[x], sep = \":\")\n        print(\"_____________________________\")\n\n\n\n#MAIN BLOCK\nalumnos = Alumnos()\n#alumnos.menu()\n","sub_path":"Ejercicios/ejercicio195.py","file_name":"ejercicio195.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"624094201","text":"from functools import reduce\n\ntemperatury = [10, 13, 14, 15, 14, 10]\n\nacc = 1\nfor x in temperatury:\n    acc = acc * x\nprint(acc)\n\n\ndef multiply(elem1, elem2):\n    return elem1 * elem2\n\n\nacc_2 = reduce(lambda elem1, elem2: elem1 * elem2, temperatury[2:])\nprint(acc_2)\n\n\n# [10, 13, 14, 15, 14, 10] -> Stage 1\n# [130, 14, 15, 14, 10] -> Stage 2\n# [1820, 15, 14, 10] -> Stage 3\n# [27300, 14, 10] -> Stage 4\n# [382200, 10] -> Stage 5\n# [3822000] -> Stage 6\n# 3822000 -> Stage 7\n\ndef a(arg1, arg2):\n    # 'r', 'w', 'a'\n    with open(arg1, 'a') as f:\n        f.write(arg2)\n    return (arg1, arg2)\n\na(\"plik.txt\", \"OUR FILE\")\na(\"plik.txt\", \"OUR FILE\")\na(\"plik.txt\", \"OUR FILE\")\na(\"plik.txt\", \"OUR FILE\")\n","sub_path":"oop/day2/reduce_explain.py","file_name":"reduce_explain.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"634490593","text":"import sys\nimport time\nimport pandas as pd\nfrom time import strftime\nfrom functools import reduce\nimport itertools \nimport numpy as np\nimport os\n\ndef start_evaluation(file_path):\n\tresults_directory = os.path.dirname(file_path)\n\tfile_name = os.path.basename(file_path).split(\".\")[0]\n\n\tRESULT_PATH = \"results/\"+results_directory\n\n\tif not os.path.exists(RESULT_PATH):\n\t\tos.mkdir(RESULT_PATH)\n\n\tRESULT_PATH += \"/\"+file_name+\"_results.txt\"\n\n\tstart_time = time.time()\n\n\tdata_df = pd.read_csv(file_path)\n\n\ttemp_result = optimized_columns_and_singletons_stats_and_quasi_identifies(data_df)\n\n\tend_time = time.time()\n\tduration = end_time - start_time\n\n\toutput_file = open(RESULT_PATH, \"w\")\n\n\toutput_file.write(\"Number of identifiers: \" + str(len(temp_result['identifiers']))+\"\\n\")\n\toutput_file.write(\"Number of singletons: \" + str(temp_result['absolute_value_quasi_identifier']) \n\t\t+ \"/\" + str(temp_result['size_sample'])+\"\\n\")\n\toutput_file.write(\"Percentage of singletons: \" + 
str(temp_result['percentage_quasi_identifiers'])+\"\\n\")\n\toutput_file.write(\"Best quasi-identifier: \"+ \",\".join(temp_result['quasi_identifiers'])+\"\\n\")\n\toutput_file.write(\"Distinct values: \" + str(temp_result['distinct_values'])+\"\\n\")\n\n\toutput_file.write(\"Execution time: \" + str(round(duration, 4)) + \" seconds\\n\")\n\n\toutput_file.close()\n\n\t#print(\"It requires \" + str(round(duration, 4)) + \" seconds\")\n\ndef optimized_columns_and_singletons_stats_and_quasi_identifies(data_df):\n\n\tall_statistics_for_combination = {}\n\tall_singleton_occurrences = {};\n\n\tidentifiers = [];\n\tnum_columns = data_df.shape[1]\n\n\tfieldKeys = list(data_df)\n\tdataset_size = data_df.shape[0]\n\tcolumns_to_check = fieldKeys\n\n\tfor subset_size in range(1, num_columns+1): \n\t\tcombinations = [];\n\t\tdata_list = [];\n\n\t\tcombinations = get_combinations(columns_to_check, subset_size)\n\t\ttemp_identifiers, still_to_check = split_in_identifiers_and_not(data_df, combinations)\n\t\tidentifiers += temp_identifiers\n\n\t\ttemp_statistics_for_combination, temp_singleton_occurrences = get_columns_and_singletons_stats(data_df, still_to_check)\n\t\tall_statistics_for_combination.update(temp_statistics_for_combination)\n\t\tall_singleton_occurrences.update(temp_singleton_occurrences)\n\n\t\tcolumns_to_check = list(set(reduce(lambda x,y: x+y,still_to_check)))\n\n\ttemp_result = get_quasi_identifiers(all_statistics_for_combination)\n\n\ttemp_to_return = {}\n\n\tidentifiers += temp_result['identifiers']\n\n\ttemp_to_return['identifiers'] = identifiers\n\ttemp_to_return['percentage_quasi_identifiers'] = temp_result['percentage_quasi_identifiers']\n\ttemp_to_return['absolute_value_quasi_identifier'] = temp_result['absolute_value_quasi_identifier']\n\ttemp_to_return['size_sample'] = temp_result['size_sample']\n\ttemp_to_return['quasi_identifiers'] = temp_result['quasi_identifiers']\n\ttemp_to_return['distinct_values'] = temp_result['distinct_values']\n\n\ttemp_to_return['statistics_for_combination'] = all_statistics_for_combination\n\ttemp_to_return['singleton_occurrences'] = all_singleton_occurrences\n\n\t'''print(\"Number of identifiers: \" + str(len(identifiers)))\n\tprint(\"Number of singletons: \" + str(temp_result['absolute_value_quasi_identifier']) \n\t\t+ \"/\" + str(temp_result['size_sample']))\n\tprint(\"Percentage of singletons: \" + str(temp_result['percentage_quasi_identifiers']))\n\tprint(temp_result['quasi_identifiers'])\n\tprint(\"Best quasi-identifier: \"+ \",\".join(temp_result['quasi_identifiers']))\n\tprint(\"Distinct values: \" + str(temp_result['distinct_values']))'''\n\n\treturn temp_to_return\n\n\n\ndef get_combinations(arr, r): \n\tdata = list(range(r)) \n\tcombinations = []\n\tcombination_util(arr, len(arr), r, 0, data, 0, combinations)\n\treturn combinations \n\ndef combination_util(arr, n, r, index, data, i, combinations): \n\t\n\tif(index == r): \n\t\tmylist=[]\n\t\tfor j in range(r):\n\t\t\tmylist.append(data[j])\n\t\tcombinations.append(mylist)\n\t\treturn\n\n\tif(i >= n): \n\t\treturn\n\n\tdata[index] = arr[i] \n\tcombination_util(arr, n, r, \n\t\t\t\t\tindex + 1, data, i + 1, combinations) \n\n\tcombination_util(arr, n, r, index, \n\t\t\t\t\tdata, i + 1, combinations) \n\n\ndef split_in_identifiers_and_not(data_df, columns):\n\tidentifiers = [];\n\tstill_to_check = [];\n\n\tfor combination in columns:\n\t\tnum_of_rows = data_df.shape[0]\n\t\tcolumn_values = {}\n\n\t\tvalues = data_df[combination]\n\t\tvalues = values.drop_duplicates()\n\n\t\tif 
values.shape[0]==data_df.shape[0]:\n\t\t\tidentifiers.append(combination);\n \n\t\telse:\n\t\t\tstill_to_check.append(combination);\n\n\treturn identifiers, still_to_check\n\ndef get_columns_and_singletons_stats(records, columns):\n\trecords = records.dropna()\n\t\n\tsingleton_occurrences = {}\n\tstatistics_for_combination = {}\n\n\tdataset_size = records.shape[0]\n\n\toccurrences_for_combination = {};\n\tfor combo in columns:\n\t\toutput=pd.DataFrame(columns=[\"sesso\",\"comune_residenza\",\"anno_nascita\",\"n\"])\n\n\t\tcounts = records.groupby(combo).size()\n##\t\t\n\t\t\n\t\tsingletons = counts[counts == 1]\n\t\t\n\t\toutput=singletons\n\t\toutput.to_csv(\",\".join(combo)+\".csv\")\n\t\tsingletons = singletons.to_dict()\n\n\t\toccurrences_for_combination[\",\".join(combo)] = counts\n\t\tsingleton_occurrences[\",\".join(combo)] = singletons\n\n\tfor combo in singleton_occurrences:\n\t\tabs_value = len(singleton_occurrences[combo])\n\t\tpercentage = round(abs_value/dataset_size*100)\n\t\tstatistics_for_combination[combo] = {\n\t\t\t'singleton_occurrences_absolute_value': abs_value,\n\t\t\t'dataset_size':dataset_size,\n\t\t\t'percentage_of_singletons':percentage,\n\t\t\t'distinct_values':len(occurrences_for_combination[combo])\n\t\t}\n\n\treturn statistics_for_combination,singleton_occurrences\n\t\ndef get_quasi_identifiers(stats):\n\n\tdistinct_values = -1\n\tsize_sample = -1\n\tmax_absolute_value = -1\n\tmax_percentage = -1\n\tmin_size = 0\n\tquasi_identifiers = []\n\n\tidentifiers = []\n\n\tfor column_combination in stats:\n\t\tstatistic = stats[column_combination]\n\t\tcolumn_combination_as_array = column_combination.split(\",\")\n\n\t\tif statistic['percentage_of_singletons']==100:\n\t\t\tidentifiers.append(column_combination_as_array)\n\n\t\telif statistic['percentage_of_singletons']>max_percentage:\n\t\t\tquasi_identifiers = []\n\t\t\tquasi_identifiers.append(column_combination)\n\t\t\tmin_size = len(column_combination_as_array)\n\t\t\tmax_absolute_value = statistic['singleton_occurrences_absolute_value']\n\t\t\tsize_sample = statistic['dataset_size']\n\t\t\tmax_percentage = statistic['percentage_of_singletons']\n\t\t\tdistinct_values = statistic['distinct_values']\n\n\t\telif statistic['percentage_of_singletons']==max_percentage:\n\t\t\tactual_size = len(column_combination_as_array)\n\t\t\tif actual_size\n#define TILE_DIM 32\n\n#define CUDA_KERNEL_LOOP(i, n)  \\\n  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)\n\n    __global__ void relu(const int width, const int height, float* a) {\n\n\n      int row = blockIdx.y * blockDim.y + threadIdx.y;\n      int col = blockIdx.x * blockDim.x + threadIdx.x;\n\n      if(row >= height || col >= width) {\n        return;\n      }\n\n      int index = col + row * width;\n\n      if (a[index] < 0) {\n        a[index] = 0;\n      }\n    }\n\n    __global__ void relu_backwards(const int width, const int height, float* a, float** b) {\n\n\n      int row = blockIdx.y * blockDim.y + threadIdx.y;\n      int col = blockIdx.x * blockDim.x + threadIdx.x;\n\n      if(row >= height || col >= width) {\n        return;\n      }\n\n      int index = col + row * width;\n\n      if (b[row][col] < 0) {\n        a[index] = 0;\n      }\n    }\n\n\n    __device__ float denominator;\n    __global__ void softmax(const int width, const int height, const float* in, float* out) {\n\n      int col = blockIdx.x * blockDim.x + threadIdx.x;\n      int row = blockIdx.y * blockDim.y + threadIdx.y;\n\n      if (row >= height || col >= width) {\n        return;\n      }\n\n      int index = col + row * width;\n\n      atomicAdd(&denominator, expf(in[index]));\n\n      // NOTE: denominator is a device-global that is never reset between\n      // launches, and threads may read it before all contributions have\n      // landed, so this normalization is racy as written.\n      out[index] = 
expf(in[index]) / denominator;\n\n }\n \n __global__ void transpose_pointer(float *odata, const float **idata,\n const int width, const int height)\n {\n __shared__ float tile[32][33];\n int x = blockIdx.x * 32 + threadIdx.x;\n int y = blockIdx.y * 32 + threadIdx.y;\n\n if (x < width && y < height) {\n tile[threadIdx.y][threadIdx.x] = idata[y][x];\n }\n\n __syncthreads();\n\n x = blockIdx.y * 32 + threadIdx.x; // transpose block offset\n y = blockIdx.x * 32 + threadIdx.y;\n\n if (y < width && x < height) {\n odata[y*height + x] = tile[threadIdx.x][threadIdx.y];\n }\n }\n\n // Matrix Tranpose based on implementation from: http://disq.us/p/vncug5\n __global__ void transpose(float *odata, const float *idata,\n const int width, const int height)\n {\n __shared__ float tile[32][33];\n int x = blockIdx.x * 32 + threadIdx.x;\n int y = blockIdx.y * 32 + threadIdx.y;\n\n if (x < width && y < height) {\n tile[threadIdx.y][threadIdx.x] = idata[y*width + x];\n }\n\n __syncthreads();\n\n x = blockIdx.y * 32 + threadIdx.x; // transpose block offset\n y = blockIdx.x * 32 + threadIdx.y;\n\n if (y < width && x < height) {\n odata[y*height + x] = tile[threadIdx.x][threadIdx.y];\n }\n }\n \n // Matrix multiply based on implementation from: https://stackoverflow.com/questions/18815489\n __global__ void multiply(float* A, float* B, float* C, int ARows, int ACols, int BRows,\n int BCols, int CRows, int CCols)\n {\n float CValue = 0;\n \n int Row = blockIdx.y*TILE_DIM + threadIdx.y;\n int Col = blockIdx.x*TILE_DIM + threadIdx.x;\n \n __shared__ float As[TILE_DIM][TILE_DIM];\n __shared__ float Bs[TILE_DIM][TILE_DIM];\n \n for (int k = 0; k < (TILE_DIM + ACols - 1)/TILE_DIM; k++) {\n \n if (k*TILE_DIM + threadIdx.x < ACols && Row < ARows)\n As[threadIdx.y][threadIdx.x] = A[Row*ACols + k*TILE_DIM + threadIdx.x];\n else\n As[threadIdx.y][threadIdx.x] = 0.0;\n \n if (k*TILE_DIM + threadIdx.y < BRows && Col < BCols)\n Bs[threadIdx.y][threadIdx.x] = B[(k*TILE_DIM + threadIdx.y)*BCols + Col];\n else\n Bs[threadIdx.y][threadIdx.x] = 0.0;\n \n __syncthreads();\n \n for (int n = 0; n < TILE_DIM; ++n)\n CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x];\n \n __syncthreads();\n }\n \n if (Row < CRows && Col < CCols)\n C[((blockIdx.y * blockDim.y + threadIdx.y)*CCols) +\n (blockIdx.x * blockDim.x)+ threadIdx.x] = CValue;\n }\n\n \"\"\")\n\n\n# FOR TESTING CPU VS GPU\ndef softmax_cpu(x):\n return np.exp(x) / np.exp(x).sum(axis=1, keepdims=True)\n\n\n# Pick highest probability action, with a certain probability of picking a different choice\ndef pick_action(action_prob):\n r = np.random.uniform()\n total = 0\n for i, p in enumerate(action_prob[0]):\n total += p\n if r <= total:\n return i\n\n # action_prob should always sum to 1. This is in case of small rounding error\n return i\n\n\n# Compute discount rewards. 
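A sketch of the launch geometry the tiled multiply kernel above expects; it mirrors the block/grid pattern used in forward() below, with made-up matrix sizes (A_gpu, B_gpu, C_gpu stand for device buffers allocated elsewhere):
import math
import numpy as np

TILE = 32
ARows, ACols, BCols = 64, 128, 96                 # C will be ARows x BCols
block = (TILE, TILE, 1)
grid = (int(math.ceil(BCols / float(TILE))),      # x blocks cover columns of C
        int(math.ceil(ARows / float(TILE))))      # y blocks cover rows of C
# multiply = mod.get_function("multiply")
# multiply(A_gpu, B_gpu, C_gpu, np.int32(ARows), np.int32(ACols), np.int32(ACols),
#          np.int32(BCols), np.int32(ARows), np.int32(BCols), block=block, grid=grid)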
Give more recent rewards more weight.\ndef discount_rewards(rewards, discount_factor=Constants.GAMMA):\n discounted_r = np.zeros_like(rewards).astype(np.float32)\n running_add = 0\n\n for i in reversed(xrange(0, len(rewards))):\n\n # Final state of round\n if rewards[i] != 0:\n running_add = 0\n\n running_add = running_add * discount_factor + rewards[i]\n discounted_r[i] = running_add\n return discounted_r\n\n\n# ------------------------------------------------------------------\ngame = Game.Game()\n\nnum_inputs = np.int32(Constants.HAND_INPUT_SIZE)\nnum_outputs = np.int32(Constants.NUM_OUTPUTS) # CALL/CHECK, RAISE, FOLD\nnum_hiddens = [np.int32(Constants.NUM_NODE_HIDDEN)] # Each value represents number of nodes per layer\nactions = []\nreward_count = []\nmodel = {}\n\nif Constants.RANDOM_WEIGHT_INIT:\n model['W1'] = 0.1 * np.random.randn(num_inputs, num_hiddens[0]).astype(np.float32)\n model['W2'] = 0.1 * np.random.randn(num_hiddens[0], num_outputs).astype(np.float32)\n # Uncomment to save weights\n #model['W1'].tofile(\"W1.txt\")\n #model['W2'].tofile(\"W2.txt\")\nelse:\n model['W1'] = np.fromfile(\"W1.txt\", dtype=np.float32).reshape((num_inputs, num_hiddens[0]))\n model['W2'] = np.fromfile(\"W2.txt\", dtype=np.float32).reshape((num_hiddens[0], num_outputs))\n\ngrad_buffer = {k: np.zeros_like(v) for k, v in model.iteritems()}\nrmsprop_cache = {k: np.zeros_like(v) for k, v in model.iteritems()}\n\n\ndef forward():\n input = game.get_input()\n\n multiply = mod.get_function(\"multiply\")\n softmax = mod.get_function(\"softmax\")\n relu = mod.get_function(\"relu\")\n\n W1_out = np.zeros((1, num_hiddens[0])).astype(np.float32)\n y = np.zeros((1, num_outputs)).astype(np.float32)\n predictions = np.zeros((1, num_outputs)).astype(np.float32)\n\n input_gpu = cuda.mem_alloc(input.nbytes)\n W1_gpu = cuda.mem_alloc(model['W1'].nbytes)\n W2_gpu = cuda.mem_alloc(model['W2'].nbytes)\n W1_out_gpu = cuda.mem_alloc(W1_out.nbytes)\n y_gpu = cuda.mem_alloc(y.nbytes)\n predictions_gpu = cuda.mem_alloc(predictions.nbytes)\n\n cuda.memcpy_htod(input_gpu, input)\n cuda.memcpy_htod(W1_gpu, model['W1'])\n cuda.memcpy_htod(W2_gpu, model['W2'])\n\n block_x, block_y = 32, 32\n block = (block_x, block_y, 1)\n grid = (int(math.ceil(model['W1'].shape[1] / block_x)),\n int(math.ceil(input.shape[0] / block_x)))\n\n multiply(input_gpu, W1_gpu, W1_out_gpu, np.int32(1), np.int32(input.shape[0]), np.int32(model['W1'].shape[0]),\n np.int32(model['W1'].shape[1]), np.int32(W1_out.shape[0]), np.int32(W1_out.shape[1]), block=block, grid=grid)\n\n # DEBUG \n # debug_answer = np.dot(input, model['W1'])\n # cuda.memcpy_dtoh(W1_out, W1_out_gpu)\n # print(\"Number of mistakes for matrix multiply (Hidden 1): %d\" % (np.abs(W1_out - debug_answer) > 0.001).sum())\n\n relu(num_hiddens[0], np.int32(1), W1_out_gpu, block=block, grid=grid)\n\n\n # DEBUG\n # cuda.memcpy_dtoh(W1_out, W1_out_gpu)\n # debug_answer[debug_answer < 0] = 0 # RELU\n # print(\"Number of mistakes for RELU (Hidden 1): %d\" % (np.abs(W1_out - debug_answer) > 0.001).sum())\n\n block_x, block_y = 32, 32\n block = (block_x, block_y, 1)\n grid = (int(math.ceil(model['W2'].shape[1] / block_x)),\n int(math.ceil(W1_out.shape[0] / block_x)))\n\n multiply(W1_out_gpu, W2_gpu, y_gpu, np.int32(W1_out.shape[0]), np.int32(W1_out.shape[1]), np.int32(model['W2'].shape[0]), np.int32(model['W2'].shape[1]),\n np.int32(y.shape[0]), np.int32(y.shape[1]), block=block, grid=grid)\n\n # DEBUG\n # cuda.memcpy_dtoh(y, y_gpu)\n # debug_answer = np.dot(debug_answer, model['W2'])\n # print(\"Number 
of mistakes for matrix multiply (Output): %d\" % (np.abs(y - debug_answer) > 0.001).sum())\n\n # Work around for denominator that needs to be reset for each round\n denom = np.array([0]).astype(np.float32)\n denom_gpu, _ = mod.get_global(\"denominator\")\n cuda.memcpy_htod(denom_gpu, denom)\n\n softmax(num_outputs, np.int32(1), y_gpu, predictions_gpu, np.int32(0), block=block, grid=grid)\n\n cuda.memcpy_dtoh(predictions, predictions_gpu)\n\n # DEBUG STATEMENT\n # print(\"Number of mistakes for softmax: %d\" % (np.abs(predictions - softmax_cpu(y)) > 0.001).sum())\n # print(\"Prediction probabilities: %s\" % str(predictions))\n\n return predictions, W1_out_gpu\n\n\ndef backward(eph, epdlogp, epx):\n\n eph_shape = (eph.shape[0], int(num_hiddens[0]))\n\n multiply = mod.get_function(\"multiply\")\n transpose = mod.get_function(\"transpose\")\n transpose_pointer = mod.get_function(\"transpose_pointer\")\n softmax = mod.get_function(\"softmax\")\n relu_backwards = mod.get_function(\"relu_backwards\")\n\n dW2 = np.zeros((eph_shape[1], epdlogp.shape[1])).astype(np.float32)\n dW1 = np.zeros((epx.shape[1], model['W2'].shape[0])).astype(np.float32)\n\n eph_gpu = cuda.mem_alloc(eph.nbytes)\n epdlogp_gpu = cuda.mem_alloc(epdlogp.nbytes)\n epx_gpu = cuda.mem_alloc(epx.nbytes)\n eph_T_gpu = cuda.mem_alloc(eph_shape[0]*eph_shape[1]*np.float32().nbytes)\n epx_T_gpu = cuda.mem_alloc(epx.nbytes)\n W2_gpu = cuda.mem_alloc(model['W2'].nbytes)\n W2_T_gpu = cuda.mem_alloc(model['W2'].nbytes)\n dW2_gpu = cuda.mem_alloc(dW2.nbytes)\n dW1_gpu = cuda.mem_alloc(dW1.nbytes)\n dh_gpu = cuda.mem_alloc(epdlogp.shape[0] * model['W2'].shape[0] * np.float32().nbytes)\n\n cuda.memcpy_htod(eph_gpu, eph)\n cuda.memcpy_htod(epx_gpu, epx)\n cuda.memcpy_htod(W2_gpu, model['W2'])\n cuda.memcpy_htod(epdlogp_gpu, epdlogp)\n\n # --- eph transpose ---\n\n # DEBUG\n # eph_T = np.zeros((eph_test.shape[1], eph_test.shape[0])).astype(np.float32)\n dh = np.zeros((epdlogp.shape[0], model['W2'].shape[0])).astype(np.float32)\n\n block_x, block_y = 32, 32\n block = (block_x, block_y, 1)\n grid = (int(math.ceil(eph_shape[1] / block_x)), int(math.ceil(eph_shape[0]/block_y)))\n\n transpose_pointer(eph_T_gpu, eph_gpu, np.int32(eph_shape[1]), np.int32(eph_shape[0]), block=block, grid=grid)\n\n # DEBUG\n # cuda.memcpy_dtoh(eph_T, eph_T_gpu)\n # print(\"Number of mistakes for Transpose (eph): %d\" % (np.abs(eph_T - eph_test.T) > 0.001).sum())\n\n # --- Matrix multiply epdlogp and transpose of eph\n\n block_x, block_y = 32, 32\n block = (block_x, block_y, 1)\n grid = (int(math.ceil(epdlogp.shape[1] / block_x)),\n int(math.ceil(eph_shape[1] / block_x)))\n\n multiply(eph_T_gpu, epdlogp_gpu, dW2_gpu, np.int32(eph_shape[1]), np.int32(eph_shape[0]), np.int32(epdlogp.shape[0])\n ,np.int32(epdlogp.shape[1]), np.int32(dW2.shape[0]), np.int32(dW2.shape[1]), block=block, grid=grid)\n\n cuda.memcpy_dtoh(dW2, dW2_gpu)\n\n # DEBUG\n # print(\"Number of mistakes for dot product (dW2): %d\" % (np.abs(dW2 - np.dot(eph_T, epdlogp)) > 0.001).sum())\n\n # --- W2 transpose ---\n\n block_x, block_y = 32, 32\n block = (block_x, block_y, 1)\n grid = (int(math.ceil(model['W2'].shape[1] / block_x)), int(math.ceil(model['W2'].shape[0]/block_y)))\n\n transpose(W2_T_gpu, W2_gpu, np.int32(model['W2'].shape[1]), np.int32(model['W2'].shape[0]), block=block, grid=grid)\n\n # DEBUG\n # W2_T = np.zeros((model['W2'].shape[1], model['W2'].shape[0])).astype(np.float32)\n # cuda.memcpy_dtoh(W2_T, W2_T_gpu)\n # print(\"Number of mistakes for Transpose (W2): %d\" % (np.abs(W2_T - 
model['W2'].T) > 0.01).sum())\n\n # --- Matrix multiply epdlogp and W2 transpose ---\n\n block_x, block_y = 32, 32\n block = (block_x, block_y, 1)\n grid = (int(math.ceil(model['W2'].shape[0] / block_x)),\n int(math.ceil(epdlogp.shape[0] / block_x)))\n\n multiply(epdlogp_gpu, W2_T_gpu, dh_gpu, np.int32(epdlogp.shape[0]), np.int32(epdlogp.shape[1]),\n np.int32(model['W2'].shape[1]), np.int32(model['W2'].shape[0]), np.int32(dh.shape[0]),\n np.int32(dh.shape[1]), block=block, grid=grid)\n\n # DEBUG\n # cuda.memcpy_dtoh(dh, dh_gpu)\n # print(\"Number of mistakes for dot product (dh): %d\" % (np.abs(dh - np.dot(epdlogp, model['W2'].T)) > 0.001).sum())\n\n relu_backwards(np.int32(dh.shape[1]), np.int32(dh.shape[0]), dh_gpu, eph_gpu, block=block, grid=grid)\n\n\n # DEBUG\n # cuda.memcpy_dtoh(dh, dh_gpu)\n # debug_answer = np.dot(epdlogp, model['W2'].T).astype(np.float32)\n # debug_answer[eph_test < 0] = 0 # RELU\n # print(\"Number of mistakes for RELU (dh): %d\" % (np.abs(dh - debug_answer) > 0.001).sum())\n\n # --- W2 transpose ---\n\n block_x, block_y = 32, 32\n block = (block_x, block_y, 1)\n grid = (int(math.ceil(epx.shape[1] / block_x)), int(math.ceil(epx.shape[0]/block_y)))\n\n transpose(epx_T_gpu, epx_gpu, np.int32(epx.shape[1]), np.int32(epx.shape[0]), block=block, grid=grid)\n\n # DEBUG\n # epx_T = np.zeros((epx.shape[1], epx.shape[0])).astype(np.float32)\n # cuda.memcpy_dtoh(epx_T, epx_T_gpu)\n # print(\"Number of mistakes for Transpose (epx): %d\" % (np.abs(epx_T - epx.T) > 0.01).sum())\n\n # --- Matrix multiply epx transpose and dh\n\n block_x, block_y = 32, 32\n block = (block_x, block_y, 1)\n grid = (int(math.ceil(dh.shape[1] / block_x)),\n int(math.ceil(epx.shape[1] / block_x)))\n\n multiply(epx_T_gpu, dh_gpu, dW1_gpu, np.int32(epx.shape[1]), np.int32(epx.shape[0]), np.int32(dh.shape[0]),\n np.int32(dh.shape[1]), np.int32(dW1.shape[0]), np.int32(dW1.shape[1]), block=block, grid=grid)\n\n cuda.memcpy_dtoh(dW1, dW1_gpu)\n\n # DEBUG\n # debug_answer = np.dot(epx.T, dh)\n # print(\"Number of mistakes for dot product (dW1): %d\" % (np.abs(dW1 - debug_answer) > 0.001).sum())\n\n return {'W1': dW1, 'W2': dW2}\n\ndef next_round():\n # raw_input()\n game.next_round()\n # forward()\n\n\ndef update_learning_params(xs, hs, dlogps, action_raise=False):\n action_prob, h = forward()\n\n action = pick_action(action_prob)\n\n # actions.append(action)\n\n # Raise only once. If the chosen action is to raise again, check instead.\n if Constants.ACTIONS[action] == \"RAISE\" and action_raise:\n action = 0\n\n hs.append(h)\n\n # https://math.stackexchange.com/questions/945871/derivative-of-softmax-loss-function\n dlogsoftmax = action_prob\n dlogsoftmax[0, action] -= 1\n dlogps.append(dlogsoftmax)\n\n # Observation/input\n # xs.append(game._bot1.get_hand().flatten().astype(np.float32))\n xs.append(game.get_input())\n\n return action\n\n\n# Allow only one Raise per state. 
Return True if action for either player was FOLD\ndef handle_action(action, rewards):\n # bot2_action = game.bot_decision(bot_can_raise)\n\n if Constants.ACTIONS[action] == \"FOLD\":\n rewards.append(-1 * game.get_num_bets())\n return True\n elif Constants.ACTIONS[action] == \"RAISE\":\n bot2_action = game.bot_decision(can_raise=False)\n # return handle_action(action, rewards, bot_can_raise=False)\n else:\n bot2_action = game.bot_decision(can_raise=True)\n\n if Constants.ACTIONS[bot2_action] == \"FOLD\":\n rewards.append(1 * game.get_num_bets())\n return True\n elif Constants.ACTIONS[bot2_action] == \"RAISE\":\n action = update_learning_params(xs, hs, dlogps, action_raise=True)\n rewards.append(0)\n if Constants.ACTIONS[action] == \"FOLD\":\n rewards.append(-1 * game.get_num_bets())\n return True\n\n if Constants.ACTIONS[bot2_action] == \"RAISE\" or Constants.ACTIONS[action] == \"RAISE\":\n game.add_bet()\n\n # Otherwise, both players have checked\n rewards.append(0)\n return False\n\nimport time\nstart = time.time()\nxs, hs, dlogps, rewards = [], [], [], []\nfor i in range(Constants.NUM_OF_EPS):\n game.new_game()\n\n for _ in range(3):\n\n action = update_learning_params(xs, hs, dlogps)\n is_fold = handle_action(action, rewards)\n\n if is_fold:\n break\n\n next_round()\n\n if not is_fold:\n action = update_learning_params(xs, hs, dlogps)\n is_fold = handle_action(action, rewards)\n\n if not is_fold:\n # Remove the last reward. The length of rewards must be equal to the number of actions\n rewards.pop()\n # EVALUATE WINNER\n reward = game.evaluate_winner(game.get_p1_hand(), game.get_p2_hand())\n\n rewards.append(reward * game.get_num_bets()) # +1 / -1 depending on who wins. 0 for tie\n\n # DEBUG\n reward_count.append(rewards[-1])\n\n if i > 0 and i % 100 == 0:\n rewards = discount_rewards(rewards)\n\n epx = np.vstack(xs)\n eph = np.array([np.uint64(int(j)) for j in hs]).astype(np.uint64)\n epdlogp = np.vstack(dlogps)\n epr = np.vstack(rewards)\n epdlogp *= epr\n\n grad = backward(eph, epdlogp, epx)\n\n for k in model:\n # accumulate grad over batch\n grad_buffer[k] += grad[k]\n\n if i % 200 == 0:\n for k, v in model.iteritems():\n g = grad_buffer[k] # gradient\n\n # RMSprop: Gradient descent optimization algorithms\n rmsprop_cache[k] = Constants.DECAY_RATE * rmsprop_cache[k] + (1 - Constants.DECAY_RATE) * g ** 2\n\n # Update weights to minimize the error\n model[k] -= Constants.LEARNING_RATE * g / (np.sqrt(rmsprop_cache[k]) + 1e-5)\n\n # Reset grad buffer\n grad_buffer[k] = np.zeros_like(v)\n\n xs, hs, dlogps, rewards = [], [], [], []\n\n if i > 0 and i % 200 == 0:\n x = np.array(reward_count)\n unique, counts = np.unique(x, return_counts=True)\n values = np.asarray((unique, counts)).T\n earning = 0\n for i in range(values.shape[0]):\n earning += values[i][1] * values[i][0]\n print(earning)\n reward_count = []\n\nend = time.time()\nprint(end-start)\n\n# Performance of algorithm\nx = np.array(reward_count)\nunique, counts = np.unique(x, return_counts=True)\n\nvalues = np.asarray((unique, counts)).T\nprint(values)\nearning = 0\nfor i in range(11):\n earning += values[i][1] * values[i][0]\nloss = 0\nfor i in range(0, 5):\n loss += values[i][1] * values[i][0]\nwin = 0\nfor i in range(6, 11):\n win += values[i][1] * values[i][0]\nprint(earning)\nprint(win)\nprint(loss)\n\n","sub_path":"poker/Learning.py","file_name":"Learning.py","file_ext":"py","file_size_in_byte":18904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} 
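A minimal Python 3 sketch of the reward-discounting step used by the training script in the record above; range() stands in for the script's Python 2 xrange(), and the 0.99 discount factor is an assumed stand-in for Constants.GAMMA, whose value is not shown in this excerpt. A non-zero reward marks the terminal step of a betting round, so the running return is reset there and each earlier action in the round receives an exponentially decayed share of that round's outcome.

import numpy as np

def discount_rewards_sketch(rewards, discount_factor=0.99):
    # discount_factor stands in for Constants.GAMMA (value assumed)
    discounted = np.zeros(len(rewards), dtype=np.float32)
    running_add = 0.0
    for i in reversed(range(len(rewards))):  # Python 3 range() instead of xrange()
        if rewards[i] != 0:  # a non-zero reward ends a round: restart the return
            running_add = 0.0
        running_add = running_add * discount_factor + rewards[i]
        discounted[i] = running_add
    return discounted

# Example: two rounds, ending with rewards -1 and +2
print(discount_rewards_sketch([0, 0, -1, 0, 2]))
# approximately [-0.9801, -0.99, -1.0, 1.98, 2.0]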
+{"seq_id":"534103340","text":"#!/usr/bin/env python\n'''\n Code to read a galaxy, random catalog,\n swap weights, subsample randoms, assess the redshift distribution\n export the new catalogs\n\n\n get help with $> python swap_mocks.py --help\n run with $> python swap_mocks.py\n'''\nimport os\nimport logging\n\n\nimport sys\nsys.path.append('/home/mehdi/github/LSSutils')\nfrom LSSutils.catalogs.combinefits import SysWeight, EbossCatalog, reassignment\n\ndef main(imock=1,\n model='plain',\n cont='null',\n zmin=0.8,\n zmax=2.2,\n nside=512,\n zsplit='lowmidhigh',\n slices=['low', 'high'],\n cap='NGC',\n target='QSO',\n version='v7',\n versiono='0.3'): \n\n # \n if 'zhigh' in slices:\n raise RuntimeError(f'zhigh is not allowed for mocks')\n\n output_dir = f'/B/Shared/mehdi/eboss/mocks/{versiono}/{cap}_{imock:04d}_{cont}'\n\n if cont == 'contaminated':\n input_dir = '/B/Shared/eBOSS/contaminated'\n data_name_in = input_dir+ f'/EZmock_eBOSS_{target}_{cap}_{version}_{imock:04d}.dat.fits'\n rand_name_in = input_dir+ f'/EZmock_eBOSS_{target}_{cap}_{version}_{imock:04d}.ran.fits'\n elif cont == 'null':\n input_dir = '/B/Shared/eBOSS/null'\n data_name_in = input_dir+ f'/EZmock_eBOSS_{target}_{cap}_{version}_noweight_{imock:04d}.dat.fits'\n rand_name_in = input_dir+ f'/EZmock_eBOSS_{target}_{cap}_{version}_noweight_{imock:04d}.ran.fits'\n else:\n raise ValueError(f'{cont} should be either cont or null')\n \n tag = '_'.join((version, versiono, model, zsplit))\n data_name_out = output_dir + f'/EZmock_eBOSS_{target}_{cap}_{tag}_{imock:04d}.dat.fits'\n rand_name_out = output_dir + f'/EZmock_eBOSS_{target}_{cap}_{tag}_{imock:04d}.ran.fits'\n plotname = output_dir + f'/eBOSS_{target}_{cap}_{tag}_{imock:04d}.pdf'\n\n weight = lambda zcut, model: output_dir + f'/results/{cap}_{zcut}_{nside}'\\\n +f'/regression/nn_{model}/nn-weights.hp{nside}.fits'\n\n zcuts = {'low':[[0.8, 1.5], None],\n 'high':[[1.5, 2.2], None],\n 'all':[[0.8, 2.2], None],\n 'zhigh':[[2.2, 3.5], None],\n 'z1':[[0.8, 1.3], None],\n 'z2':[[1.3, 1.6], None],\n 'z3':[[1.6, 2.2], None]}\n\n\n\n\n logger = logging.getLogger(\"Swapper\")\n logger.info('results will be written under {}'.format(output_dir)) \n logger.info('swap the NN-z weights')\n logger.info(f'input data : {data_name_in}')\n logger.info(f'input random : {rand_name_in}')\n logger.info(f'output data : {data_name_out}')\n logger.info(f'output random : {rand_name_out}')\n \n # --- check if the output directory exists\n if not os.path.isdir(output_dir):\n logger.info('create {}'.format(output_dir))\n #os.makedirs(output_dir)\n\n for zcut in slices: #--only read slices that we need\n logger.info(f'zcut : {zcut}')\n zcuts[zcut][1]=SysWeight(weight(zcut, model))\n\n data = EbossCatalog(data_name_in, zmin=zmin, zmax=zmax, kind='galaxy')\n data.swap(zcuts=zcuts, slices=slices)\n #data.make_plots(zcuts, slices=slices, filename=plotname)\n data.to_fits(data_name_out)\n\n\n random = EbossCatalog(rand_name_in, zmin=zmin, zmax=zmax, kind='random')\n newrandom = reassignment(random.data, data.data, seed=1234567)\n newrandom.write(rand_name_out) \n\n \n \nif __name__ == '__main__':\n \n from argparse import ArgumentParser\n ap = ArgumentParser(description='Prepare EBOSS Data and Random Catalogs')\n ap.add_argument('--model', type=str, default='plain', help='eg:plain, other options are ablation and known ')\n ap.add_argument('--imock', type=int, default=1, help='eg:1')\n ap.add_argument('--cont', type=str, default='null', help='null or contaminated')\n ap.add_argument('--zmin', type=float, 
default=0.8, help='eg:0.8')\n ap.add_argument('--zmax', type=float, default=2.2, help='eg:2.2')\n ap.add_argument('--nside', type=int, default=512, help='eg:512')\n ap.add_argument('--zsplit', type=str, default='lowmidhigh', help='eg: lowmidhigh')\n ap.add_argument('--slices', type=str, default=['low', 'high'], nargs='*', help=\"eg:['low', 'high']\")\n ap.add_argument('--cap', type=str, default='NGC', help='eg: NGC or SGC')\n ap.add_argument('--target', type=str, default='QSO', help='eg: QSO')\n ap.add_argument('--version', type=str, default='v7', help='eg: v7')\n ap.add_argument('--versiono',type=str, default='0.3', help='eg: 0.3')\n ns = ap.parse_args() \n\n #--- default\n #\n # model='plain',\n # zmin=0.8,\n # zmax=3.5,\n # nside=512,\n # zsplit='lowmidhigh',\n # slices=['low', 'high', 'zhigh'],\n # cap='NGC',\n # target='QSO',\n # version='v7_2',\n # versiono='0.3'\n \n from LSSutils import setup_logging\n setup_logging('info')\n\n logger = logging.getLogger(\"Swapper\")\n \n kwargs = ns.__dict__\n for (a,b) in zip(kwargs.keys(), kwargs.values()):\n logger.info('{:6s}{:15s} : {}'.format('', a, b))\n \n # -- call the function \n main(**kwargs)\n \n \n","sub_path":"swap_mocks.py","file_name":"swap_mocks.py","file_ext":"py","file_size_in_byte":5091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"265462198","text":"# Alacritty config options\n# Antonio Sarosi\n# December 10, 2020\n\nfrom typing import List, Dict, Any\nfrom collections.abc import Mapping\nfrom pathlib import Path\nfrom sys import stderr\nimport yaml\nimport log\n\n\nclass ConfigError(Exception):\n def __init__(self, message='Error applying configuration'):\n super().__init__(message)\n\n\nclass Alacritty:\n def __init__(self):\n self.base_path = Path().home() / '.config' / 'alacritty'\n if not self.base_path.exists():\n raise ConfigError(f'Config directory not found: {self.base_path}')\n\n self.config_file = self.base_path / 'alacritty.yml'\n if not self.config_file.is_file():\n log.warn('Config file not found')\n self.config_file.touch()\n print('Created config file =>', end=' ', file=stderr)\n log.color_print(self.config_file, log.Color.BLUE, file=stderr)\n\n self.config = self._load(self.config_file)\n if self.config is None:\n self.config = {}\n log.warn('Alacritty config file was empty')\n\n self.resources = {\n 'themes': {\n 'type': 'Themes directory',\n 'path': self.base_path / 'themes',\n 'exists': lambda: self.resources['themes']['path'].is_dir(),\n 'create': lambda: self.resources['themes']['path'].mkdir()\n },\n 'fonts': {\n 'type': 'Fonts file',\n 'path': self.base_path / 'fonts.yaml',\n 'exists': lambda: self.resources['fonts']['path'].is_file(),\n 'create': lambda: self.resources['fonts']['path'].touch()\n }\n }\n\n def _load(self, yaml_file: Path) -> Dict[str, Any]:\n with open(yaml_file) as f:\n try:\n return yaml.load(f, Loader=yaml.FullLoader)\n except yaml.YAMLError as e:\n raise ConfigError((\n 'YAML error at parsing file \"{0}\", '\n 'at line {1.problem_mark.line}, '\n 'column {1.problem_mark.column}:\\n'\n '{1.problem} {1.context}'\n ).format(yaml_file.name, e))\n\n def _resource_path(self, resource: str) -> Path:\n if resource not in self.resources:\n raise ConfigError(f'Path for resource \"{resource}\" not set')\n\n resource = self.resources[resource]\n if not resource['exists']():\n log.warn(f'{resource[\"type\"]} not found')\n resource['create']()\n print('Created resource =>', end=' ', file=stderr)\n log.color_print(resource['path'], 
log.Color.BLUE, file=stderr)\n\n return resource['path']\n\n def save(self):\n with open(self.config_file, 'w') as f:\n yaml.dump(self.config, f)\n\n def apply(self, **config):\n if config is None or len(config) < 1:\n raise ConfigError('No options provided')\n\n actions = {\n 'theme': self.change_theme,\n 'font': self.change_font,\n 'size': self.change_font_size,\n 'opacity': self.change_opacity,\n 'padding': self.change_padding,\n 'offset': self.change_font_offset,\n 'list': self.list,\n 'print': self.print,\n }\n\n errors_found = 0\n for param, action in actions.items():\n if param in config:\n try:\n action(config[param])\n except ConfigError as e:\n log.err(e)\n errors_found += 1\n\n if errors_found > 0:\n raise ConfigError(f'\\n{errors_found} error(s) found')\n\n def change_theme(self, theme: str):\n themes_directory = self._resource_path('themes')\n theme_file = themes_directory / f'{theme}.yaml'\n if not theme_file.is_file():\n raise ConfigError(f'Theme \"{theme}\" not found')\n\n theme_yaml = self._load(theme_file)\n if theme_yaml is None:\n raise ConfigError(f'File {theme_file.name} is empty')\n if 'colors' not in theme_yaml:\n raise ConfigError(f'{theme_file} does not contain color config')\n\n expected_colors = [\n 'black',\n 'red',\n 'green',\n 'yellow',\n 'blue',\n 'magenta',\n 'cyan',\n 'white',\n ]\n\n expected_props = {\n 'primary': ['background', 'foreground'],\n 'normal': expected_colors,\n 'bright': expected_colors,\n }\n\n for k in expected_props:\n if k not in theme_yaml['colors']:\n log.warn(f'Missing \"colors:{k}\" for theme \"{theme}\"')\n continue\n for v in expected_props[k]:\n if v not in theme_yaml['colors'][k]:\n log.warn(f'Missing \"colors:{k}:{v}\" for theme \"{theme}\"')\n\n self.config['colors'] = theme_yaml['colors']\n log.ok(f'Theme {theme} applied')\n\n def change_font_size(self, size: float):\n if size <= 0:\n raise ConfigError('Font size cannot be negative or zero')\n\n if 'font' not in self.config:\n self.config['font'] = {}\n log.warn('\"font\" prop config was not present in alacritty.yml')\n self.config['font']['size'] = size\n log.ok(f'Font size set to {size:.1f}')\n\n def change_font(self, font: str):\n if 'font' not in self.config:\n self.config['font'] = {}\n log.warn('\"font\" prop was not present in alacritty.yml')\n\n fonts_file = self._resource_path('fonts')\n fonts = self._load(fonts_file)\n if fonts is None:\n raise ConfigError(f'File \"{fonts_file}\" is empty')\n if 'fonts' not in fonts:\n raise ConfigError(f'No font config found in \"{fonts_file}\"')\n\n fonts = fonts['fonts']\n if font not in fonts:\n raise ConfigError(f'Config for font \"{font}\" not found')\n\n font_types = ['normal', 'bold', 'italic']\n\n if isinstance(fonts[font], str):\n font_name = fonts[font]\n fonts[font] = {}\n for t in font_types:\n fonts[font][t] = font_name\n\n if not isinstance(fonts[font], Mapping):\n raise ConfigError(f'Font \"{font}\" has wrong format')\n\n for t in font_types:\n if t not in fonts[font]:\n raise ConfigError(f'Font \"{font}\" does not have \"{t}\" property')\n if t not in self.config['font']:\n self.config['font'][t] = {'family': 'tmp'}\n self.config['font'][t]['family'] = fonts[font][t]\n\n log.ok(f'Font {font} applied')\n\n def change_opacity(self, opacity: float):\n if opacity < 0.0 or opacity > 1.0:\n raise ConfigError('Opacity should be between 0.0 and 1.0')\n\n self.config['background_opacity'] = opacity\n log.ok(f'Opacity set to {opacity:.2f}')\n\n def change_padding(self, padding: List[int]):\n if len(padding) != 2:\n raise 
ConfigError('Padding should only have an x and y value')\n\n x, y = padding\n if 'window' not in self.config:\n self.config['window'] = {}\n log.warn('\"window\" prop was not present in config file')\n if 'padding' not in self.config['window']:\n self.config['window']['padding'] = {}\n log.warn('\"padding\" prop was not present in config file')\n\n self.config['window']['padding']['x'] = x\n self.config['window']['padding']['y'] = y\n log.ok(f'Padding set to x: {x}, y: {y}')\n \n def change_font_offset(self, offset: List[int]):\n if len(offset) != 2:\n raise ConfigError('Wrong offset config, should be [x, y]')\n\n x, y = offset\n if 'font' not in self.config:\n self.config['font'] = {}\n if 'offset' not in self.config['font']:\n log.warn('\"offset\" prop was not set')\n self.config['font']['offset'] = {}\n \n self.config['font']['offset']['x'] = x\n self.config['font']['offset']['y'] = y\n log.ok(f'Offset set to x: {x}, y: {y}')\n\n def list(self, to_be_listed: str):\n def list_themes():\n themes_dir = self._resource_path('themes')\n themes = [file.name.split('.')[0] for file in themes_dir.iterdir()]\n if len(themes) < 1:\n log.warn('Cannot list themes, themes directory is empty')\n else:\n log.color_print('Themes:', log.Color.BOLD)\n for theme in themes:\n log.color_print(f' {theme}', log.Color.BLUE)\n\n def list_fonts():\n fonts = self._load(self._resource_path('fonts'))\n if fonts is None or 'fonts' not in fonts:\n log.warn('Cannot list fonts, no fonts found')\n else:\n log.color_print('Fonts:', log.Color.BOLD)\n for font in fonts['fonts']:\n log.color_print(f' {font}', log.Color.PURPLE)\n\n options = {\n 'themes': list_themes,\n 'fonts': list_fonts,\n }\n\n if to_be_listed == 'all':\n for _, list_function in options.items():\n list_function()\n else:\n if to_be_listed not in options:\n raise ConfigError(f'Cannot list {to_be_listed}, unknown option')\n options[to_be_listed]()\n\n def print(self, to_be_printed: List[str]):\n def print_config():\n log.color_print(self.config_file, log.Color.BOLD)\n print(yaml.dump(self.config))\n\n def print_fonts():\n fonts_file = self._resource_path('fonts')\n log.color_print(fonts_file, log.Color.BOLD)\n print(yaml.dump(self._load(fonts_file)))\n\n def print_theme(theme: str):\n themes_dir = self._resource_path('themes')\n theme_file = themes_dir / f'{theme}.yaml'\n if not theme_file.is_file():\n raise ConfigError(\n f'Failed printing \"{theme}\" theme, \"{theme_file}\" not found'\n )\n log.color_print(theme_file, log.Color.BOLD)\n print(yaml.dump(self._load(theme_file)))\n\n options = {\n 'fonts': print_fonts,\n 'config': print_config,\n }\n\n if len(to_be_printed) == 0:\n to_be_printed.append('config')\n\n for param in to_be_printed:\n if param not in options:\n print_theme(param)\n else:\n options[param]()\n","sub_path":"src/alacritty.py","file_name":"alacritty.py","file_ext":"py","file_size_in_byte":10535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"653564981","text":"from collections import deque\n\nn, q = int(input()), deque()\nx1, y1 = map(int, input().split())\nx1, y1 = x1-1, y1-1\nx2, y2 = map(int, input().split())\nx2, y2 = x2-1, y2-1\nx = [-2, -2, -1, -1, 1, 1, 2, 2]\ny = [-1, 1, -2, 2, -2, 2, -1, 1]\nstol = [0]*n\nfor i in range(n):\n stol[i] = [1000000000]*n\nstol[x1][y1] = 0\nq.appendleft([x1, y1])\nwhile q:\n front = q.popleft()\n i = front[0]\n j = front[1]\n for l in range(8):\n k = (i + x[l])\n t = (j + y[l])\n if (k >= 0 and k < n):\n if t >= 0 and t < n:\n if stol[k][t] > 
stol[i][j] + 1:\n stol[k][t] = stol[i][j] + 1\n q.appendleft([k, t])\nprint(stol[x2][y2])","sub_path":"chess.py","file_name":"chess.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"131318543","text":"from lesson_5.banana9.jinja import render\nfrom lesson_5.banana9.api import API, MockApplication\nfrom lesson_5.banana9.models import TrainingSite\nfrom lesson_5.banana9.logging_mod import Logger, debug\nfrom lesson_5.banana9.middleware import fronts\n\n\n\n\nsite = TrainingSite()\nlogger = Logger('main')\n\n\ndef main_view(request):\n logger.log('Список курсов')\n return '200 OK', render('course_list.html', objects_list=site.courses)\n\ndef not_found_404_view(request):\n return '404 WHAT', '
<h1>404 PAGE Not Found</h1>
'\n\n\ndef about_view(request):\n logger.log('Список курсов')\n return '200 OK', render('about_us.html')\n\n\ndef hello_view(request):\n name = request.get('name', None)\n # Используем шаблонизатор\n return '200 OK', render('hello.html', name=name)\n\n\ndef contact_view(request):\n # Проверка метода запроса\n if request['method'] == 'POST':\n data = request['data']\n title = data['title']\n text = data['text']\n email = data['email']\n print(f'Нам пришло сообщение от {email} с темой {title} и текстом {text}')\n return '200 OK', render('contact.html')\n else:\n return '200 OK', render('contact.html')\n\n\n@debug\ndef create_course(request):\n if request['method'] == 'POST':\n # метод пост\n data = request['data']\n name = data['name']\n category_id = data.get('category_id')\n print(category_id)\n category = None\n if category_id:\n category = site.find_category_by_id(int(category_id))\n\n course = site.create_course('record', name, category)\n site.courses.append(course)\n # редирект?\n # return '302 Moved Temporarily', render('create_course.html')\n # Для начала можно без него\n return '200 OK', render('create_course.html')\n else:\n categories = site.categories\n return '200 OK', render('create_course.html', categories=categories)\n\n\ndef create_category(request):\n if request['method'] == 'POST':\n data = request['data']\n name = data['name']\n category_id = data.get('category_id')\n\n category = None\n if category_id:\n category = site.find_category_by_id(int(category_id))\n\n new_category = site.create_category(name, category)\n site.categories.append(new_category)\n\n return '200 OK', render('create_category.html')\n else:\n categories = site.categories\n return '200 OK', render('create_category.html', categories=categories)\n\n\nurlpatterns = {\n '/': main_view,\n '/hello/': hello_view,\n '/about/': about_view,\n # '/secret/': secret_view,\n # '/authors/': authors_view,\n '/contact/': contact_view,\n '/create-course/': create_course,\n '/create-category/': create_category\n}\n\n\n# application = DebugApplication(urlpatterns, fronts)\napplication = MockApplication(urlpatterns, fronts)\n\n\n@application.add_route('/copy-course/')\ndef copy_course(request):\n request_params = request['request_params']\n # print(request_params)\n name = request_params['name']\n old_course = site.get_course(name)\n if old_course:\n new_name = f'copy_{name}'\n new_course = old_course.clone()\n new_course.name = new_name\n site.courses.append(new_course)\n\n return '200 OK', render('course_list.html', objects_list=site.courses)\n\n\n@application.add_route('/category-list/')\ndef category_list(request):\n logger.log('Список категорий')\n return '200 OK', render('category_list.html', objects_list=site.categories)\n\n\nif __name__ == '__main__':\n\n app = API(urlpatterns, fronts)\n app.run()","sub_path":"lesson_5/banana9/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"543231761","text":"#\n# linter.py\n# Linter for SublimeLinter3, a code checking framework for Sublime Text 3\n#\n# Written by roadhump\n# Copyright (c) 2014 roadhump\n#\n# License: MIT\n#\n\n\"\"\"This module exports the ESLint plugin class.\"\"\"\n\nimport json\nimport logging\nimport re\nfrom SublimeLinter.lint import NodeLinter, LintMatch\n\n\nlogger = logging.getLogger('SublimeLinter.plugin.eslint')\n\n\nclass ESLint(NodeLinter):\n \"\"\"Provides an interface to the eslint executable.\"\"\"\n\n cmd = 'eslint 
--format json --stdin'\n\n missing_config_regex = re.compile(\n r'^(.*?)\\r?\\n\\w*(ESLint couldn\\'t find a configuration file.)',\n re.DOTALL\n )\n line_col_base = (1, 1)\n defaults = {\n 'selector': 'source.js - meta.attribute-with-value',\n '--stdin-filename': '${file}'\n }\n\n def on_stderr(self, stderr):\n # Demote 'annoying' config is missing error to a warning.\n if self.missing_config_regex.match(stderr):\n logger.warning(stderr)\n self.notify_failure()\n elif (\n 'DeprecationWarning' in stderr\n or 'ExperimentalWarning' in stderr\n or 'in the next version' in stderr # is that a proper deprecation?\n ):\n logger.warning(stderr)\n else:\n logger.error(stderr)\n self.notify_failure()\n\n def find_errors(self, output):\n \"\"\"Parse errors from linter's output.\"\"\"\n try:\n # It is possible that users output debug messages to stdout, so we\n # only parse the last line, which is hopefully the actual eslint\n # output.\n # https://github.com/SublimeLinter/SublimeLinter-eslint/issues/251\n last_line = output.rstrip().split('\\n')[-1]\n content = json.loads(last_line)\n except ValueError:\n logger.error(\n \"JSON Decode error: We expected JSON from 'eslint', \"\n \"but instead got this:\\n{}\\n\\n\"\n \"Be aware that we only parse the last line of above \"\n \"output.\".format(output))\n self.notify_failure()\n return\n\n if logger.isEnabledFor(logging.INFO):\n import pprint\n logger.info(\n '{} output:\\n{}'.format(self.name, pprint.pformat(content)))\n\n for entry in content:\n filename = entry.get('filePath', None)\n if filename == '':\n filename = 'stdin'\n\n for match in entry['messages']:\n if match['message'].startswith('File ignored'):\n continue\n\n column = match.get('column', None)\n if column is not None:\n # apply line_col_base manually\n column = column - 1\n\n if 'line' not in match:\n logger.error(match['message'])\n self.notify_failure()\n continue\n\n yield LintMatch(\n match=match,\n filename=filename,\n line=match['line'] - 1, # apply line_col_base manually\n col=column,\n error_type='error' if match['severity'] == 2 else 'warning',\n code=match.get('ruleId', ''),\n message=match['message'],\n )\n\n def reposition_match(self, line, col, m, vv):\n match = m.match\n if (\n col is None\n or 'endLine' not in match\n or 'endColumn' not in match\n ):\n return super().reposition_match(line, col, m, vv)\n\n # apply line_col_base manually\n end_line = match['endLine'] - 1\n end_column = match['endColumn'] - 1\n\n for _line in range(line, end_line):\n text = vv.select_line(_line)\n end_column += len(text)\n\n return line, col, end_column\n\n def run(self, cmd, code):\n # Workaround eslint bug https://github.com/eslint/eslint/issues/9515\n # Fixed in eslint 4.10.0\n if code == '':\n code = ' '\n\n return super().run(cmd, code)\n","sub_path":"linter.py","file_name":"linter.py","file_ext":"py","file_size_in_byte":4095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"473765495","text":"#James B. 
McNicholas III\r\n#CSC-842 - Dakota State University - (Summer 18)\r\n#Cycle 10\r\n#Description: This program can be user to track and monitor a user that is the target of an inquiry on investigation OR\r\n#to control user activity.\r\n#Operating System: Windows 7\r\n#Packages Installed:\r\n#https://pythonhosted.org/watchdog/installation.html\r\n#Some source borrowed from:\r\n#https://www.pythoncentral.io/introduction-to-sqlite-in-python/\r\n#https://www.michaelcho.me/article/using-pythons-watchdog-to-monitor-changes-to-a-director\r\n#https://stackoverflow.com/questions/36099331/how-to-grab-all-files-in-a-folder-and-get-their-md5-hash-in-python\r\n\r\n#NEW (CYCLE 10)########################################\r\nimport sqlite3\r\nimport os.path\r\nimport glob\r\nimport hashlib\r\n########################################\r\nimport time,os,sys, random, ctypes\r\nfrom watchdog.observers import Observer\r\nfrom watchdog.events import FileSystemEventHandler\r\n\r\ncursor = None\r\ndb = None\r\n\r\n#Normal Usage Statisics\r\nCREATEDEVENTS = 5\r\nMODIFIEDEVENTS = 5\r\n\r\n#Directory to Monitor\r\nworkingDirectory = \"\"\r\n\r\n#Current User\r\ncurrentUser = \"\"\r\n\r\n#User Activity\r\nuCreatedEvents = 0\r\nuModifiedEvents = 0\r\nuFileCountInit = \"\"\r\nuFileCountEnd = \"\"\r\nuTriggerEventCount = 0\r\n\r\n#User Input\r\nuserInput = \"\"\r\n\r\n#Used to store filenames for the filename tirgger event.\r\nfileTrigger1 = \"\"\r\nfileTrigger2 = \"\"\r\nfileTriggerAction = \"\"\r\n\r\nglobalTriggerAction = \"\"\r\n\r\n#NEW (CYCLE 10)########################################\r\n#This function will determine global trigger action path.\r\ndef globalTriggerActions():\r\n global globalTriggerAction\r\n if fileTriggerAction == \"1\":\r\n #Lock the Workstation\r\n #NTD: Lock and prevent rentry \r\n ctypes.windll.user32.LockWorkStation()\r\n if fileTriggerAction == \"2\":\r\n exit()\r\n########################################\r\n\r\n#NEW (CYCLE 10)########################################\r\ndef setGlobalTriggerActions():\r\n print (\"Note: Not all features have been implemented!\")\r\n print (\"Select Trigger Action\")\r\n print (\"1. Lock Workstation\")\r\n print (\"2. Stop Monitoring\")\r\n userInput = input(\"Enter Selection: \" )\r\n globalTriggerAction = userInput\r\n########################################\r\n\r\n\r\n\r\n\r\n#This new function will determine trigger actions that can be taken. \r\ndef triggerActions():\r\n global fileTriggerAction\r\n print (\"Note: Not all features have been implemented!\")\r\n print (\"Select Trigger Action\")\r\n print (\"1. Lock Workstation\")\r\n print (\"2. Stop Monitoring\")\r\n userInput = input(\"Enter Selection: \" )\r\n fileTriggerAction = userInput\r\n\r\n\r\n#User Menu Function\r\ndef userMenu():\r\n global db\r\n global cursor\r\n global userInput\r\n global uCreatedEvents \r\n global uModifiedEvents \r\n global uFileCountInit \r\n global uFileCountEnd\r\n global uTriggerEventCount\r\n global fileTrigger1 \r\n global fileTrigger2\r\n global fileTriggerAction\r\n global globalTriggerAction\r\n \r\n #User Welcome Screen & Options\r\n print (\"---------------------------------------------------------\")\r\n print (\"Welcome to User Monitor\")\r\n #NEW (CYCLE 10)########################################\r\n print (\"Note: If this is your first time using the application you must do the following:\")\r\n print (\"#1. Select Option - 3. Admin Functions\")\r\n print (\"#2. Select Option - 6. 
Initialize DB\")\r\n ########################################\r\n print(\"\\n\")\r\n #Use if tracking a user based (triggers based)\r\n #NEW (CYCLE 10)########################################\r\n print (\"MAIN MENU\")\r\n ########################################\r\n print (\"1. Active Investigation\")\r\n #Use to build statistics for future tracking\r\n print (\"2. Build User Statistics\")\r\n #NTD: Admin Functions\r\n print (\"3. Admin Functions\")\r\n print(\"\\n\")\r\n userInput = input(\"Enter Selection: \" )\r\n print(\"\\n\")\r\n\r\n #Active Investigation\r\n if userInput == \"1\":\r\n w = Watcher()\r\n w.run()\r\n generateReport()\r\n #User Statistics\r\n if userInput == \"2\":\r\n w = Watcher()\r\n w.run()\r\n print(\"\\n\")\r\n generateReport()\r\n #Admin Functions \r\n if userInput == \"3\":\r\n print(\"Not All Features Have Been Fully Implemented\")\r\n print(\"1. View Usage Statistics\")\r\n print(\"2. Get Current User\")\r\n print(\"3. Reset Statistics\")\r\n print(\"4. Return to Main Menu\")\r\n print(\"5. Set File Triggers\")\r\n #NEW (CYCLE 10)######################################## \r\n print(\"6. Initialize DB\")\r\n print(\"7. Hash Directory\")\r\n ######################################## \r\n print(\"8. Close Application\")\r\n userInput = input(\"Enter Selection:\")\r\n print(\"\\n\")\r\n \r\n if userInput == \"1\":\r\n if workingDirectory == \"\":\r\n print(\"Cannot Generate Report\" + \"\\n\")\r\n else:\r\n generateReport()\r\n userInput = -1\r\n if userInput == \"2\":\r\n print(\"Current User:\", os.getlogin() + \"\\n\")\r\n userInput = -1\r\n if userInput == \"3\":\r\n uCreatedEvents = 0\r\n uModifiedEvents = 0\r\n uFileCountInit = \"\"\r\n uFileCountEnd = \"\"\r\n uTriggerEventCount = 0\r\n userInput = -1\r\n if userInput == \"4\":\r\n userInput = -1\r\n if userInput == \"5\":\r\n userInput = -1\r\n print(\"Set File Triggers\")\r\n print(\"1. View File Triggers\")\r\n print(\"2. File Trigger #1\")\r\n print(\"3. File Trigger #2\")\r\n print(\"4. Set Trigger Action\")\r\n print(\"5. 
Set Global Trigger Action\")\r\n userInput = input(\"Enter Selection: \" )\r\n if userInput == \"1\":\r\n print(\"Trigger 1 =\", fileTrigger1)\r\n print(\"Trigger 2 =\", fileTrigger2)\r\n if userInput == \"2\":\r\n print(\"FileTrigger #1 =\", fileTrigger1)\r\n userInput = input(\"Enter New File Trigger: \" )\r\n fileTrigger1 = userInput\r\n if userInput == \"3\":\r\n print(\"FileTrigger #2 =\", fileTrigger2)\r\n userInput = input(\"Enter New File Trigger: \" )\r\n fileTrigger2 = userInput\r\n if userInput == \"4\":\r\n triggerActions()\r\n #NEW (CYCLE 10)########################################\r\n if userInput ==\"5\":\r\n setGlobalTriggerActions()\r\n #######################################\r\n \r\n #NEW (CYCLE 10)########################################\r\n if userInput == \"6\":\r\n # Create a database in RAM\r\n db = sqlite3.connect(':memory:')\r\n # Creates or opens a file called mydb with a SQLite3 DB\r\n db = sqlite3.connect('C:\\TestCase\\TestCaseDB', check_same_thread=False)\r\n # Get a cursor object\r\n cursor = db.cursor()\r\n # Get a cursor object Create Log Table\r\n cursor.execute('''CREATE TABLE log(id INTEGER PRIMARY KEY, event TEXT)''')\r\n cursor.execute('''CREATE TABLE hashs(id INTEGER PRIMARY KEY, filename TEXT, hash TEXT)''')\r\n db.commit()\r\n ########################################\r\n \r\n #NEW (CYCLE 10)########################################\r\n if userInput == \"7\":\r\n userInput = input(\"Input File Directory: \" )\r\n filenames = glob.glob(userInput + \"/*.*\")\r\n print(\"\\n\")\r\n print(\"HASH TABLE\")\r\n for filename in filenames:\r\n with open(filename, 'rb') as inputfile:\r\n data = inputfile.read()\r\n print(filename, hashlib.md5(data).hexdigest())\r\n cursor.execute('INSERT INTO hashs(filename, hash) VALUES(?,?)', (filename, hashlib.md5(data).hexdigest(),))\r\n db.commit\r\n ########################################\r\n print(\"\\n\")\r\n if userInput == \"8\":\r\n if not db == None:\r\n db.close\r\n exit()\r\n\r\nclass Watcher:\r\n #Get the directory to monitor\r\n global workingDirectory\r\n\r\n def __init__(self):\r\n self.observer = Observer()\r\n\r\n def run(self):\r\n global workingDirectory\r\n print(\"To Quit: CTRL + C\")\r\n #Gets the directory to monitor\r\n workingDirectory = input(\"Enter Directory to Monitor: \")\r\n countFilesInit()\r\n self.DIRECTORY_TO_WATCH = workingDirectory\r\n event_handler = Handler()\r\n self.observer.schedule(event_handler, self.DIRECTORY_TO_WATCH, recursive=True)\r\n self.observer.start()\r\n try:\r\n while True:\r\n time.sleep(1)\r\n except:\r\n self.observer.stop()\r\n self.observer.join()\r\n\r\n\r\nclass Handler(FileSystemEventHandler):\r\n @staticmethod\r\n\r\n def on_any_event(event):\r\n global cursor\r\n global db\r\n global userInput\r\n global fileTrigger1\r\n global fileTrigger2\r\n global fileTriggerAction\r\n global globalTriggerAction\r\n global uTriggerEventCount \r\n global CREATEDEVENTS \r\n global MODIFIEDEVENTS\r\n #NEW (CYCLE 10)########################################\r\n global cursor\r\n ########################################\r\n\r\n if event.is_directory:\r\n return None\r\n elif event.event_type == 'created':\r\n #Note the Event\r\n print (\"Received created event - %s.\" % event.src_path)\r\n #NEW (CYCLE 10)########################################\r\n cursor.execute('INSERT INTO log(event) VALUES(?)', (\"Received created event - %s.\" % event.src_path,))\r\n db.commit\r\n ########################################\r\n print(\"UI:\", userInput)\r\n #Record the Event\r\n global 
uCreatedEvents\r\n uCreatedEvents += 1\r\n \r\n if userInput == \"1\":\r\n if uCreatedEvents > CREATEDEVENTS:\r\n print (\"Allowable Created Events Exceeded\")\r\n #NEW (CYCLE 10)########################################\r\n cursor.execute('INSERT INTO log(event) VALUES(?)', (\"Allowable Created Events Exceeded\",))\r\n db.commit\r\n ######################################## \r\n #Lock the Workstation\r\n #NTD: Lock and prevent rentry \r\n ctypes.windll.user32.LockWorkStation()\r\n elif event.event_type == 'modified': \r\n #Note the Event\r\n print (\"Received modified event - %s.\" % event.src_path)\r\n #NEW (CYCLE 10)########################################\r\n cursor.execute('INSERT INTO log(event) VALUES(?)', (\"Received modified event - %s.\" % event.src_path,))\r\n db.commit\r\n ######################################## \r\n #Record the Event\r\n global uModifiedEvents\r\n uModifiedEvents += 1\r\n if userInput == \"1\":\r\n if uModifiedEvents > MODIFIEDEVENTS:\r\n print (\"Allowable Modified Events Exceeded\")\r\n #NEW (CYCLE 10)########################################\r\n cursor.execute('INSERT INTO log(event) VALUES(?)', (\"Allowable Modified Events Exceeded\",))\r\n db.commit\r\n ######################################## \r\n #Lock the Workstation\r\n #NTD: Lock and prevent rentry \r\n ctypes.windll.user32.LockWorkStation()\r\n if fileTrigger1 != \"\":\r\n print (\"File Trigger Activated\")\r\n #NEW (CYCLE 10)########################################\r\n cursor.execute('INSERT INTO log(event) VALUES(?)', (\"File Trigger Activated\",))\r\n db.commit\r\n ######################################## \r\n if fileTrigger1 in event.src_path:\r\n uTriggerEventCount += 1 \r\n #NEW (CYCLE 10)########################################\r\n if globalTriggerAction == \"\":\r\n ######################################## \r\n if fileTriggerAction == \"1\":\r\n #Lock the Workstation\r\n #NTD: Lock and prevent rentry \r\n ctypes.windll.user32.LockWorkStation()\r\n if fileTriggerAction == \"2\":\r\n exit()\r\n #NEW (CYCLE 10)########################################\r\n else:\r\n globalTriggerActions()\r\n ######################################## \r\n \r\n if fileTrigger2 != \"\":\r\n print (\"File Trigger Activated\")\r\n #NEW (CYCLE 10)########################################\r\n cursor.execute('INSERT INTO log(event) VALUES(?)', (\"File Trigger Activated\",))\r\n db.commit\r\n ######################################## \r\n uTriggerEventCount += 1\r\n if fileTrigger2 in event.src_path:\r\n #NEW (CYCLE 10)########################################\r\n if globalTriggerAction == \"\":\r\n ######################################## \r\n if fileTriggerAction == \"1\":\r\n #Lock the Workstation\r\n #NTD: Lock and prevent rentry \r\n ctypes.windll.user32.LockWorkStation()\r\n if fileTriggerAction == \"2\":\r\n exit()\r\n #NEW (CYCLE 10)########################################\r\n else:\r\n globalTriggerActions()\r\n ######################################## \r\n \r\n#Funtion: Generates a User Statics Report\r\ndef generateReport():\r\n #Gets the file count at the end of the process\\cycle\r\n countFilesEnd()\r\n #Report Format\r\n print(\"---------------------------------------------------------\")\r\n print(\"Usage Report:\")\r\n print(\"\\n\")\r\n print(\"User:\", os.getlogin())\r\n print(\"\\n\")\r\n print(\"Initial File Count:\", len(uFileCountInit))\r\n print(\"\\n\")\r\n print(\"Final File Count:\", len(uFileCountEnd))\r\n print(\"\\n\")\r\n print(\"Created Events:\", uCreatedEvents)\r\n print(\"\\n\")\r\n 
print(\"Modified Events:\", uModifiedEvents)\r\n print(\"\\n\")\r\n print(\"Trigger Events:\", uTriggerEventCount)\r\n print(\"\\n\")\r\n \r\n#Funtion: Counts the files at the start of the process\\cycle\r\ndef countFilesInit():\r\n global workingDirectory\r\n global uFileCountInit\r\n uFileCountInit = os.listdir(workingDirectory)\r\n#Funtion: Counts the files at the end of the process\\cycle\r\ndef countFilesEnd():\r\n global workingDirectory\r\n global uFileCountEnd\r\n uFileCountEnd = os.listdir(workingDirectory) \r\n\r\n#Main \r\nif __name__ == '__main__':\r\n #NEW (CYCLE 10)########################################\r\n if os.path.exists(\"C:\\TestCase\\TestCaseDB\"):\r\n # Create a database in RAM\r\n db = sqlite3.connect(':memory:')\r\n # Creates or opens a file with a SQLite3 DB\r\n db = sqlite3.connect('C:\\TestCase\\TestCaseDB', check_same_thread=False)\r\n # Get a cursor object\r\n cursor = db.cursor()\r\n print(\"DB EXISTS\")\r\n ######################################## \r\n while userInput != \"-1\":\r\n userMenu()\r\ndb.close()\r\nexit()\r\n","sub_path":"C10.py","file_name":"C10.py","file_ext":"py","file_size_in_byte":15905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"371056713","text":"import glob\nimport math\nimport os\nimport random\nimport sys\nfrom time import ctime, time\nimport numpy as np\nimport cv2\nimport h5py\nimport matplotlib.pyplot as plt\nimport PIL\nimport scipy.io as io\nimport tensorflow as tf\nimport tensorflow.keras as keras\nfrom austens_model import *\nfrom austens_model2 import *\nfrom image import *\nfrom matplotlib import cm as CM\nfrom PIL import Image\nfrom skimage.measure import compare_psnr, compare_ssim\nfrom sklearn.metrics import mean_squared_error\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.applications.vgg16 import VGG16\nfrom tensorflow.keras.initializers import RandomNormal\nfrom tensorflow.keras.layers import *\nfrom tensorflow.keras.layers import (BatchNormalization, Conv2D, Dense,\n Dropout, Flatten, MaxPooling2D)\nfrom tensorflow.keras.models import Model, Sequential, model_from_json\nfrom tensorflow.keras.optimizers import SGD, Adam\nfrom tensorflow.keras.preprocessing.image import img_to_array, load_img\nfrom tqdm import tqdm\nimport argparse\n\n\n\ndef main():\n print(\"Starting train of ASS model...\")\n\n # Arguments parsing: (put this in later)\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', help='Not implemented but should be able to choose from A or B')\n parser.add_argument('--data_subset',type=int, help='Subset size of data to use')\n parser.add_argument('--validate_size', type=int, help='Size of dataset we use to validate')\n parser.add_argument('--batch_size', type=int, help='Batch size')\n parser.add_argument('--epochs', help='Number of epochs (times we go through the data)')\n parser.add_argument('--resize_size', type=int, help = 'Dimension (square) to resize image with')\n parser.add_argument('--grayscale', type=int, help = '1 for converting to grayscale, 0 for normal')\n args = parser.parse_args()\n\n\n dataset = args.dataset if args.dataset else 'A'\n data_subset = int(args.data_subset) if args.data_subset else -1\n test_size = int(args.validate_size) if args.validate_size else 16 # Number of images we test on\n batch_size = int(args.batch_size) if args.batch_size else 16 # sections of data we use\n epochs = int(args.epochs) if args.epochs else 4 # default epochs\n resize_size = int(args.resize_size) if 
args.resize_size else 1024\n grayscale = 1 if args.grayscale else 0\n # Directories\n root = 'data/ShanghaiTech/'\n model_dir = 'model/'\n weights_dir = 'model/weights/'\n part_A_train = os.path.join(root,'part_A/train_data','images') # need to generate h5 for Part A\n part_A_test = os.path.join(root,'part_A/test_data','images')\n part_B_train = os.path.join(root,'part_B/train_data','images') # Already generated\n part_B_test = os.path.join(root,'part_B/test_data','images')\n temp = 'test_images'\n train_sets = [part_A_train]\n test_sets = [part_A_test]\n\n img_paths = []\n for path in train_sets:\n for img_path in glob.glob(os.path.join(path, '*.jpg')):\n img_paths.append(str(img_path))\n print(\"Number of Images found: \", len(img_paths),\"\\n\")\n # Get the images for evaluation\n test_paths = []\n for path in test_sets:\n for img_path in glob.glob(os.path.join(path, '*.jpg')):\n test_paths.append(str(img_path))\n\n test_paths = test_paths[0:data_subset]\n print(\"Number of test images found: \", len(test_paths))\n test_gen = image_generator(test_paths, test_size, resize_size, grayscale)\n\n img_paths = img_paths[0:data_subset] # Takes only a portion of the data\n len_images = len(img_paths)\n print(\"Total images: \", len_images)\n\n model = contextaware() # Model being used\n\n # Training Parameters\n print(model.summary())\n net = 'ASSNet' # Naming\n time_start = time()\n #epochs = 2\n #batch_size = 64\n validate_rate = 0.5 # Evaluate the model twice every epoch\n losses = [[1e5, 1e5, 1e5, 1e5]]\n best_values = {'mae': 1e5, 'rmse': 1e5, 'sfn': 1e5, 'mape': 1e5} # Best loss values\n val_rate_dec = {'A': [80, 70], 'B': [9, 8.5]}\n num_iter = int((len_images - 0.1) // batch_size + 1) # Avoid overflow\n test_x, test_y = next(test_gen)\n \n train_gen = image_generator(img_paths, batch_size, resize_size, grayscale)\n\n\n print(\"Beginning training:\")\n #losses file write\n returnlosses = []\n # Training iterations\n for epoch in range(epochs):\n print(\"Epoch: \", epoch,'\\n')\n for data_part in range(0, len_images, batch_size):\n x, y = next(train_gen) # Get the next images\n\n model.fit(x, y, batch_size, validation_data = (test_x, test_y), verbose=1)\n\n idx_val = (data_part / batch_size + 1)\n # Eval losses and save models\n if idx_val % (num_iter * validate_rate) == 0:\n print(\"Evaluating..\\n\")\n loss = eval_loss(model, test_x, test_y, quality=False) # get the loss\n if loss[0] < val_rate_dec[dataset][0]:\n validate_rate = min(validate_rate, 0.25)\n if loss[0] < val_rate_dec[dataset][1]:\n validate_rate = min(validate_rate, 0.1)\n losses.append(loss)\n if (loss[0] < best_values['mae']) or (loss[0] == best_values['mae'] and loss[1] < best_values['rmse']):\n model.save_weights(os.path.join(weights_dir, '{}_best.hdf5'.format(net)))\n to_save = False\n for idx_best in range(len(loss)):\n if loss[idx_best] < best_values[list(best_values.keys())[idx_best]]:\n best_values[list(best_values.keys())[idx_best]] = loss[idx_best]\n to_save = True\n if to_save:\n path_save = os.path.join(weights_dir, ''.join([\n net,\n '_MAE', str(round(loss[0], 3)), '_RMSE', str(round(loss[1], 3)),\n '_SFN', str(round(loss[2], 3)), '_MAPE', str(round(loss[3], 3)),\n '_epoch', str(epoch + 1), '-', str(idx_val), '.hdf5'\n ]))\n # model.save_weights(path_save)\n to_save = False\n returnlosses.append([epoch, data_part, loss[0]])\n\n # Progress panel\n time_consuming = time() - time_start\n print('In epoch {}, with MAE-RMSE-SFN-MAPE={}, time consuming={}m-{}s\\r'.format(\n epoch, np.round(np.array(losses)[-1, :], 
2),\n int(time_consuming/60), int(time_consuming-int(time_consuming/60)*60)\n ))\n if epoch % 10 == 0:\n tf.keras.models.save_model(model, model_dir+\"modelbackup.h5\")\n print(\"temporary model backup saved\")\n\n print(\"FINISHED TRAINING\")\n #save_mod(model, model_dir + \"weights/model_A_weights.h5\", model_dir + \"/model.json\")\n #store_dir = \"bestmodel.h5\"\n tf.keras.models.save_model(model, model_dir + \"bestmodel.h5\")\n print(\"Saved model to \", model_dir+\"bestmodel.h5\")\n lossesarray = np.array(returnlosses)\n np.savetxt('losses.txt', lossesarray, delimiter = ',')\n print(\"CHRISTIAN YOU CAN EXIT THE THING NOW\")\n\n\n #def save_mod(model , str1):# , str2):\n #austen edit to try to get model to save differently\n #model.save_weights(str1)\n\n #model_json = model.to_json()\n\n #with open(str2, \"w\") as json_file:\n # json_file.write(model_json)\n\ndef init_weights_vgg(model, model_dir):\n #vgg = VGG16(weights='imagenet', include_top=False)\n # print(model_dir)\n # EDIT BY AUSTEN--the following block is replaced by...\n # json_file = open(model_dir + 'VGG_16.json', 'r')\n # loaded_model_json = json_file.read()\n # json_file.close()\n # loaded_model = model_from_json(loaded_model_json)\n # loaded_model.load_weights(model_dir + \"weights/VGG_16.h5\")\n #\n # vgg = loaded_model\n\n # THIS!\n vgg = VGG16(weights='imagenet', include_top=False)\n\n\n vgg_weights=[]\n for layer in vgg.layers:\n if('conv' in layer.name):\n vgg_weights.append(layer.get_weights())\n\n #AUSTEN EDIT TO FREEZE VGG\n # vgg.trainable = False\n\n offset = 0\n i = 0\n while(i < 10):\n if('conv' in model.layers[i + offset].name):\n model.layers[i + offset].set_weights(vgg_weights[i])\n i = i + 1\n else:\n offset=offset+1\n # # Following line is an Austen Edit\n # model.layers[i+offset].trainable = False\n\n return (model)\n\ndef euclidean_distance_loss(y_true, y_pred):\n # Euclidean distance as a measure of loss (Loss function)\n return K.sqrt(K.sum(K.square(y_pred - y_true), axis=-1))\n\n# Neural network model : VGG + Conv\ndef austensmodel2(model_dir):\n #Variable Input Size\n rows = None\n cols = None\n\n #Batch Normalisation option\n\n batch_norm = 0\n kernel = (3, 3)\n init = RandomNormal(stddev=0.01)\n model = Sequential()\n\n # Custom VGG:\n if(batch_norm):\n model.add(Conv2D(64, kernel_size = kernel, input_shape = (rows,cols,3),activation = 'relu', padding='same'))\n model.add(BatchNormalization())\n model.add(Conv2D(64, kernel_size = kernel,activation = 'relu', padding='same'))\n model.add(BatchNormalization())\n model.add(MaxPooling2D(strides=2))\n model.add(Conv2D(128,kernel_size = kernel, activation = 'relu', padding='same'))\n model.add(BatchNormalization())\n model.add(Conv2D(128,kernel_size = kernel, activation = 'relu', padding='same'))\n model.add(BatchNormalization())\n model.add(MaxPooling2D(strides=2))\n model.add(Conv2D(256,kernel_size = kernel, activation = 'relu', padding='same'))\n model.add(BatchNormalization())\n model.add(Conv2D(256,kernel_size = kernel, activation = 'relu', padding='same'))\n model.add(BatchNormalization())\n model.add(Conv2D(256,kernel_size = kernel, activation = 'relu', padding='same'))\n model.add(BatchNormalization())\n model.add(MaxPooling2D(strides=2))\n model.add(Conv2D(512, kernel_size = kernel,activation = 'relu', padding='same'))\n model.add(BatchNormalization())\n model.add(Conv2D(512, kernel_size = kernel,activation = 'relu', padding='same'))\n model.add(BatchNormalization())\n # model.add(Conv2D(512, kernel_size = kernel,activation = 'relu', 
padding='same'))\n # model.add(BatchNormalization())\n\n else:\n model.add(Conv2D(64, kernel_size = kernel,activation = 'relu', padding='same',input_shape = (rows, cols, 3), kernel_initializer = init))\n model.add(Conv2D(64, kernel_size = kernel,activation = 'relu', padding='same', kernel_initializer = init))\n model.add(MaxPooling2D(strides=2))\n model.add(Conv2D(128,kernel_size = kernel, activation = 'relu', padding='same', kernel_initializer = init))\n model.add(Conv2D(128,kernel_size = kernel, activation = 'relu', padding='same', kernel_initializer = init))\n model.add(MaxPooling2D(strides=2))\n model.add(Conv2D(256,kernel_size = kernel, activation = 'relu', padding='same', kernel_initializer = init))\n model.add(Conv2D(256,kernel_size = kernel, activation = 'relu', padding='same', kernel_initializer = init))\n model.add(Conv2D(256,kernel_size = kernel, activation = 'relu', padding='same', kernel_initializer = init))\n model.add(MaxPooling2D(strides=2))\n model.add(Conv2D(512, kernel_size = kernel,activation = 'relu', padding='same', kernel_initializer = init))\n model.add(Conv2D(512, kernel_size = kernel,activation = 'relu', padding='same', kernel_initializer = init))\n # model.add(Conv2D(512, kernel_size = kernel,activation = 'relu', padding='same', kernel_initializer = init))\n\n last = Conv2D(512, kernel_size = kernel,activation = 'relu', padding='same')\n model.add(last)\n # pool1 = tf.keras.layers.AveragePooling2D(pool_size=(1,1), padding='same')(last)\n pool2 = tf.keras.layers.AveragePooling2D(pool_size=(2,2), padding='same')(last)\n pool3 = tf.keras.layers.AveragePooling2D(pool_size=(3,3), padding='same')(last)\n pool4 = tf.keras.layers.AveragePooling2D(pool_size=(6,6), padding='same')(last)\n # conv1 = Conv2D(1, kernel_size = (1,1), activation = 'relu', padding='same', kernel_initializer = init)(pool1)\n conv1 = Conv2D(1, kernel_size = (1,1), activation = 'relu', padding='same', kernel_initializer = init)(last)\n conv2 = Conv2D(1, kernel_size = (1,1), activation = 'relu', padding='same', kernel_initializer = init)(pool2)\n conv3 = Conv2D(1, kernel_size = (1,1), activation = 'relu', padding='same', kernel_initializer = init)(pool3)\n conv4 = Conv2D(1, kernel_size = (1,1), activation = 'relu', padding='same', kernel_initializer = init)(pool4)\n\n up2 = tf.keras.layers.UpSampling2D(size=(2, 2), interpolation='nearest')(conv2)\n up3 = tf.keras.layers.UpSampling2D(size=(3, 3), interpolation='nearest')(conv3)\n up4 = tf.keras.layers.UpSampling2D(size=(6, 6), interpolation='nearest')(conv4)\n\n sub1 = keras.layers.Subtract()([last, conv1])\n sub2 = keras.layers.Subtract()([last, up2])\n sub3 = keras.layers.Subtract()([last, up3])\n sub4 = keras.layers.Subtract()([last, up4])\n\n second_conv1 = Conv2D(1, kernel_size = (1,1), activation = 'relu', padding='same', kernel_initializer = init)(sub1)\n second_conv2 = Conv2D(1, kernel_size = (1,1), activation = 'relu', padding='same', kernel_initializer = init)(sub2)\n second_conv3 = Conv2D(1, kernel_size = (1,1), activation = 'relu', padding='same', kernel_initializer = init)(sub3)\n second_conv4 = Conv2D(1, kernel_size = (1,1), activation = 'relu', padding='same', kernel_initializer = init)(sub4)\n\n mult1 = keras.layers.Multiply()([conv1, second_conv1])\n mult2 = keras.layers.Multiply()([up2, second_conv2])\n mult3 = keras.layers.Multiply()([up3, second_conv3])\n mult4 = keras.layers.Multiply()([up4, second_conv4])\n\n weighted = keras.layers.Add()([mult1, mult2, mult3, mult4])\n conc = tf.keras.layers.concatenate(inputs = [weighted, 
last])\n\n #Conv2D\n backend = Conv2D(512, (3, 3), activation='relu', dilation_rate = 2, kernel_initializer = init, padding = 'same')(conc)\n model.add(backend)\n model.add(Conv2D(512, (3, 3), activation='relu', dilation_rate = 2, kernel_initializer = init, padding = 'same'))\n model.add(Conv2D(512, (3, 3), activation='relu', dilation_rate = 2, kernel_initializer = init, padding = 'same'))\n model.add(Conv2D(256, (3, 3), activation='relu', dilation_rate = 2, kernel_initializer = init, padding = 'same'))\n model.add(Conv2D(128, (3, 3), activation='relu', dilation_rate = 2, kernel_initializer = init, padding = 'same'))\n model.add(Conv2D(64, (3, 3), activation='relu', dilation_rate = 2, kernel_initializer = init, padding = 'same'))\n model.add(Conv2D(1, (1, 1), activation='relu', dilation_rate = 1, kernel_initializer = init, padding = 'same'))\n\n adam_optimizer = Adam(lr=1e-5)\n sgd = SGD(lr = 1e-7, decay = (5*1e-4), momentum = 0.95)\n model.compile(optimizer=adam_optimizer, loss=\"MSE\", metrics=['mse'])\n\n model = init_weights_vgg(model, model_dir)\n\n return model\n\ndef eval_loss(model, x, y, quality=False):\n preds, DM, GT = [], [], []\n losses_SFN, losses_MAE, losses_MAPE, losses_RMSE = [], [], [], []\n for idx_pd in range(x.shape[0]):\n pred = model.predict(np.array([x[idx_pd]]))\n preds.append(np.squeeze(pred))\n DM.append(np.squeeze(np.array([y[idx_pd]])))\n GT.append(round(np.sum(np.array([y[idx_pd]])))) # To make sure the GT is an integral value\n print(len(preds), len(DM), len(GT))\n for idx_pd in range(len(preds)):\n losses_SFN.append(np.mean(np.square(preds[idx_pd] - DM[idx_pd]))) # mean of Frobenius norm\n losses_MAE.append(np.abs(np.sum(preds[idx_pd]) - GT[idx_pd]))\n losses_MAPE.append(np.abs(np.sum(preds[idx_pd]) - GT[idx_pd]) / GT[idx_pd])\n losses_RMSE.append(np.square(np.sum(preds[idx_pd]) - GT[idx_pd]))\n\n loss_SFN = np.sum(losses_SFN)\n loss_MAE = np.mean(losses_MAE)\n loss_MAPE = np.mean(losses_MAPE)\n loss_RMSE = np.sqrt(np.mean(losses_RMSE))\n if quality:\n psnr, ssim = [], []\n for idx_pd in range(len(preds)):\n data_range = max([np.max(preds[idx_pd]), np.max(DM[idx_pd])]) - min([np.min(preds[idx_pd]), np.min(DM[idx_pd])])\n psnr_ = compare_psnr(preds[idx_pd], DM[idx_pd], data_range=data_range)\n ssim_ = compare_ssim(preds[idx_pd], DM[idx_pd], data_range=data_range)\n psnr.append(psnr_)\n ssim.append(ssim_)\n return loss_MAE, loss_RMSE, loss_SFN, loss_MAPE, np.mean(psnr), np.mean(ssim)\n return loss_MAE, loss_RMSE, loss_SFN, loss_MAPE\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"code/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":16702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"134372877","text":"import matplotlib.pyplot as plt\nfig,ax = plt.subplots()\nimport pandas as pd\n\n\ndata= pd.read_csv(\"GBvideos.csv\")\nx = data['channel_title'].head(3)\ny1 = data['views'].head(3)\ny2 = data['likes'].head(3)\nax.plot(x,y1)\nax.plot(x,y2)\nax.plot(x,y1, marker=\"v\", linestyle=\"--\", color=\"r\")\nplt.show()\n","sub_path":"Market1.py","file_name":"Market1.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"375216126","text":"from bs4 import BeautifulSoup\nimport requests\nimport csv\n\nsource = 'https://www.gadgetbytenepal.com/category/mobile-price-in-nepal/'\n\n#the page rejects GET requests that do not identify a User-Agent\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 
10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'}\n\n# soup = BeautifulSoup(source, 'lxml')\ntry:\n url = requests.get(source, headers=headers)\nexcept requests.exceptions.RequestException as e:\n print(e)\n exit()\n\n#entire html\nsoup = BeautifulSoup(url.text, \"html.parser\")\n# print(soup.prettify())\n\n#filtering / parsing the needed sections\n# m_name = soup.find('div', class_='td-category-description')\n\n#getting mobile names from site\n# name = soup.find('table')\n# mobile_name = name.a.text\n\n#getting price\n# name = soup.find('table', attrs={'class': 'td:last-child'})\n# name = soup('tr')[-1]\n# mobile_price = name.text\n\nmobiles = soup.find('div', class_='td-category-description')\n\ncsv_file = open('gadgetbyte.csv', 'w')\n\ncsv_writer = csv.writer(csv_file)\ncsv_writer.writerow(['Mobile Name', 'Price'])\n\nfor mobile in mobiles.find_all('td'):\n # print(mobile.text) \n # print(mobile.next_sibling)\n mobile_name = mobile.text\n mobile_price = mobile.next_sibling\n\n print(mobile_name)\n print(mobile_price)\n \n csv_writer.writerow([mobile_name, mobile_price])\n\ncsv_file.close()\n# l = []\n# for tr in table_rows:\n# td = tr.find_all('td')\n# row = [tr.text for tr in td]\n# l.append(row)\n\n#mobile name and cost - 2ndway\n#name of brand\n# mobile_src = soup.find_all('td')\n# mobile = mobile_src.renderContents()\n# print(mobile)\n# brand_name = mobile.strip()\n\n# mobile_prc = soup.find(\"td\").find_next_sibling(\"td\")\n# mob = mobile_prc.renderContents()\n# mobile_price = mob.strip()\n\n# mobile_desc = [ mobile, mob ]\n\n# for mobile in mobile_desc:\n# print (mobile)\n \n\n","sub_path":"scrapping_gadgetbyte.py","file_name":"scrapping_gadgetbyte.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"159076652","text":"#!{PYTHON}\n\nfrom multiply_inference_engine import create_kaska_s1_inference_output_files\n\nimport logging\nimport sys\nimport yaml\n\nscript_progress_logger = logging.getLogger('ScriptProgress')\nscript_progress_logger.setLevel(logging.INFO)\nscript_progress_formatter = logging.Formatter('%(levelname)s:%(name)s:%(message)s')\nscript_progress_logging_handler = logging.StreamHandler()\nscript_progress_logging_handler.setLevel(logging.INFO)\nscript_progress_logging_handler.setFormatter(script_progress_formatter)\nscript_progress_logger.addHandler(script_progress_logging_handler)\n\n# extract directory names from input arguments\nconfiguration_file = sys.argv[1]\nstart_date = sys.argv[2]\nstop_date = sys.argv[3]\ns1_stack_for_date_dir = sys.argv[4]\ns1_priors_dir = sys.argv[5]\noutput_dir = sys.argv[6]\n\n# setup parameters\nwith open(sys.argv[1]) as f:\n parameters = yaml.load(f)\nroi = parameters['General']['roi']\nspatial_resolution = parameters['General']['spatial_resolution']\nvariables = []\nfor model_dict in parameters['Inference']['forward_models']:\n if model_dict['type'] == 'kaska' and model_dict['data_type'] == 'Sentinel-1':\n requested_model_parameters = model_dict['output_parameters']\n for requested_model_parameter in requested_model_parameters:\n if requested_model_parameter not in variables:\n variables.append(requested_model_parameter)\n\nscript_progress_logger.info('0-100')\ncreate_kaska_s1_inference_output_files(s1_stack_file_dir=s1_stack_for_date_dir, \n priors_dir=s1_priors_dir, \n output_directory=output_dir, \n parameters=variables,\n roi=roi,\n spatial_resolution=spatial_resolution\n 
)\nscript_progress_logger.info('100-100')\n","sub_path":"multiply_ui/server/resources/scripts/create_s1_kaska_inference_output_files.py","file_name":"create_s1_kaska_inference_output_files.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"344085967","text":"from guizero import App, TextBox, PushButton, Text, info\napp = App()\n\n# Function definitions for your events go here.\ndef btn_go_clicked():\n info(\"Greetings\",\"Hello, \" + txt_name.value)\n\ndef btn_go2_clicked():\n info(\"Greetings\",\"Hello, \" + txt_name.value + \" \" + animal_name.value)\n\n# Your GUI widgets go here\nlbl_name = Text(app, text=\"Hello. What's your name?\") # this is the first text box asking for your name\ntxt_name = TextBox(app) # this is an empty box where you can enter your name\nlbl_animal = Text(app, text=\"Hello. What's your animal?\") \nanimal_name = TextBox(app)\nbtn_go = PushButton(app, command=btn_go_clicked, text=\"Done\") # this is the Done button - when you press it the function is called\nbtn_go2 = PushButton(app, command=btn_go2_clicked, text=\"Done\") # this is the Done button - when you press it the function is called\n\n# Show the GUI on the screen\napp.display()\n","sub_path":"eventgui2.py","file_name":"eventgui2.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"6253318","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\ntry:\n import queue\nexcept:\n import Queue as queue\nfrom threading import currentThread, Thread\nfrom contextlib import contextmanager\nfrom time import sleep\n\nstop_event = object()\n\n\nclass Pool(object):\n\n def __init__(self, max_create_num, task_queue_len=None):\n self.queue = queue.Queue(task_queue_len) if task_queue_len else queue.Queue() # task queue\n self.max_create_num = max_create_num # maximum number of threads that may be created\n self.stop = False # force-quit flag\n self.end = False # work-finished flag\n self.idle_list = [] # idle thread list (the threads currently waiting)\n self.create_list = [] # list of created threads\n\n # execute one job\n def start(self, func, args, callback=None):\n if not self.end: # work is not finished yet\n # if len(self.idle_list) == 0 and len(self.create_list) < self.max_create_num:\n if len(self.create_list) < self.max_create_num:\n self.create_thread() # thread count is below the max, so create a new worker\n task = [func, args, callback]\n self.queue.put(task) # add the task to the task queue\n return\n\n # create one thread\n def create_thread(self):\n t = Thread(target=self.run)\n t.start()\n\n # worker loop: the thread executes tasks\n def run(self):\n current_thread = currentThread()\n self.create_list.append(currentThread()) # add the current thread to the created list\n task = self.queue.get() # fetch a pending task\n while task != stop_event:\n func, args, callback = task # unpack the task and execute it\n try:\n res = func(*args)\n success = True\n except:\n res = None\n success = False # the call failed, so report failure to the callback\n if callback: # the task has finished, run the callback\n try:\n callback(res, success)\n except:\n pass\n # self.idle_list.append(current_thread) ->\n # task = self.queue.get() ->\n # self.idle_list.remove(current_thread)\n # with self.status(current_thread):\n # task = stop_event if self.stop else self.queue.get()\n task = stop_event if self.stop else self.queue.get()\n self.create_list.remove(current_thread)\n\n # wait for the tasks to finish, then shut the threads down\n def wait(self):\n self.end = True\n create_list_size = len(self.create_list)\n while create_list_size: # add stop events to the task queue (one per created thread)\n self.queue.put(stop_event)\n create_list_size -= 1\n\n # shut the threads down whether or not all tasks have finished\n def kill(self):\n self.stop = True\n while self.create_list:\n self.queue.put(stop_event)\n\n # track the currently idle threads\n # @contextmanager\n # def status(self, current_thread):\n # self.idle_list.append(current_thread)\n # try:\n # yield # yielded value -> __enter__ return value -> the b in with a as b\n # finally: # runs when leaving the with block\n # self.idle_list.remove(current_thread)\n\n\ndef callback():\n pass\n\n\ndef action(val):\n print(val)\n\npool = Pool(10)\nfor i in range(100000):\n pool.start(action, (i,), callback)\nsleep(0.0001)\nprint(len(pool.create_list))\n\n","sub_path":"ThreadingPool/Thread.py","file_name":"Thread.py","file_ext":"py","file_size_in_byte":3637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"648885120","text":"import tensorflow as tf\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom IPython import get_ipython\nipy = get_ipython()\nif ipy is not None:\n ipy.run_line_magic('matplotlib','inline')\n\nfile_path = 'weather.csv'\ndata=pd.read_csv(file_path, delimiter=',',header=0,skipinitialspace=True)\ndata.head(24)\n\nsolar = np.array(data['Solar'])\nnum_periods = 24\nf_horizon = 1\nx_data = solar[:(len(solar)-(num_periods*2))]\nx_batches = x_data.reshape(-1, num_periods, 1)\ny_data = solar[f_horizon:(len(solar)-(num_periods*2))+f_horizon]\ny_batches = y_data.reshape(-1, num_periods, 1)\nprint(y_batches.shape)\n\ndef test_data(series, forecast, num):\n testX = solar[-(num + forecast):][:num].reshape(-1, num_periods, 1)\n testY = solar[-(num):].reshape(-1, num_periods, 1)\n return testX, testY\nX_test, Y_test = test_data(solar, f_horizon, num_periods*2)\nprint(X_test.shape)\n\ntf.reset_default_graph()\ninputs = 1\nrnn_size = 100\noutput = 1\nlearning_rate=0.001\ndropout_keep_prob = tf.placeholder(tf.float32)\n\nX = tf.placeholder(tf.float32, [None, num_periods, 1])\nY = tf.placeholder(tf.float32, [None, num_periods, 1])\n\nrnn_cells=tf.contrib.rnn.BasicRNNCell(num_units=rnn_size, activation=tf.nn.relu)\nrnn_output, states = tf.nn.dynamic_rnn(rnn_cells, X, dtype=tf.float32)\n\n\noutput=tf.reshape(rnn_output, [-1, rnn_size])\nlogit=tf.layers.dense(output, 1, name=\"softmax\")\n\noutputs=tf.reshape(logit, [-1, num_periods, 1])\nprint(logit)\n\nloss = tf.reduce_sum(tf.square(outputs - Y))\n\naccuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logit, 1), tf.cast(Y, tf.int64)), tf.float32))\n\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\ntrain_step=optimizer.minimize(loss)\n\ninit=tf.global_variables_initializer()\n\nepochs = 1000\n\nsess = tf.Session()\ninit = tf.global_variables_initializer()\nsess.run(init)\nsaver = tf.train.Saver()\n\nfor epoch in range(epochs):\n train_dict = {X: x_batches, Y: y_batches, dropout_keep_prob:0.5}\n sess.run(train_step, feed_dict=train_dict)\ny_pred=sess.run(outputs, feed_dict={X: X_test})\nsave_path = saver.save(sess, \"models/model2.ckpt\")\n#print(y_pred)\nprint(y_pred.shape)\ny_predict=y_pred.reshape(-1)\n# print len(y_predict)\n # for abc in range(len(y_predict)):\n # if y_predict[1,abc] <0:\n\t# y_predict[1,abc]=0\n#print(y_predict.shape)\n#y_predict=y_predict.reshape(1,48)\n\nfor i in range(len(y_predict)):\n\tabc = max(y_predict[i],0)\n\ty_predict[i] = abc\n\noutput_array = np.array(y_predict)\nnp.savetxt(\"my_output_file2.csv\", output_array, delimiter=\",\")\nprint(y_predict.shape)\nprint(y_predict)\n\nimport csv\ncsvData = y_predict\nwith open('predict.csv', 'a') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerow(csvData)\ncsvFile.close()\n\nfig=plt.figure()\nplt.title(\"Solar\", fontsize=14)\nplt.plot(pd.Series(np.ravel(Y_test)), \"bo\", markersize=10, 
label=\"Actual\")\nplt.plot(pd.Series(np.ravel(y_pred)), \"r.\", markersize=10, label=\"Forecast\")\nplt.legend(loc=\"upper left\")\nplt.xlabel(\"Time Periods\")\nplt.show()\nfig.savefig('SolarSS.png', \n facecolor=fig.get_facecolor(), \n edgecolor=fig.get_edgecolor(),\n dpi = fig.get_dpi())\n\nwith tf.Session() as sess:\n # Restore variables from disk.\n saver.restore(sess, \"models/model2.ckpt\")\n predict=sess.run(outputs, feed_dict={X: X_test})\n#print(predict)\n\n#row = predict\n#with open('predict.csv', 'a') as csvFile:\n# writer = csv.writer(csvFile)\n# writer.writerow(row)\n#csvFile.close()","sub_path":"luận văn 1705/rnn-weather2.py","file_name":"rnn-weather2.py","file_ext":"py","file_size_in_byte":3424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"336650063","text":"\nfrom django.conf.urls import url, include\nfrom . import views\nurlpatterns = [\n url(r'^$', views.IndexView.as_view(), name='index'),\n url(r'^filtered_images/$', views.FilterView.as_view(), name='filtered_images'),\n url(r'^images/(?P[0-9]+)/$', views.OneImageView.as_view(), name='one_image'),\n url(r'^add_image/$', views.EditImageView.as_view(), name='add_image'),\n url(r'^edit_image/(?P[0-9]+)/$', views.EditImageView.as_view(), name='edit_image'),\n url(r'^remove/(?P[0-9]+)/$', views.RemoveImage.as_view(), name='remove'),\n # url(r'^login/$', views.LoginView.as_view(), name='login'),\n]\n","sub_path":"main_site/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"400986642","text":"import numpy as np\nfrom scipy.integrate import quad\nfrom scipy.interpolate import interp1d\nimport scipy.optimize\nimport matplotlib.pyplot as plt\nimport os\n\nclass Model:\n def __init__(self,minW = 7E-6,maxW = 14E-6,minT=273.15,maxT=573.15,responsivityPath = \"spectralResponseTau.txt\",gainPath = \"tauSignalVsTemp.txt\"):\n self.nLookupBins = 5000\n self.c1 = 1.1910E-16\n self.c2 = 0.014388\n self.minW = minW\n self.maxW = maxW\n self.minT = minT\n self.maxT = maxT\n \n self.dir = os.path.dirname(os.path.realpath(__file__))\n if not responsivityPath == None:\n self.responsivityPath = os.path.join(self.dir,responsivityPath)\n else:\n self.responsivityPath = None\n self.gainPath = os.path.join(self.dir,gainPath)\n \n self.responsivityLUT = None #get the responsivity of a detector at a given wavelength\n self.radianceLUT = None #get the radiance over the detector wavelength range at a given temperature\n self.temperatureLUT = None #get the temperature of a blackbody given a certain radiance\n self.digitalLevelLUT = None #get the digital level of the detector at a given radiance\n self.radianceDLLUT = None #get the radiance that corresponds to a given digital level \n self.generateLUTs()\n \n def generateLUTs(self):\n #create interpolation function for detector responsivity as a function of wavelength\n if not self.responsivityPath == None:\n responsivityData = np.genfromtxt(self.responsivityPath)\n self.responsivityLUT = interp1d(responsivityData[:,0]*1E-6, responsivityData[:,1],fill_value='extrapolate')\n wavelength = np.arange(self.minW,self.maxW,0.1E-6)\n print(\"LUT 1/5 generated\")\n #create interpolation function for radiance as a function of temperature and vice versa\n step = (self.maxT-self.minT)/self.nLookupBins\n temperature = np.arange(self.minT,self.maxT,step)\n radiance = self.blackbodyRadiance(temperature)\n self.radianceLUT = 
interp1d(temperature,radiance,fill_value='extrapolate')\n print(\"LUT 2/5 generated\")\n self.temperatureLUT = interp1d(radiance,temperature,fill_value='extrapolate')\n print(\"LUT 3/5 generated\")\n #create interpolation function for DL as a function of radiance and vice versa\n minSignal,maxSignal = 5000.0,12000.0\n step = (maxSignal-minSignal)/self.nLookupBins\n digitalLevel = np.arange(minSignal,maxSignal,step)\n gain,offset = self.getGainAndOffset()\n radiance = (digitalLevel-offset)/gain #DL = gain*radiance + offset\n self.digitalLevelLUT = interp1d(radiance,digitalLevel,fill_value='extrapolate')\n print(\"LUT 4/5 generated\")\n self.radianceDLLUT = interp1d(digitalLevel,radiance,fill_value='extrapolate') \n print(\"LUT 5/5 generated\")\n\n def getGainAndOffset(self):\n data = np.genfromtxt(self.gainPath)\n bbTemp = data[:,0] + 273.15\n radiance = self.blackbodyRadiance(bbTemp)\n digitalLevel = data[:,1]\n par,cov = scipy.optimize.curve_fit(self.linearFit,digitalLevel,radiance,p0=(1.0,1.0))\n gain,offset = par\n return gain,offset\n \n def blackbodyRadiance(self,T):\n #Radiance from a blackbody at a temperature T, scaled by the \n L = 0\n if type(T) is float:\n return self.integrateFunction(T)\n else:\n arr = np.zeros(T.size)\n for i in range(T.size):\n arr[i] = self.integrateFunction(T[i])\n return arr\n \n def integrateFunction(self,T):\n L = quad(self.spectralRadiance, self.minW, self.maxW, args=T)[0]\n \"\"\"\n nSections = 2\n step = (self.maxW-self.minW)/nSections\n L = 0\n\n for i in range(nSections):\n w1 = self.minW + i*step\n w2 = w1 + step\n L+= quad(self.spectralRadiance, w1, w2, args=T)[0]\n \"\"\"\n return L\n \n def spectralRadiance(self,wavelength,T):\n #Spectral radiance emitted by a blackbody of temperature T\n #at a given wavelength (Scaled by detector response)\n L = self.c1/(wavelength**5)/(np.exp(self.c2/wavelength/T)-1)/1E10\n r = 1.0\n if not self.responsivityLUT == None:\n r = self.responsivityLUT(wavelength)\n return L*r\n \n \n def linearFit(self,x,m,c):\n return m*x + c\n \n def testResponsivity(self):\n wavelength = np.arange(self.minW,self.maxW,0.1E-6)\n responsivity = self.responsivityLUT(wavelength)\n plt.plot(wavelength,responsivity)\n plt.ylim(ymin = 0, ymax = 1)\n plt.title(\"Responsivity vs wavelength\")\n plt.show()\n \n def testRadiance(self):\n temperature = np.arange(self.minT,self.maxT,0.1)\n radiance = self.radianceLUT(temperature)\n plt.plot(temperature,radiance)\n plt.title(\"Radiance vs temperature\")\n plt.show()\n \n def testDigitalLevel(self):\n digitalLevel = np.arange(5000,12000)\n radiance = self.radianceDLLUT(digitalLevel)\n plt.plot(radiance,digitalLevel)\n plt.title(\"Digital level vs radiance\") \n plt.show() \n \n \n def planckIntegral2(self,T):\n #https://people.physics.tamu.edu/krisciunas/planck.pdf\n #=prefix*integral [x^3/(e^x - 1)]\n #x = hf/kT\n #prefix = pi*(2h/c**2)*(k*T/h)**4\n pass\n\n","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":5502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"487454717","text":"# create a 300x300 canvas.\n# fill it with a checkerboard pattern.\nfrom tkinter import *\n\ndef checkerboard():\n\n root = Tk()\n\n w = 300\n h = 300\n\n canvas = Canvas(root, width=w, height=h)\n canvas.pack()\n\n m = w/2\n a = w//8\n b = a\n\n for i in range(8):\n for j in range(8):\n if i%2 == j%2:\n canvas.create_polygon(i*a, j*b, (i+1)*a, 0+j*b, (i+1)*a, (j+1)*b, i*a, (j+1)*b, fill='black')\n\n 
root.mainloop()\n\ncheckerboard()\n","sub_path":"week-04/day-3/checkerboard.py","file_name":"checkerboard.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"83165839","text":"import random\nimport pygame\nfrom pygame.locals import QUIT, K_LEFT, K_RIGHT, K_UP, K_DOWN\nimport data.dataUtils as data\nfrom square import Square\nfrom snake import Snake\n\nclass Game:\n width = data.getConfig(\"width\")\n rows = data.getConfig(\"rows\")\n square_color = data.getConfig(\"squareColor\")\n initial_snake_pos = data.getConfig(\"initialSnakePos\")\n food_color = data.getConfig(\"foodColor\")\n line_color = data.getConfig(\"lineColor\")\n board_color = data.getConfig(\"boardColor\")\n\n def __init__(self, ai_mode=False):\n '''snake game'''\n self.snake = Snake(self.initial_snake_pos, self.square_color)\n self.food = Square(self.random_food_pos(), self.food_color)\n self.ai_mode = ai_mode\n self.surface = pygame.display.set_mode((self.width, self.width))\n self.clock = pygame.time.Clock()\n\n def get_game_map_rows(self):\n '''return rows config'''\n return self.rows\n\n def get_game_map_size(self):\n return (self.rows, self.rows)\n\n def get_snake_head_pos(self):\n '''return snake head pos [x, y]'''\n return self.snake.head.pos\n\n def get_snake_full_pos(self):\n '''return array for each of the snake body pos [x, y]'''\n return [body.pos for body in self.snake.body]\n\n def get_snake_dir(self):\n '''return snake dir [x, y]'''\n return self.snake.dir\n\n def get_food_pos(self):\n '''return food pos [x, y]'''\n return self.food.pos\n\n def get_score(self):\n '''return len of the snake body'''\n return len(self.snake.body)\n\n def is_snake_alive(self):\n '''return True if the snake is alive'''\n return self.snake.alive\n\n def kill_snake(self):\n '''set snake.alive to False'''\n self.snake.alive = False\n\n def move_snake_up(self):\n '''Move the snake up'''\n self.snake.move_snake_up()\n\n def move_snake_down(self):\n '''Move the snake down'''\n self.snake.move_snake_down()\n\n def move_snake_left(self):\n '''Move the snake left'''\n self.snake.move_snake_left()\n\n def move_snake_right(self):\n '''Move the snake right'''\n self.snake.move_snake_right()\n\n def draw_grid(self):\n '''draw visual of the game grid'''\n size_between = self.width // self.rows\n grid_x = 0\n grid_y = 0\n for l in range(self.rows):\n grid_x = grid_x + size_between\n grid_y = grid_y + size_between\n pygame.draw.line(self.surface, self.line_color, (grid_x, 0), (grid_x, self.width))\n pygame.draw.line(self.surface, self.line_color, (0, grid_y), (self.width, grid_y))\n\n def redraw_window(self):\n '''draw visual of the full game board'''\n self.surface.fill(self.board_color)\n self.snake.draw(self.surface)\n self.food.draw(self.surface)\n self.draw_grid()\n pygame.display.update()\n return pygame.surfarray.array3d(pygame.display.get_surface())\n\n def random_food_pos(self):\n '''return a valid random food position'''\n positions = self.snake.body\n while True:\n pos = [random.randrange(self.rows), random.randrange(self.rows)]\n if len(list(filter(lambda z: z.pos == pos, positions))) > 0:\n continue\n break\n return pos\n\n def update(self):\n '''update the game state'''\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.display.quit()\n pygame.quit()\n if self.snake.body[0].pos == self.food.pos:\n self.snake.add_cube()\n self.food = Square(self.random_food_pos(), self.food_color)\n if not self.is_snake_alive():\n 
print('Score:', len(self.snake.body))\n return self.redraw_window()\n\n def reset(self):\n '''reset the game'''\n self.snake.reset(data.getConfig(\"initialSnakePos\"))\n self.food = Square(self.random_food_pos(), self.food_color)\n self.snake.alive = True\n\n def quit_game(self):\n pygame.display.quit()\n pygame.quit()\n\n def start(self):\n '''Main loop of the game'''\n while True:\n\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n break\n\n if not self.ai_mode:\n pygame.time.delay(100)\n self.clock.tick(10)\n self.snake.move()\n \n if self.snake.body[0].pos == self.food.pos:\n self.snake.add_cube()\n self.food = Square(self.random_food_pos(), self.food_color)\n\n if not self.is_snake_alive():\n print('Score:', len(self.snake.body))\n self.snake.reset(data.getConfig(\"initialSnakePos\"))\n self.snake.alive = True\n break\n self.redraw_window()\n","sub_path":"snakeQ/gym-foo/gym_foo/envs/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"319028496","text":"from models.models import *\nfrom models.create_modules import CreateModules\nfrom models.b4jproject import B4JProject\nfrom models.sqlite_control import SQLiteControl\nimport argparse\n\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('-f', help='Select the b4j project file')\nparser.add_argument('-db', help='Define Database to open')\nparser.add_argument('-dbp', help='Path to database file')\n\nargs = parser.parse_args()\n\n# print('Args: ')\n# print(args.f)\n# print(args.db)\n# print(args.dbp)\n\nargs.f = \"BE.b4j\"\nproj = B4JProject(load_from_file=args.f)\n# proj.db.print_info()\n\nsqlc = SQLiteControl(proj.db)\ncm = CreateModules(proj.db)\n\n# UPDATE:\n# Table files\n# Tables in database\ntbl = Table()\nfor tbl in proj.db.tables.values():\n # tbl = proj.db.tables[tbl_name]\n cm.update_table_file(tbl)\n sqlc.update_table(tbl, create_if_not_exists=True)\n\n\n# create data access file\nprint('> Creating DataAccess file...')\ncm.create_data_access_file()\nprint('>> Done.')\n\n# update b4j project file\nprint('> Updating B4j project file...')\nproj.update_file(args.f)\nprint('>> Done.')\n\n\n\n# proj = B4JProject(load_from_file='BE.b4j')\n\n# proj.db.print_info()\n\n# sqlc = SQLiteControl(proj.db)\n\n# tbl = proj.db.get_table('Loteamentos')\n\n# # tbl.print_info()\n# sqlc.update_table(tbl, create_if_not_exists=True)\n\n# # sqlc.add_column(tbl, 'Fiador', 'String')\n\n# sqlc.dispose() \n\n\n# db = Database('DBDados')\n# tbl1 = Table(file_name=\"Lote.bas\")\n# tbl2 = Table(file_name=\"Parcela.bas\")\n# tbl3 = Table(file_name='Loteamento.bas')\n\n# proj.db.add_table(tbl1)\n# proj.db.add_table(tbl2)\n# proj.db.add_table(tbl3)\n# proj.update_file('BE.b4j')\n\n# s = 'comando=argumento'\n# cmd, sep, code = s.partition('=')\n# print('{} {}'.format(cmd, code))\n\n# db.add_table(tbl1)\n# db.add_table(tbl2)\n# db.add_table(tbl3)\n\n# cm = CreateModules(db)\n# # cm.update_table_file(tbl1)\n","sub_path":"sqlm/sqlm.py","file_name":"sqlm.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"381896566","text":"from telethon import TelegramClient, events, types\nimport functions as f\nimport json\n\nwith open('config.json') as json_file:\n config = json.load(json_file)\n channels = config['channels']\n channel_ids = f.get_channel_ids(channels)\n client = TelegramClient(config['mobile_number'], 
config['api_id'], config['api_hash'])\n\n\n@client.on(events.NewMessage(chats=channel_ids)) \nasync def _(event):\n message = event.message\n channel = f.find_channel(int(event.message.to_id.channel_id), channels)\n\n if config['functions']['check_bad_words']['status']:\n message.raw_text = f.check_bad_words(message.raw_text, config['functions']['check_bad_words']['bad_words'])\n\n if config['functions']['change_links']['status']:\n message.raw_text = f.change_links(message.raw_text, channel)\n\n if isinstance(event.original_update, types.UpdateNewChannelMessage): \n await client.send_message(channel, message)\n\n\nif __name__ == '__main__':\n with client.start(config['mobile_number']):\n print('Press Ctrl+C to stop this')\n client.run_until_disconnected()\n","sub_path":"channels_agregator.py","file_name":"channels_agregator.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"76304255","text":"\"\"\"Helper models for CAP data models.\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nimport json\nfrom datetime import datetime\nfrom os.path import dirname, join\n\nfrom flask import current_app\nfrom flask_babelex import gettext\nfrom invenio_search import Query, current_search_client\nfrom jsonref import JsonRef\nfrom speaklater import make_lazy_gettext\n\n_ = make_lazy_gettext(lambda: gettext)\n\n\nclass ObjectType(object):\n \"\"\"Class to load object types data.\"\"\"\n\n index_id = None\n index_internal_id = None\n\n @classmethod\n def _load_data(cls):\n \"\"\"Load object types for JSON data.\"\"\"\n if cls.index_id is None:\n with open(join(dirname(__file__), \"data\", \"objecttypes.json\")) \\\n as fp:\n data = json.load(fp)\n\n cls.index_internal_id = {}\n cls.index_id = {}\n for objtype in data:\n cls.index_internal_id[objtype['internal_id']] = objtype\n cls.index_id[objtype['id'][:-1]] = objtype\n\n @classmethod\n def _jsonloader(cls, uri, **dummy_kwargs):\n \"\"\"Local JSON loader for JsonRef.\"\"\"\n cls._load_data()\n return cls.index_id[uri]\n\n @classmethod\n def get(cls, value):\n \"\"\"Get object type value.\"\"\"\n cls._load_data()\n try:\n return JsonRef.replace_refs(\n cls.index_internal_id[value],\n jsonschema=True,\n loader=cls._jsonloader)\n except KeyError:\n return None\n\n @classmethod\n def get_by_dict(cls, value):\n \"\"\"Get object type dict with type and subtype key.\"\"\"\n if not value:\n return None\n\n print(\"}}}}}}}}} VALUES:::\")\n print(value)\n if 'subtype' in value:\n internal_id = \"{0}-{1}\".format(value['type'], value['subtype'])\n else:\n internal_id = value['type']\n return cls.get(internal_id)\n","sub_path":"cap/modules/records/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"127863941","text":"import sys\nfrom boto import ec2\n\n__author__ = 'sobbspt'\n\ncredentialProfile = sys.argv[1]\nregion = sys.argv[2]\ntagName = sys.argv[3]\ntagValue = sys.argv[4]\n\n\nec2conn = ec2.connect_to_region(region, profile_name=credentialProfile)\n\nreservations = ec2conn.get_all_instances(filters={\"tag:\"+tagName: tagValue})\n\ninstanceID = []\ni = 0\nfor res in reservations:\n for inst in res.instances:\n if inst.state == \"stopped\":\n ec2conn.start_instances(inst.id)\n i += 1\n\nprint(str(i) +\" instance(s) with tag \"+ tagName+\":\"+tagValue +\" was 
started\")\n","sub_path":"01_start-or-stop-instances/start-instances.py","file_name":"start-instances.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"504035415","text":"# pylint: disable=no-member\n\"\"\"\nPayments Store Service\n\nPaths:\n------\nGET /payments - Returns a list of all of the Payments\nGET /payments/{id} - Returns the Payment with a given id number\nPOST /payments - creates a new Payment record in the database\nPUT /payments/{id} - updates a Payment record in the database\nDELETE /payments/{id} - deletes a Payment record in the database\n\"\"\"\n\nimport sys\nimport logging\nfrom flask import jsonify, request, url_for, make_response, abort\nfrom flask_api import status # HTTP Status Codes\nfrom werkzeug.exceptions import NotFound\n\n# For this example we'll use SQLAlchemy, a popular ORM that supports a\n# variety of backends including SQLite, MySQL, and PostgreSQL\nfrom service.models import Payment, DataValidationError\n\n# Import Flask application\nfrom . import app\n\n######################################################################\n# Error Handlers\n######################################################################\n@app.errorhandler(DataValidationError)\ndef request_validation_error(error):\n \"\"\" Handles Value Errors from bad data \"\"\"\n return bad_request(error)\n\n@app.errorhandler(status.HTTP_400_BAD_REQUEST)\ndef bad_request(error):\n \"\"\" Handles bad requests with 400_BAD_REQUEST \"\"\"\n message = str(error)\n app.logger.warning(message)\n return jsonify(status=status.HTTP_400_BAD_REQUEST,\n error='Bad Request',\n message=message), status.HTTP_400_BAD_REQUEST\n\n@app.errorhandler(status.HTTP_404_NOT_FOUND)\ndef not_found(error):\n \"\"\" Handles resources not found with 404_NOT_FOUND \"\"\"\n message = str(error)\n app.logger.warning(message)\n return jsonify(status=status.HTTP_404_NOT_FOUND,\n error='Not Found',\n message=message), status.HTTP_404_NOT_FOUND\n\n@app.errorhandler(status.HTTP_405_METHOD_NOT_ALLOWED)\ndef method_not_supported(error):\n \"\"\" Handles unsupported HTTP methods with 405_METHOD_NOT_ALLOWED \"\"\"\n message = str(error)\n app.logger.warning(message)\n return jsonify(status=status.HTTP_405_METHOD_NOT_ALLOWED,\n error='Method not Allowed',\n message=message), status.HTTP_405_METHOD_NOT_ALLOWED\n\n@app.errorhandler(status.HTTP_415_UNSUPPORTED_MEDIA_TYPE)\ndef mediatype_not_supported(error):\n \"\"\" Handles unsupported media requests with 415_UNSUPPORTED_MEDIA_TYPE \"\"\"\n message = str(error)\n app.logger.warning(message)\n return jsonify(status=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,\n error='Unsupported media type',\n message=message), status.HTTP_415_UNSUPPORTED_MEDIA_TYPE\n\n@app.errorhandler(status.HTTP_500_INTERNAL_SERVER_ERROR)\ndef internal_server_error(error):\n \"\"\" Handles unexpected server errors with 500_SERVER_ERROR \"\"\"\n message = str(error)\n app.logger.error(message)\n return jsonify(status=status.HTTP_500_INTERNAL_SERVER_ERROR,\n error='Internal Server Error',\n message=message), status.HTTP_500_INTERNAL_SERVER_ERROR\n\n\n######################################################################\n# GET INDEX\n######################################################################\n@app.route('/')\ndef index():\n \"\"\" Root URL response \"\"\"\n return jsonify(name='Payment REST API Service',\n version='1.0',\n paths=url_for('list_payments', _external=True)\n ), status.HTTP_200_OK\n\n######################################################################\n# LIST ALL PAYMENTS\n######################################################################\n@app.route('/payments', methods=['GET'])\ndef list_payments():\n \"\"\" Returns all of the Payments \"\"\"\n app.logger.info('Request for payments list')\n payments = []\n customer_id = request.args.get('customer_id')\n order_id = request.args.get('order_id')\n if customer_id:\n app.logger.info('Request for payments list with customer_id : %s', customer_id)\n payments = Payment.find_by_customer(customer_id)\n elif order_id:\n app.logger.info('Request for payments list with order_id : %s', order_id)\n payments = Payment.find_by_order(order_id)\n else:\n payments = Payment.all()\n\n results = [payment.serialize() for payment in payments]\n return make_response(jsonify(results), status.HTTP_200_OK)\n\n\n######################################################################\n# RETRIEVE A PAYMENT\n######################################################################\n@app.route('/payments/<payments_id>', methods=['GET'])\ndef get_payments(payments_id):\n \"\"\"\n Retrieve a single Payment\n\n This endpoint will return a Payment based on its id\n \"\"\"\n app.logger.info('Request for payment with id: %s', payments_id)\n payment = Payment.find(payments_id)\n if not payment:\n raise NotFound(\"Payment with id '{}' was not found.\".format(payments_id))\n return make_response(jsonify(payment.serialize()), status.HTTP_200_OK)\n\n\n######################################################################\n# ADD A NEW PAYMENT\n######################################################################\n@app.route('/payments', methods=['POST'])\ndef create_payments():\n \"\"\"\n Creates a Payment\n This endpoint will create a Payment based on the data in the body that is posted\n \"\"\"\n app.logger.info('Request to create a payments')\n check_content_type('application/json')\n payment = Payment()\n payment.deserialize(request.get_json())\n payment.save()\n message = payment.serialize()\n location_url = url_for('get_payments', payments_id=payment.id, _external=True)\n return make_response(jsonify(message), status.HTTP_201_CREATED,\n {\n 'Location': location_url\n })\n\n\n######################################################################\n# UPDATE AN EXISTING PAYMENT\n######################################################################\n@app.route('/payments/<payments_id>', methods=['PUT'])\ndef update_payments(payments_id):\n \"\"\"\n Update a Payment\n\n This endpoint will update a Payment based on the body that is posted\n \"\"\"\n app.logger.info('Request to update payment with id: %s', payments_id)\n check_content_type('application/json')\n payment = Payment.find(payments_id)\n if not payment:\n raise NotFound(\"Payment with id '{}' was not found.\".format(payments_id))\n payment.deserialize(request.get_json())\n payment.id = payments_id\n payment.save()\n return make_response(jsonify(payment.serialize()), status.HTTP_200_OK)\n\n\n######################################################################\n# DELETE A PAYMENT\n######################################################################\n@app.route('/payments/<payments_id>', methods=['DELETE'])\ndef delete_payment(payments_id):\n \"\"\"\n Delete a payment\n This endpoint will delete a Payment based on the id specified in the path\n \"\"\"\n app.logger.info('Request to delete payment with id: %s', payments_id)\n payment = Payment.find(payments_id)\n if payment:\n payment.delete()\n return make_response('', status.HTTP_204_NO_CONTENT)\n\n\n######################################################################\n# PERFORM A STATEFUL ACTION\n######################################################################\n@app.route('/payments/<payments_id>/toggle', methods=['PUT'])\ndef toggle_payments_availability(payments_id):\n \"\"\"\n Toggle payment availability\n This toggles whether or not a payment is currently available\n \"\"\"\n app.logger.info('Request to toggle payment availability with id: %s', payments_id)\n payment = Payment.find(payments_id)\n if not payment:\n raise NotFound(\"Payment with id '{}' was not found.\".format(payments_id))\n payment.available = not payment.available\n payment.save()\n return make_response(jsonify(payment.serialize()), status.HTTP_200_OK)\n\n\n######################################################################\n# U T I L I T Y F U N C T I O N S\n######################################################################\n\ndef init_db():\n \"\"\" Initializes the SQLAlchemy app \"\"\"\n global app\n Payment.init_db(app)\n\ndef check_content_type(content_type):\n \"\"\" Checks that the media type is correct \"\"\"\n if request.headers['Content-Type'] == content_type:\n return\n app.logger.error('Invalid Content-Type: %s', request.headers['Content-Type'])\n abort(415, 'Content-Type must be {}'.format(content_type))\n\ndef initialize_logging(log_level=logging.INFO):\n \"\"\" Initializes the default logging to STDOUT \"\"\"\n if not app.debug:\n print('Setting up logging...')\n # Set up default logging for submodules to use STDOUT\n # datefmt='%m/%d/%Y %I:%M:%S %p'\n fmt = '[%(asctime)s] %(levelname)s in %(module)s: %(message)s'\n logging.basicConfig(stream=sys.stdout, level=log_level, format=fmt)\n # Make a new log handler that uses STDOUT\n handler = logging.StreamHandler(sys.stdout)\n handler.setFormatter(logging.Formatter(fmt))\n handler.setLevel(log_level)\n # Remove the Flask default handlers and use our own\n handler_list = list(app.logger.handlers)\n for log_handler in handler_list:\n app.logger.removeHandler(log_handler)\n app.logger.addHandler(handler)\n app.logger.setLevel(log_level)\n app.logger.propagate = False\n app.logger.info('Logging handler established')\n","sub_path":"service/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":9520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"231165705","text":"import torchvision\nimport torch.nn as nn\nfrom functools import partial\n\nvgg_models = {'vgg11': torchvision.models.vgg11,\n 'vgg13': torchvision.models.vgg13,\n 'vgg16': torchvision.models.vgg16,\n 'vgg19': torchvision.models.vgg19,\n 'vgg11_bn': torchvision.models.vgg11_bn,\n 'vgg13_bn': torchvision.models.vgg13_bn,\n 'vgg16_bn': torchvision.models.vgg16_bn,\n 'vgg19_bn': torchvision.models.vgg19_bn}\n\nclass VGG_var_channel(nn.Module):\n def __init__(self, model_type, pre=False, in_channels=2, mode='top'):\n super().__init__()\n\n if model_type not in vgg_models.keys():\n raise ValueError(f\"Model type {model_type} does not match any standard VGG model\")\n\n self.model = vgg_models[model_type](pretrained=pre)\n self.conv1 = list(self.model.features)[0]\n self.conv1 = self.change_channels(self.conv1, in_channels, mode=mode)\n \n def forward(self, x):\n x = self.conv1(x)\n for i,layer in enumerate(self.model.features):\n if i == 0:\n continue\n x = layer(x)\n\n return x\n \n def change_channels(self, conv, num_channels, mode='top'):\n bias = False if conv.bias is None else True
\n \n new_layer = nn.Conv2d(\n in_channels=num_channels,\n out_channels = conv.out_channels,\n kernel_size = conv.kernel_size,\n stride = conv.stride,\n padding = conv.padding,\n bias=bias\n )\n\n # Reduce channels\n if conv.in_channels > num_channels:\n if mode == 'top':\n new_layer.weight.data = conv.weight.data[:,:num_channels,:,:]\n elif mode == 'random':\n import random\n indices = random.sample(range(conv.in_channels),num_channels)\n new_layer.weight.data = conv.weight.data[:, indices,:,:]\n else:\n raise ValueError(f\"Mode {mode} is not valid\")\n \n # Increase channels\n else:\n if mode == 'top':\n new_layer.weight.data[:,:conv.in_channels,:,:] = conv.weight.data\n for i in range(num_channels - conv.in_channels):\n channel = conv.in_channels + i\n new_layer.weight.data[:, channel:channel+1, :, :] = conv.weight.data[:, i%conv.in_channels:i%conv.in_channels+1,:,:]\n elif mode == 'random':\n # Doesn't make much sense for expansion\n pass\n else:\n raise ValueError(f\"Mode {mode} is not valid\")\n \n # new_layer.weight = nn.Parameter(new_layer.weight)\n return new_layer\n\n","sub_path":"VGG.py","file_name":"VGG.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"259858496","text":"from django.conf.urls import url\nfrom .views import ArticleList, ArticleDetail, ArticleCreate, ArticleUpdate, ArticleDelete, MyArticles\n\napp_name = 'articles'\n\nurlpatterns = [\n url(r'^$', ArticleList.as_view(), name='list'),\n url(r'^(?P<pk>\\d+)$', ArticleDetail.as_view(), name=\"detail\"),\n url(r'^new-story$', ArticleCreate.as_view(), name=\"create\"),\n url(r'^p/(?P<pk>\\d+)/edit$', ArticleUpdate.as_view(), name=\"update\"),\n url(r'^my-stories$', MyArticles.as_view(), name=\"showMine\"),\n url(r'^p/(?P<pk>\\d+)/delete$', ArticleDelete.as_view(), name=\"delete\"),\n]\n","sub_path":"articles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"560354853","text":"\"\"\"Helper for constructing array structures with shape checks at runtime.\"\"\"\n\n\nimport dataclasses\nfrom typing import Optional, Tuple, cast\n\nfrom jax import numpy as jnp\nfrom typing_extensions import get_type_hints\n\nnull_array = cast(jnp.ndarray, None)\n# ^Placeholder value to be used as a dataclass field default, to enable structs that\n# contain only a partial set of values.\n#\n# An intuitive solution is to populate fields with a dummy default array like\n# `jnp.empty(shape=(0,))`, but this can cause silent broadcasting/tracing issues.\n#\n# So instead we use `None` as the default value. Which is nice because it leads to loud\n# runtime errors when uninitialized values are accidentally used.\n#\n# Note that the correct move would be to hint fields as `Optional[jnp.ndarray]`, but\n# this would result in code that's littered with `assert __ is not None` statements\n# and/or casts. Which is annoying. So instead we just pretend `None` is an array,\n\n\nclass ShapeAnnotatedStruct:\n \"\"\"Base class for dataclasses whose fields are annotated with expected shapes. Helps\n with assertions + checking batch axes.\n\n Example of an annotated field:\n\n array: Annotated[jnp.ndarray, (50, 150, 3)]\n\n \"\"\"\n\n def __getattribute__(self, name):\n out = super().__getattribute__(name)\n assert out is not None\n return out\n\n def check_shapes_and_get_batch_axes(self) -> Tuple[int, ...]:\n \"\"\"Make sure shapes of arrays are consistent with annotations, then return any\n leading batch axes (which should be shared across all contained arrays).\"\"\"\n\n assert dataclasses.is_dataclass(self)\n\n annotations = get_type_hints(type(self), include_extras=True)\n batch_axes: Optional[Tuple[int, ...]] = None\n\n # For each field...\n for field in dataclasses.fields(self):\n value = self.__getattribute__(field.name)\n if value is null_array:\n # Don't do anything for placeholder objects\n continue\n\n # Get expected shape, sans batch axes\n expected_shape = annotations[field.name].__metadata__[0]\n assert isinstance(expected_shape, tuple)\n\n # Get actual shape\n shape: Tuple[int, ...]\n if isinstance(value, float):\n shape = ()\n else:\n assert hasattr(value, \"shape\")\n shape = value.shape\n\n # Actual shape should be expected shape prefixed by some batch axes\n if len(expected_shape) > 0:\n assert shape[-len(expected_shape) :] == expected_shape\n field_batch_axes = shape[: -len(expected_shape)]\n else:\n field_batch_axes = shape\n\n if batch_axes is None:\n batch_axes = field_batch_axes\n assert batch_axes == field_batch_axes\n\n assert batch_axes is not None\n return batch_axes\n","sub_path":"lib/array_struct.py","file_name":"array_struct.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"507883602","text":"import io\nimport azure.storage\n\nclass AzureBlobIO(io.BytesIO):\n \"\"\"Class to read Azure blobs as a stream, using some buffer.\n \"\"\"\n\n def __init__(self, blob_service, container_name, blob_name, snapshot=None, chunksize=None):\n \"\"\"Build a stream from a blob to stream Azure service call.\n \"\"\"\n # Init the BytesIO instance\n super().__init__()\n\n self._blob_service = blob_service\n self.container_name = container_name\n self.blob_name = blob_name\n self.snapshot = snapshot\n\n # Download as a stream\n self._blob = self._blob_service.get_blob_to_stream(self.container_name,\n self.blob_name,\n self,\n self.snapshot)\n self.seek(0)\n","sub_path":"azureblobio/azureblobio.py","file_name":"azureblobio.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"303740874","text":"import os, sys\nimport subprocess\nimport pandas as pd\n\n# custom libraries\nsystem = str(input('\\n' + 'Local or Server (L or S):'))\n\nif system == 'S':\n sys.path.insert(0, '/home/strachan/master/')\nelse:\n sys.path.insert(0, '/Users/cameronstrachan/master/')\n\nfrom modules import seq_core_lin as sc\n\ndf_representative_genomes = pd.read_csv('dataflow/00-meta/representative_genomes.csv', low_memory=False)\ngenomes = df_representative_genomes['user_genome'].tolist()\nfiles = [item + \".fna\" for item in genomes]\n\nfor file in files:\n\tcommand = 'mv dataflow/01-nucl/' + file + ' dataflow/to_move/'\n\tos.system(command)","sub_path":"campylobacter_cores/genomes_moves.py","file_name":"genomes_moves.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"381990625","text":"#!/usr/bin/env python3\n\nimport os\nfrom Crypto.PublicKey import RSA\nfrom Crypto import Random\n\ndef main():\n counter = 0\n\n # see if theres any other PEM files about\n\n dir = os.listdir('./')\n\n for thing in dir:\n if \".pem\" in thing or \".PEM\" in thing:\n counter += 1\n\n for _ in range(10):\n rng = Random.new().read\n RSAkey = RSA.generate(1024,rng)\n data = RSAkey.exportKey().decode()\n with open(\"rsa_{}.pem\".format(counter+1),'w') as file:\n file.write(data)\n counter += 1\n\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"test/generateKeypairs.py","file_name":"generateKeypairs.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"594648079","text":"# -*- coding: utf-8 -*-\nimport os\nimport codecs\n\n# Use setuptools for these commands (they don't work well or at all\n# with distutils). For normal builds use distutils.\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\n\n# shamelessly copied from VoroPy\ndef read(fname):\n try:\n content = codecs.open(\n os.path.join(os.path.dirname(__file__), fname),\n encoding='utf-8'\n ).read()\n except Exception:\n content = ''\n return content\n\nsetup(name='krypy',\n packages=['krypy', 'krypy.recycling'],\n version='2.1.7',\n description='Krylov subspace methods for linear systems',\n long_description=read('README.rst'),\n author='André Gaul',\n author_email='gaul@web-yard.de',\n url='https://github.com/andrenarchy/krypy',\n install_requires=['numpy (>=1.11)', 'scipy (>=0.17)'],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Topic :: Scientific/Engineering :: Mathematics'\n ],\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"176270219","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport json\nimport os\nimport csv \nimport getopt\nimport sys\nimport argparse\n\ndef get_case_info(file_writer, file_content, reason, status=\"Pass\"):\n tmp_out = []\n info = []\n \n genomic = file_content['genomicData']\n ge_out = [ge_entry['Test Information']['Gene Name'] for ge_entry in genomic]\n for gene in gene_list:\n if gene in ge_out:\n # Output results to csv\n tmp_out.append(fileName.split('.')[0])\n tmp_out.append(gene)\n out.append(tmp_out)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Generate summary file')\n parser.add_argument('-c', '--case', help='path to convert file')\n parser.add_argument('-o', '--output', help='path to output file')\n parser.add_argument('-l', '--gene-list', help='list of genes you want to find in case')\n \n args = parser.parse_args()\n case_path = args.case\n list_file = open(args.gene_list, 'r')\n reader = csv.reader(list_file)\n gene_list = []\n out = []\n for row in reader:\n gene_list.append(row[0])\n \n if not os.path.exists(args.output):\n os.makedirs(args.output)\n\n output_filename = os.path.join(args.output, 'cases.csv')\n file_in_dir = [case for case in os.listdir(case_path) if case.split('.')[-1] == 'json']\n with open(output_filename, 'w') as csvfile:\n file_writer = csv.writer(csvfile, delimiter='\\t')\n\n for fileName in 
file_in_dir:\n test = fileName\n print(test)\n file_content = json.load(open(os.path.join(case_path, fileName)))\n get_case_info(file_writer, file_content, [])\n\n s = sorted(out, key = lambda x: (x[1], int(x[0])))\n for row in s:\n file_writer.writerow(row)\n","sub_path":"helper/find_case_by_gene.py","file_name":"find_case_by_gene.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"311265023","text":"\"\"\"\nBase formatter implementation\n\"\"\"\n\nimport abc\nimport typing\n\nimport packer.errors\n\n\nclass Formatter(metaclass=abc.ABCMeta):\n \"\"\"\n Base formatter implementation\n \"\"\"\n\n formatters: typing.ClassVar[typing.Dict[str, typing.Type]] = {}\n\n def __init_subclass__(cls):\n \"\"\"\n Register any subclasses as possible formatters\n \"\"\"\n\n if not hasattr(cls, 'format_name'):\n raise packer.errors.PackerError(\n f'{cls} is missing the format_name attribute',\n )\n\n format_name = getattr(cls, 'format_name')\n Formatter.formatters[format_name] = cls\n\n def __init__(self, data_stream: typing.Any) -> None:\n \"\"\"\n Creates a formatter loaded with the given data in a parsed form\n \"\"\"\n\n if hasattr(data_stream, 'readable'):\n self.data = self.parse(data_stream)\n else:\n self.data = data_stream\n\n @abc.abstractmethod\n def format_pretty(self) -> str:\n \"\"\"\n Formats the data in a human friendly form\n \"\"\"\n\n @abc.abstractmethod\n def format_compact(self) -> str:\n \"\"\"\n Formats the data in a compact form\n \"\"\"\n\n @abc.abstractmethod\n def parse(self, data_stream: typing.IO[typing.Any]) -> typing.Any:\n \"\"\"\n Parses the data stream and returns the parsed data\n \"\"\"\n\n @classmethod\n def get_formatter_class(cls, format_name: str) -> typing.Type:\n \"\"\"\n Gets a class for the given format type\n \"\"\"\n\n if format_name not in cls.formatters:\n raise packer.errors.PackerUnknownFormatType(\n f'Unrecognized format type: {format_name}',\n )\n\n return cls.formatters[format_name]\n","sub_path":"src/packer/formatters/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"57006391","text":"#p\nfrom collections import Counter \nstring=\"kabali\"\n\nn=int(input()) \nvalues=[]\ncount=0\nif n>=1 and n<=1000: \n for i in range(n):\n values.append((input()))\n\nfor val in values:\n if Counter(val)==Counter(string): \n count=count+1\nprint(count)\n","sub_path":"pla18.py","file_name":"pla18.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"29974921","text":"import math\n#import classes.MagneticField\nimport numpy as np\n#import time\nfrom constants import const\n\n\ndef R(x):\n radius = const.radius - (x - const.PG1X) * (const.PG2Y - const.PG1Y) / (const.PG2X - const.PG1X)\n radius[x <= const.PG1X] = const.radius\n radius[x > const.PG2X] = const.radius - (x[x > const.PG2X] - const.PG2X) * (const.PG3Y - const.PG2Y) / (const.PG3X - const.PG2X)\n return radius\ndef R_dbl(x):\n if x < const.x0 or x > const.xmax:\n return 0\n w = const.wall_list.get_y(x)\n\n return const.radius - w\n\ndef crossed_outer_boundary(x,y,z):\n if (x < const.x0 or x > const.xmax + 1 or y < const.y0 or y > const.ymax or z < const.z0 or z > const.zmax):\n return True\n return False\n\ndef inside_extraction_region(x,y,z):\n if(x > const.xmax * 0.5 and get_dist(y,z) < R(x)):\n 
return True\n return False\n\n\ndef get_dist(y,z):\n dist = np.sqrt((const.YCENTER - y) ** 2 + (const.ZCENTER - z) ** 2)\n return dist\n\n\ndef crossed_boundary(x,y,z):\n radius = math.sqrt((const.YCENTER - y) ** 2 + (const.ZCENTER - z) ** 2)\n if radius > R_dbl(x):\n return True\n return False\n\ndef create_id_matrix(comm,N):\n\n # Create matrix labeling each point on the grid\n\n # ix + 1 :: + 10\n # ix - 1 :: + 20\n\n # iy + 1 :: + 100\n # iy - 1 :: + 200\n\n # iz + 1 :: + 1000\n # iz - 1 :: + 2000\n\n # 0 :: Inside the chamber\n # 1 :: Outside the chamber\n\n matrix = np.empty((N,N,N))\n\n for i in range(N):\n for j in range(N):\n for k in range(N):\n\n ix = int(i * const.nx_global / N)\n iy = int(j * const.ny_global / N)\n iz = int(k * const.nz_global / N)\n\n\n\n if(crossed_boundary(ix,iy,iz)):\n matrix[i,j,k] = 1\n continue\n\n temp = 0\n\n if(crossed_boundary(ix + 1,iy,iz)):\n temp = temp + 10\n if(crossed_boundary(ix - 1,iy,iz)):\n temp = temp + 20\n if(crossed_boundary(ix,iy + 1,iz)):\n temp = temp + 100\n if(crossed_boundary(ix,iy - 1,iz)):\n temp = temp + 200\n if(crossed_boundary(ix,iy,iz + 1)):\n temp = temp + 1000\n if(crossed_boundary(ix,iy,iz - 1)):\n temp = temp + 2000\n\n matrix[i,j,k] = temp\n\n #if(matrix[ix,iy,iz] > 1):\n #ax.scatter(ix,iy,iz)\n return matrix\n\n\ndef create_poisson_matrix(id_matrix,charge_density,N):\n\n nx = N\n ny = N\n nz = N\n\n N3 = N * N * N\n poisson_matrix = np.zeros((N3,N3))\n rho_vec = [0]*(N*N*N)\n\n\n for k in range(N):\n for j in range(N):\n for i in range(N):\n\n\n\n ix = int(i * const.nx_global / N)\n iy = int(j * const.ny_global / N)\n iz = int(k * const.nz_global / N)\n i_j_k = i + (j + k * N) * N\n rho_vec[i_j_k] = charge_density[ix,iy,iz]\n poisson_matrix[i_j_k,i_j_k] = 1\n if(id_matrix[i,j,k] == 1):\n continue\n\n\n\n\n ip_j_k = (i + 1) + (j + k * N) * N\n im_j_k = (i - 1) + (j + k * N) * N\n\n i_jp_k = i + ((j + 1) + k * N) * N\n i_jm_k = i + ((j - 1) + k * N) * N\n\n i_j_kp = i + (j + (k + 1) * N) * N\n i_j_km = i + (j + (k - 1) * N) * N\n\n neighbour_list = [ip_j_k, im_j_k, i_jp_k, i_jm_k, i_j_kp, i_j_km]\n\n if(i == 0 or j == 0 or k == 0 or i == N - 1 or j == N - 1 or k == N - 1):\n continue\n\n\n poisson_matrix[i_j_k,i_j_k] = -6\n for neigbour in neighbour_list:\n poisson_matrix[neigbour,i_j_k] = 1\n\n\n #if(id_matrix[ix,iy,iz] == 1):\n #poisson_matrix = 0\n\n return rho_vec,poisson_matrix\ndef local_to_global(x_local,y_local,z_local,my_rank):\n # decode this rank's position in the processor grid (get_rank encodes rank = cpuy + nproc_y * cpuz)\n cpuy = my_rank % const.nproc_y\n cpuz = int(my_rank / const.nproc_y)\n\n # offset the local coordinate by the processor's grid position (inverse of global_to_local)\n x_global = x_local\n y_global = y_local + cpuy * const.ny_local\n z_global = z_local + cpuz * const.nz_local\n\n return x_global,y_global,z_global\n\ndef global_to_local(x_global,y_global,z_global):\n\n x_local = x_global - np.floor(x_global / const.nx_local) * const.nx_local\n\n y_local = y_global - np.floor(y_global / const.ny_local) * const.ny_local\n\n z_local = z_global - np.floor(z_global / const.nz_local) * const.nz_local\n\n return x_local,y_local,z_local\n\ndef get_rank(x,y,z):\n\n\n\n cpux = np.floor(x / const.x_local).astype(np.int)\n\n cpuy = np.floor(y / const.y_local).astype(np.int)\n cpuz = np.floor(z / const.z_local).astype(np.int)\n\n cpux[cpux < 0] = -1000\n cpuy[cpuy < 0] = -1000\n cpuz[cpuz < 0] = -1000\n\n cpux[cpux >= const.nproc_x] = -1000\n cpuy[cpuy >= const.nproc_y] = -1000\n cpuz[cpuz >= const.nproc_z] = -1000\n\n rank = cpuy + const.nproc_y * cpuz + cpux\n #if(rank >= const.number_of_processors or rank < 0):\n # rank = -1\n return rank\n\ndef get_cpu_number(rank):\n\n cpux = 0\n cpuy = rank % 
const.nproc_y\n cpuz = int(rank / const.nproc_y)\n\n return cpux, cpuy, cpuz\n","sub_path":"PYPIC/helper_functions/Geometry.py","file_name":"Geometry.py","file_ext":"py","file_size_in_byte":5203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"54860776","text":"class Node:\n def __init__(self,data):\n self.data = data\n self.next = None\nclass Solution:\n def insert(self,head,data):\n p = Node(data)\n if head==None:\n head=p\n elif head.next==None:\n head.next=p\n else:\n start=head\n while(start.next!=None):\n start=start.next\n start.next=p\n return head\n def display(self,head):\n current = head\n while current:\n print(current.data,end=' ')\n current = current.next\n\n def removeDuplicates(self,head):\n current = head\n element_list = []\n prev = None\n while current:\n if current.data not in element_list:\n element_list.append(current.data)\n prev = current\n current = current.next\n else:\n prev.next = current.next\n current = current.next\n\n return head\n \"\"\" previous = head\n s = set()\n s.add(previous.data)\n current = previous.next\n while current:\n if current.data in s:\n previous.next = current.next\n else:\n s.add(current.data)\n previous = current\n current = current.next\n return head \"\"\"\n\n\nif __name__ == '__main__':\n mylist = Solution()\n T = int(input())\n head = None\n for i in range(T):\n data = int(input())\n head = mylist.insert(head, data)\n head = mylist.removeDuplicates(head)\n mylist.display(head);","sub_path":"day24_MoreLinkedlist.py","file_name":"day24_MoreLinkedlist.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"83921272","text":"from django.shortcuts import render\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nimport requests\nimport json\nimport random\nimport traceback\nfrom django.http import HttpResponse\nfrom .models import Product\n\n\nsearchkey = ['Microsoft TV', 'Apple TV']\n\nurl_list = [\"http://api.shopclues.com/api/v11/search?q=\"+searchkey[1]+\"&z=1&key=d12121c70dda5edfgd1df6633fdb36c0&page=2\",\n\"https://search.paytm.com/v2/search?userQuery=\"+searchkey[1]+\"&page_count=5&items_per_page=100\"]\n\n# Hitting random url from the list---\nurl = random.choice(url_list)\nresponse = requests.request(\"GET\", url).json()\n\n# Search function to find data in the API---\ndef searchaggview(request):\n try:\n noofresults =response['grid_layout']\n length = len(response['grid_layout'])\n except:\n noofresults =response['products']\n length = len(response['products'])\n print(length)\n product=[]\n prod_list=[]\n for i in range(0,length):\n try:\n product_id=noofresults[i]['product_id']\n except:\n product_id='NA'\n \n try:\n name=noofresults[i]['name']\n except:\n name=noofresults[i]['product']\n\n try:\n url=noofresults[i]['url']\n except:\n url=noofresults[i]['product_url']\n\n try:\n image_url=noofresults[i]['image_url']\n except:\n image_url='NA'\n\n try:\n offer_price=noofresults[i]['offer_price']\n except:\n offer_price=noofresults[i]['list_price']\n\n try:\n actual_price=noofresults[i]['actual_price']\n except:\n actual_price=noofresults[i]['retail_price']\n\n product=[product_id,name,url,image_url,offer_price,actual_price]\n prod_list.append(product)\n\n\n # Saving the fetched data into the database---\n product_db = Product()\n product_db.product_id = product_id\n product_db.name = name\n product_db.url = url\n 
product_db.image_url = image_url\n product_db.offer_price = offer_price\n product_db.actual_price = actual_price\n product_db.save()\n\n\n # Paginator to show 50 data per page---\n paginator = Paginator(prod_list, 50)\n page_number = request.GET.get('page')\n try:\n page_obj = paginator.page(page_number)\n except PageNotAnInteger:\n page_obj = paginator.page(1)\n except EmptyPage:\n page_obj = paginator.page(paginator.num_pages)\n print(page_obj)\n\n return render(request, 'searchapp.html',{\"prod_list\" : page_obj})\n\n\n","sub_path":"search_aggregator/searchapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"84875909","text":"# _*_coding:utf-8_*_\n# Created by #Suyghur, on 2019-03-18.\n# Copyright (c) 2019 3KWan.\n# Description :\nimport sys\n\nfrom mod_copy_and_build_res import CopyAndBuildRes\n\nif __name__ == '__main__':\n channelName = sys.argv[1]\n newVersion = sys.argv[2]\n libraryChannelPath = sys.argv[3]\n commonsdkPath = sys.argv[4]\n clz = CopyAndBuildRes(channelName, newVersion, libraryChannelPath, commonsdkPath)\n clz.copyJar2CommonLib()\n clz.modifyImplJavaClz()\n clz.buildCommonSDKProject()\n","sub_path":"commonsdk_python/main_copyAndBuildres.py","file_name":"main_copyAndBuildres.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"528089621","text":"import unittest\nfrom inject import assign_injectables\n\nclass TestAssignInjectables(unittest.TestCase):\n def test_it_should_assign_variables_that_are_injected_into_the_object(self):\n class WithInjectableConstructor(object):\n def __init__(self, foo, bar):\n assign_injectables(self, locals())\n\n foo = 'foo'\n bar = 'bar'\n\n under_test = WithInjectableConstructor(foo, bar)\n\n self.assertEquals(foo, under_test.foo, 'foo should be injected')\n self.assertEquals(bar, under_test.bar, 'bar should also be injected')\n self.assertFalse(hasattr(under_test, 'self'), 'self should not be injected')\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"assignment6/advgame/inject_test.py","file_name":"inject_test.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"37948631","text":"import numpy as np\n\nfrom bartpy.model import Model\nfrom bartpy.node import LeafNode\nfrom bartpy.samplers.sampler import Sampler\nfrom bartpy.samplers.scalar import NormalScalarSampler\n\n\nclass LeafNodeSampler(Sampler):\n \"\"\"\n Responsible for generating samples of the leaf node predictions\n Essentially just draws from a normal distribution with prior specified by model parameters\n\n Uses a cache of draws from a normal(0, 1) distribution to improve sampling performance\n \"\"\"\n\n def __init__(self,\n scalar_sampler=NormalScalarSampler(60000)):\n self._scalar_sampler = scalar_sampler\n\n def step(self, model: Model, node: LeafNode) -> float:\n sampled_value = self.sample(model, node)\n node.set_value(sampled_value)\n return sampled_value\n\n def sample(self, model: Model, node: LeafNode) -> float:\n prior_var = model.sigma_m ** 2\n n = node.data.n_obsv\n if n > 0:\n likihood_var = (model.sigma.current_value() ** 2) / n\n likihood_mean = node.data.summed_y() / node.data.n_obsv\n posterior_variance = 1. / (1. / prior_var + 1. 
/ likihood_var)\n posterior_mean = likihood_mean * (prior_var / (likihood_var + prior_var))\n return posterior_mean + (self._scalar_sampler.sample() * np.power(posterior_variance / model.n_trees, 0.5))\n else:\n return 0\n","sub_path":"bartpy/samplers/leafnode.py","file_name":"leafnode.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"249452895","text":"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nfrom options.train_vmd_options import TrainVmdOptions\nfrom loaders import aligned_data_loader\nfrom models import pix2pixdata_model\n\nimport cv2\nimport numpy as np\nfrom PIL import Image\nfrom skimage import exposure, transform\nimport imageio\nimport os\nimport logging\nimport argparse\nimport datetime\nimport shutil\nimport re\nimport json\nimport sys\nimport csv\nimport sort_people\nimport time\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n# ファイル出力ログ用\nfile_logger = logging.getLogger(\"message\")\n\nlevel = {0: logging.ERROR,\n 1: logging.WARNING,\n 2: logging.INFO,\n 3: logging.DEBUG}\n\n# 入力値\nWIDTH = 512\n\ndef predict_video(now_str, video_path, depth_path, past_depth_path, interval, json_path, number_people_max, reverse_specific_dict, order_specific_dict, is_avi_output, end_frame_no, order_start_frame, verbose, opt):\n # Windows用に追加\n torch.multiprocessing.freeze_support()\n\n # 深度用サブディレクトリ\n subdir = '{0}/depth'.format(depth_path)\n if os.path.exists(subdir):\n # 既にディレクトリがある場合、一旦削除\n shutil.rmtree(subdir)\n os.makedirs(subdir)\n\n # 深度用サブディレクトリ(disparity)\n depth_pred_dir_path = '{0}/depth_disparity'.format(subdir)\n if os.path.exists(depth_pred_dir_path):\n # 既にディレクトリがある場合、一旦削除\n shutil.rmtree(depth_pred_dir_path)\n os.makedirs(depth_pred_dir_path)\n\n # ファイル用ログの出力設定\n log_file_path = '{0}/message.log'.format(depth_path)\n logger.debug(log_file_path)\n file_logger.addHandler(logging.FileHandler(log_file_path))\n file_logger.warning(\"深度推定出力開始 now: %s ---------------------------\", now_str)\n\n logger.addHandler(logging.FileHandler('{0}/{1}.log'.format(depth_path, __name__)))\n\n # 映像情報取得\n org_width, org_height, scale, width, height = get_video_info(video_path)\n\n logger.debug(\"org_width: %s, org_height: %s, scale: %s, width: %s, height: %s\", org_width, org_height, scale, width, height)\n\n for pidx in range(number_people_max):\n # 人数分サイズデータ出力\n size_idx_path = '{0}/{1}_{3}_idx{2:02d}/size.txt'.format(os.path.dirname(\n json_path), os.path.basename(json_path), pidx+1, now_str)\n os.makedirs(os.path.dirname(size_idx_path), exist_ok=True)\n sizef = open(size_idx_path, 'w')\n # 一行分を追記\n sizef.write(\"{0}\\n\".format(org_width))\n sizef.write(\"{0}\\n\".format(org_height))\n sizef.close()\n\n # フレーム開始INDEX取得\n start_json_name, start_frame, json_size = read_openpose_start_json(json_path)\n\n logger.info(\"number_people_max: %s, json_size: %s, start_frame: %s\", number_people_max, json_size, 
start_frame)\n\n # 深度アニメーションGIF用\n png_lib = []\n # 人数分の深度データ\n # pred_depth_ary = [[[0 for z in range(18)] for y in range(number_people_max)] for x in range(json_size)]\n pred_depth_ary = np.zeros((json_size,number_people_max,18))\n # 人数分の深度データ(追加分)\n pred_depth_support_ary = np.zeros((json_size,number_people_max,17))\n # 人数分の信頼度データ\n pred_conf_ary = np.zeros((json_size,number_people_max,18))\n # 人数分の信頼度データ(追加分)\n pred_conf_support_ary = np.zeros((json_size,number_people_max,17))\n # 人数分の深度画像データ\n pred_image_ary = [[] for x in range(json_size) ]\n # 過去ソートデータ(pastはsort_peopleで使ってるのでprev)\n prev_sorted_idxs = []\n\n # 深度用ファイル\n depthf_path = '{0}/depth.txt'.format(depth_path)\n # 信頼度用ファイル\n conff_path = '{0}/conf.txt'.format(depth_path)\n # ソート順用ファイル\n orderf_path = '{0}/order.txt'.format(depth_path)\n\n past_depthf_path = None\n past_conff_path = None\n past_orderf_path = None\n if past_depth_path is not None:\n past_depthf_path = '{0}/depth.txt'.format(past_depth_path)\n past_conff_path = '{0}/conf.txt'.format(past_depth_path)\n past_orderf_path = '{0}/order.txt'.format(past_depth_path)\n\n logger.info(\"past_depthf_path: %s\", past_depthf_path)\n logger.info(\"past_conff_path: %s\", past_conff_path)\n logger.info(\"past_orderf_path: %s\", past_orderf_path)\n\n if past_depthf_path is not None and os.path.exists(past_depthf_path) and past_conff_path is not None and os.path.exists(past_conff_path) and \\\n (order_start_frame == 0 or(order_start_frame > 0 and past_orderf_path is not None and os.path.exists(past_orderf_path))):\n # 深度ファイルが両方ある場合、それを読み込む\n\n # ----------------------\n pdepthf = open(past_depthf_path, 'r')\n\n fkey = -1\n fnum = 0\n # カンマ区切りなので、csvとして読み込む\n reader = csv.reader(pdepthf)\n\n for row in reader:\n fidx = int(row[0])\n if fkey != fidx:\n # キー値が異なる場合、インデックス取り直し\n fnum = 0\n\n pred_depth_ary[fidx][fnum] = np.array([float(x) for x in row[1:19]])\n pred_depth_support_ary[fidx][fnum] = np.array([float(x) for x in row[19:]])\n\n # 人物インデックス加算\n fnum += 1\n # キー保持\n fkey = fidx\n \n pdepthf.close()\n \n # 自分の深度情報ディレクトリにコピー\n shutil.copyfile(past_depthf_path, depthf_path)\n\n # ----------------------\n pconff = open(past_conff_path, 'r')\n\n fkey = -1\n fnum = 0\n # カンマ区切りなので、csvとして読み込む\n reader = csv.reader(pconff)\n\n for row in reader:\n fidx = int(row[0])\n if fkey != fidx:\n # キー値が異なる場合、インデックス取り直し\n fnum = 0\n\n pred_conf_ary[fidx][fnum] = np.array([float(x) for x in row[1:19]])\n pred_conf_support_ary[fidx][fnum] = np.array([float(x) for x in row[19:]])\n\n # 人物インデックス加算\n fnum += 1\n # キー保持\n fkey = fidx\n \n pconff.close()\n \n # 自分の信頼度情報ディレクトリにコピー\n shutil.copyfile(past_conff_path, conff_path)\n\n if order_start_frame > 0:\n # ソート開始フレームが指定されている場合、そこまで読み込む\n\n # ----------------------\n porderf = open(past_orderf_path, 'r')\n\n n = 0\n # カンマ区切りなので、csvとして読み込む\n reader = csv.reader(porderf)\n\n for row in reader:\n if (n < order_start_frame):\n prev_sorted_idxs.append([int(x) for x in row])\n else:\n break\n n += 1\n\n with open(orderf_path, 'w', newline='') as f:\n csv.writer(f).writerows(prev_sorted_idxs)\n \n for _eidx in range(number_people_max):\n # INDEX別情報をまるっとコピー\n past_idx_path = past_depth_path.replace('depth', 'idx{0:02d}'.format(_eidx+1))\n idx_path = '{0}/{1}_{3}_idx{2:02d}'.format(os.path.dirname(json_path), os.path.basename(json_path), _eidx+1, now_str)\n # 既に出来ているので一旦削除\n shutil.rmtree(idx_path)\n shutil.copytree(past_idx_path, idx_path)\n\n # 深度データと信頼度データを必要行まで上書き\n depth_idx_path = '{0}/{1}_{3}_idx{2:02d}/depth.txt'.format(os.path.dirname(json_path), 
os.path.basename(json_path), _eidx+1, now_str)\n \n with open(depth_idx_path, 'r') as f:\n lines = f.readlines()\n lines = lines[:order_start_frame]\n\n with open(depth_idx_path, 'w') as f:\n f.write(''.join(lines))\n\n conf_idx_path = '{0}/{1}_{3}_idx{2:02d}/conf.txt'.format(os.path.dirname(json_path), os.path.basename(json_path), _eidx+1, now_str)\n\n with open(conf_idx_path, 'r') as f:\n lines = f.readlines()\n lines = lines[:order_start_frame]\n\n with open(conf_idx_path, 'w') as f:\n f.write(''.join(lines))\n \n logger.warning("過去データコピー idx: %s", _eidx+1)\n\n porderf.close()\n else: \n # 動画を1枚ずつ画像に変換する\n in_idx = 0\n cnt = 0\n cap = cv2.VideoCapture(video_path)\n img_list = []\n while(cap.isOpened()):\n # 動画から1枚キャプチャして読み込む\n flag, frame = cap.read() # Capture frame-by-frame\n\n # 深度推定のindex\n _idx = cnt - start_frame\n\n # 開始フレームより前は飛ばす\n if start_frame > cnt:\n cnt += 1\n continue\n \n # 終わったフレームより後は飛ばす\n # 明示的に終わりが指定されている場合、その時も終了する\n if flag == False or cnt >= json_size + start_frame or (end_frame_no > 0 and cnt >= end_frame_no):\n break\n\n # キャプチャ画像を読み込む\n img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2Lab))\n # 偏差\n img = np.float32(img)/255.0\n # サイズを小さくする\n img = transform.resize(img, (height, width))\n try:\n # コントラストをあげる\n img = exposure.equalize_adapthist(img)\n except:\n pass\n\n img_list.append(img)\n\n logger.debug("cnt: %s, _idx: %s, flag: %s, len(img_list): %s", cnt, _idx, flag, len(img_list))\n\n if (_idx > 0 and _idx % interval == 0 and _idx < json_size) or (cnt >= json_size + start_frame - 1):\n start = time.time()\n\n eval_num_threads = 2\n video_data_loader = aligned_data_loader.DAVISCaptureDataLoader(img_list, opt.batchSize)\n video_dataset = video_data_loader.load_data()\n logger.debug('========================= Video dataset #images = %d =========' %\n len(video_data_loader))\n\n model = pix2pixdata_model.Pix2PixDataModel(opt)\n\n torch.backends.cudnn.enabled = True\n torch.backends.cudnn.benchmark = True\n best_epoch = 0\n global_step = 0\n\n logger.debug(\n '================================= BEGIN VALIDATION ====================================='\n )\n\n logger.debug('TESTING ON VIDEO')\n\n model.switch_to_eval()\n \n # 深度ファイルを追記形式で開く\n depthf = open(depthf_path, 'a')\n conff = open(conff_path, 'a')\n \n for i, data in enumerate(video_dataset):\n stacked_img = data[0]\n\n # 1件だけ解析する\n pred, pred_d_ref = model.run_and_save_DAVIS_one(stacked_img)\n\n if level[verbose] < logging.INFO:\n # 一旦出力する\n np.savetxt('{0}/pred_{1:012d}.txt'.format(depth_pred_dir_path, in_idx), pred, fmt='%.5f')\n np.savetxt('{0}/predref_{1:012d}.txt'.format(depth_pred_dir_path, in_idx), pred_d_ref, fmt='%.5f')\n\n # logger.debug("pred: %s", _idx)\n # logger.debug(pred)\n # logger.debug("len(pred): %s", len(pred))\n\n # 深度解析後の画像サイズ\n pred_width = len(pred[0])\n pred_height = len(pred)\n logger.debug("%s: pred_width: %s, pred_height: %s", in_idx, pred_width, pred_height)\n\n # 該当シーンのJSONデータを読み込む\n file_name = re.sub(r'\\d{12}', "{0:012d}".format(in_idx + start_frame), start_json_name)\n _file = os.path.join(json_path, file_name)\n\n try:\n data = json.load(open(_file))\n except Exception as e:\n logger.warning("JSON読み込み失敗のため、空データ読み込み, %s %s", _file, e)\n data = json.load(open("json/all_empty_keypoints.json"))\n\n for dpidx in range(len(data["people"]), number_people_max):\n # 人数分のデータが無い場合、空データを読み込む\n data["people"].append(json.load(open("json/one_keypoints.json")))\n \n # 深度解析後の画像サイズ\n pred_width = len(pred[0])\n 
pred_height = len(pred)\n logger.debug(\"pred_width: %s, pred_height: %s\", pred_width, pred_height)\n \n for dpidx in range(number_people_max):\n logger.debug(\"dpidx: %s, len(data[people]): %s\", dpidx, len(data[\"people\"]))\n for o in range(0,len(data[\"people\"][dpidx][\"pose_keypoints_2d\"]),3):\n oidx = int(o/3)\n # オリジナルの画像サイズから、縮尺を取得\n scale_org_x = data[\"people\"][dpidx][\"pose_keypoints_2d\"][o] / org_width\n scale_org_y = data[\"people\"][dpidx][\"pose_keypoints_2d\"][o+1] / org_height\n # logger.debug(\"scale_org_x: %s, scale_org_y: %s\", scale_org_x, scale_org_y)\n\n # 縮尺を展開して、深度解析後の画像サイズに合わせる\n pred_x = int(pred_width * scale_org_x)\n pred_y = int(pred_height * scale_org_y)\n\n if 0 <= pred_y < len(pred) and 0 <= pred_x < len(pred[pred_y]):\n # depths = pred[pred_y-3:pred_y+4,pred_x-3:pred_x+4].flatten()\n # for x_shift in range(-3,4):\n # for y_shift in range(-3, 4):\n # if 0 <= pred_x + x_shift < pred_width and 0 <= pred_y + y_shift < pred_height:\n # depths.append(pred[pred_y + y_shift][pred_x + x_shift])\n\n # 周辺3ピクセルで平均値を取る\n pred_list = pred[pred_y-1:pred_y+2,pred_x-1:pred_x+2].flatten()\n depth = 0 if len(pred_list) == 0 else np.mean(pred_list)\n\n logger.debug(\"pred_x: %s, pred_y: %s, depth: %s\", pred_x, pred_y, depth)\n\n pred_depth_ary[in_idx][dpidx][oidx] = depth\n pred_conf_ary[in_idx][dpidx][oidx] = data[\"people\"][dpidx][\"pose_keypoints_2d\"][o+2]\n pred_image_ary[in_idx] = pred\n else:\n # たまにデータが壊れていて、「9.62965e-35」のように取れてしまった場合の対策\n pred_depth_ary[in_idx][dpidx][oidx] = 0\n pred_conf_ary[in_idx][dpidx][oidx] = 0\n pred_image_ary[in_idx] = pred\n\n\n depth_support = np.zeros(17)\n conf_support = np.zeros(17)\n weights = [0.1,0.8,0.4,0.1,0.05,0.4,0.1,0.05,0.8,0.5,0.2,0.8,0.5,0.2,0.05,0.05,0.05,0.05]\n\n # Openposeで繋がっているライン上の深度を取得する\n for _didx, (start_idx, end_idx, start_w, end_w) in enumerate([(0,1,weights[0],weights[1]),(1,2,weights[1],weights[2]),(2,3,weights[2],weights[3]),(3,4,weights[3],weights[4]), \\\n (1,5,weights[1],weights[5]),(5,6,weights[5],weights[6]),(6,7,weights[6],weights[7]),(1,8,weights[1],weights[8]),(8,9,weights[8],weights[9]), \\\n (9,10,weights[9],weights[10]),(1,11,weights[1],weights[11]),(11,12,weights[11],weights[12]),(12,13,weights[12],weights[13]),(0,14,weights[0],weights[14]), \\\n (14,16,weights[14],weights[16]),(0,15,weights[0],weights[15]),(15,17,weights[15],weights[17])]):\n # オリジナルの画像サイズから、縮尺を取得\n start_scale_org_x = data[\"people\"][dpidx][\"pose_keypoints_2d\"][start_idx*3] / org_width\n start_scale_org_y = data[\"people\"][dpidx][\"pose_keypoints_2d\"][start_idx*3+1] / org_height\n start_conf = data[\"people\"][dpidx][\"pose_keypoints_2d\"][start_idx*3+2]\n # logger.debug(\"scale_org_x: %s, scale_org_y: %s\", scale_org_x, scale_org_y)\n\n # 縮尺を展開して、深度解析後の画像サイズに合わせる\n start_pred_x = int(pred_width * start_scale_org_x)\n start_pred_y = int(pred_height * start_scale_org_y)\n\n # オリジナルの画像サイズから、縮尺を取得\n end_scale_org_x = data[\"people\"][dpidx][\"pose_keypoints_2d\"][end_idx*3] / org_width\n end_scale_org_y = data[\"people\"][dpidx][\"pose_keypoints_2d\"][end_idx*3+1] / org_height\n end_conf = data[\"people\"][dpidx][\"pose_keypoints_2d\"][end_idx*3+2]\n # logger.debug(\"scale_org_x: %s, scale_org_y: %s\", scale_org_x, scale_org_y)\n\n # 縮尺を展開して、深度解析後の画像サイズに合わせる\n end_pred_x = int(pred_width * end_scale_org_x)\n end_pred_y = int(pred_height * end_scale_org_y)\n\n per_depth_support = []\n per_weight_support = []\n \n # 深度範囲\n pred_x_rng = abs(start_pred_x - end_pred_x)\n pred_y_rng = abs(start_pred_y - end_pred_y)\n\n # 
短い方の距離を単位とする\n pred_per = min(pred_x_rng, pred_y_rng)\n\n # 軸\n pred_x_line = np.linspace( min(start_pred_x, end_pred_x), max(start_pred_x, end_pred_x), pred_per + 1, dtype=int )\n pred_y_line = np.linspace( min(start_pred_y, end_pred_y), max(start_pred_y, end_pred_y), pred_per + 1, dtype=int )\n\n # 重み\n pred_weigths = np.linspace( start_w, end_w, pred_per + 1 )\n\n for (x, y, w) in zip(pred_x_line, pred_y_line, pred_weigths):\n # 直線状の深度と重みを計算\n per_depth_support.append(pred[y][x])\n per_weight_support.append(w)\n\n # 重み付き平均を計算\n depth_support[_didx] = np.average(per_depth_support, weights=per_weight_support)\n conf_support[_didx] = np.mean([start_conf, end_conf])\n\n pred_depth_support_ary[in_idx][dpidx] = depth_support\n pred_conf_support_ary[in_idx][dpidx] = conf_support\n\n # ------------------\n\n # 深度データ\n depthf.write(\"{0}, {1},{2}\\n\".format(in_idx, ','.join([ str(x) for x in pred_depth_ary[in_idx][dpidx] ]), ','.join([ str(x) for x in pred_depth_support_ary[in_idx][dpidx] ])))\n # 信頼度データ\n conff.write(\"{0}, {1},{2}\\n\".format(in_idx, ','.join([ str(x) for x in pred_conf_ary[in_idx][dpidx] ]), ','.join([ str(x) for x in pred_conf_support_ary[in_idx][dpidx] ])))\n\n in_idx += 1\n\n # 一定間隔フレームおきにキャプチャした画像を深度推定する\n logger.warning(\"深度推定 idx: %s(%s) 処理: %s[sec]\", _idx, cnt, time.time() - start)\n\n img_list = []\n\n # 一旦閉じる\n depthf.close()\n conff.close()\n\n cnt += 1\n\n cap.release()\n cv2.destroyAllWindows()\n\n # 基準深度で再計算\n # zファイルの方は基準深度再計算なし\n pred_depth_ary, pred_depth_support_ary = recalc_depth(pred_depth_ary, pred_depth_support_ary)\n\n # 人物ソート\n sort_people.exec(pred_depth_ary, pred_depth_support_ary, pred_conf_ary, pred_conf_support_ary, pred_image_ary, video_path, now_str, subdir, json_path, json_size, number_people_max, reverse_specific_dict, order_specific_dict, start_json_name, start_frame, end_frame_no, org_width, org_height, png_lib, scale, prev_sorted_idxs, verbose)\n\n if is_avi_output:\n # MMD用背景AVI出力\n outputAVI(depth_path, json_path, number_people_max, now_str, start_frame, end_frame_no, start_json_name, org_width, org_height)\n\n if level[verbose] <= logging.INFO and len(png_lib) > 0:\n # 終わったらGIF出力\n logger.info(\"creating Gif {0}/movie_depth.gif, please Wait!\".format(os.path.dirname(depth_path)))\n imageio.mimsave('{0}/movie_depth.gif'.format(os.path.dirname(depth_path)), png_lib, fps=30)\n\n\n# 基準深度で再計算\ndef recalc_depth(pred_depth_ary, pred_depth_support_ary):\n pred_depth_ary = np.array(pred_depth_ary)\n pred_depth_support_ary = np.array(pred_depth_support_ary)\n\n # 基準となる深度\n base_depth = np.median(pred_depth_ary[0][pred_depth_ary[0] != 0])\n\n # # 深度0が含まれていると狂うので、ループしてチェックしつつ合算\n # pred_sum = 0\n # pred_cnt = 0\n # for pred_joint in depth_ary[0][0]:\n # if pred_joint > 0:\n # pred_sum += pred_joint\n # pred_cnt += 1\n\n # # 1人目の0F目の場合、基準深度として平均値を保存\n # base_depth = pred_sum / pred_cnt if pred_cnt > 0 else 0\n\n logger.info(\"基準深度取得: base_depth: %s\", base_depth) \n\n # 基準深度で入れ直し\n return np.where(pred_depth_ary != 0, (pred_depth_ary - base_depth) * 100, pred_depth_ary), np.where(pred_depth_support_ary != 0, (pred_depth_support_ary - base_depth) * 100, pred_depth_support_ary)\n\ndef outputAVI(depth_path, json_path, number_people_max, now_str, start_frame, end_frame_no, start_json_name, org_width, org_height):\n fourcc_names = [\"I420\"]\n\n if os.name == \"nt\":\n # Windows\n fourcc_names = [\"IYUV\"]\n\n # MMD用AVI出力 -----------------------------------------------------\n for fourcc_name in fourcc_names:\n try:\n # 
コーデックは実行環境によるので、自環境のMMDで確認できたfourccを総当たり\n # FIXME IYUVはAVI2なので、1GBしか読み込めない。ULRGは出力がULY0になってMMDで動かない。とりあえずIYUVを1GB以内で出力する\n fourcc = cv2.VideoWriter_fourcc(*fourcc_name)\n # 出力先AVIを設定する(MMD用に小さめ)\n out_path = '{0}/output_{1}.avi'.format(depth_path, fourcc_name)\n\n if os.name == "nt":\n # Windows\n op_avi_path = re.sub(r'json$', "openpose.avi", json_path)\n else:\n op_avi_path = re.sub(r'json/?', "openpose.avi", json_path)\n logger.info("op_avi_path: %s", op_avi_path)\n # Openpose結果AVIを読み込む\n cnt = 0\n cap = cv2.VideoCapture(op_avi_path)\n\n avi_width = int(org_width*0.32)\n avi_height = int(org_height*0.32)\n\n out = cv2.VideoWriter(out_path, fourcc, 30.0, (avi_width, avi_height))\n \n while(cap.isOpened()):\n # 動画から1枚キャプチャして読み込む\n flag, frame = cap.read() # Capture frame-by-frame\n\n # 動画が終わっていたら終了\n if flag == False:\n break\n\n # # 開始フレームより前は飛ばす\n # if start_frame > cnt:\n # cnt += 1\n # continue\n\n for pidx, lcolor, rcolor in zip(range(number_people_max) \\\n , [(51,255,51), (255,51,51), (255,255,255), (51,255,255), (255,51,255), (255,255,51), (0,127,0), (127,0,0), (102,102,102), (0,127,127), (127,0,127), (127,127,0)] \\\n , [(51,51,255), (51,51,255), (51,51,255), (51,51,255), (51,51,255), (51,51,255), (0,0,127), (0,0,127), (0,0,127), (0,0,127), (0,0,127), (0,0,127)]):\n # 人物別に色を設定, colorはBGR形式\n # 【00番目】 左:緑, 右: 赤\n # 【01番目】 左:青, 右: 赤\n # 【02番目】 左:白, 右: 赤\n # 【03番目】 左:黄, 右: 赤\n # 【04番目】 左:桃, 右: 赤\n # 【05番目】 左:濃緑, 右: 赤\n # 【06番目】 左:濃青, 右: 赤\n # 【07番目】 左:灰色, 右: 赤\n # 【08番目】 左:濃黄, 右: 赤\n # 【09番目】 左:濃桃, 右: 赤\n idx_json_path = '{0}/{1}_{3}_idx{2:02d}/json/{4}'.format(os.path.dirname(json_path), os.path.basename(json_path), pidx+1, now_str, re.sub(r'\\d{12}', "{0:012d}".format(cnt + start_frame), start_json_name))\n # logger.warning("pidx: %s, color: %s, idx_json_path: %s", pidx, color, idx_json_path)\n\n if os.path.isfile(idx_json_path):\n data = json.load(open(idx_json_path))\n\n for o in range(0,len(data["people"][0]["pose_keypoints_2d"]),3):\n # 左右で色を分ける\n color = rcolor if int(o/3) in [2,3,4,8,9,10,14,16] else lcolor\n\n if data["people"][0]["pose_keypoints_2d"][o+2] > 0:\n # 少しでも信頼度がある場合出力\n # logger.debug("x: %s, y: %s", data["people"][0]["pose_keypoints_2d"][o], data["people"][0]["pose_keypoints_2d"][o+1])\n # cv2.drawMarker( frame, (int(data["people"][0]["pose_keypoints_2d"][o]+5), int(data["people"][0]["pose_keypoints_2d"][o+1]+5)), color, markerType=cv2.MARKER_TILTED_CROSS, markerSize=10)\n # 座標のXY位置に点を置く。原点が左上なので、ちょっとずらす\n cv2.circle( frame, (int(data["people"][0]["pose_keypoints_2d"][o]+1), int(data["people"][0]["pose_keypoints_2d"][o+1]+1)), 5, color, thickness=-1)\n \n # 縮小\n output_frame = cv2.resize(frame, (avi_width, avi_height))\n\n # 全人物が終わったら出力\n out.write(output_frame)\n\n # インクリメント\n cnt += 1\n\n if end_frame_no > 0 and cnt >= end_frame_no:\n break\n\n logger.warning('MMD用AVI: {0}'.format(out_path))\n\n # 出力に成功したら終了\n # break\n except Exception as e:\n logger.warning("MMD用AVI出力失敗: %s, %s", fourcc_name, e)\n\n finally:\n # 終わったら解放\n cap.release()\n out.release()\n cv2.destroyAllWindows()\n\n\n# Openposeの結果jsonの最初を読み込む\ndef read_openpose_start_json(json_path):\n # openpose output format:\n # [x1,y1,c1,x2,y2,c2,...]\n # ignore confidence score, take x and y [x1,y1,x2,y2,...]\n\n # load json files\n json_files = os.listdir(json_path)\n # check for other file types\n json_files = sorted([filename for filename in json_files if filename.endswith(".json")])\n\n # jsonのファイル数が読み取り対象フレーム数\n json_size = len(json_files)\n # 
開始フレーム\n start_frame = 0\n # 開始フラグ\n is_started = False\n \n for file_name in json_files:\n logger.debug(\"reading {0}\".format(file_name))\n _file = os.path.join(json_path, file_name)\n\n if not os.path.isfile(_file):\n if is_started:\n raise Exception(\"No file found!!, {0}\".format(_file))\n else:\n continue\n\n try:\n data = json.load(open(_file))\n except Exception as e:\n logger.warning(\"JSON読み込み失敗のため、空データ読み込み, %s %s\", _file, e)\n data = json.load(open(\"tensorflow/json/all_empty_keypoints.json\"))\n\n # 12桁の数字文字列から、フレームINDEX取得\n frame_idx = int(re.findall(\"(\\d{12})\", file_name)[0])\n \n if (frame_idx <= 0 or is_started == False) and len(data[\"people\"]) > 0:\n # 何らかの人物情報が入っている場合に開始\n # 開始したらフラグを立てる\n is_started = True\n # 開始フレームインデックス保持\n start_frame = frame_idx\n\n # ループ終了\n break\n\n logger.warning(\"開始フレーム番号: %s\", start_frame)\n\n return json_files[0], start_frame, json_size\n\n\n\n# 映像解析縮尺情報\ndef get_video_info(video_path):\n # 映像サイズを取得する\n cap = cv2.VideoCapture(video_path)\n # 幅\n org_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)\n # 高さ\n org_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n logger.debug(\"width: {0}, height: {1}\".format(org_width, org_height))\n\n # 縮小倍率\n scale = WIDTH / org_width\n logger.debug(\"scale: {0}\".format(scale))\n \n height = int(org_height * scale)\n logger.debug(\"width: {0}, height: {1}\".format(WIDTH, height))\n\n return org_width, org_height, scale, WIDTH, height\n\n\n\ndef main():\n opt = TrainVmdOptions().parse() # set CUDA_VISIBLE_DEVICES before import torch\n\n logger.setLevel(level[opt.verbose])\n\n # 間隔は1以上の整数\n interval = opt.interval if opt.interval > 0 else 1\n\n # AVI出力有無\n is_avi_output = False if opt.avi_output == 'no' else True\n\n # 出力用日付\n if opt.now is None:\n now_str = \"{0:%Y%m%d_%H%M%S}\".format(datetime.datetime.now())\n else:\n now_str = opt.now\n\n # 日付+depthディレクトリ作成\n depth_path = '{0}/{1}_{2}_depth'.format(os.path.dirname(opt.json_path), os.path.basename(opt.json_path), now_str)\n os.makedirs(depth_path, exist_ok=True)\n\n # 過去深度ディレクトリ\n past_depth_path = opt.past_depth_path if opt.past_depth_path is not None and len(opt.past_depth_path) > 0 else None\n\n # 強制反転指定用辞書作成\n reverse_specific_dict = {}\n if opt.reverse_specific is not None and len(opt.reverse_specific) > 0:\n for frame in opt.reverse_specific.split(']'):\n # 終わりカッコで区切る\n if ':' in frame:\n # コロンでフレーム番号と人物を区切る\n frames = frame.lstrip(\"[\").split(':')[0]\n # logger.debug(\"frame: %s\", frame)\n # logger.debug(\"frames: %s\", frames)\n # logger.debug(\"frame.split(':')[1]: %s\", frame.split(':')[1])\n # logger.debug(\"frame.split(':')[1].split(','): %s\", frame.split(':')[1].split(','))\n if '-' in frames:\n frange = frames.split('-')\n if len(frange) >= 2 and frange[0].isdecimal() and frange[1].isdecimal():\n for f in range(int(frange[0]), int(frange[1])+1):\n # 指定フレームの辞書作成\n if f not in reverse_specific_dict:\n reverse_specific_dict[f] = {}\n\n # 人物INDEXとその反転内容を保持\n reverse_specific_dict[f][int(frame.split(':')[1].split(',')[0])] = frame.split(':')[1].split(',')[1]\n else: \n if frames not in reverse_specific_dict:\n # 該当フレームがまだない場合、作成\n reverse_specific_dict[int(frames)] = {}\n\n # 人物INDEXとその反転内容を保持\n reverse_specific_dict[int(frames)][int(frame.split(':')[1].split(',')[0])] = frame.split(':')[1].split(',')[1]\n\n logger.warning(\"反転指定リスト: %s\", reverse_specific_dict)\n\n paramf = open( depth_path + \"/reverse_specific.txt\", 'w')\n paramf.write(opt.reverse_specific)\n paramf.close()\n\n # 強制順番指定用辞書作成\n order_specific_dict = {}\n if opt.order_specific is 
not None and len(opt.order_specific) > 0:\n for frame in opt.order_specific.split(']'):\n # 終わりカッコで区切る\n if ':' in frame:\n # コロンでフレーム番号と人物を区切る\n frames = frame.lstrip("[").split(':')[0]\n logger.debug("frames: %s", frames)\n if '-' in frames:\n frange = frames.split('-')\n if len(frange) >= 2 and frange[0].isdecimal() and frange[1].isdecimal():\n for f in range(int(frange[0]), int(frange[1])+1):\n # 指定フレームの辞書作成\n order_specific_dict[f] = []\n\n for person_idx in frame.split(':')[1].split(','):\n if int(person_idx) in order_specific_dict[f]:\n logger.error("×順番指定リストに同じINDEXが指定されています。処理を中断します。 %s", frame)\n return False\n order_specific_dict[f].append(int(person_idx))\n else: \n if frames not in order_specific_dict:\n # 該当フレームがまだない場合、作成\n order_specific_dict[int(frames)] = []\n\n for person_idx in frame.split(':')[1].split(','):\n if int(person_idx) in order_specific_dict[int(frames)]:\n logger.error("×順番指定リストに同じINDEXが指定されています。処理を中断します。 %s", frame)\n return False\n order_specific_dict[int(frames)].append(int(person_idx))\n\n logger.warning("順番指定リスト: %s", order_specific_dict)\n\n paramf = open( depth_path + "/order_specific.txt", 'w')\n paramf.write(opt.order_specific)\n paramf.close()\n\n # Predict the image\n predict_video(now_str, opt.video_path, depth_path, past_depth_path, interval, opt.json_path, opt.number_people_max, reverse_specific_dict, order_specific_dict, is_avi_output, opt.end_frame_no, opt.order_start_frame, opt.verbose, opt)\n\n logger.debug("Done!!")\n logger.debug("深度推定結果: {0}".format(depth_path +'/depth.txt'))\n\nif __name__ == '__main__':\n main()\n","sub_path":"predict_video.py","file_name":"predict_video.py","file_ext":"py","file_size_in_byte":36639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"239057162","text":"import os\r\n\r\nfrom flask import Blueprint\r\nfrom flask import redirect\r\nfrom flask import render_template\r\nfrom flask import request\r\nfrom flask import session\r\n\r\nfrom Core.AuthorityManager import RoleManager\r\nfrom Core.DepartmentManager import DepartmentManager\r\nfrom Core.PersonManager import PersonManager\r\nfrom Helper.OtherHelper import errorPage, successPage\r\nfrom Helper.UnlockHelp import getMemory, refreshMemory, cache\r\nfrom Helper.UserHelper import getImagePath, isLogin, loginRequired\r\nfrom config import (\r\n GET,\r\n GAP, POST)\r\n\r\n\r\nusers = Blueprint("users", __name__, template_folder="templates")\r\n\r\n\r\ntmpKey = None\r\ntmpRows = None\r\n\r\n\r\n@users.route("/login", methods=GAP)\r\ndef login():\r\n if isLogin():\r\n return redirect("/")\r\n if request.method == GET:\r\n return render_template("Views/login.html", title="登录")\r\n elif request.method == 'POST':\r\n username = request.form.get("username", 0)\r\n password = request.form.get("password", 0)\r\n if username and password:\r\n session['user'] = username\r\n return redirect("/")\r\n return render_template("Views/login.html", title="登录")\r\n\r\n\r\n@users.route("/logout", methods=GET)\r\n@loginRequired\r\ndef logout():\r\n session.pop('user', True)\r\n return redirect("/")\r\n\r\n\r\n@users.route("/", methods=GET)\r\n@loginRequired\r\ndef index():\r\n return render_template("Views/index.html", title="生物特征身份识别统一管理平台", user=session['user'])\r\n\r\n\r\n@users.route("/user", methods=GET)\r\n@loginRequired\r\ndef user():\r\n global tmpRows, tmpKey\r\n curPage = int(request.args.get("page", 0))\r\n isReg = int(request.args.get("isReg", 
1))\r\n count = 15\r\n\r\n key = request.args.get("key", "").strip()\r\n if key == "":\r\n data = PersonManager.getAllUserLimit(curPage * count, count, isReg)\r\n print(data.Err)\r\n rows = PersonManager.getCount(isReg)\r\n else:\r\n data = PersonManager.getUserBySearch(key, curPage * count, count, isReg)\r\n print(data.Err)\r\n if key != tmpKey:\r\n rows = PersonManager.getCount(isReg, key)\r\n tmpRows = rows\r\n tmpKey = key\r\n else:\r\n rows = tmpRows\r\n allPage = int(rows / count)\r\n persons = data.Result\r\n return render_template("Views/User/user.html", title="人员管理", persons=persons, rows=rows, page=curPage, allPage=allPage,\r\n count=count, key=key, isReg=isReg)\r\n\r\n\r\n@users.route("/user/delete/", methods=GET)\r\n@loginRequired\r\ndef delUser(uID):\r\n userInfo = PersonManager.getUserValue(uID)\r\n r = PersonManager.delUser(uID)\r\n if r.Suc:\r\n # if userInfo['hasLogo']:\r\n # imagePath = getImagePath(userInfo['id'])\r\n # os.remove(imagePath)\r\n refreshMemory()\r\n title = "操作成功"\r\n message = "删除人员 " + userInfo['userName'] + " 成功"\r\n return successPage(title, msg=message, url="/user", parent=False)\r\n else:\r\n title = "出错"\r\n message = "删除人员 " + userInfo['userName'] + " 失败: " + str(r.Err)\r\n return errorPage(title=title, msg=message)\r\n\r\n\r\n@users.route("/user/mulDelete", methods=POST)\r\n@loginRequired\r\ndef mulDelUser():\r\n title = None\r\n uIDs = request.form.get("uIDs", 0)[:-1].split(",")\r\n print(uIDs)\r\n userName = ""\r\n for uID in uIDs:\r\n userInfo = PersonManager.getUserValue(uID)\r\n r = PersonManager.delUser(uID)\r\n if r.Suc:\r\n if userInfo['userLogo'] is not None:\r\n imagePath = getImagePath(userInfo['id'])\r\n os.remove(imagePath)\r\n title = "操作成功"\r\n userName += userInfo['userName'] + ", "\r\n else:\r\n title = "出错"\r\n message = "删除人员 " + userInfo['userName'] + " 失败: " + str(r.Err)\r\n return errorPage(title=title, msg=message, url="/user", parent=False)\r\n refreshMemory()\r\n message = "删除人员 " + userName + " 成功"\r\n return successPage(title=title, msg=message, url="/user", parent=False)\r\n\r\n\r\n@users.route("/user/mulKind/", methods=GAP)\r\n@loginRequired\r\ndef mulKind(uIDs):\r\n if request.method == 'POST':\r\n tmpUIDs = uIDs\r\n uIDs = uIDs[:-1].split(",")\r\n for uID in uIDs:\r\n departmentID = request.form.get("departmentID")\r\n ret = PersonManager.updateUserDepartment(uID, departmentID)\r\n\r\n if not ret.Suc:\r\n refreshMemory()\r\n return errorPage(msg=str(ret.Err), url="/user/mulKind/" + tmpUIDs, parent=False)\r\n refreshMemory()\r\n return successPage(msg="修改部门成功", url="/user", parent=False)\r\n elif request.method == 'GET':\r\n allDep = DepartmentManager.getDepartmentList()\r\n return render_template("Views/User/mulKind.html", title="分配部门", departments=allDep.Result)\r\n else:\r\n return errorPage()\r\n\r\n\r\n@users.route("/user/edit/", methods=GAP)\r\n@loginRequired\r\ndef editUser(uID):\r\n if request.method == 'GET':\r\n r = PersonManager.getUser(uID)\r\n if r.Suc:\r\n r.Result[0]['roles'] = [line['id'] for line in PersonManager.getUserRoles(uID)]\r\n r.Result[0].pop("fv", True)\r\n r.Result[0].pop("fp", True)\r\n r.Result[0].pop("iris", True)\r\n r.Result[0].pop("face", True)\r\n r.Result[0].pop("userLogo", True)\r\n allRole = RoleManager().getAll()\r\n allDep = DepartmentManager.getDepartmentList()\r\n print(allRole.Result)\r\n print(r.Result[0])\r\n return render_template("Views/User/edit.html", title="人员修改", person=r.Result[0], 
departments=allDep.Result, roles=allRole.Result)\r\n else:\r\n return errorPage(title=\"出错\", msg=\"出错\" + str(r.Err), url=\"/user\", parent=False)\r\n elif request.method == 'POST':\r\n roleIDs = request.form.getlist(\"roleIDs\")\r\n params = {\r\n 'userName': request.form.get(\"userName\").strip(),\r\n 'loginName': request.form.get(\"loginName\").strip(),\r\n 'gender': request.form.get(\"gender\").strip(),\r\n 'departmentID': request.form.get(\"departmentID\").strip(),\r\n 'userID': request.form.get(\"userID\").strip(),\r\n 'rights': request.form.get('rights').strip(),\r\n 'id': uID\r\n }\r\n r = PersonManager.updateUser(params)\r\n if r.Suc:\r\n ret = PersonManager.delFromMap(uID)\r\n if ret.Suc:\r\n for roleID in roleIDs:\r\n ret = PersonManager.updataMap(uID, roleID)\r\n if not ret.Suc:\r\n return errorPage(msg=\"添加角色时是出错:\" + str(ret.Err), url=\"/user/edit/\" + uID, parent=False)\r\n refreshMemory()\r\n return successPage(title=\"修改成功\", msg=\"修改成功\", url=\"/user\", parent=False)\r\n else:\r\n return errorPage(title=\"修改失败\", msg=\"修改失败: \" + str(r.Err) + \"证件号已存在\", url=\"/user\", parent=False)\r\n","sub_path":"Blueprint/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":7050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"559074358","text":"# https://leetcode.com/problems/two-sum-ii-input-array-is-sorted/\n\n# Given a 1-indexed array of integers numbers that is already sorted in non-decreasing order, find two numbers such that they add up to a specific target number. Let these two numbers be numbers[index1] and numbers[index2] where 1 <= first < second <= numbers.length.\n\n# Return the indices of the two numbers, index1 and index2, as an integer array [index1, index2] of length 2.\n\n# The tests are generated such that there is exactly one solution. You may not use the same element twice.\n\n \n\n# Example 1:\n\n# Input: numbers = [2,7,11,15], target = 9\n# Output: [1,2]\n# Explanation: The sum of 2 and 7 is 9. 
Therefore index1 = 1, index2 = 2.\n\n\nclass Solution:\n def twoSum(self, numbers: list[int], target: int) -> list[int]:\n start = 0\n end = len(numbers) - 1\n \n while start < end:\n total = numbers[start] + numbers[end]\n if total == target:\n return [start+1, end+1]\n \n if total > target:\n end -= 1\n else:\n start += 1 \n \n","sub_path":"leetcode/algo/two_sum_2.py","file_name":"two_sum_2.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"406532765","text":"from .exceptions import *\nimport random\n\n\nclass GuessAttempt(object):\n \n def __init__(self,guess,hit=None ,miss=None):\n self.hit = hit\n self.miss = miss\n \n if hit and miss:\n raise InvalidGuessAttempt() \n if hit :\n self.miss = False\n elif miss:\n self.hit = False\n \n\n def is_hit(self):\n return self.hit\n \n def is_miss(self):\n return self.miss\n\n\nclass GuessWord(object):\n \n def __init__(self,guess_word): \n self.answer = guess_word\n self.masked = len(guess_word) * \"*\"\n \n if not guess_word:\n raise InvalidWordException()\n \n\n def perform_attempt(self,letter):\n if not len(letter) == 1 :\n raise InvalidGuessedLetterException()\n \n if letter.lower() in self.answer.lower():\n masked = \"\"\n for position,item in enumerate(self.answer.lower()):\n if letter.lower() == item:\n masked += item\n else:\n masked += self.masked[position]\n self.masked = masked\n \n return GuessAttempt(letter,hit=True)\n else:\n return GuessAttempt(letter,miss=True)\n\n\nclass HangmanGame(object):\n \n WORD_LIST = ['rmotr', 'python', 'awesome']\n \n def __init__(self,word_list=None, number_of_guesses=5):\n \n self.remaining_misses = number_of_guesses\n self.previous_guesses = []\n if not word_list:\n word_list = self.WORD_LIST\n chosen_word = self.select_random_word(word_list)\n self.word = GuessWord(chosen_word)\n self.won = False\n self.finished = False\n self.lost = False\n \n @classmethod\n def select_random_word(self,list_of_words=None):\n if not list_of_words:\n raise InvalidListOfWordsException()\n return random.choice(list_of_words)\n \n def guess(self,letter):\n self.previous_guesses.append(letter.lower())\n \n if self.remaining_misses == 0 : \n raise GameFinishedException()\n \n if self.word.answer == self.word.masked :\n raise GameFinishedException()\n \n result = self.word.perform_attempt(letter)\n if result.is_miss():\n self.remaining_misses -= 1\n \n if self.word.answer == self.word.masked :\n self.finished = True\n self.won = True\n raise GameWonException() \n \n if self.remaining_misses == 0 :\n \n self.finished = True\n self.lost = True\n\n raise GameLostException()\n\n return result\n \n def is_finished(self):\n return self.finished\n \n def is_lost(self):\n return self.lost\n \n def is_won(self):\n return self.won\n \n \n","sub_path":"hangman/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"460632407","text":"\"\"\"\nUse pdoc to make html documentation for modules\n\"\"\"\n\nfrom sigproc.__init__ import __version__ as currentVersion\n\"\"\"\nSemantic version string as defined within __init__.py\n\"\"\"\n\nimport pdoc\nimport markdown\nimport os\n\ndef make_html(moduleName_full):\n \n print(\"Creating docs for `{0}` module\".format(moduleName_full))\n \n h = pdoc.html(moduleName_full)\n \n moduleName = moduleName_full.split('.')[-1]\n \n f = open(\"../docs/{0}.html\".format(moduleName), 'w')\n 
\n f.write(h)\n f.close()\n \ndef markdown_to_html(fName_input):\n \n print("Rendering markdown text from {0}".format(fName_input))\n \n fName_output = "../docs/{0}.html".format(os.path.splitext(fName_input)[0])\n \n markdown.markdownFromFile(input=("../" + fName_input),\n output=fName_output)\n \n #%% \nprint("Creating documentation for version {0}".format(currentVersion))\n \n# List modules to be documented here\nmake_html("animate_data")\nmake_html("decomposition")\nmake_html("fdd")\nmake_html("kalman")\nmake_html("noise")\nmake_html("sigproc")\nmake_html("sonify")\nmake_html("ssi")\nmake_html("trigger")\nmake_html("waveform")\nmake_html("windeng")\n\n\n#%%\n# List markdown files in root folder to be rendered\nmarkdown_to_html("README.md")\nmarkdown_to_html("CHANGELOG.md")\n\n\n\n\n","sub_path":"sigproc/make_docs.py","file_name":"make_docs.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"235361543","text":"from pornhub_api import PornhubApi\nfrom discord import Embed\nfrom src.utils import ErrorHandler\nimport asyncio\n\nclass PornhubSearch():\n @staticmethod\n async def search(bot, ctx, searchQuery, message):\n def videoEmbed(video):\n embed = Embed(title=video.title)\n embed.add_field(name='Video ID', value=video.video_id)\n embed.add_field(name='Views', value=video.views)\n embed.add_field(name='Rating', value=video.rating)\n embed.add_field(name='Pornstars', \n value=', '.join([pornstar.pornstar_name for pornstar in video.pornstars]) if video.pornstars != [] else 'None listed')\n embed.add_field(name='Publish Date', value=video.publish_date.strftime('%m/%d/%Y'))\n embed.add_field(name='Duration', value=video.duration)\n embed.add_field(name='Tags', \n value=', '.join([tag.tag_name for tag in video.tags] if video.tags != [] else 'None listed'),\n inline=False)\n \n embed.set_thumbnail(url=f'{video.default_thumb.scheme}://{video.default_thumb.host}/{video.default_thumb.path}')\n embed.url=f'{video.url.scheme}://{video.url.host}{video.url.path}?viewkey={video.video_id}'\n return embed\n try:\n data = PornhubApi().search.search(searchQuery).videos[0:10]\n embeds = list(map(videoEmbed, data))\n\n for index, item in enumerate(embeds): \n item.set_footer(text=f'Page {index+1}/{len(embeds)}\\nRequested by: {str(ctx.author)}')\n\n doExit, curPage = False, 0\n await message.add_reaction('🗑️')\n if len(embeds) > 1:\n await message.add_reaction('◀️')\n await message.add_reaction('▶️')\n \n while doExit == False:\n try:\n await message.edit(content=None, embed=embeds[curPage%len(embeds)])\n reaction, user = await bot.wait_for("reaction_add", check=lambda reaction, user: all([user == ctx.author, str(reaction.emoji) in ["◀️", "▶️", "🗑️"], reaction.message == message]), timeout=60)\n await message.remove_reaction(reaction, user)\n \n if str(reaction.emoji) == '🗑️':\n await message.delete()\n doExit = True\n elif str(reaction.emoji) == '◀️':\n curPage-=1\n elif str(reaction.emoji) == '▶️':\n curPage+=1\n \n except asyncio.TimeoutError: \n raise\n except asyncio.CancelledError:\n pass\n\n except asyncio.TimeoutError:\n raise\n except asyncio.CancelledError:\n pass\n\n except Exception as e:\n await message.delete()\n await ErrorHandler(bot, ctx, e, searchQuery)\n finally: 
return\n","sub_path":"src/pornhub.py","file_name":"pornhub.py","file_ext":"py","file_size_in_byte":3002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"406259326","text":"import copy\nimport itertools\nimport matplotlib.pylab as plt\nimport numpy as np\nimport scipy.special\nimport time\nimport torch\n\n\nclass JamData():\n '''\n JamData(): Class to hold current Jam Data in an object\n '''\n def __init__(self):\n '''\n __init__(): initialize JamData class\n '''\n self.alldata = False\n self.step = None\n self.friendly_pre = None\n self.friendly = None\n self.comms = None\n self.jammers_pre = None\n self.jammers = None\n self.torchstate = None\n self.numpystate = None\n self.adjacency = None\n self.logPjammers_prior = None\n self.logPjammers_predict = None\n self.update = None\n self.logPjammers_unnormalized = None\n self.logPjammers_posterior = None\n\n\nclass Jams():\n '''\n Jams() superclass which JamsGrid inherits from\n '''\n def __init__(self, ngrid=5, ncomms=1, nassets=1, njams=1, slope=10., nsteps=1, move=True, misspecified=False, delta=None, seed=None, push=True):\n '''\n __init__(self, ngrid=5, ncomms=1, nassets=1, njams=1, slope=10., nsteps=1, move=True, misspecified=False, delta=None, seed=None, push=True): initialization routine for class Jams\n '''\n self.ngrid = ngrid # grid points on map in 1D\n self.ncomms = ncomms\n self.nassets = nassets\n self.njams = njams\n self.slope = slope\n self.nsteps = nsteps\n self.move = move\n self.assume_move = move if not misspecified else not move\n self.delta = delta\n self.seed = seed\n self.push = push\n self.current = JamData()\n if self.seed is not None:\n np.random.seed(self.seed)\n torch.manual_seed(self.seed+1)\n self.hq = self.headquarters()\n self.nfriendly = len(self.hq) + self.ncomms + self.nassets # 1 for headquarters\n self.current.adjacency = torch.zeros((self.nfriendly, self.nfriendly), dtype=bool)\n self.assets0 = ((self.ngrid-1.1,self.ngrid-1.1),)\n self.assign_assets(self.assets0)\n self.friendly_initialize()\n self.jammer_initialize()\n self.stack = []\n self.currect_on_stack = False\n if self.delta == 'known':\n self.delta = tuple(int(round(k)) for k in self.tuple_of_all_jammers())\n if self.delta is None:\n self.current.logPjammers_unnormalized = torch.ones([self.ngrid]*2*self.njams)*(-2*self.njams)*np.log(self.ngrid) # logProb(jammers@loc); init to discrete-uniform on grid\n else:\n assert len(self.delta) == 2*self.njams\n self.current.logPjammers_unnormalized = torch.ones([self.ngrid]*2*self.njams)*(-np.inf)\n self.current.logPjammers_unnormalized[self.delta] = 0\n\n def headquarters(self):\n '''\n headquarters: return coordinates of headquarters on grid\n '''\n return [(0.1, 0.1)]\n\n \n def assign_assets(self, a0):\n '''\n assign_assets: returns a list of asset locations, copying assets0, then using randomly assigned assets up to self.nassets\n '''\n assets = []\n for a in range(self.nassets):\n if a < len(a0):\n assets.append(a0[a])\n else:\n assets.append(self.teleport_ongrid())\n self.assets0 = a0 # update assets0 with what is passed in\n self.assets = assets\n\n\n def teleport_ongrid(self):\n '''\n teleport_ongrid: select a random location on grid\n '''\n return (np.random.choice(self.ngrid), np.random.choice(self.ngrid))\n\n\n def teleport_offgrid(self):\n '''\n teleport_offgrid: select a random location within the bounds of the grid, but with probability 1, not on a gridpoint\n '''\n return tuple(np.random.uniform(low=0.0, high=self.ngrid-1, 
size=2))\n\n\n def teleport_comms(self):\n '''\n teleport_comms: select random locations on grid for comm(s)\n done once during every step of the loop\n '''\n comms = []\n for comm in self.comms_set: \n comms.append(self.teleport_offgrid())\n return comms\n\n\n def friendly_flatten(self, hq, comms, assets):\n '''\n friendly_flatten: combine all friendly units into one tuple, in specified order\n '''\n ff = []\n ff.extend(hq) # just one headquarters\n ff.extend(comms) # ncomm comm units\n ff.extend(assets) # nassets assets including len(assets0) predefined, others, if any, random\n return tuple(ff)\n\n\n def friendly_move(self):\n '''\n friendly_move: right now a wrapper for teleport_comm but will be generalized\n '''\n self.current.comms = self.teleport_comms()\n return self.friendly_flatten(self.hq, self.current.comms, self.assets)\n # self.current.friendly = \n\n \n def friendly_initialize(self):\n '''\n friendly_initialize: Initialize the friendlies\n '''\n self.comms_set = {(i + 1) for i in range(self.ncomms)}\n self.current.friendly = self.friendly_move()\n\n\n def teleport_jammers(self):\n '''\n teleport_jammers: select random locations on grid for jammers\n done once on class initialization\n '''\n self.current.jammers = []\n for _ in range(self.njams):\n self.current.jammers.append(self.teleport_offgrid())\n\n\n def jammer_initialize(self):\n '''\n jammer_initialize: initialize jammers\n '''\n self.teleport_jammers()\n\n\nclass JamsPoint(Jams):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n\nclass JamsGrid(Jams):\n '''\n JamsGrid(): Main class for Jamming from 2D-Grid\n '''\n def __init__(self, **kwargs):\n '''\n __init__(self, ngrid=5, ncomms=1, nassets=1, njams=1, slope=10., nsteps=1, move=True, misspecified=False, delta=None, seed=None, push=True): initialization routine for class JamsGrid\n '''\n super().__init__(**kwargs)\n self.step = 0 # initialize counter for number of steps\n self.Jx, self.Jy = self.makeJxy()\n self.ddiff = torch.tensor([self.njams] + [self.ngrid, self.ngrid]*self.njams, dtype=float)\n self.ddiff1 = torch.zeros(self.njams)\n self.ambient_noise_power = 0\n self.makeMj()\n self.makeMf1()\n # All distributions are represented as logs for stability\n self.priorshape = self.current.logPjammers_unnormalized.shape\n\n\n def itertuple(self, dims):\n '''\n itertuple is a generator producing the multiindexes of all elements of a "square" tensor\n where the square tensor has dim dimensions of all length self.ngrid\n usually dims = 2*self.njams (for calculations) or 2*self.njams-2 (for plotting)\n '''\n if dims == 0:\n yield tuple()\n return\n top = self.ngrid\n I = [0]*dims\n yield tuple(I)\n i = 0\n while True:\n I[i] += 1\n if I[i] >= top:\n assert I[i] == top\n I[i] = 0\n i += 1\n else:\n i = 0\n yield tuple(I)\n if i >= dims:\n break\n\n\n def makeJm(self, k):\n '''\n makeJm creates an np.array of size [self.ngrid]*(2*self.njams)\n the array returned projects the index onto the kth coordinate\n Jm[i1,i2,...,ik,...] 
= ik\n This is usefull with doing vectorized tensor arithmetic on ik\n Question: could I do the same thing with broadcasting?\n '''\n Jm = np.zeros([self.ngrid]*(2*self.njams))\n for I in self.itertuple(2*self.njams):\n Jm[I] = I[k] \n return Jm\n\n\n def makeJxy(self):\n '''\n makeJxy creates and returns 2 tensors Jrx, Jry. Each of these tensors has shape [self.njams] + [self.ngrid]*(2*self.njams)\n the zeroth dimension is j indexing jammers the rest of the dimensions are the size of the joint grid. \n The tensor components are Jrx[x1,y1,x2,y2,...,xj,...] = xj; for Jry[...] = yj; the x's and y's alternate\n Note xj and yj are integer indicies as well as xy-coordinates representing longitude and lattitude.\n This works because we put our xy-grid on integer range(self.ngrid) values of x and y.\n To generalize, transform Jx --> longitude(Jx) and Jy --> latitude(Jy) or whatever xy values you are using\n The Jrx is [x1, x2, x3, ..., xnjams]. Likewise for Jry.\n makeJxy is called once on object initialization and the values returned are stored as self.Jx and self.Jy\n '''\n Jx = []\n Jy = []\n for k in range(self.njams):\n Jx.append(self.makeJm(2*k))\n Jy.append(self.makeJm(2*k+1))\n # Update for non-integer x, y here\n Jrx = torch.tensor(np.array(Jx), dtype=float)\n Jry = torch.tensor(np.array(Jy), dtype=float)\n return Jrx, Jry\n\n\n def makeJxy1(self):\n '''\n makeJrxy1 creates and returns 2 tensors Jrx1, and Jry1. Each of these has shape [self.njams] -- 1D-tensor\n The jth component of Jrx1 is the veridical x-value of the jth jammer\n '''\n Jx1 = torch.tensor([self.current.jammers[kj][0] for kj in range(self.njams)], dtype=float) # each component single float, veridical x-location \n Jy1 = torch.tensor([self.current.jammers[kj][1] for kj in range(self.njams)], dtype=float) # each component single float, veridical y-location\n return Jx1, Jy1\n\n\n # def adjacent_grid_coord(self, old_coord):\n # new_coord = old_coord + np.random.choice([-1., 0., 1.])\n # if new_coord < 0.:\n # new_coord += 1.\n # elif new_coord > self.ngrid-1:\n # new_coord -= 1.\n # return new_coord\n\n\n def tuple_of_all_jammers(self):\n '''\n tuple_of_all_jammers(): takes veridical jammer locations and produces a tuple of all jammers suitable for an idx for list_of_neighbors\n and other functions\n '''\n idx = [] # idx is an index when indicates position on grid---but doesn't have to, and usually doesn't\n for kj in range(self.njams):\n idx.extend(list(self.current.jammers[kj]))\n return tuple(idx)\n\n\n def tuple_of_closest_grid_to_jammers(self):\n '''\n tuple_of_closest_grid_to_jammers(): same as tuple_of_all_jammers except rounds values to nearest grid point.\n '''\n floatjammers = self.tuple_of_all_jammers()\n return tuple((int(round(f)) for f in floatjammers))\n\n\n def list_of_tuples_for_each_jammer(self, idx):\n '''\n list_of_tuples_for_each_jammer: given an index into joint space, break tuple into list of tuples one for each jammer (x, y)\n inverts: list_of_tuples_for_each_jammer(tuple_of_all_jammers) = self.current.jammers\n '''\n listofjammers = []\n for kj in range(self.njams):\n listofjammers.append((idx[2*kj], idx[2*kj+1]))\n return listofjammers\n\n\n def jammers_move(self):\n '''\n jammers_move: returns new position of jammers, randomly selected from neighbors with equal weight\n '''\n # for kj in range(self.njams):\n # newx = self.adjacent_grid_coord(self.jammers[kj][0])\n # newy = self.adjacent_grid_coord(self.jammers[kj][1])\n # self.jammers[kj] = (newx, newy)\n if not self.move:\n return 
self.current.jammers\n old = self.tuple_of_all_jammers()\n neighbors = self.list_of_neighbors(old)\n new = np.random.choice(len(neighbors))\n return self.list_of_tuples_for_each_jammer(neighbors[new])\n\n\n def list_of_neighbors(self, idx):\n '''\n list_of_neighbors: given an index idx (real valued) into joint space, returns list of neighbors in the space; \n involves edge effexts, neighbors are one or none different from idx in each dimension\n '''\n assert idx[0] >= 0\n assert idx[0] <= self.ngrid - 1\n if idx[0] < 1:\n lowest = (idx[0] % 1)\n list1 = [lowest, lowest+1]\n elif idx[0] == self.ngrid - 1:\n highest = self.ngrid - 1\n list1 = [highest-1, highest]\n elif idx[0] > self.ngrid - 2: # > 8.0 when ngrid==10\n highest = (idx[0] % 1) + self.ngrid - 2\n list1 = [highest-1, highest]\n else:\n list1 = [idx[0]-1, idx[0], idx[0]+1]\n if len(idx) == 1:\n return list1\n elif len(idx) == 2:\n list2 = self.list_of_neighbors(idx[1:])\n return [a for a in itertools.product(list1, list2)]\n else:\n list2 = self.list_of_neighbors(idx[1:])\n return [(a, *b) for (a, (*b,)) in itertools.product(list1, list2)]\n\n\n # def number_of_boundaries(self, idx):\n # nbounds = 0\n # for i in idx:\n # if i < 0:\n # nbounds = -np.inf\n # print(\"Warning: number_of_boundaries outside of grid!\")\n # elif i == 0:\n # nbounds += 1\n # elif i == self.ngrid-1:\n # nbounds += 1\n # elif i > self.ngrid-1:\n # nbounds = -np.inf\n # print(\"Warning: number_of_boundaries outside of grid!\")\n # return nbounds\n\n\n # def weight_of_index(self, neighbor, idx):\n # assert len(idx) == 2*self.njams\n # centerweight = torch.tensor((2**self.number_of_boundaries(idx))/(3**(2*self.njams)))\n # neighborweight = torch.tensor((2**self.number_of_boundaries(neighbor))/(3**(2*self.njams)))\n # return torch.tensor([centerweight, neighborweight]).min()\n\n\n def jam_convolve(self, idx1, logP): # sum over idx0, value at idx1\n '''\n jam_convolve(idx1, logP): takes neighbor of idx1: list_of_neighbors(idx1)\n computes terms to sum from distribution logP\n each term: log(P[neighbor]/number_of_neighbors)\n '''\n neighbors = self.list_of_neighbors(idx1)\n terms = torch.tensor([logP[idx0] - torch.log(torch.tensor(len(self.list_of_neighbors(idx0)), dtype=float)) for idx0 in neighbors])\n return torch.logsumexp(terms, dim=0)\n\n\n def jammers_predict_args(self, logP):\n '''\n jammers_predict_args: version of function with calling and returning arguments\n '''\n newP = copy.deepcopy(logP)\n if not self.assume_move:\n return newP\n for idx1 in self.itertuple(2*self.njams):\n newP[idx1] = self.jam_convolve(idx1, logP)\n return newP\n\n\n def jammers_predict(self):\n '''\n jammers_predict: wrapper for jammers_predict_args that doesn't use arguments, takes them from self\n '''\n self.current.logPjammers_unnormalized = self.jammers_predict_args(self.current.logPjammers_unnormalized)\n\n\n def dist_jxy_to_friendly(self, jx, jy, kf=0):\n '''\n dist_jxy_to_friendly computes the Euclidean distance from jammer(s) (jx, jy) to a specified friendly (fx, fy) in the Cartesian plane\n jx, jy could be grid tensors (shape: [njams] + [GRIDSHAPE]) for grid of jammers or \n (shape: [njams]) for 1D veridical jammer locations in x and y\n kf specifies kf^th comm\n Might generalize Euclidean distance to distance on globe, but that probably isn't necessary\n '''\n #TODO see if you can let friendlies be a tensor to do all friendlies at once, faster\n fx = torch.tensor(self.current.friendly[kf][0], dtype=float)\n fy = torch.tensor(self.current.friendly[kf][1], 
dtype=float)\n return torch.sqrt((jx - fx)**2 + (jy - fy)**2)\n\n\n def dist_jxy_to_point(self, jx, jy, xy):\n '''\n dist_jxy_to_point computes the Euclidean distance from jammer(s) (jx, jy) to a specified point (px, py) in the Cartesian plane\n Might generalize Euclidean distance to distance on globe, but that probably isn't necessary\n '''\n px = torch.tensor(xy[0], dtype=float)\n py = torch.tensor(xy[1], dtype=float)\n return torch.sqrt((jx - px)**2 + (jy - py)**2)\n\n\n def power_friendly_at_friendly(self):\n '''\n power_friendly_at_friendly(): returns S, a tensor of shape [nfriendly, nfreindly] showing at [f1,f2] power of sender-friendly f1 at receiver-friendly f2\n S ia infinite on its diagonal (because power of a signal is infinite at its own point source)\n '''\n return torch.tensor([[# 0 if f1 == f2 else \n (#self.Mf1[f1]\n 1/self.dist_jxy_to_friendly(self.current.friendly[f1][0], \n self.current.friendly[f1][1], f2)**2) for f1 in range(self.nfriendly)] for f2 in range(self.nfriendly)])\n \n\n def power_jammer_at_friendly_grid(self):\n '''\n power_jammer_at_friendly_grid(): returns an ND-tensor of shape [self.nfriendly, self.njams]+GRID_SHAPE\n component (kf,kj,grid1...grid2j) is power at friendly kf of jammer kj when the jammers are located at grid1...grid2j\n '''\n #TODO There seems to be some redundant calulations here. Need just a two-dimensional grid not 2J-dims.\n return torch.stack([(1./(self.dist_jxy_to_friendly(self.Jx, self.Jy, kf)**2)) for kf in range(self.nfriendly)], dim=0) # Mj # friendly at 0th position\n\n\n def power_jammer_at_friendly_veridical(self):\n '''\n power_jammer_at_friendly_veridical(): returns a 2D-tensor of shape [self.nfriendly, self.njams]\n component (kf,kj) is power at friendly kf of jammer kj\n '''\n Jx1, Jy1 = self.makeJxy1()\n #TODO May be running above function an unnecesary number of times\n return torch.stack([(1./(self.dist_jxy_to_friendly(Jx1, Jy1, kf)**2)) for kf in range(self.nfriendly)], dim=0) # Mj\n\n\n def power_jammer_at_point_veridical(self, xy):\n '''\n power_jammer_at_point_veridical(xy): returns a 1D-tensor of shape [self.njams]\n component kj is power of jammer kj at point with coordinates xy (real-valued 2-tuple)\n '''\n Jx1, Jy1 = self.makeJxy1()\n #TODO May be running above function an unnecesary number of times\n return (1./(self.dist_jxy_to_point(Jx1, Jy1, xy)**2))\n\n\n def power_ambient(self):\n '''\n power_ambient(): Returns power in the atmosphere without any particular signal (constant, self.ambient_noise_power, set for class, 0 by default, for now)\n '''\n return self.ambient_noise_power\n\n\n def power_background_at_friendly_veridical(self):\n '''\n power_background_at_friendly_veridical(): Computes a tensor of shape [nfriendly] with component kf equal to the background power at friendly kf\n background power equals power of all jammers at friendly kf plus ambient_noise_power\n computed as Power (kf, kj)---(at friendly kf, from veridical jammer kj).sum(over_jammers_kj) + power_ambient\n '''\n return self.power_jammer_at_friendly_veridical().sum(dim=1) + self.power_ambient()\n\n\n def power_background_at_friendly_grid(self):\n '''\n power_background_at_friendly_grid(): Computes a tensor of shape [nfriendly]+GRID_SHAPE with component (kf,x1,y1,x2,y2,...,xj,yj) equal to\n the background power at friendly kf assuming the j (aka self.njams) jammers are in positions (x1,y1,x2,y2,...,xj,yj)\n computes T[kf,GRIDPOINT] = (Power at friendly kf from J_JAMMERS_IN_2J_GRIDPOINT).sum(over_J_jammers) + power_ambient\n '''\n return 
self.power_jammer_at_friendly_grid().sum(dim=1) + self.power_ambient()\n\n\n def power_background_at_point_veridical(self, xy):\n '''\n power_background_at_point_veridical(xy): Returns power of background (positive real number) at point (xy) (real-tuple (x,y) coordinates in battlefield)\n with ambient noise and jammers at their veridical locations\n '''\n return self.power_jammer_at_point_veridical(xy).sum(dim=0) + self.power_ambient()\n\n\n def sjr_db_veridical(self):\n '''\n sjr_db_veridical(): Computes a tensor of shape [nfriendly, nfriendly] specifically (sender-friendly, receiver-friendly)\n using S=power_friendly_at_friendly(), a tensor of shape [nfriendly, nfreindly] showing at [f1,f2] power of sender-friendly f1 at receiver-friendly f2\n S is infinite on its diagonal (because power of a signal is infinite at its own point source)\n power_background_at_friendly_veridical(), a 1D tensor of shape [nfriendly] (all-background-power(all-jammers+ambient)-at-receiver-friendly)\n here broadcast into 2D [nfriendly, nfriendly]\n returns 10*torch.log10(self.power_friendly_at_friendly()/self.power_background_at_friendly_veridical())\n Broadcasting hint: (F/B)_i,k = F_i,k/B_k as desired, so that k in both cases refers to receiver-friendly\n '''\n return 10*torch.log10(self.power_friendly_at_friendly()/self.power_background_at_friendly_veridical())\n\n\n def prepare_background(self):\n '''\n prepare_background(): Prepares the tensor returned by power_background_at_friendly_grid() for broadcasting in the computation of signal-to-noise ratio S/B\n returns a tensor of shape: GRID_SHAPE+[1, nfriendly])\n\n '''\n j = self.njams\n perm = [k+1 for k in range(2*j)]\n perm.append(0)\n return self.power_background_at_friendly_grid().permute(perm).unsqueeze(2*j)\n\n\n def prepare_db(self, db):\n '''\n prepare_db(db): permute the 2*j and 2*(j+1) dimensions of tensor db to the first two dimensions where j=self.njams\n used for grid calculation of db=S/B where for broadcasting reasons the dimensions are in a temporary condition.\n '''\n j = self.njams\n perm = [2*j, 2*j+1]\n perm.extend([k for k in range(2*j)])\n return db.permute(perm)\n\n\n def sjr_db_grid(self):\n '''\n sjr_db_grid(): Signal_to_Jamming_Ratio_Decibels\n returns: (in decibels) R=S/B; R.shape=[nfriendly,nfriendly]+GRID_SHAPE\n S (Signal) equals power_friendly_at_friendly(): S.shape=[nfriendly, nfriendly]\n S is infinite on its diagonal (because power of a signal is infinite at its own point source)\n that means that R[sender,receiver,x1,y1,x2,y2,...,xj,yj] = Infinity whenever sender=receiver\n but this is right and it still seems to work.\n B (Background): equals prepare_background() with B.shape=GRID_SHAPE+[1,nfriendly]\n prepare_background() calls power_background_at_friendly_grid() to get a tensor with same elements but different shape \n (original shape=[nfriendly]+GRID_SIZE) which prepare_background() permutes and adds a singleton dimension \n accordingly for broadcasting in the efficient computation of db=S/B (converted to decibels).\n After the computation db.shape=GRID_SHAPE+[nfriendly,nfriendly] which is returned permuted to shape=[nfriendly,nfriendly]+GRID_SHAPE\n by the method prepare_db(db)\n '''\n S = self.power_friendly_at_friendly()\n B = self.prepare_background()\n db = 10*torch.log10(S/B)\n return self.prepare_db(db)\n\n\n def makeMj(self):\n '''\n makeMj(): Make tensor of constants for free propagation model M/r^2 for power of jammers at distance\n '''\n self.Mj = torch.ones((self.njams)) # Will make this more general 
later\n\n\n def makeMf1(self):\n '''\n makeMj(): Make tensor of constants for free propagation model M/r^2 for power of friendlies at distance\n '''\n self.Mf1 = torch.ones((self.nfriendly)) # Will make this more general later\n\n\n # def distdiff(self, target, jx, jy, kc=0):\n # '''\n # distdiff computes the difference between the distances: comm <--> jammer minus comm <--> target\n # if distance comm <--> jammer is larger than distance comm <--> target, then\n # difference is positive and probability of successful transmission is closer to one\n # '''\n # targetx = torch.tensor(target[0], dtype=float)\n # targety = torch.tensor(target[1], dtype=float)\n # dist_c2j = self.dist_to_comm(jx, jy, kc)\n # dist_c2t = self.dist_to_comm(targetx, targety, kc)\n # return dist_c2j - dist_c2t\n\n\n def sig(self, x):\n '''\n logsig returns sigmoid function (expit) of its argument x\n scaled so that result is independent of grid size assuming the same self.slope\n before adjusting for grid size: returned \"scipy.special.expit(2*self.slope*x)\" in this case\n self.slope was slope of sigmoid (before log) specifed at x=0 where there is \n equal distance between comm-jammer and comm-target, where value of sigmoid (before log) is expit=1/2\n '''\n return scipy.special.expit(2*self.slope*x/self.ngrid)\n\n\n def logsig(self, x):\n '''\n logsig(x): returns the log of the sigmoid function\n '''\n # return torch.log(scipy.special.expit(2*self.slope*x/self.ngrid))\n return torch.log(self.sig(x))\n\n\n # def loglikelihood_ddiff(self, target, jx, jy, kc=0):\n # '''\n # loglikelihood: log-likelihood of successful communication between comm and target with specified jammer location(s)\n # Depending on jammer_x and jammer_y will compute for whole grid or just veridical locations (see wrappers below)\n # '''\n # ddiff = self.distdiff(target, jx, jy, kc)\n # return self.logsig(ddiff).sum(axis=0) # axis=0 is jammer num, add logs because jamming from different jammers independent\n\n\n def loglikelihood_grid(self):\n '''\n loglikelihood_grid: Computes tensor of shape [nfriendly, nfriendly]+GRID_SHAPE.\n loglikelihood = Log-Probability(Data|State) where Data are all successful (all True) connections between friendlies;\n State is jammer locations (each gridpoint)\n Unlike for loglikelihood_veridical() there will be many location, not just one (all gridpoints instead of veridical jammer locations).\n Index into tensor is a tuple that specifies all j-jammer locations on battlefield, a (2*njams)-tuple (x1, y1, x2, y2, ..., xj, yj)\n '''\n return self.logsig(self.sjr_db_grid())\n\n\n def loglikelihood_veridical(self):\n '''\n loglikelihood_veridical: log-probabilities of successful connections given veridical jammer locations: tensor shape [nfriendly, nfriendly] (sender, receiver)\n Here there is just one location (veridical jammer locations, not many locations, one for each grid-point, like above for loglikelihood_grid())\n returns T, a tensor of shape [nfriendly, nfriendly]\n where T[f_sender, f_reciever] is the log probability of a successful connection: sender --> receiver with veridical jammer locations\n loglikelihood = Log-Probability(Data|State) where Data is True for all onnections between friendlies, and\n State is veridical jammer locations\n '''\n return self.logsig(self.sjr_db_veridical())\n\n\n def weights(self):\n '''\n weights(): returns the veridical probabilities of the nfriendly*nfriendly radio connects (weights of graph of connections)\n same as loglikelihood_veridical but doesn't take log\n '''\n return 
self.sig(self.sjr_db_veridical())\n\n\n def loglikelihood_obs(self, adjacency):\n '''\n loglikelihood_obs(adjacency): like loglikelihood_grid() but pass in actual communication (True/False) between sender-friendly and receiver-friendly (not just all True)\n (True or False for each tuple of friendlies), specified in the method's arguement: adjacency\n pass component as True for likelihood of successful communication, probability: (p)---taken_from loglikelihood_grid()\n pass component as False for likelihood of unsuccessful communication probability: (1-p)---also computed_from loglikelihood_grid()\n The True/False's are the elements of the adjacency matrix / 2D Tensor, adjacency.shape = [nfriendly, nfriendly]\n returns tensor shaped [nfriendly, nfriendly]+GRID_SHAPE\n loglikelihood = Log-Probability(Data|State) where Data is True or False for connections between friendlies---specified in adjacency\n State is jammer locations (grid)\n depends on self.loglikelihood_grid() which computes for Data=True for all friendly connections instead of varying according to adjacency.\n '''\n log_p_success = self.loglikelihood_grid()\n log_p_obs = torch.zeros(log_p_success.shape)\n for f1 in range(self.nfriendly):\n for f2 in range(self.nfriendly):\n log_p_obs[f1,f2] = log_p_success[f1,f2] if adjacency[f1,f2] else torch.log(1 - torch.exp(log_p_success[f1,f2])) # Computes for f1, f2, and whole grid\n return log_p_obs\n\n\n def update_jammers(self, adjacency):\n '''\n update_jammers(adjacency): returns a tensor T with T.shape=SHAPE_OF_GRID\n T[idx]=T[x1,y1,x2,y2,...,xj,yj] is logP[DATA_specified_as_adjacency|jammer_STATE_given_by_idx]\n At each grid points adds all (nfriendly*nfriendly) loglikelihoods for that grid point.\n (equilivalent to multiplying assumed-independent probabilities) of all sender-->receiver connection probabilities\n This result will add to last-posterior/this-prior to accomplish Bayesian update\n '''\n # Does account for missing information from out of reach friendlies\n return self.loglikelihood_obs(adjacency).sum(dim=0).sum(dim=0) # First two slots are for to from friendly; add them up as independent\n\n\n def try_to_contact(self, sender, receiver):\n '''\n try_to_contact(sender, receiver) flips a \"coin\" (usually unfair) to simulate success or failure to communicate based on likelihood derived from veridical jammer locations\n '''\n p = self.weights()\n return torch.tensor(np.random.choice([True, False],p=(p[sender, receiver], 1.-p[sender, receiver])))\n\n\n def all_try(self):\n '''\n all_try() flips coins (usually unfair) to simulate success or failure to communicate for all pairs of friendlies: calls try_to_contact nfriendly**2 times \n '''\n adjacency = torch.zeros((self.nfriendly, self.nfriendly), dtype=bool)\n for sender in range(self.nfriendly):\n for receiver in range(self.nfriendly):\n adjacency[sender, receiver] = self.try_to_contact(sender, receiver)\n return adjacency\n\n\n def normalize(self, distrib):\n '''\n normalize is required after a Bayesian update to make the distributions sum to 1.\n However this is not necessary to do within the run() loop or to get maximum aposteriori estimates of Jammer location\n normalize is called at the end of the loop.\n '''\n priorshape = distrib.shape\n flat = torch.nn.functional.log_softmax(distrib.flatten(), dim=0) # New Prior equals normalized Posterior\n return flat.reshape(priorshape)\n\n\n def normalize_unnormalized(self):\n '''\n normalize_unnormalized: wrapper for normalize that normalizes posterior (not used in code but might 
be on command line)\n '''\n return self.normalize(self.current.logPjammers_unnormalized)\n\n\n def run(self, steps=1, record=False):\n '''\n run(steps, record) runs model (without plotting for steps number of step and record data on last step if this flag is set\n '''\n self.alldata = False\n self.current_on_stack = False\n for s in range(steps):\n if record and s==steps-1:\n self.current.friendly_pre = copy.deepcopy(self.current.friendly)\n self.current.jammers_pre = copy.deepcopy(self.current.jammers)\n self.step += 1\n self.current.step = copy.deepcopy(self.step)\n self.current.friendly = self.friendly_move() # teleports comms to new locations stored in self.friendly\n self.current.jammers = self.jammers_move()\n self.current.adjacency = self.all_try() # Next line uses random number generatation and depends on random state\n if record and s==steps-1:\n self.current.logPjammers_prior = self.current.logPjammers_unnormalized\n self.current.logPjammers_predict = self.jammers_predict_args(self.current.logPjammers_prior)\n self.current.update = self.update_jammers(self.current.adjacency)\n self.current.logPjammers_unnormalized = self.current.logPjammers_predict + self.current.update\n self.current.logPjammers_posterior = self.normalize(self.current.logPjammers_unnormalized)\n self.current.torchstate = torch.get_rng_state()\n self.current.numpystate = np.random.get_state()\n self.current.alldata = True\n else:\n self.current.logPjammers_unnormalized = self.jammers_predict_args(self.current.logPjammers_unnormalized) + self.update_jammers(self.current.adjacency)\n\n\n def pushstack(self):\n '''\n pushstack(): push recorded data onto stack\n '''\n if self.current.alldata is True:\n self.stack.append(copy.deepcopy(self.current))\n self.current_on_stack = True\n\n\n def popstack(self):\n '''\n popstack(): pop recorded data from stack\n '''\n if self.current_on_stack:\n self.stack.pop()\n if len(self.stack) == 0:\n print(\"Bottom of stack reached!\")\n return\n self.current = copy.deepcopy(self.stack[-1])\n self.step = copy.deepcopy(self.current.step)\n self.alldata = True\n self.current_on_stack = True\n torch.set_rng_state(self.current.torchstate)\n np.random.set_state(self.current.numpystate)\n\n\n def advance(self, steps=1):\n '''\n advance(steps) run with record on, push last step's data to stack, and plot result\n '''\n self.run(steps, record=True)\n if self.push:\n self.pushstack()\n self.render()\n\n\n def retreat(self, stacksteps=1):\n '''\n retreat(stacksteps): pop stacksteps from stack and plot last popped data\n '''\n for _ in range(stacksteps):\n self.popstack()\n self.render()\n\n\n def logcumsumexp(self, x, dim):\n '''\n logcumsumexp(x, dim): like logsumexp() except cumulative\n '''\n # slow implementation (taken from web), but ok for now\n if (dim != -1) or (dim != x.ndimension() - 1):\n x = x.transpose(dim, -1)\n out = []\n for i in range(1, x.size(-1) + 1):\n out.append(torch.logsumexp(x[..., :i], dim=-1, keepdim=True))\n out = torch.cat(out, dim=-1)\n if (dim != -1) or (dim != x.ndimension() - 1):\n out = out.transpose(-1, dim)\n return out\n\n\n def credible(self, logP=None, C=0.95):\n '''\n credible(logP, C): Computes a C-level credible set by ordering the probabilities of each grid point,\n and taking the largest that add up to (as close as possible) but below C.\n default value of logP is self.current.logPjammers\n default value for C is 0.95 (95% credible)\n Credible sets are like confidence intervals but Bayesian and discrete\n Normalizes the distribution first.\n '''\n if 
logP is None:\n logP = self.current.logPjammers_unnormalized\n the_sort = self.normalize(logP).flatten().sort(descending=True)\n included = self.logcumsumexp(the_sort.values, dim=0) < np.log(C)\n idx = np.where(np.diff(included))[0][0]\n inside = the_sort.indices[:idx]\n # boundary = the_sort.indices[idx+1]\n credible_set = torch.zeros(included.shape)\n credible_set[inside] = True\n credible_set = credible_set.reshape(logP.shape)\n return credible_set\n\n\n def jammer_tuple_in_credible_set(self, logP=None, C=0.95):\n '''\n jammer_tuple_in_credible_set(logP, C): tests and returns True or False whether the closest grid point to jammers is in credible set\n '''\n credible_set = self.credible(logP, C) # logP defaults to self.logPjammers_unnormalized\n jammer = self.tuple_of_closest_grid_to_jammers()\n return (credible_set[jammer] == 1.).item() # Returns True or False\n\n\n def credible_set_cardinality(self, logP=None, C=0.95):\n '''\n credible_set_cardinality(logP, C) number of points inside the credible set\n '''\n credible_set = self.credible(logP, C) # logP defaults to self.logPjammers_unnormalized\n return int(torch.sum(credible_set, dim=list(range(2*self.njams))).item()) # Sum over all dimensions\n\n\n def credible_2D(self, logP=None, C=0.95):\n '''\n credible_2D(logP, C) produce a projection of the credible set\n numbers in the projection indicate numbers of points that project there.\n '''\n credible_set = self.credible(logP, C) # logP defaults to self.logPjammers_unnormalized\n return self.marginal(credible_set, logs=False).T\n\n\n def video(self, nframes):\n '''\n video(nframes): create and save frames for video in file ./video/frame##.png\n '''\n for f in range(nframes):\n self.advance()\n plt.savefig('video/frame'+str(f)+'.png')\n plt.clf()\n\n\n def normalizer(self, logP):\n '''\n normalizer(logP): compute and return the contant that normalizes distribution logP\n '''\n return torch.logsumexp(logP.flatten(), dim=0)\n\n\n def marginal(self, joint, logs=True):\n '''\n marginal: needed for plotting in 2D if joint density has greater than 2 dimensions\n adds probabilities for each x_njams and y_njams on 2D grid\n where njams references the marginal of last jammer position x and y (last two coordinates of joint distribution).\n All jammer positions are equivalent and should give same answer, but this way book keeping simplifies.\n '''\n dims_summing_across = 2*self.njams-2 # two less dimensions than number of dimensions in joint distribution\n nterms = self.ngrid**dims_summing_across # number of terms in sum for each x and y in the computation of the marginal\n terms_rearranged_for_sum = torch.zeros((nterms, self.ngrid, self.ngrid)) # coordinates are (term, x, y)\n for term_number, term_multiindex in enumerate(self.itertuple(dims_summing_across)): # enumerate all term_multiindecies in a tensor with two less coordinates\n terms_rearranged_for_sum[term_number] = joint[term_multiindex] # each side of this assignment is a 2D tensor in x and y\n if not logs:\n return torch.sum(terms_rearranged_for_sum, dim=0)\n return torch.logsumexp(terms_rearranged_for_sum, dim=0) # equivalent to: convert to probabilities, add across terms, then retake log\n\n\n def annotations(self, titleprefix=''):\n '''\n annotations: add annotations to plot\n '''\n col = 'lightblue' if self.current.step == 0 else 'black'\n for kf in range(self.nfriendly):\n if kf==0: # headquarters\n plt.text(self.current.friendly[kf][0], self.current.friendly[kf][1], \"Headquarters\", color=col)\n elif kf in self.comms_set: # comms\n 
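# the friendly list is laid out as [hq] + comms + assets, so indices 1..ncomms are the mobile comm units labelled here\n 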
plt.text(self.current.friendly[kf][0], self.current.friendly[kf][1], \"Comm\", color=col)\n else: # assets\n plt.text(self.current.friendly[kf][0], self.current.friendly[kf][1], \"Asset\", color=col)\n for kj in range(self.njams):\n plt.text(self.current.jammers[kj][0], self.current.jammers[kj][1],\"Jammer\", color=col)\n estimates = [e.item() for e in self.estimates()]\n for ej in self.list_of_tuples_for_each_jammer(estimates):\n plt.text(ej[0], ej[1],\"Estimate\", color=col)\n plt.title(titleprefix + \"Steps = \" + str(self.current.step))\n plt.show()\n\n\n def connections(self):\n '''\n connections(): Displays arrows between friendlies on graph assuming there is a radio connection between them as determined by self.current.adjacency\n '''\n ax = plt.gca()\n for f2 in range(self.nfriendly):\n for f1 in range(f2):\n x = self.current.friendly[f1][0]\n y = self.current.friendly[f1][1]\n dx = self.current.friendly[f2][0] - x\n dy = self.current.friendly[f2][1] - y\n assert f1 < f2\n if self.current.adjacency[f1,f2] and not self.current.adjacency[f2, f1]:\n style = '->'\n elif self.current.adjacency[f2,f1] and not self.current.adjacency[f1, f2]:\n style = '<-'\n elif self.current.adjacency[f1,f2] and self.current.adjacency[f2, f1]:\n style = '<->'\n else:\n continue\n ax.annotate(\"\", xy=(x+dx,y+dy), xytext=(x, y), arrowprops=dict(arrowstyle=style, color='lightgreen'))\n # plt.arrow(x,y,dx,dy, width=0.002, head_width=0.006, head_length=0.003)\n # else:\n # pass\n #plt.arrow(x,y,dx,dy, width=0.002, head_width=0.006, head_length=0.003, linestyle=':', color='red')\n\n\n def render(self):\n '''\n render: plots the marginal of the unnormalized posterior\n '''\n plt.clf()\n plt.imshow(self.marginal(self.current.logPjammers_unnormalized).T, cmap='hot', interpolation='nearest') # transpose to get plot right\n self.annotations()\n self.connections()\n\n\n def render_background(self):\n '''\n render_background: plots the power at background\n '''\n BG = torch.zeros((self.ngrid, self.ngrid), dtype=float)\n for g1 in range(self.ngrid):\n for g2 in range(self.ngrid):\n BG[g1, g2] = self.power_background_at_point_veridical((g1, g2))\n plt.clf()\n plt.imshow(BG.T, cmap='hot', interpolation='nearest')\n self.annotations()\n self.connections()\n\n\n def render_posterior(self):\n '''\n render: plots the marginal of the posterior\n '''\n assert self.current.alldata is True\n plt.clf()\n plt.imshow(self.marginal(self.current.logPjammers_posterior).T, cmap='hot', interpolation='nearest') # transpose to get plot right\n self.annotations()\n self.connections()\n\n\n def render_update(self):\n '''\n render_update: draws I don't know what; update is not a distribution so marginal might not mean anything for njams>1\n '''\n assert self.current.alldata is True\n plt.clf()\n plt.imshow(self.marginal(self.current.update).T, cmap='hot', interpolation='nearest') # transpose to get plot right\n self.annotations(\"Update Before: \")\n\n\n def render_prediction(self):\n '''\n render: plots the marginal\n '''\n assert self.current.alldata is True\n plt.clf()\n plt.imshow(self.marginal(self.current.logPjammers_predict).T, cmap='hot', interpolation='nearest') # transpose to get plot right\n self.annotations(\"Prediction Before: \")\n\n\n def render_prior(self):\n '''\n render: plots the marginal\n '''\n assert self.current.alldata is True\n plt.clf()\n plt.imshow(self.marginal(self.current.logPjammers_prior).T, cmap='hot', interpolation='nearest') # transpose to get plot right\n self.annotations(\"Prior Before: \")\n\n\n def 
unravel_index(self, index, shape):\n '''\n unravel_index: copied from the web, converts an integer index to a multiindex for a tensor of specified shape\n needed for finding the grid indicies from argmax's output that point to the maximum aposteriori grid location for jammers\n '''\n out = []\n for dim in reversed(shape):\n out.append(index % dim)\n index = index // dim\n return tuple(reversed(out))\n\n\n def estimates(self):\n '''\n estimates: produces the maximum aposteriori estimates of the jammer locations based on the information currently in grid\n '''\n imax = self.current.logPjammers_unnormalized.argmax()\n return self.unravel_index(imax, tuple([self.ngrid]*(2*self.njams)))\n\n\n def logjoint_iid_from_logmarginal(self, logmarginal):\n '''\n logjoint_iid_from_logmarginal: Construct a joint distribution from a marginal under the assumption that coordinates are iid\n iid means independent and identically distributed\n In this case, it's pairs of coordinates x and y\n '''\n logjoint_iid = torch.zeros([self.ngrid]*(2*self.njams)) # same shape as logjoint\n for index_joint in self.itertuple(2*self.njams):\n for kj in range(self.njams):\n logjoint_iid[index_joint] += logmarginal[index_joint[2*kj], index_joint[2*kj+1]]\n return logjoint_iid\n\n\n def conditional(self, joint, freeze):\n '''\n conditional: create the conditional distribution from the joint distribution\n if the joint distribution is P(x1, y1, x2, y2, x3, y3)\n then conditional([4,5,3,1]) is the distribution P(x3, y3|x1=4, y1=5, x2=3, y1=1)\n '''\n return self.normalize(joint[freeze])\n\n\n def show_conditional(self, freeze):\n '''\n show_conditional(freeze): shows conditional distribution by freezing freeze (all but 2 dimensions)\n '''\n assert len(freeze) + 2 == len(self.current.logPjammers_unnormalized.shape)\n plt.imshow(self.conditional(self.current.logPjammers_unnormalized, freeze).T, cmap='hot', interpolation='nearest') # transpose to get plot right\n self.annotations()\n\n\n def test_independence(self):\n '''\n test_independence(): Test to see if joint probability factors as a product of its marginals (it doesn't)\n '''\n logjoint = self.current.logPjammers_unnormalized\n logmargin = self.marginal(logjoint)\n logjoint_iid = self.logjoint_iid_from_logmarginal(logmargin)\n return logjoint_iid.allclose(logjoint)\n\n\n def save(self, fname): # A class method, see load below\n '''\n save(fname): save object with filename fname\n '''\n with open(fname, 'wb') as f:\n torch.save(self, f)\n\n\ndef load(fname): # Not a class method, see save above\n '''\n load(fname): load an object saved with method above\n '''\n with open(fname, 'rb') as f:\n return torch.load(f)\n\n\ndef evaluate_credible_coverage(n, C=0.95):\n '''\n evaluate_credible_coverage(n, C): Sample n times and print results to see if coverage is close to the credible level, C\n '''\n results = []\n card = []\n for sample in range(n):\n start = time.time()\n J = JamsGrid(ngrid=8, ncomms=2, njams=2, move=True, seed=sample)\n J.run(2)\n results.append(J.jammer_tuple_in_credible_set(C))\n card.append(J.credible_set_cardinality(C))\n elapsed = time.time() - start\n print('sample', sample, 'of', n, ':', results[-1], 'Time', elapsed, 'eta', elapsed*(n-sample),\n np.array(results).sum(), 'successes in', len(results), 'trials; set cardinality mean', np.mean(np.array(card)), 'sd', np.std(np.array(card)))\n return results, card\n\n\nclass test1D():\n '''\n Unfinished classed for doing 1 dimensional grids: Jammers and Comms move in 1D\n '''\n def __init__(self, ngrid=64, 
slope=1):\n self.ngrid = ngrid\n self.slope = 1\n\n\n def run(self):\n self.grid = torch.zeros((self.ngrid, self.ngrid))\n for j in range(self.ngrid):\n for c in range(self.ngrid):\n self.grid[j, c] = torch.abs(torch.tensor(c) - torch.tensor(j)) - torch.abs(torch.tensor(c))\n\n\n def render0(self):\n l = plt.imshow(self.grid, cmap='hot', interpolation='nearest')\n\n\n def logsig(self, x):\n return torch.log(scipy.special.expit(torch.true_divide(2*self.slope*x, self.ngrid)))\n\n\n def render2(self):\n l = plt.imshow(self.logsig(self.grid), cmap='hot', interpolation='nearest')\n\n","sub_path":"tele/findjam.py","file_name":"findjam.py","file_ext":"py","file_size_in_byte":50377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"520968900","text":"# -*- coding: utf-8 -*- ?\nimport sqlite3\nimport sys\nfrom time import localtime as time\n\n# данный модуль осуществляет работу с новостной базой даннх, а именно добавление и чтение новостей.\n\n\n# добавление новости\ndef add_post(author, tag, src, description, label,img,txt ):\n\n\tdate = \".\".join(map(str,time()[0:3]))\n\n\tdb = sqlite3.connect(\"news.db\")\n\twith db:\n\t\tcursor = db.cursor()\n\t\t\n\t\t#добавляем новость в таблицу со всеми новостями\n\t\tcursor.execute(\"INSERT INTO allposts(author, tag, src, description, date, label,img,txt ) VALUES(?, ?, ?, ?, ?, ?, ?, ?);\", (author, tag, src, description, date, label,img,txt ))\n\n\t\t# добавляем ссылку на новость (id) в таблицу с новостями по определенному тегу\n\t\tpost_id = cursor.lastrowid\n\t\tcursor.execute(\"CREATE TABLE IF NOT EXISTS %s(id INTEGER PRIMARY KEY, post_id INT)\" % tag)\n\t\tcursor.execute(\"INSERT INTO %s(post_id) VALUES(?)\" % tag, (str(post_id),))\n\n\t\treturn post_id\n\n\n# получение определенного колличества новостей по определенному тегу\ndef get_posts(tag,page,count):\n\n\tposts = [] # список в который мы будем записывать новости\n\n\tdb = sqlite3.connect(\"news.db\")\n\twith db:\n\t\tcursor = db.cursor()\n\n\t\t# cursor.execute(\"SELECT id FROM allposts\")\n\t\t# last_posts_id = cursor.fetchall()[-1][0]\n\n\t\tcursor.execute(\"SELECT post_id FROM %s WHERE id>? 
AND id?\",(last_posts_id - page*count + 1,last_posts_id - (page*count+count)))\n\t\trows = cursor.fetchall()[::-1] # переворачиваем, что бы сначала отображались последние новости\n\t\tif rows is not None:\n\n\t\t\tfor row in rows:\n\t\t\t\t# преобразуем полученый кортеж в словарь (необходимо для шаблона)\n\t\t\t\tpost = {}\n\n\t\t\t\ti = 0\n\t\t\t\tfor val in (\"id\",\"author\", \"tag\", \"src\", \"description\", \"date\", \"label\",\"img\",\"txt\", ):\n\t\t\t\t\tpost[val] = str(row[i])\n\t\t\t\t\ti+=1\n\t\t\t\tposts.append(post)\n\n\treturn posts\n","sub_path":"app/news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":3398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"138077223","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\ndef feval(funcName, *args):\n return eval(funcName)(*args)\n\nfig = plt.figure()\naxl = fig.gca(projection='3d')\n\nduffing = lambda t,x,v,m,b,c,d: (b*x-c*x**3)/m -(d/m)*v\n\nn = 0\nm = 10\n\nt = 10\nx = 0.1\nv = 0\n\nb = 0.01\nc = 0.01\nd = 0.01\n\ntfin = 2000\n\npt = []\npv = []\npx = []\nh = t/100\n\nwhile(t < tfin):\n n = n*1\n for i in range(m):\n a = feval('duffing', t, x, v, m, b,c,d)\n k1 = h*a\n a = feval('duffing', t+0.5*h, x+h*0.5*v, v+0.5*k1, m, b,c,d)\n k2 = h*a\n a = feval('duffing', t+0.5*h, x+0.5*h*(v+0.5*k1), v+0.5*k2, m, b,c,d)\n k3 = h*a\n a = feval('duffing', t+h, x+h*v+h*k2*0.5, v+k3, m,b,c,d)\n k4 = h*a\n x = x+h*v+h*(k1+k2+k3)/6\n v = v+(k1+2*k2+2*k3+k4)/6\n t = t+h\n if x > np.pi:\n x = x-2*np.pi\n if x < -np.pi:\n x = x+2*np.pi\n\n px.append(x)\n pv.append(v)\n pt.append(t)\n\n\naxl.plot(px,pv,pt,'.',markersize=2,color='purple')\n#plt.plot(px,pv,'.')\nplt.xlabel('x')\nplt.ylabel('v')\n\nplt.show()\n","sub_path":"lab10/lab10_3b.py","file_name":"lab10_3b.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"215444700","text":"from test import TCBase, check_status_code\nfrom test.requests import team_post_request\nfrom model.user import UserModel\n\n\nclass TeamPostTest(TCBase):\n\n def check_user_team(self, user_id='test', team_number=0):\n user = UserModel.objects(user_id=user_id).first()\n self.assertEqual(user.team.team_id, team_number)\n\n @check_status_code(201)\n def test_success_team_post(self):\n rv = team_post_request(self)\n self.check_user_team(team_number=1)\n return rv\n\n @check_status_code(204)\n def test_already_has_team(self):\n team_post_request(self)\n self.check_user_team(team_number=1)\n\n rv = team_post_request(self, team_number=2)\n self.check_user_team(team_number=1)\n return rv\n\n @check_status_code(205)\n def test_wrong_team_number1(self):\n rv = team_post_request(self, team_number=-1)\n self.check_user_team()\n return rv\n\n @check_status_code(205)\n def test_wrong_team_number2(self):\n rv = team_post_request(self, team_number=5)\n self.check_user_team()\n return rv\n","sub_path":"Server/test/team/test_team_post.py","file_name":"test_team_post.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"467687606","text":"import numpy as np\nimport math\n\ndef list_primes(n):\n \"\"\"Input must be >= 6. Returns an array of primes < n. 
\n Slightly adapted from stackoverflow\n \"fasted-way-to-list-all-primes-below-n\"\"\"\n \n sieve = np.ones(math.floor(n/3) + (n % 6 == 2), dtype=np.bool) \n for i in range(1, int(math.floor(n**0.5)/3) + 1):\n if sieve[i]:\n k = (3*i + 1)|1 # ORs the last btye, nearest larger odd number.\n sieve[int(k*k/3)::2*k] = False\n sieve[int(k*(k-2*(i & 1) + 4)/3)::2*k] = False\n return np.r_[2, 3, ((3*np.nonzero(sieve)[0][1:]+1)|1)]\n\n\ndef is_prime(n):\n \"\"\"Input n >= 0; returns True if prime and False if not prime.\"\"\"\n if (n == 2) or (n == 3): \n return True\n if (n < 2) or (n % 6 not in [1, 5]):\n return False\n else:\n # I can probably optimize this part since I'm checking mod 6.\n for x in range(3, int(math.floor(n**0.5) + 1), 2):\n if n % x == 0:\n return False\n return True\n\n\ndef fib_below(n, a=1, b=1):\n \"\"\"Lists fib numbers below n. a, b are starting values.\"\"\"\n fibs = [a, b]\n while a + b < n:\n fibs.append(a + b)\n a, b = b, a + b\n return fibs\n\n\n\n\nif __name__ == \"__main__\":\n pass\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"175443492","text":"#build_rupture_set_index\n\n\"\"\"\nSimple script to creawte valid URLs to the rupture sets built\n\nonly to be used until we have automated rupture reporting\n\n\"\"\"\n\nimport os\n# import os.path\nimport shutil\nimport fnmatch\nfrom pathlib import PurePath, Path\n\nimport base64\nimport json\nimport collections\n\nfrom nshm_toshi_client.toshi_client_base import ToshiClientBase\n\nclass ToshiFile(ToshiClientBase):\n\n def __init__(self, url, s3_url, auth_token, with_schema_validation=True, headers=None ):\n super(ToshiFile, self).__init__(url, auth_token, with_schema_validation, headers)\n self._s3_url = s3_url\n\n\n def get_file_meta_as_dict(self, id):\n qry = '''\n query download_file ($id:ID!) {\n node(id: $id) {\n __typename\n ... on File {\n meta{ k v }\n }\n }\n }'''\n\n # print(qry)\n input_variables = dict(id=id)\n executed = self.run_query(qry, input_variables)\n\n retval = dict()\n for kv in executed['node']['meta']:\n retval[kv['k']] = kv['v']\n return retval\n\n\n\nAPI_URL = os.getenv('NZSHM22_TOSHI_API_URL', \"http://127.0.0.1:5000/graphql\")\nAPI_KEY = os.getenv('NZSHM22_TOSHI_API_KEY', \"\")\nS3_URL = os.getenv('NZSHM22_TOSHI_S3_URL',\"http://localhost:4569\")\n\n\nclass IndexBuilder():\n\n _patterns = ['index.html',]# '*.zip']\n\n # iso_date = \"2021-05-26\"\n set_number = '01'\n thinning = \"0.0\"\n max_inversion_time = 480\n round_number = 1\n index_file = None\n\n def __init__(self, path, date_path ):\n self._dir_name = path\n self._date_path = date_path\n\n\n def old_get_template(self, index_file, short_name, round_number, max_inv_time, thin_factor):\n return f'''\n
<li><a href="{str(index_file)[1:]}">\n Solution ({short_name}) {self._rupture_class}, thin({thin_factor}), {max_inv_time} mins, Round {round_number}\n </a></li>'''  # note: the href target is an assumption; the original anchor markup was garbled\n\n\n def get_template(self, index_file):\n return f'''
<li><a href="{str(index_file)[1:]}">{str(index_file)[1:].replace('-', ' ').replace('/index.html', '')}</a></li>
  • '''\n\n\n def build_line(self, root, filename):\n\n\n # short_name = file_meta['fault_model']\n # round_number = file_meta['round_number']\n # max_inv_time = file_meta['max_inv_time']\n # root = file_meta['root']\n # filename = file_meta['filename']\n\n index_file = PurePath(root.replace(self._dir_name, ''), filename)\n\n return self.get_template(index_file)\n\n\n\n def build(self):\n file_meta = dict()\n filekey = None\n\n # headers={\"x-api-key\":API_KEY}\n # file_api = ToshiFile(API_URL, S3_URL, None, with_schema_validation=True, headers=headers)\n\n lines = []\n for root, dirs, files in os.walk(self._dir_name):\n for pattern in self._patterns:\n for filename in fnmatch.filter(files, pattern):\n\n # # print(filename, root)\n\n # # foldername = root.split('/')[-1]\n # # filekey, round_number, max_inv_time = foldername.split('-')[1:4]\n\n # # print(filekey)\n # if not filekey in file_meta.keys():\n # metadata = file_api.get_file_meta_as_dict(filekey)\n # metakey = f\"{metadata['fault_model']}-{metadata['thinning_factor']}-{max_inv_time}-{round_number}\"\n\n # #enrich the dict\n # metadata['filekey'] = filekey\n # metadata['root'] = root\n # metadata['filename'] = filename\n # metadata['round_number'] = round_number\n # metadata['max_inv_time'] = max_inv_time\n\n # file_meta[metakey] = metadata\n\n lines. append(self.build_line(root, filename))\n return lines\n # #sort\n # od = collections.OrderedDict(sorted(file_meta.items()))\n\n # for key, value in od.items():\n # self.build_line(key, value)\n\n\n\nclass DownloadBuilder():\n\n _patterns = ['*.zip',]\n set_number = '01'\n\n def __init__(self, path, date_path ):\n self._dir_name = path\n self._date_path = date_path\n\n def get_template(self, solution_file):\n return f'''
<li><a href="{solution_file}">Download {solution_file}</a></li>
  • '''\n\n\n def build_line(self, root, filename):\n index_file = PurePath(root.replace(self._dir_name, ''), filename)\n return self.get_template(index_file)\n\n\n def build(self):\n lines = []\n for root, dirs, files in os.walk(self._dir_name):\n for pattern in self._patterns:\n for filename in fnmatch.filter(files, pattern):\n\n lines. append(self.build_line(root, filename))\n return lines\n\n\nclass ReportMetaBuilder():\n \"\"\"\n find the metadata.json and make this available for the HTML\n \"\"\"\n _patterns = ['metadata.json',]# '*.zip']\n set_number = '01'\n\n def __init__(self, path, date_path ):\n self._dir_name = path\n self._date_path = date_path\n\n def build(self):\n file_meta = dict()\n filekey = None\n\n lines = []\n for root, dirs, files in os.walk(self._dir_name):\n for pattern in self._patterns:\n for filename in fnmatch.filter(files, pattern):\n folder_path = PurePath(root)\n if len(folder_path.parts) - len(PurePath(self._dir_name).parts) == 1:\n #print(root, filename)\n key = PurePath(root).parts[-1]\n #print(key)\n value = json.load(open(PurePath(folder_path, filename), 'r'))\n #print(value['task_arguments'])\n '''\n e.g {'rupture_set_file_id': 'RmlsZTo0ODMuMFN3cTRN', 'generation_task_id': 'UnVwdHVyZUdlbmVyYXRpb25UYXNrOjE4M0FoblN5',\n 'solution_file': '/home/chrisbc/DEV/GNS/opensha-new/nshm-nz-opensha/src/python/automation/tmp/UnVwdHVyZUdlbmVyYXRpb25UYXNrOjE4M0FoblN5/InversionSolution-RmlsZTo2-rnd0-t1380_RmlsZTo0ODMuMFN3cTRN.zip',\n 'short_name': 'CFM_0_9_SANSTVZ_D90-0.1', 'rupture_class': 'Azimuth', 'max_inversion_time': '1380', 'completion_energy': '0.05', 'round_number': '0'}\n '''\n\n solution_name = PurePath( value['task_arguments']['solution_file']).name\n #print(solution_name)\n solution_filepath = Path(folder_path, '..', value['task_arguments']['generation_task_id'], solution_name).resolve()\n #print(solution_filepath)\n #rel_path = os.path.relpath(solution_filepath, start = PurePath(self._dir_name))\n info = dict(\n key = key,\n meta = value['task_arguments'],\n solution_relative_path = os.path.relpath(solution_filepath, start = PurePath(self._dir_name)),\n index_path = os.path.relpath(PurePath(folder_path, \"DiagnosticsReport\", \"index.html\"), start = PurePath(self._dir_name)),\n )\n\n #TODO: ugly workaround, FIXME\n rupture_class = \"Azimuth\"\n azim_len = len(\"UnVwdHVyZUdlbmVyYXRpb25UYXNrOjE4NXN4Zjhp/InversionSolution-RmlsZTo2-rnd0-t1380_RmlsZTo1MDcuMDdaMkFp.zip\")\n if len(info['solution_relative_path']) > azim_len:\n rupture_class = \"Coulomb\"\n info['meta']['rupture_class'] = rupture_class\n\n lines.append(info)\n return lines\n\n\n def get_template(self, info, mfd_dirs):\n \"\"\"\n {'key': 'RmlsZTo0NTkuMDlnaEda', 'meta': {'rupture_set_file_id': 'RmlsZTo0NTkuMDlnaEda',\n 'generation_task_id': 'UnVwdHVyZUdlbmVyYXRpb25UYXNrOjE4MUNqSFFa',\n 'short_name': 'CFM_0_9_SANSTVZ_D90-0.1', 'rupture_class': 'Azimuth', 'max_inversion_time': '1380', 'completion_energy': '0.2', 'round_number': '0'},\n 'solution_relative_path': 'UnVwdHVyZUdlbmVyYXRpb25UYXNrOjE4MUNqSFFa/InversionSolution-RmlsZTo2-rnd0-t1380_RmlsZTo0NTkuMDlnaEda.zip',\n 'index_path': 'RmlsZTo0NTkuMDlnaEda/DiagnosticsReport/index.html'}\n\n \"\"\"\n m = info['meta']\n report_info = f\"{m['short_name']} {m['rupture_class']} energy({m['completion_energy']}) round({m['round_number']})\"\n\n if m['rupture_set_file_id'] in mfd_dirs:\n extra_link = f' Named MFDS'\n else:\n extra_link = ''\n\n return f'''
<li>{report_info} \n <a href="{info['index_path']}">Diagnostics report</a> \n <a href="{info['solution_relative_path']}">Download solution file</a>\n {extra_link}</li>
  • '''\n\n\n\n\nif __name__ == \"__main__\":\n\n #rupture_class = \"Azimuth\" #\"Coulomb\"\n\n # report_builder = IndexBuilder(\n # # path = '/home/chrisbc/DEV/GNS/opensha-new/DATA/2021-05-26-01',\n # path = './tmp',\n\n # date_path = \"2021-05-26\")\n\n # for line in sorted(report_builder.build()):\n # print(line)\n\n # downloads = DownloadBuilder(\n # path = '/home/chrisbc/DEV/GNS/opensha-new/DATA/2021-05-26-01',\n # date_path = \"2021-05-26\")\n\n # for line in sorted(downloads.build()):\n # print(line)\n\n\n mfd_dirs = [\n \"RmlsZTo1MjIuMDN2ZktR\",\n \"RmlsZTo1MTAuMDlDVUsy\",\n \"RmlsZTo0OTguMEtiUnJE\",\n \"RmlsZTo0ODYuMDI4N2dr\",\n \"RmlsZTo0NzQuMG1CdVhq\",\n \"RmlsZTo0NjguMEczcFVT\",\n \"RmlsZTo1MTkuMG9XR0dF\",\n \"RmlsZTo1MDcuMDdaMkFp\",\n \"RmlsZTo0OTUuMFVLcm5B\",\n \"RmlsZTo0ODMuMFN3cTRN\",\n \"RmlsZTo0NzEuMFpqckZx\",\n \"RmlsZTo0NTkuMDlnaEda\"]\n\n\n\n #ReportMetaBuilder\n meta_builder = ReportMetaBuilder(\n path = '/home/chrisbc/DEV/GNS/opensha-new/DATA/2021-06-01-01',\n date_path = \"2021-06-01\")\n\n def sort_fn(info):\n key = info['meta']['short_name']\n key += info['meta']['rupture_class']\n key += info['meta']['completion_energy']\n return key\n\n\n for line in sorted(meta_builder.build(), key=sort_fn):\n line['meta'].pop('solution_file') #to verbose\n print(meta_builder.get_template(line, mfd_dirs))\n\n\n # for line in sorted(report_builder.build()):\n # print(line)","sub_path":"src/python/automation/build_rupture_set_index.py","file_name":"build_rupture_set_index.py","file_ext":"py","file_size_in_byte":10629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"51367504","text":"APPLICATION_NAME = 'Trip Match'\nSECRET_KEY = 'secret key'\nALLOWED_EXTENSIONS = {'bmp', 'jpg', 'jpeg', 'png'}\nPER_PAGE = 12\n# --------------------------------------------------\n# DATABASE_URI = 'sqlite:///{0}/testdb.db'.format(os.getcwd())\n# DATABASE_URI = 'sqlite:///' + os.path.join(_basedir, '..', '..', 'testdb.db')\nDATABASE_URI = 'postgresql://tripmatch_user:123456@localhost/tripmatch_db'\nDEBUG = True\n# EXPLAIN_TEMPLATE_LOADING = True\nSQLALCHEMY_ECHO = True","sub_path":"instance/config/development_config.py","file_name":"development_config.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"476242266","text":"# -- constraints --\n\nTIMES_EXECUTION = 10000\nLEARNING_RATE = 0.9\n\nMAX_COLUNS = 6\nMAX_ROWS = 6\n\nTOTAL_STATES = MAX_COLUNS * MAX_ROWS\n\nTARGET_STATE = 35 # STATE THAT HAS RECOMPENSE\nTARGET_R = 10\n","sub_path":"Q-learning/src/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"609198463","text":"import csv\nimport pypyodbc\nfrom geopy.distance import vincenty\nimport random\n\n\ndef collect_pairs(outfile):\n with open(outfile,'w', newline='') as f:\n csv_writer = csv.writer(f, delimiter=',')\n wrow = ('a_station', 'b_station', 'distance')\n csv_writer.writerow(wrow)\n with pypyodbc.connect(\"DRIVER={SQL Server};SERVER=miranda;DATABASE=bikeshare;Trusted_Connection=true\") as conx:\n curr = conx.cursor()\n retrieved_data = curr.execute(\"\"\"\n select a.stationid A_station, b.stationid B_station,\n a.lat a_lat, a.long a_long,\n b.lat b_lat, b.long b_long\n from stations a\n cross join stations b\n where a.stationid < b.stationid\n \"\"\")\n for row in retrieved_data:\n a_lat = row['a_lat']\n 
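# the SELECT aliases come back lower-cased; gather both station endpoints so\n # vincenty() below can report their geodesic separation in statute miles\n # (illustrative: two points one degree of longitude apart near 39 N sit roughly 54 miles apart)\n 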
a_long = row['a_long']\n b_lat = row['b_lat']\n b_long = row['b_long']\n a_station = row['a_station']\n b_station = row['b_station']\n a_pt = (a_lat, a_long)\n b_pt = (b_lat, b_long)\n distance = vincenty(a_pt, b_pt).miles\n wrow = (a_station, b_station, distance)\n csv_writer.writerow(wrow)\n \n\n\nif __name__ == \"__main__\" :\n\n output_file = r'C:\\Users\\Ken\\Documents\\2015\\bikeshare\\cross_ab_distance.csv'\n collect_pairs(output_file)\n","sub_path":"BikeShareMapReduce/StationPairDistance/ABCrossPairDistance.py","file_name":"ABCrossPairDistance.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"346798142","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 13 20:50:18 2019\n\n@author: Rajeshwari\n\"\"\"\n\n#multiple regression\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error, r2_score\n\n#%matplotlib inline\n# Importing the dataset\nadvertise = pd.read_csv('Advertising.csv')\n\nadvertise.describe()\n\n#Next, we'll check for skewness\nprint (\"Skew is:\", advertise.sales.skew())\nplt.hist(advertise.sales, color='green')\nplt.xlabel('sales', size = 10)\nplt.ylabel('freq', size = 10)\nplt.title('histogram distribution', size = 6)\nplt.show()\n\ntarget = np.log(advertise.sales)\nprint (\"Skew is:\", target.skew())\nplt.hist(target, color='yellow')\nplt.xlabel('sales', size = 10)\nplt.ylabel('freq', size = 10)\nplt.title('logarithmic histogram distribution', size = 6)\nplt.show()\n\n\n#Working with Numeric Features\nnumeric_features = advertise.select_dtypes(include=[np.number])\n\n##Null values\nnulls = pd.DataFrame(advertise.isnull().sum().sort_values(ascending=False)[:25])\nnulls.columns = ['Null Count']\nnulls.index.name = 'Feature'\nprint(nulls)\n\n##handling missing value\nA = advertise.select_dtypes(include=[np.number]).interpolate().dropna()\nprint(sum(A.isnull().sum() != 0))\n\ncorr = advertise.corr()\n\nprint (corr['sales'].sort_values(ascending=False)[:5], '\\n')\nprint (corr['sales'].sort_values(ascending=False)[-5:])\n\nsns.boxplot(advertise[\"sales\"],orient= \"v\")\nplt.title(\"sales Outlier detection\", size=15)\nplt.xlabel(\"sales\", size=15)\nplt.ylabel(\"freq\")\nplt.show()\n\nadvertise.drop(advertise[advertise[\"sales\"] < 25].index)\n\nX = advertise.iloc[:, :-2].values\ny = advertise.iloc[:, 4].values\n\nX.shape\n\nX=X[:,1:]\n\n\nX_train, X_test, Y_train, Y_test= train_test_split(X,y, test_size=0.3, random_state=5)\n\nregressor=LinearRegression()\nregressor.fit(X_train,Y_train)\ny_pred=regressor.predict(X_test)\n\n\nrmse = np.sqrt(mean_squared_error(Y_test, y_pred))\nprint(\"Root Mean Squared Error: {}\".format(rmse))\nr2=r2_score(Y_test,y_pred)\nprint(\"R2 score: {}\".format(r2))\n\nplt.scatter(y_pred,Y_test,alpha=0.9,color='r')\nplt.show()","sub_path":"Lab 2/Source/Q4.py","file_name":"Q4.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"513925985","text":"#\n# @lc app=leetcode id=82 lang=python3\n#\n# [82] Remove Duplicates from Sorted List II\n#\n# https://leetcode.com/problems/remove-duplicates-from-sorted-list-ii/description/\n#\n# algorithms\n# Medium (32.34%)\n# Total Accepted: 172.8K\n# Total Submissions: 532.8K\n# Testcase Example: '[1,2,3,3,4,4,5]'\n#\n# 
Given a sorted linked list, delete all nodes that have duplicate numbers,\n# leaving only distinct numbers from the original list.\n# \n# Example 1:\n# \n# \n# Input: 1->2->3->3->4->4->5\n# Output: 1->2->5\n# \n# \n# Example 2:\n# \n# \n# Input: 1->1->1->2->3\n# Output: 2->3\n# \n# \n#\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n # my own: three pointers, slow...\n # def deleteDuplicates(self, head: ListNode) -> ListNode:\n # if not head or not head.next: # there's zero or one element\n # return head\n # dummy = ListNode(None)\n # dummy.next = head\n # h_orig = dummy\n # prev = dummy\n # i = head\n # j = head.next\n # if i.val == j.val:\n # dup = True\n # else:\n # dup = False\n # while 1:\n # if i.val == j.val:\n # j = j.next\n # dup = True\n # elif dup:\n # prev.next = j\n # i = j\n # j = j.next\n # dup = False\n # else:\n # prev = i\n # i = j\n # j = j.next\n # dup = False\n\n # if not j:\n # if dup:\n # prev.next = j\n # break\n # return h_orig.next\n \n # two pointers:\n def deleteDuplicates(self, head: ListNode) -> ListNode:\n dummy = ListNode(0)\n dummy.next = head\n prev = dummy\n curr = head\n while curr:\n if curr.next and curr.val == curr.next.val:\n # NOTE: the use of while loop within the while loop\n while curr.next and curr.val == curr.next.val:\n curr = curr.next\n prev.next = curr.next\n curr = curr.next\n else:\n prev = curr\n curr = curr.next\n return dummy.next\n\n\n\n","sub_path":"82.remove-duplicates-from-sorted-list-ii.py","file_name":"82.remove-duplicates-from-sorted-list-ii.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"385270708","text":"import numpy as np\nimport math\nfrom simulation.kf_localizer import KFLocalizer\n\ndef vel_motion_model(state, action, delta_time, insert_noise=False):\n x, y, angle = state\n v, w = action\n\n new_x = x + v * math.cos(angle) * delta_time\n new_y = y + v * math.sin(angle) * delta_time\n new_angle = angle + delta_time * w\n \n if insert_noise and v > 0.01:\n new_x += np.random.normal(scale=0.01)\n new_y += np.random.normal(scale=0.01)\n new_angle += np.random.normal(scale=0.01)\n \n return (new_x, new_y, new_angle)\n\n\ndef triangulate(x1, y1, r1, x2, y2, r2, x3, y3, r3):\n # Using formula from: https://www.101computing.net/cell-phone-trilateration-algorithm/\n A = -2 * x1 + 2 * x2\n B = -2 * y1 + 2 * y2\n C = r1 ** 2 - r2 ** 2 - x1 ** 2 + x2 ** 2 - y1 ** 2 + y2 ** 2\n D = -2 * x2 + 2 * x3\n E = -2 * y2 + 2 * y3\n F = r2 ** 2 - r3 ** 2 - x2 ** 2 + x3 ** 2 - y2 ** 2 + y3 ** 2\n\n if (E * A - B * D) == 0.0:\n # Two points lie on each others line, return None\n # TODO: this is solvable, just not with this method\n return None\n\n x = (C * E - F * B) / (E * A - B * D)\n y = (C * D - A * F) / (B * D - A * E)\n\n return x, y\n\n\nclass Robot:\n def __init__(self, start_x, start_y, start_angle, scenario, collision, radius=20,\n max_v=100, v_step=10, n_sensors=12, max_sensor_length=100, omni_sensor_range=150):\n self.x = start_x\n self.y = start_y\n self.scenario = scenario\n self.collision = collision\n self.radius = radius\n self.max_v = max_v\n self.angle = start_angle # In radians\n\n if scenario == \"evolutionary\":\n self.motion_model = \"diff_drive\"\n elif scenario == \"localization\":\n self.omni_sensor_range = omni_sensor_range\n self.motion_model = \"vel_drive\"\n self.beacons = [] # beacons have format (beacon, distance)\n # 
Initialize localization\n state_mu = (self.x, self.y, self.angle)\n state_std = np.identity(3) * 0.01\n self.localizer = KFLocalizer(state_mu=state_mu, state_std=state_std, motion_model=lambda *args: vel_motion_model(*args, insert_noise=True))\n self.passed_time = 0\n\n else:\n raise NameError(\"Invalid scenario name\")\n\n if self.motion_model == \"diff_drive\":\n self.v_step = v_step\n self.n_sensors = n_sensors # The amount of sensors used for collecting environment data\n self.max_sensor_length = max_sensor_length\n self.sensor_data = []\n\n self.l = 2 * self.radius\n self.vl = 0\n self.vr = 0\n self.velocity = (self.vr - self.vl / 2)\n self.w = (self.vr - self.vl) / self.l\n self.R, self.icc = self.calculate_icc()\n\n elif self.motion_model == \"vel_drive\":\n self.angle_step = 0.20 * math.pi\n self.angle_change = 0\n self.v = 0\n self.v_step = v_step\n self.rotate_left = False\n self.rotate_right = False\n self.pressed_arrows = False\n\n\n\n def update_vr(self, direction):\n # Used in the diff_drive scenario\n if direction == 0:\n # Stop\n self.vr = 0\n return\n\n r_vr = self.vr + direction * self.v_step\n\n if -self.max_v <= r_vr <= self.max_v:\n self.vr = r_vr\n else:\n # Over speed limit\n pass\n\n def update_vl(self, direction):\n # Used in the diff_drive scenario\n if direction == 0:\n # Stop\n self.vl = 0\n return\n\n r_vl = self.vl + direction * self.v_step\n\n if -self.max_v <= r_vl <= self.max_v:\n self.vl = r_vl\n else:\n # Over speed limit\n pass\n\n def update_v(self, direction):\n # Used in the vel_drive scenario\n if direction == 0:\n # Stop\n self.v = 0\n return\n if abs(direction) == 1:\n # WSAD keys\n r_v = self.v + direction * self.v_step\n else:\n r_v = direction\n\n if abs(r_v) > self.max_v:\n # Over max v set to max v\n self.v = r_v / abs(r_v) * self.max_v\n else:\n # within speed limit\n self.v = r_v\n\n def update_angle(self, direction):\n if direction == 0:\n # Stop\n self.angle_change = 0\n return\n\n # Used in the vel_drive scenario\n self.angle_change += direction * self.angle_step\n\n def calculate_icc(self):\n \"\"\"Returns the radius and the (x,y) coordinates of the center of rotation\"\"\"\n # Calculate center of rotation\n diff = self.vr - self.vl\n R = self.l / 2 * (self.vl + self.vr) / (diff if diff != 0 else 0.0001) # avoid division by zero\n icc = (\n self.x - R * math.sin(self.angle),\n self.y + R * math.cos(self.angle)\n )\n return R, icc\n\n def differential_drive(self, delta_time):\n # Get the new center of rotation and speed\n self.R, self.icc = self.calculate_icc()\n self.w = (self.vr - self.vl) / self.l\n\n # Determine the new angle keep it within 2 pi\n # w is basically theta because we just assume time was 1\n angle_change = self.w * delta_time\n\n # Based on the speed and the angle find the new requested location\n if (self.vr == self.vl) and (self.vr != 0):\n r_x = self.x + self.vr * math.cos(self.angle) * delta_time\n r_y = self.y + self.vr * math.sin(self.angle) * delta_time\n else:\n icc_x = self.icc[0]\n icc_y = self.icc[1]\n r_x = (math.cos(angle_change) * (self.x - icc_x) -\n math.sin(angle_change) * (self.y - icc_y) +\n icc_x)\n r_y = (math.sin(angle_change) * (self.x - icc_x) +\n math.cos(angle_change) * (self.y - icc_y) +\n icc_y)\n\n r_angle = (self.angle + angle_change) % (2 * math.pi)\n\n return r_x, r_y, r_angle\n\n def velocity_based_drive(self, delta_time):\n # Make use of booleans such that pressing the other direction does not cancel the button press\n if self.rotate_left and self.rotate_right:\n # No change\n 
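To see what `vel_motion_model` does numerically, here is a short sketch (within this module, where the function is in scope) that integrates a constant turn command over four quarter-second steps; the command values are illustrative:

import math

state = (0.0, 0.0, 0.0)        # x, y, heading in radians
action = (1.0, math.pi / 2)    # v = 1 unit/s, w = 90 deg/s
for _ in range(4):             # four 0.25 s steps = one simulated second
    state = vel_motion_model(state, action, 0.25)
print(state)                   # heading ends at pi/2 (a quarter turn)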
self.angle_change = 0\n self.pressed_arrows = True\n elif self.rotate_left:\n self.angle_change = -1\n self.pressed_arrows = True\n elif self.rotate_right:\n self.angle_change = 1\n self.pressed_arrows = True\n else:\n if self.pressed_arrows:\n self.angle_change = 0\n self.pressed_arrows = False\n\n state = (self.x, self.y, self.angle)\n action = (self.v, self.angle_change)\n return vel_motion_model(state, action, delta_time)\n\n def update(self, delta_time):\n if self.motion_model == \"diff_drive\":\n r_x, r_y, r_angle = self.differential_drive(delta_time)\n elif self.motion_model == \"vel_drive\":\n r_x, r_y, r_angle = self.velocity_based_drive(delta_time)\n \n # Predict our location, we assume that the noise is greater the faster we move\n motion_noise = np.diag([self.v * 0.05, self.v * 0.075, self.v * 0.05]) * delta_time\n self.localizer.predict((self.v, self.angle_change), delta_time, motion_noise)\n\n # Save the x and y for the speed calculation\n x_tmp = self.x\n y_tmp = self.y\n\n if self.collision:\n self.check_collision(r_x, r_y, r_angle)\n else:\n self.x = r_x\n self.y = r_y\n self.angle = r_angle\n\n if self.motion_model == \"diff_drive\":\n self.collect_sensor_data()\n\n if self.scenario == \"localization\":\n self.passed_time += delta_time\n # Scan for beacons\n # TODO: no error implemented yet, do this in scan for beacons method\n self.beacons = self.scan_for_beacons()\n if (len(self.beacons) >= 3) and (self.passed_time > 1):\n # correct our position, we assume that the sensors have a constant noise\n z = np.array(self.location_from_beacons())\n sensor_noise = np.diag([2, 2, 2])\n self.localizer.correct(z, sensor_noise)\n self.passed_time = 0\n\n # To calculate the actual speed\n self.velocity = math.sqrt((x_tmp - self.x) ** 2 + (y_tmp - self.y) ** 2)\n\n def check_collision(self, r_x, r_y, r_angle):\n \"\"\"\n @param r_x: aspired x position after time step\n @param r_y: aspired y position after time step\n @param r_angle: aspired angle after time step\n \"\"\"\n collision = self.world.slide_collision((self.x, self.y), (r_x, r_y), self.radius)\n if collision is None:\n # No collision\n self.x = r_x\n self.y = r_y\n else:\n # Slide\n self.x = collision.x\n self.y = collision.y\n\n self.angle = r_angle\n\n def scan_for_beacons(self):\n beacons_in_range = self.world.get_beacons(self.x, self.y, self.omni_sensor_range)\n\n for i in range(len(beacons_in_range)):\n error = 0\n # TODO: introduce error here\n beacons_in_range[i] = (beacons_in_range[i][0], beacons_in_range[i][1] + error)\n\n return beacons_in_range\n\n def location_from_beacons(self):\n # Estimate the location of the robot from the beacons\n f = []\n for beacon in self.beacons:\n beacon = beacon[0]\n # Note true x and y value of the robot since this is independent on the predicted location\n r = math.sqrt((beacon.x - self.x) ** 2 + (beacon.y - self.y) ** 2)\n r += np.random.normal(scale=0.1)\n # -1 times since our y coordinate system is inverted\n phi = math.atan2(-1 * (beacon.y - self.y), beacon.x - self.x) - self.angle\n f.append((r, phi, beacon.location))\n \n # Three points can lie on the same line, in which case our function cant handle it, try other ways\n shift = 0\n tria_loc = []\n while (shift + 3 <= len(f)):\n tria_loc.append(list(\n triangulate(\n f[0 + shift][2][0], f[0 + shift][2][1], f[0 + shift][0],\n f[1 + shift][2][0], f[1 + shift][2][1], f[1 + shift][0],\n f[2 + shift][2][0], f[2 + shift][2][1], f[2 + shift][0])\n ))\n shift += 1\n\n \n tria_loc = np.mean(np.array(tria_loc), axis=0)\n x, y = 
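The `triangulate` helper defined earlier in this module can be sanity-checked by generating exact ranges from a known point; with noise-free radii it should recover the point. The beacon positions here are made up for the check:

import math

p = (3.0, 4.0)  # hypothetical true position
beacons = [(0.0, 0.0), (10.0, 0.0), (0.0, 10.0)]
rs = [math.hypot(p[0] - b[0], p[1] - b[1]) for b in beacons]
print(triangulate(beacons[0][0], beacons[0][1], rs[0],
                  beacons[1][0], beacons[1][1], rs[1],
                  beacons[2][0], beacons[2][1], rs[2]))  # ~(3.0, 4.0)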
tria_loc\n # Now that we know the position get the angle, we well only use one beacon for this\n angle = math.atan2(-1 * (self.beacons[0][0].y - y), self.beacons[0][0].x - x) - f[0][1]\n return x, y, angle\n\n def collect_sensor_data(self):\n raycast_length = self.radius + self.max_sensor_length\n delta_angle = (math.pi * 2) / self.n_sensors\n\n self.sensor_data = []\n for sensor_id in range(self.n_sensors):\n # Note the sensor angle is relative to our own angle\n sensor_angle = self.angle + delta_angle * sensor_id\n\n # Note instead of calculating the position of the sensor\n # We just send a raycast from the center of our agent\n hit, dist, line = self.world.raycast(self.x, self.y, sensor_angle,\n raycast_length)\n dist -= self.radius\n self.sensor_data.append((hit, dist))\n","sub_path":"simulation/robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":11644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"614640265","text":"from talon import Module, screen, ui, cron, app, actions, clip\nfrom talon.canvas import Canvas\nfrom typing import Optional\nfrom datetime import datetime\nimport os\n\nmod = Module()\n\ndefault_folder = \"\"\nif app.platform == \"windows\":\n default_folder = os.path.expanduser(os.path.join(\"~\", r\"OneDrive\\Pictures\"))\nif not os.path.isdir(default_folder):\n default_folder = os.path.join(\"~\", \"Pictures\")\n\nscreenshot_folder = mod.setting(\n \"screenshot_folder\",\n type=str,\n default=default_folder,\n desc=\"Where to save screenshots. Note this folder must exist.\",\n)\n\n\n@mod.action_class\nclass Actions:\n def screenshot(screen_number: Optional[int] = None):\n \"\"\"Takes a screenshot of the entire screen and saves it to the pictures folder.\n Optional screen number can be given to use screen other than main.\"\"\"\n screen = get_screen(screen_number)\n screenshot_rect(screen.rect)\n\n def screenshot_window():\n \"\"\"Takes a screenshot of the active window and saves it to the pictures folder\"\"\"\n win = ui.active_window()\n screenshot_rect(win.rect, win.app.name)\n\n def screenshot_selection():\n \"\"\"Triggers an application is capable of taking a screenshot of a portion of the screen\"\"\"\n if app.platform == \"windows\":\n actions.key(\"super-shift-s\")\n elif app.platform == \"mac\":\n actions.key(\"ctrl-shift-cmd-4\")\n elif app.platform == \"linux\":\n actions.key(\"shift-printscr\")\n\n def screenshot_clipboard(screen_number: Optional[int] = None):\n \"\"\"Takes a screenshot of the entire screen and saves it to the clipboard.\n Optional screen number can be given to use screen other than main.\"\"\"\n screen = get_screen(screen_number)\n clipboard_rect(screen.rect)\n\n def screenshot_window_clipboard():\n \"\"\"Takes a screenshot of the active window and saves it to the clipboard\"\"\"\n win = ui.active_window()\n clipboard_rect(win.rect)\n\n\ndef screenshot_rect(rect: ui.Rect, title: str = \"\"):\n flash_rect(rect)\n img = screen.capture_rect(rect)\n path = get_screenshot_path(title)\n img.write_file(path)\n\n\ndef clipboard_rect(rect: ui.Rect):\n flash_rect(rect)\n img = screen.capture_rect(rect)\n clip.set_image(img)\n\n\ndef get_screenshot_path(title: str = \"\"):\n if title:\n title = f\" - {title.replace('.', '_')}\"\n date = datetime.now().strftime(\"%Y-%m-%dT%H-%M-%S\")\n filename = f\"Screenshot {date}{title}.png\"\n folder_path = screenshot_folder.get()\n path = os.path.expanduser(os.path.join(folder_path, filename))\n return os.path.normpath(path)\n\n\ndef 
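`collect_sensor_data` above spaces its raycasts evenly around the robot at 2*pi/n_sensors intervals, relative to the robot's own heading; the swept angles can be previewed without the world object:

import math

n_sensors = 12
robot_angle = 0.0
delta_angle = 2 * math.pi / n_sensors
angles = [robot_angle + delta_angle * i for i in range(n_sensors)]
print([round(math.degrees(a)) for a in angles])  # 0, 30, ..., 330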
flash_rect(rect: ui.Rect):\n def on_draw(c):\n c.paint.style = c.paint.Style.FILL\n c.paint.color = \"ffffff\"\n c.draw_rect(rect)\n cron.after(\"150ms\", canvas.close)\n\n canvas = Canvas.from_rect(rect)\n canvas.register(\"draw\", on_draw)\n canvas.freeze()\n\n\ndef get_screen(screen_number: Optional[int] = None) -> ui.Screen:\n if screen_number == None:\n return screen.main_screen()\n return actions.user.screens_get_by_number(screen_number)\n","sub_path":"code/screenshot.py","file_name":"screenshot.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"108889533","text":"from flask.ext.sqlalchemy import SQLAlchemy\nfrom flask.ext.script import Manager\nimport os\nfrom flask import Flask\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\napp = Flask(__name__)\nmanager = Manager(app)\n\napp.config['SQLALCHEMY_DATABASE_URI'] =\\\n\t'sqlite:///' + os.path.join(basedir, 'data.sqlite')\n\napp.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True\n\ndb = SQLAlchemy(app)\n\nclass User(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(80), unique=True)\n email = db.Column(db.String(120), unique=True)\n\n def __init__(self, username, email):\n self.username = username\n self.email = email\n\n def __repr__(self):\n return '' % self.username\n\n# db.create_all()\n\n# admin = User('admin', 'admin@qq.com')\n# guest = User('guest', 'guest@qq.com')\n\n# db.session.add(admin)\n# db.session.add(guest)\n# db.session.commit()\n\n\nif __name__ == '__main__':\n\tmanager.run()\t","sub_path":"dbtest1.py","file_name":"dbtest1.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"247031078","text":"from perfrunner.helpers.cbmonitor import with_stats\nfrom perfrunner.tests.index import IndexTest, DevIndexTest\n\n\nclass QueryTest(IndexTest):\n\n COLLECTORS = {'latency': True, 'query_latency': True}\n\n @with_stats\n def access(self):\n super(QueryTest, self).timer()\n\n def run(self):\n self.load()\n self.wait_for_persistence()\n\n self.compact_bucket()\n\n self.hot_load()\n\n self.define_ddocs()\n self.build_index()\n\n self.workload = self.test_config.access_settings\n self.access_bg_with_ddocs()\n self.access()\n\n\nclass QueryThroughputTest(QueryTest):\n\n def run(self):\n super(QueryThroughputTest, self).run()\n self.reporter.post_to_sf(\n self.metric_helper.calc_avg_couch_views_ops()\n )\n\n\nclass QueryLatencyTest(QueryTest):\n\n def run(self):\n super(QueryLatencyTest, self).run()\n\n self.reporter.post_to_sf(\n *self.metric_helper.calc_query_latency(percentile=80)\n )\n if self.remote.os != 'Cygwin' and \\\n self.test_config.stats_settings.post_rss:\n self.reporter.post_to_sf(*self.metric_helper.calc_max_beam_rss())\n\n\nclass DevQueryLatencyTest(DevIndexTest, QueryLatencyTest):\n\n pass\n","sub_path":"perfrunner/tests/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"495507382","text":"import sys\nimport os\n\nimport hou\n\nfrom core import database\nfrom core import file_path\nfrom core import settings\n\n# Get data\neve_root = os.environ['EVE_ROOT']\nasset_id = int(sys.argv[-1])\neve_data = database.EveData(settings.SQL_FILE_PATH.format(eve_root))\nasset = eve_data.get_asset(asset_id)\n\n# Create EveFilePath\nfile_type = 
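The `flask.ext.sqlalchemy` and `flask.ext.script` import style used in the dbtest1.py record above was removed in Flask 1.0; the same app/database skeleton with the direct package import (a sketch, assuming the Flask-SQLAlchemy package is installed):

from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///data.sqlite'
db = SQLAlchemy(app)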
database.EveFile.file_types['asset_hip']\nfile_path_asset = file_path.EveFilePath()\n\n# Get asset_type dictionary\nasset_type_dic = None\nfor asset_type in database.Asset.asset_types:\n if database.Asset.asset_types[asset_type]['id'] == asset.type:\n asset_type_dic = database.Asset.asset_types[asset_type]\n\n# Create file path string\nfile_path_asset.build_path_asset_hip(file_type, asset_type_dic, asset.name, '001')\nscene_path = file_path_asset.version_control()\n\n# Save file\nif scene_path:\n hou.hipFile.save(scene_path)\n","sub_path":"Eve/tools/houdini/create_asset.py","file_name":"create_asset.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"412995269","text":"import subprocess\n\nend_words = ('stop', 'off', 'no')\n\n\ndef say(text):\n subprocess.call(['say', text])\n\n\ndef turn_on_purifier_if_off(purifier):\n from src.speech.recognizer import match_word_equals\n\n if not purifier.is_on:\n say(\"Do you want to turn the purifier on?\")\n if match_word_equals(\"yes\"):\n turn_purifier_on(purifier)\n return True\n return False\n\n\ndef say_current_pollution_level(purifier):\n say(f\"Current pollution level is {purifier.aqi}\")\n\n\ndef say_current_humidity_level(purifier):\n say(f\"Current humidity level is {purifier.humidity} %\")\n\n\ndef manipulate_fan_speed(purifier):\n\n from src.speech.recognizer import match_word\n\n purifier.change_to_fav_mode()\n say(\"Say UP or DOWN\")\n\n while 1:\n word = match_word()\n if word in end_words:\n return\n\n manipulate_fan_speed_by_word(purifier, word)\n\n\ndef manipulate_fan_speed_by_word(purifier, word):\n if word == \"up\":\n say(\"Okay, up it is\")\n purifier.speed_up_fan()\n\n if word == \"down\":\n say(\"Okay, down it is\")\n purifier.slow_down_fan()\n\n\ndef turn_purifier_on(purifier):\n say(f\"Turning purifier on\")\n purifier.on()\n","sub_path":"src/speech/speaker.py","file_name":"speaker.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"487602082","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# Importing the required libraries\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n# import redis\n# from flask import Flask\nimport streamlit as sl\nimport streamlit.components.v1 as components\nfrom tensorflow import keras\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input, LSTM, Dense, Embedding, Flatten, TimeDistributed, Dropout, LSTMCell, RNN, Bidirectional, Concatenate, Layer\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom tensorflow.python.keras.utils import tf_utils\nfrom tensorflow.keras import backend as K\nfrom PIL import Image\n\n\n\n\n\n\nsl.set_page_config(layout=\"wide\")\nimg = Image.open(\"NomusAI.png\")\nsl.image(img)\n\nsl.write('''\n # Introduction\n The law has language at its heart, so it’s not surprising that software that operates on natural language has played a role in some areas of the legal profession for a long time.NLP plays several roles in several domains of the judicial world and one of those roles is autocompletion of legal documents. 
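One portability note on the speaker module above: `subprocess.call(['say', text])` works only where the macOS `say` command exists. A cross-platform sketch using the pyttsx3 package, which is an assumption on my part and not part of the original project:

import pyttsx3

def say(text):
    # Offline text-to-speech; pyttsx3 selects a platform backend itself.
    engine = pyttsx3.init()
    engine.say(text)
    engine.runAndWait()

say("Current pollution level is 42")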
And in our project we aim to implement an autocompletion system for legal documents which uses nlp works by learning human language, using context, prior queries and results to predict what attorneys need in their searches.\n\n''')\n\n\n\n\n\nsl.write(''' # Corpus''')\n\n\n\n# #################\n\n\n\n#Opening the dataset\nfile = open(r\"holla.txt\", 'r', encoding='utf-8')\n\ncorpus = [line for line in file]\n\n#Data \ncorpus[50:60]\n\n\ncol5,col6 = sl.columns((1,1))\ncol5.header('Functional Requirements')\ncol5.write('''\nUse of cloud services: The dataset used to train the neural network is rather large, i.e. a concatenation of several pdf files, and therefore, the program cannot be executed in a local system. Use of cloud services such as Google Colab or Microsoft Azure is used to overcome the above-mentioned problem.\nInput file format: The dataset provided as the input must be a .txt file\nLSTM: The encoder-decoder LSTM architecture is used in this project to carry out the sequence to sequence prediction.\n''')\n\ncol6.header(\"Non-Functional Requirements\")\ncol6.write('''\nTestability: The program should be easily testable in order to evaluate the outcome, such as the accuracy of the model.\nTransparency: The working of the program should be transparent so that the user can have a basic understanding of how the model works.\nLocalization: Primarily meant for usage in the field of law/judiciary.\n''')\n\nsl.write('''\n # Model\n\n''')\ncol12, col13 = sl.columns((1,1))\ncol12.write('''\n This is a sequence to sequence model which basically is a method of encoder - decoder based language processing model that maps a fixed - length inpiut with a fixed length output where the length of the input and the output may differ. \n''')\ncol12.text(\"\")\ncol12.text(\"\")\ncol12.text(\"\")\nimg111 = Image.open(\"modelencdec.png\")\ncol12.image(img111, use_column_width = True)\nimg3 = Image.open(\"flow.png\")\ncol13.image(img3, use_column_width=True)\n\n\n#Function to pre-process the data\ndef clean_special_chars(text, punct):\n for p in punct:\n text = text.replace(p, '')\n return text\n\n#Removing the unecessary punctuations and special characters \ndef preprocess(data):\n output = []\n punct = '#$%&*+-/<=>@[\\\\]^_`{|}~\\t\\n'\n for line in data:\n pline= clean_special_chars(line.lower(), punct)\n output.append(pline)\n return output \n\n\ndef generate_dataset():\n \n processed_corpus = preprocess(corpus) \n output = []\n for line in processed_corpus:\n token_list = line\n for i in range(1, len(token_list)):\n data = []\n # Adding a start and an end token to the sentence so that the model know when to start and stop predicting\n # The EOS token is important: the explicit \"end\" token allows the decoder to emit arbitrary-length sequences.\n # The SOS is more important for the decoder: the decoder will progress by taking the tokens it emits as inputs \n # (along with the embedding and hidden state), so before it has emitted anything it needs a token to start with.\n x_ngram = ' '+ token_list[:i+1] + ' '\n y_ngram = ' '+ token_list[i+1:] + ' '\n data.append(x_ngram)\n data.append(y_ngram) #generating pairs \n output.append(data)\n df = pd.DataFrame(output, columns=['input','output'])\n return output, df \n\n\nclass LanguageIndex():\n def __init__(self, lang):\n self.lang = lang\n self.word2idx = {} #dictionary\n self.idx2word = {} #dictionary\n self.vocab = set() #set\n self.create_index() #function\n def create_index(self):\n for phrase in self.lang:\n self.vocab.update(phrase.split(' ')) 
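The pair generation in `generate_dataset` below slices each line at successive character positions to build (seen-prefix, remaining-suffix) pairs. Note the start and end marker strings around each slice appear to have been stripped during extraction (the later lookup of `word2idx[""]` suggests they were tags such as `<start>`/`<end>`); a tiny dry run of the same slicing, with explicit markers assumed:

line = 'the court'
pairs = []
for i in range(1, len(line)):
    # Mirrors the character-position split used by generate_dataset.
    pairs.append(('<start> ' + line[:i + 1] + ' <end>',
                  '<start> ' + line[i + 1:] + ' <end>'))
print(pairs[0])
print(pairs[-1])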
#splitting the text where there is a space\n self.vocab = sorted(self.vocab)\n self.word2idx[\"\"] = 0 #keeping the padding tokens as 0 index\n self.idx2word[0] = \"\" #keeping the 0 index to the padding tokens\n for i,word in enumerate(self.vocab):\n self.word2idx[word] = i + 1 #each word will have its own index\n self.idx2word[i+1] = word \n\ndef max_length(t):\n return max(len(i) for i in t) #finding the maximum length for the input and output\n\ndef load_dataset():\n pairs,df = generate_dataset()\n\n out_lang = LanguageIndex(sp for en, sp in pairs)\n in_lang = LanguageIndex(en for en, sp in pairs)\n \n input_data = [[in_lang.word2idx[s] for s in en.split(' ')] for en, sp in pairs]\n output_data = [[out_lang.word2idx[s] for s in sp.split(' ')] for en, sp in pairs]\n\n max_length_in, max_length_out = max_length(input_data), max_length(output_data)\n input_data = tf.keras.preprocessing.sequence.pad_sequences(input_data, maxlen=max_length_in, padding=\"post\") #padding to the max_length for input_data\n output_data = tf.keras.preprocessing.sequence.pad_sequences(output_data, maxlen=max_length_out, padding=\"post\") #padding to the max_length for output_data\n\n return input_data, output_data, in_lang, out_lang, max_length_in, max_length_out, df\n \n\n\n# In[6]:\n\n\ninput_data, teacher_data, input_lang, target_lang, len_input, len_target, df = load_dataset()\n# We use teacher forcing method, which works by using the actual or expected output from the training dataset at \n# the current time step y(t) as input in the next time step X(t+1), rather than the output generated by the network.\n\ntarget_data = [[teacher_data[n][i+1] for i in range(len(teacher_data[n])-1)] for n in range(len(teacher_data))]\n\n\n\ntarget_data = tf.keras.preprocessing.sequence.pad_sequences(target_data, maxlen=len_target, padding=\"post\")\n\ntarget_data = target_data.reshape((target_data.shape[0], target_data.shape[1], 1))\n\n# Shuffle all of the data in unison. This training set has the longest (e.g. 
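The `target_data` construction above is a left shift of `teacher_data` by one token, so at step t the decoder input is token t and the training target is token t+1. On a single padded row the effect is:

teacher_row = [2, 7, 9, 4, 3, 0]    # hypothetical token ids, 0 = padding
target_row = teacher_row[1:] + [0]  # shifted left by one, re-padded
print(target_row)                   # [7, 9, 4, 3, 0, 0]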
most complicated) data at the end,\n# so a simple Keras validation split will be problematic if not shuffled.\n\np = np.random.permutation(len(input_data))\n\n\ninput_data = input_data[p]\nteacher_data = teacher_data[p]\ntarget_data = target_data[p]\n\n\n# In[7]:\n\n\n# Displaying the dataframe of x_ngram and y_ngram\npd.set_option('display.max_colwidth', -1)\nBUFFER_SIZE = len(input_data)\nBATCH_SIZE = 128\nembedding_dim = 300\nunits = 128\nvocab_in_size = len(input_lang.word2idx)\nvocab_out_size = len(target_lang.word2idx)\n\nprint(vocab_out_size)\nprint(vocab_in_size)\ndf.head(10)\n\n\n# In[8]:\n\n\n# Create the Encoder layers first.\nencoder_inputs = Input(shape=(len_input,))\nencoder_emb = Embedding(input_dim=vocab_in_size, output_dim=embedding_dim)\n\n#Using Bidirectional LSTM\nencoder_lstm = Bidirectional(LSTM(units=units, return_sequences=True, return_state=True))\nencoder_out, fstate_h, fstate_c, bstate_h, bstate_c = encoder_lstm(encoder_emb(encoder_inputs))\nstate_h = Concatenate()([fstate_h,bstate_h])\nstate_c = Concatenate()([bstate_h,bstate_c])\nencoder_states = [state_h, state_c]\n\n\n# Creating the Decoder layers.\ndecoder_inputs = Input(shape=(None,))\ndecoder_emb = Embedding(input_dim=vocab_out_size, output_dim=embedding_dim)\ndecoder_lstm = LSTM(units=units*2, return_sequences=True, return_state=True)\ndecoder_lstm_out, _, _ = decoder_lstm(decoder_emb(decoder_inputs), initial_state=encoder_states)\n\n\n# Two dense layers added to this model to improve inference capabilities.\ndecoder_d1 = Dense(units, activation=\"relu\")\ndecoder_d2 = Dense(vocab_out_size, activation=\"softmax\")\ndecoder_out = decoder_d2(Dropout(rate=.2)(decoder_d1(Dropout(rate=.2)(decoder_lstm_out))))\n\n\n# Creating the training model that combines the encoder and the decoder.\nmodel = Model(inputs = [encoder_inputs, decoder_inputs], outputs= decoder_out)\n\n# Using sparse_categorical_crossentropy so we don't have to expand decoder_out into a massive one-hot array.\n\nmodel.compile(optimizer= 'adam', loss=\"sparse_categorical_crossentropy\", metrics=['sparse_categorical_accuracy'])\n\ncol3,col4 = sl.columns(2)\ncol3.header('Model Summary')\nimg6 = Image.open(\"msumm.png\")\ncol3.image(img6, use_column_width=True)\n\n\n\ncol10,col11 = sl.columns(2)\ncol10.header(\"Model Metrics - Accuracy\")\nimg3 = Image.open(\"macc.png\")\nimg4 = Image.open(\"mloss.png\")\ncol10.image(img3, use_column_width=True)\ncol11.header(\"Model Metrics - Loss\")\ncol11.image(img4, use_column_width=True)\n\nsl.write('''\nTo Evaluate our model, we are using \"Rouge Score\" i.e. \n**\"Recall Oriented Understudy Gist Evaluation\".**\n\nROUGE is a set of metrics rather than just one. ROUGE-N measures the number of matching n-grams between our model-generated text and a 'reference'. \n\nSimilarly, for ROUGE-1 we would be measuring the match rate of unigrams between our model output reference. \n\nROUGE-2 and ROUGE-3 would use bigrams and trigrams respectively. \n\nROUGE-L Measures the long common subsequence (LCS) between our model output and the 'reference. 
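For reference, a minimal sketch of computing the ROUGE variants named here, assuming Google's `rouge-score` package is installed (the sentences are made up):

from rouge_score import rouge_scorer

scorer = rouge_scorer.RougeScorer(['rouge1', 'rouge2', 'rougeL'])
scores = scorer.score('the court finds the claim valid',  # reference
                      'the court finds the claim')         # model output
for name, s in scores.items():
    print(name, round(s.precision, 2), round(s.recall, 2), round(s.fmeasure, 2))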
\n\nEach of these provides the 'Recall', 'Precision' and 'F1' score.\n\nWe are using ROUGE-1, ROUGE-2 and ROUGE-L in particular.\n\n''')\n\n\n\n\n\n\n#pip install tensorflow-estimator==2.5.*\n\n\n\n#Using the Early stopping method\n\nfrom tensorflow.keras.callbacks import EarlyStopping\nearlyStop=EarlyStopping(monitor=\"val_loss\",verbose=2,mode='min',patience=1)\n\n\n\n\nhistory = model.fit([input_data, teacher_data],target_data,\n batch_size= BATCH_SIZE,\n epochs=2,\n validation_split=0.2,\n steps_per_epoch = 100,\n callbacks = [earlyStop])\n\n\n\n# Training Results\n# plt.plot(history.history['loss'], label=\"Training loss\")\n# plt.plot(history.history['val_loss'], label=\"Validation loss\")\n# plt.show()\n\n\n\n\n# Creating the encoder model from the tensors we previously declared.\nencoder_model = Model(encoder_inputs, [encoder_out, state_h, state_c])\n\n# Generate a new set of tensors for our new inference decoder.\n\ninf_decoder_inputs = Input(shape=(None,), name=\"inf_decoder_inputs\")\n# We'll need to force feed the two state variables into the decoder each step.\nstate_input_h = Input(shape=(units*2,), name=\"state_input_h\")\nstate_input_c = Input(shape=(units*2,), name=\"state_input_c\")\ndecoder_res, decoder_h, decoder_c = decoder_lstm(\n decoder_emb(inf_decoder_inputs), \n initial_state=[state_input_h, state_input_c])\n\ninf_decoder_out = decoder_d2(decoder_d1(decoder_res))\ninf_model = Model(inputs=[inf_decoder_inputs, state_input_h, state_input_c], \n outputs=[inf_decoder_out, decoder_h, decoder_c])\n\n\n# Convert the given string into a vector of word IDs\n\ndef sentence_to_vector(sentence, lang):\n\n pre = sentence\n vec = np.zeros(len_input)\n sentence_list = [lang.word2idx[s] for s in pre.split(' ')]\n for i,w in enumerate(sentence_list):\n vec[i] = w\n return vec\n\n# Given an input string, an encoder model (infenc_model) and a decoder model (infmodel),\ndef translate(input_sentence, infenc_model, infmodel):\n sv = sentence_to_vector(input_sentence, input_lang)\n sv = sv.reshape(1,len(sv))\n [emb_out, sh, sc] = infenc_model.predict(x=sv)\n \n i = 0\n start_vec = target_lang.word2idx[\"\"]\n stop_vec = target_lang.word2idx[\"\"]\n \n cur_vec = np.zeros((1,1))\n cur_vec[0,0] = start_vec\n cur_word = \"\"\n output_sentence = \"\"\n\n while cur_word != \"\" and i < (len_target-1):\n i += 1\n if cur_word != \"\":\n output_sentence = output_sentence + \" \" + cur_word\n x_in = [cur_vec, sh, sc]\n [nvec, sh, sc] = infmodel.predict(x=x_in)\n cur_vec[0,0] = np.argmax(nvec[0,0])\n cur_word = target_lang.idx2word[np.argmax(nvec[0,0])]\n return output_sentence\n\n\nsl.write('''\n # Try it out -\n''')\n\n\ntest = [\n 'owner may',\n 'All rights',\n 'will comply',\n 'INCLUDING WITHOUT',\n 'EACH PARTY ',\n 'The term of this',\n 'trustee or receiver ',\n 'a '\n]\n \noutput = [] \nfor t in test: \n output.append({\"Input seq\":t.lower(), \"Pred. Seq\":translate(t.lower(), encoder_model, inf_model)})\n\nresults_df = pd.DataFrame.from_dict(output) \nresults_df.head(len(test))\n\ninp = sl.text_input(\"Enter something here\")\n\n\noutput = [] \n\noutput.append({\"Input seq\":inp.lower(), \"Pred. 
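The `translate` function above decodes greedily, taking the argmax token at every step. For an autocomplete UI it can be useful to sample instead, trading determinism for varied suggestions; a hedged sketch of temperature sampling over one step's output distribution (not part of the original script):

import numpy as np

def sample_next(probs, temperature=0.7):
    # temperature -> 0 approaches the greedy argmax used by translate().
    logits = np.log(np.asarray(probs, dtype=np.float64) + 1e-9) / temperature
    p = np.exp(logits - logits.max())
    p /= p.sum()
    return int(np.random.choice(len(p), p=p))

print(sample_next([0.1, 0.6, 0.2, 0.1]))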
Seq\":translate(inp.lower(), encoder_model, inf_model)})\n\nresults_df = pd.DataFrame.from_dict(output) \nresults_df.head(len(test))\n\nsl.write(results_df)\n\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":13274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"413243938","text":"import cx_Oracle\r\n\r\nip = 'orase.fh-hagenberg.at'\r\nport = 1521\r\nSID = 'ORCL'\r\ndsn_tns = cx_Oracle.makedsn(ip, port, SID)\r\n#etablish database connection\r\ndb = cx_Oracle.connect('S1710458017', '******', dsn_tns)\r\n#get cursor for select\r\ncur = db.cursor()\r\ncur.execute('select * FROM person')\r\n#iterate over results and print \r\nfor result in cur:\r\n print(result)\r\n \r\n","sub_path":"3.Semester/DB/Übung/Uebung6/DB_connect.py","file_name":"DB_connect.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"27527276","text":"from base import octopus, clean_db\n\n\ndef test_online_add_column(octopus, clean_db):\n \"\"\"\n Verify that we can add columns to a stream while not affecting running CQs\n \"\"\"\n octopus.create_stream('stream0', c0='integer')\n\n octopus.create_cv('cv0', 'SELECT c0 FROM stream0')\n octopus.insert('stream0', ('c0',), [(n,) for n in range(0, 1000)])\n result = list(octopus.execute('SELECT * FROM cv0'))\n\n assert len(result) == 1000\n\n for row in result:\n for col in row:\n assert col is not None\n\n octopus.execute('ALTER STREAM stream0 ADD c1 integer')\n\n octopus.create_cv('cv1', 'SELECT c0, c1 FROM stream0')\n octopus.insert('stream0', ('c0', 'c1'),\n [(n, n) for n in range(1000, 2000)])\n result = list(octopus.execute('SELECT * FROM cv1 WHERE c1 >= 1000'))\n\n assert len(result) == 1000\n\n for row in result:\n for col in row:\n assert col is not None\n\n octopus.execute('ALTER STREAM stream0 ADD c2 integer')\n octopus.create_cv('cv2', 'SELECT c0, c1, c2 FROM stream0')\n octopus.insert('stream0', ('c0', 'c1', 'c2'),\n [(n, n, n) for n in range(2000, 3000)])\n result = list(octopus.execute('SELECT * FROM cv2 WHERE c2 >= 2000'))\n\n assert len(result) == 1000\n\n for row in result:\n for col in row:\n assert col is not None\n\n octopus.execute('ALTER STREAM stream0 ADD c3 integer')\n octopus.create_cv('cv3', 'SELECT c0, c1, c2, c3 FROM stream0')\n octopus.insert('stream0', ('c0', 'c1', 'c2', 'c3'),\n [(n, n, n, n) for n in range(3000, 4000)])\n result = list(octopus.execute('SELECT * FROM cv3 WHERE c3 >= 3000'))\n\n assert len(result) == 1000\n\n for row in result:\n for col in row:\n assert col is not None\n\n octopus.execute('ALTER STREAM stream0 ADD c4 integer')\n octopus.create_cv('cv4', 'SELECT c0, c1, c2, c3, c4 FROM stream0')\n octopus.insert('stream0', ('c0', 'c1', 'c2', 'c3', 'c4'),\n [(n, n, n, n, n) for n in range(4000, 5000)])\n result = list(octopus.execute('SELECT * FROM cv4 WHERE c4 >= 4000'))\n\n assert len(result) == 1000\n\n for row in result:\n for col in row:\n assert col is not None\n\ndef test_online_drop_column(octopus, clean_db):\n octopus.create_stream('stream1', c0='integer')\n\n try:\n octopus.execute('ALTER STREAM stream1 DROP c0')\n assert False\n except:\n pass\n","sub_path":"src/test/py/test_typed_streams.py","file_name":"test_typed_streams.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"607050665","text":"from __future__ import print_function, 
absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7\nimport fluid_only_var\n\n#\n#\n# setting the domain size for the problem to be solved\ndomain_size = fluid_only_var.domain_size\n\n#\n#\n# ATTENTION: here the order is important\n\n# including kratos path\n# kratos_root/benchmarking\nkratos_benchmarking_path = '../../../../benchmarking'\nimport sys\nsys.path.append(kratos_benchmarking_path)\nimport benchmarking\n\nfrom KratosMultiphysics import *\nfrom KratosMultiphysics.IncompressibleFluidApplication import *\n\n\ndef BenchmarkCheck(time, model_part):\n max_press = 0.0\n min_press = 0.0\n vel2min = 10000.0\n id_min_vel = 0\n x_min_vel = 0.0\n y_min_vel = 0.0\n for node in model_part.Nodes:\n press = node.GetSolutionStepValue(PRESSURE)\n if(press > max_press):\n max_press = press\n elif(press < min_press):\n min_press = press\n\n x = node.X\n y = node.Y\n vel = node.GetSolutionStepValue(VELOCITY)\n vel2 = vel[0] ** 2 + vel[1] ** 2\n if(x > 0.1 and x < 0.9 and y > 0.1 and y < 0.9):\n if(vel2 < vel2min):\n vel2min = vel2\n id_min_vel = node.Id\n x_min_vel = node.X\n y_min_vel = node.Y\n\n benchmarking.Output(time, \"Time\")\n benchmarking.Output(min_press, \"minimum pressure\", 0.00001)\n benchmarking.Output(max_press, \"maximum pressure\", 0.00001)\n benchmarking.Output(\n id_min_vel,\n \"Id of the node with minimum velocity norm\",\n 0.0)\n benchmarking.Output(x_min_vel, \"coord x minimum velocity norm\", 0.0)\n benchmarking.Output(y_min_vel, \"coord y minimum velocity norm\", 0.0)\n\n# defining a model part for the fluid and one for the structure\nfluid_model_part = ModelPart(\"FluidPart\")\n\n#\n# importing the solvers needed\nSolverType = fluid_only_var.SolverType\nif(SolverType == \"fractional_step\"):\n import fractional_step_solver\n fractional_step_solver.AddVariables(fluid_model_part)\nelif(SolverType == \"pressure_splitting\"):\n import decoupled_solver_eulerian\n decoupled_solver_eulerian.AddVariables(fluid_model_part)\nelif(SolverType == \"monolithic_solver_eulerian\"):\n import monolithic_solver_eulerian\n monolithic_solver_eulerian.AddVariables(fluid_model_part)\nelif(SolverType == \"monolithic_solver_eulerian_compressible\"):\n import monolithic_solver_eulerian_compressible\n monolithic_solver_eulerian_compressible.AddVariables(fluid_model_part)\nelse:\n raise \"solver type not supported: options are fractional_step - \\\n\tpressure_splitting - monolithic_solver_eulerian - \\\n\tmonolithic_solver_eulerian_compressible\"\n\n# introducing input file name\ninput_file_name = fluid_only_var.problem_name\n\n# reading the fluid part\ngid_mode = GiDPostMode.GiD_PostBinary\nmultifile = MultiFileFlag.MultipleFiles\ndeformed_mesh_flag = WriteDeformedMeshFlag.WriteUndeformed\nwrite_conditions = WriteConditionsFlag.WriteElementsOnly\ngid_io = GidIO(\n input_file_name,\n gid_mode,\n multifile,\n deformed_mesh_flag,\n write_conditions)\nmodel_part_io_fluid = ModelPartIO(input_file_name)\nmodel_part_io_fluid.ReadModelPart(fluid_model_part)\n\n# setting up the buffer size: SHOULD BE DONE AFTER READING!!!\nfluid_model_part.SetBufferSize(3)\n\n# adding dofs\nif(SolverType == \"fractional_step\"):\n fractional_step_solver.AddDofs(fluid_model_part)\nelif(SolverType == \"pressure_splitting\"):\n decoupled_solver_eulerian.AddDofs(fluid_model_part)\nelif(SolverType == \"monolithic_solver_eulerian\"):\n monolithic_solver_eulerian.AddDofs(fluid_model_part)\nelif(SolverType == \"monolithic_solver_eulerian_compressible\"):\n 
monolithic_solver_eulerian_compressible.AddDofs(fluid_model_part)\n\n# select here the laplacian form!!!!!!!!!!!!!!!!!\nlaplacian_form = 1\n\nif(laplacian_form >= 2):\n for node in fluid_model_part.Nodes:\n node.Free(PRESSURE)\n\n# check to ensure that no node has zero density or pressure\nfor node in fluid_model_part.Nodes:\n if(node.GetSolutionStepValue(DENSITY) == 0.0):\n print(\"node \", node.Id, \" has zero density!\")\n raise 'node with zero density found'\n if(node.GetSolutionStepValue(VISCOSITY) == 0.0):\n print(\"node \", node.Id, \" has zero viscosity!\")\n raise 'node with zero VISCOSITY found'\n\n# creating the solvers\n# fluid solver\nif(SolverType == \"fractional_step\"):\n fluid_solver = fractional_step_solver.IncompressibleFluidSolver(\n fluid_model_part, domain_size)\n fluid_solver.laplacian_form = laplacian_form\n # standard laplacian form\n fluid_solver.predictor_corrector = False\n fluid_solver.max_press_its = fluid_only_var.max_press_its\n fluid_solver.velocity_linear_solver = SkylineLUFactorizationSolver()\n fluid_solver.pressure_linear_solver = SkylineLUFactorizationSolver()\n\n fluid_solver.Initialize()\nelif(SolverType == \"pressure_splitting\"):\n fluid_solver = decoupled_solver_eulerian.\\\n DecoupledSolver(fluid_model_part, domain_size)\n oss_switch = fluid_only_var.use_oss\n dynamic_tau = fluid_only_var.dynamic_tau\n# pPrecond = ILU0Preconditioner()\n pPrecond = DiagonalPreconditioner()\n fluid_solver.pressure_linear_solver = BICGSTABSolver(1e-3, 5000, pPrecond)\n# fluid_solver.linear_solver = SuperLUSolver()\n fluid_solver.rel_vel_tol = 1e-4\n fluid_solver.abs_vel_tol = 1e-6\n fluid_solver.rel_pres_tol = 1e-4\n fluid_solver.abs_pres_tol = 1e-6\n fluid_solver.use_inexact_newton = False\n fluid_model_part.ProcessInfo.SetValue(OSS_SWITCH, oss_switch)\n fluid_model_part.ProcessInfo.SetValue(DYNAMIC_TAU, dynamic_tau)\n fluid_solver.Initialize()\nelif(SolverType == \"monolithic_solver_eulerian\"):\n fluid_solver = monolithic_solver_eulerian.MonolithicSolver(\n fluid_model_part, domain_size)\n oss_switch = fluid_only_var.use_oss\n dynamic_tau = fluid_only_var.dynamic_tau\n fluid_model_part.ProcessInfo.SetValue(OSS_SWITCH, oss_switch)\n fluid_model_part.ProcessInfo.SetValue(DYNAMIC_TAU, dynamic_tau)\n fluid_solver.Initialize()\nelif(SolverType == \"monolithic_solver_eulerian_compressible\"):\n fluid_solver = monolithic_solver_eulerian_compressible.MonolithicSolver(\n fluid_model_part, domain_size)\n oss_switch = fluid_only_var.use_oss\n dynamic_tau = fluid_only_var.dynamic_tau\n fluid_model_part.ProcessInfo.SetValue(OSS_SWITCH, oss_switch)\n fluid_model_part.ProcessInfo.SetValue(DYNAMIC_TAU, dynamic_tau)\n fluid_solver.Initialize()\n\n\nprint(\"fluid solver created\")\n\n# settings to be changed\nDt = fluid_only_var.Dt\nfull_Dt = Dt\ninitial_Dt = 0.001 * full_Dt # 0.05 #0.01\nfinal_time = fluid_only_var.max_time\noutput_step = fluid_only_var.output_step\n\nout = 0\n\n\n# mesh to be printed\nmesh_name = 0.0\ngid_io.InitializeMesh(mesh_name)\ngid_io.WriteMesh(fluid_model_part.GetMesh())\ngid_io.FinalizeMesh()\n\ngid_io.InitializeResults(mesh_name, (fluid_model_part).GetMesh())\n\n\ntime = 0.0\nstep = 0\nwhile(time < final_time):\n\n if(step < 5):\n Dt = initial_Dt\n else:\n Dt = full_Dt\n\n time = time + Dt\n fluid_model_part.CloneTimeStep(time)\n\n if(step >= 3):\n fluid_solver.Solve()\n BenchmarkCheck(time, fluid_model_part)\n\n if(out == output_step):\n if(SolverType == \"fractional_step\" or SolverType == \"pressure_splitting\"):\n gid_io.WriteNodalResults(PRESSURE, 
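The time loop below ramps the step size: a very small Dt for the first five steps while the solver settles, then the full Dt. The ramp logic in isolation, with illustrative values standing in for the fluid_only_var settings:

full_Dt = 0.01                 # illustrative value
initial_Dt = 0.001 * full_Dt
for step in range(8):
    Dt = initial_Dt if step < 5 else full_Dt
    print(step, Dt)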
fluid_model_part.Nodes, time, 0)\n gid_io.WriteNodalResults(VELOCITY, fluid_model_part.Nodes, time, 0)\n else:\n gid_io.WriteNodalResults(PRESSURE, fluid_model_part.Nodes, time, 0)\n gid_io.WriteNodalResults(\n AIR_PRESSURE,\n fluid_model_part.Nodes,\n time,\n 0)\n gid_io.WriteNodalResults(\n WATER_PRESSURE,\n fluid_model_part.Nodes,\n time,\n 0)\n gid_io.WriteNodalResults(VELOCITY, fluid_model_part.Nodes, time, 0)\n # gid_io.WriteNodalResults(DISPLACEMENT,fluid_model_part.Nodes,time,0)\n gid_io.WriteNodalResults(\n MESH_VELOCITY,\n fluid_model_part.Nodes,\n time,\n 0)\n gid_io.WriteNodalResults(\n IS_STRUCTURE,\n fluid_model_part.Nodes,\n time,\n 0)\n gid_io.WriteNodalResults(\n IS_BOUNDARY,\n fluid_model_part.Nodes,\n time,\n 0)\n gid_io.WriteNodalResults(\n IS_POROUS,\n fluid_model_part.Nodes,\n time,\n 0)\n gid_io.WriteNodalResults(\n IS_FREE_SURFACE,\n fluid_model_part.Nodes,\n time,\n 0)\n # gid_io.PrintOnGaussPoints(THAWONE,fluid_model_part,time)\n # gid_io.PrintOnGaussPoints(THAWTWO,fluid_model_part,time)\n gid_io.WriteNodalResults(ADVPROJ, fluid_model_part.Nodes, time, 0)\n gid_io.WriteNodalResults(DIVPROJ, fluid_model_part.Nodes, time, 0)\n gid_io.WriteNodalResults(DENSITY, fluid_model_part.Nodes, time, 0)\n gid_io.WriteNodalResults(\n DENSITY_AIR,\n fluid_model_part.Nodes,\n time,\n 0)\n # gid_io.WriteNodalResults(NODAL_H,fluid_model_part.Nodes,time,0)\n gid_io.WriteNodalResults(\n VISCOSITY,\n fluid_model_part.Nodes,\n time,\n 0)\n gid_io.WriteNodalResults(\n SOUND_VELOCITY,\n fluid_model_part.Nodes,\n time,\n 0)\n gid_io.WriteNodalResults(\n AIR_SOUND_VELOCITY,\n fluid_model_part.Nodes,\n time,\n 0)\n\n out = 0\n\n out = out + 1\n step = step + 1\n\ngid_io.FinalizeResults()\n","sub_path":"applications/incompressible_fluid_application/test_examples/cavity2D.gid/test_fractstep_cont_laplacian.py","file_name":"test_fractstep_cont_laplacian.py","file_ext":"py","file_size_in_byte":9895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"18914478","text":"'''\nThis file visualizes the normalized activation of each filter.\n'''\nfrom queue import Full\nfrom matplotlib import image\nfrom torchvision.models import resnet50, vit_b_16, alexnet, vgg11\nfrom PIL import Image\nfrom torchvision import datasets, transforms\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nimport torchvision\nfrom torch.utils import model_zoo\nimport cv2\nfrom torchvision import utils\nimport numpy as np\n\n\ndef save_image_matrix(image_list, filename, size):\n '''\n 列表形式输入\n Date:2021.9.10\n Function: Save a list of figure in the form of a matrix m*n\n Param:\n image_list: a list of images, the images has to be two dims.\n filename: The filename\n size: the width and height of unified image\n '''\n\n matrix = []\n m, n = size\n assert len(image_list) >= m*n, \"The number of images is less than m*n.\"\n for i in range(n):\n matrix.append(torch.cat(image_list[i*m:i*m+m],2))\n utils.save_image(torch.cat(matrix, 1), filename)\n\n\ndef show_cam_on_image(img: np.ndarray,\n mask: np.ndarray,\n use_rgb: bool = False,\n colormap: int = cv2.COLORMAP_JET) -> np.ndarray:\n heatmap = cv2.applyColorMap(np.uint8(255 * mask), colormap)\n if use_rgb:\n heatmap = cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB)\n heatmap = np.float32(heatmap) / 255\n\n if np.max(img) > 1:\n raise Exception(\n \"The input image should np.float32 in the range [0, 1]\")\n cam = heatmap + img\n cam = cam / np.max(cam)\n return np.uint8(255 * cam)\n\ndef load_model(model_name):\n 
model_urls = {\n 'resnet50_trained_on_SIN': 'resnet50_train_60_epochs-c8e5653e.pth.tar',\n 'resnet50_trained_on_SIN_and_IN': 'resnet50_train_45_epochs_combined_IN_SF-2a0d100e.pth.tar',\n 'resnet50_trained_on_SIN_and_IN_then_finetuned_on_IN': 'resnet50_finetune_60_epochs_lr_decay_after_30_start_resnet50_train_45_epochs_combined_IN_SF-ca06340c.pth.tar',\n 'alexnet_trained_on_SIN': 'alexnet_train_60_epochs_lr0.001-b4aa5238.pth.tar',\n 'vgg16_trained_on_SIN': 'vgg16_train_60_epochs_lr0.01-6c6fcc9f.pth.tar'\n }\n SIN_models = [\"resnet50_trained_on_SIN\", \"resnet50_trained_on_SIN_and_IN\", \"resnet50_trained_on_SIN_and_IN_then_finetuned_on_IN\", \"vgg16_trained_on_SIN\", \"alexnet_trained_on_SIN\"]\n if model_name in SIN_models:\n if \"resnet50\" in model_name:\n print(\"Using the ResNet50 architecture.\")\n model = torchvision.models.resnet50(pretrained=False)\n model = torch.nn.DataParallel(model).cuda()\n checkpoint = model_zoo.load_url(model_urls[model_name])\n elif \"vgg16\" in model_name:\n print(\"Using the VGG-16 architecture.\")\n # download model from URL manually and save to desired location\n model = torchvision.models.vgg16(pretrained=False)\n model.features = torch.nn.DataParallel(model.features)\n model.cuda()\n checkpoint = model_zoo.load_url(model_urls[model_name])\n elif \"alexnet\" in model_name:\n print(\"Using the AlexNet architecture.\")\n model = torchvision.models.alexnet(pretrained=False)\n model.features = torch.nn.DataParallel(model.features)\n model.cuda()\n checkpoint = model_zoo.load_url(model_urls[model_name])\n model.load_state_dict(checkpoint[\"state_dict\"])\n elif model_name in ['resnet50_cutmix','resnet50_feature_cutmix', 'resnet101_cutmix']:\n checkpoint = torch.load(f'./pretrained_models/data_augmentation_models/cutmix/{model_name}.tar') \n if 'resnet50' in model_name:\n model = torchvision.models.resnet50()\n elif 'resnet101' in model_name:\n model = torchvision.models.resnet101()\n model = torch.nn.DataParallel(model).cuda() \n model.load_state_dict(checkpoint['state_dict']) \n elif model_name == 'resnet152_cutmix':\n checkpoint = torch.load('./pretrained_models/data_augmentation_models/cutmix/resnet152_cutmix.pth') \n model = torchvision.models.resnet152()\n model.load_state_dict(checkpoint) \n model = torch.nn.DataParallel(model).cuda() \n elif model_name == 'resnext_cutmix':\n checkpoint = torch.load('./pretrained_models/data_augmentation_models/cutmix/resnext_cutmix.pth.tar') \n model = timm.create_model('resnext101_32x4d')\n model.load_state_dict({k.replace('module.',''):checkpoint[k] for k in checkpoint}) \n model = torch.nn.DataParallel(model).cuda() \n elif model_name in ['resnet50_mixup','resnet50_manifold_mixup']:\n checkpoint = torch.load(f'./pretrained_models/data_augmentation_models/mixup/{model_name}.tar') \n model = torchvision.models.resnet50()\n model = torch.nn.DataParallel(model).cuda() \n model.load_state_dict(checkpoint['state_dict']) \n elif model_name == 'resnet50_cutout':\n checkpoint = torch.load(f'./pretrained_models/data_augmentation_models/resnet50_cutout.tar') \n model = torchvision.models.resnet50()\n model = torch.nn.DataParallel(model).cuda() \n model.load_state_dict(checkpoint['state_dict']) \n\n elif model_name == 'resnet50_augmix':\n checkpoint = torch.load('./pretrained_models/data_augmentation_models/resnet50_augmix.tar') \n arch = checkpoint['arch']\n model = torchvision.models.__dict__[arch]()\n model = torch.nn.DataParallel(model).cuda() \n model.load_state_dict(checkpoint['state_dict']) \n elif model_name == 
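Several branches of `load_model` juggle the `'module.'` key prefix that `torch.nn.DataParallel` adds when a wrapped model's state dict is saved; the dict-comprehension trick used in the `resnext_cutmix` branch works on plain dictionaries too, as this toy run shows:

# Hypothetical checkpoint keys as saved from a DataParallel-wrapped model.
state = {'module.conv1.weight': 0.0, 'module.fc.bias': 1.0}
stripped = {k.replace('module.', '', 1): v for k, v in state.items()}
print(list(stripped))  # ['conv1.weight', 'fc.bias']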
'resnet50_deepaugment':\n checkpoint = torch.load('./pretrained_models/data_augmentation_models/deepaugment/deepaugment.pth.tar') \n arch = checkpoint['arch']\n model = torchvision.models.__dict__[arch]()\n model = torch.nn.DataParallel(model).cuda() \n model.load_state_dict(checkpoint['state_dict']) \n elif model_name == 'resnet50_deepaugment_augmix':\n checkpoint = torch.load('./pretrained_models/data_augmentation_models/deepaugment/deepaugment_and_augmix.pth.tar') \n arch = checkpoint['arch']\n model = torchvision.models.__dict__[arch]()\n model = torch.nn.DataParallel(model).cuda() \n model.load_state_dict(checkpoint['state_dict']) \n elif model_name == 'resnext101_32x8d_deepaugment_augmix':\n checkpoint = torch.load('./pretrained_models/data_augmentation_models/deepaugment/resnext101_augmix_and_deepaugment.pth.tar') \n arch = checkpoint['arch']\n model = torchvision.models.__dict__[arch]()\n model = torch.nn.DataParallel(model).cuda() \n model.load_state_dict(checkpoint['state_dict']) \n elif model_name == 'adv':\n checkpoint = torch.load('./pretrained_models/data_augmentation_models/adversarial_imagenet_model_weights_2px.pth.tar') \n arch = checkpoint['arch']\n model = torchvision.models.__dict__[arch]()\n model = torch.nn.DataParallel(model).cuda() \n model.load_state_dict(checkpoint['state_dict']) \n elif model_name == '21k':\n model = timm.create_model('vit_base_patch16_224_miil', pretrained=True)\n model = torch.nn.DataParallel(model).cuda() \n elif model_name in ['ANT','ANT_SIN','ANT3x3','ANT3x3_SIN','Gauss_mult','Gauss_sigma_0.5','Speckle']:\n model = torchvision.models.resnet50()\n checkpoint = torch.load(f'./pretrained_models/data_augmentation_models/game_of_noise/{model_name}_Model.pth') \n model.load_state_dict(checkpoint['model_state_dict'])\n model = torch.nn.DataParallel(model).cuda() \n \n elif model_name in timm_models:\n model = timm.create_model(model_name, pretrained=True)\n model = torch.nn.DataParallel(model).cuda()\n\n ''' \n elif model_name in torchvision_models:\n model = getattr(torchvision.models,model_name)(pretrained=True)\n model = torch.nn.DataParallel(model).cuda()\n\n '''\n model.eval() \n return model\n\n\n\nrandom_model = resnet50().cuda()\nimagenet_model = resnet50(pretrained=True).cuda()\nSIN_model = load_model('resnet50_trained_on_SIN')\ncutout_model = load_model('resnet50_cutout')\ncutmix_model = load_model('resnet50_cutmix')\nmixup_model = load_model('resnet50_mixup')\naugmix_model = load_model('resnet50_augmix')\nant_model = load_model('ANT_SIN')\ndeepaugment_model = load_model('resnet50_deepaugment')\naugmix_deepaugment_model = load_model('resnet50_deepaugment_augmix')\n\n\n\n\n\nname = 'bottle/bottle2'\nimgs = [f'./datasets/SIL/silhouettes/{name}.png'] + [f'./datasets/SIL/ag/ag_i{i}_hor/{name}.png' for i in range(4,16,2)]\n\n'''\nparams = [('random',random_model),\n ('imagenet',imagenet_model),\n ('SIN',SIN_model),\n ('cutout',cutout_model),\n ('mixup',mixup_model),\n ('augmix',augmix_model),\n ('deepaugment',deepaugment_model),\n ('augmix_deepaugment',augmix_deepaugment_model),]\n'''\nparams = [#('random',random_model),\n ('imagenet',imagenet_model),\n #('augmix',augmix_model),\n ('deepaugment',deepaugment_model),\n #('augmix_deepaugment',augmix_deepaugment_model),\n ]\n\ntarget_layers = ['conv1','bn1','relu','maxpool']\n\n\nwith torch.no_grad():\n \n for model_name, model in params: \n print(model_name)\n imgs = ['./datasets/SIL/ag/ag_i6_hor/bottle/bottle2.png']\n for img_path in imgs:\n img = Image.open(img_path)\n #img = 
Image.open('./datasets/SIN/ag/ag_i4_hor/bird/bird9.png')\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n #transform = transforms.Compose([transforms.ToTensor()])\n rgb_img = transforms.ToTensor()(img).detach().cpu().numpy()\n rgb_img = np.transpose(rgb_img, (1,2,0))\n\n\n input_tensor = transform(img).unsqueeze(0)\n\n #kernel_i = 47\n if model_name in ['random', 'imagenet']:\n model.target_layers = [getattr(model, target_layer) for target_layer in target_layers]\n\n elif model_name in ['SIN', 'augmix', 'deepaugment', 'augmix_deepaugment', 'cutout', 'mixup']:\n model.target_layers = [getattr(model.module, target_layer) for target_layer in target_layers]\n #model.target_layers = [model.module.stem[0],model.module.stem[1]]\n _x = input_tensor.cuda()\n for target_layer in model.target_layers:\n _x = target_layer(_x) \n result_imgs = [] \n x_max = 0\n for _ in range(64):\n x = _x[0][_].cpu().detach().numpy()\n #x = x[0][47].cpu().detach().numpy()\n x = cv2.resize(x, (224, 224))\n \n x = x-np.min(x)\n x_max = max(x_max, np.max(x))\n for _ in range(64):\n #x = torch.mean(_x, (0,1)).cpu().detach().numpy()\n x = _x[0][_].cpu().detach().numpy()\n #x = x[0][47].cpu().detach().numpy()\n x = cv2.resize(x, (224, 224))\n \n x = x-np.min(x)\n\n x = x/(x_max+1e-9)\n grayscale_cam = x\n\n # In this example grayscale_cam has only one image in the batch:\n\n #grayscale_cam = grayscale_cam[0, :]\n #_rgb_img = np.zeros_like(rgb_img)\n visualization = show_cam_on_image(rgb_img, grayscale_cam, use_rgb=True)\n visualization = torch.Tensor(visualization).permute(2,0,1)/255.\n\n result_imgs.append(visualization)\n save_image_matrix(result_imgs, f'{model_name}_hor6_visual_mp.png', (8,8))\n #save_image_matrix(result_imgs, f'test.png', (len(result_imgs)//len(params),len(params)))\n \n\n\n","sub_path":"4_visualize_filter_activation.py","file_name":"4_visualize_filter_activation.py","file_ext":"py","file_size_in_byte":11856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"91315117","text":"'''\nThe idea of this module is to obtain the values from a controller\nWe use a PS4 controller connected via bluetooth\nThe values are stored in a dictionary and show in realtime\n'''\nimport pygame #Detect key strokes\nfrom time import sleep #Sleep is for the delays\n\npygame.init() #Initialize pygame\ncontroller = pygame.joystick.Joystick(0) #Only one controller\ncontroller.init()\n\n#Dictionary for buttons and the axis of the joysticks, so everything is together\nbuttons = {'x':0, 'o':0, 't':0,'s':0,\n 'L1':0, 'R1':0, 'L2':0, 'R2':0,\n 'share':0, 'option':0,\n 'axis1':0., 'axis2':0., 'axis3':0., 'axis4':0.} #0 is not pressed and 1 pressed. Axis are float and range from -1 to 1\n#4 axis because each joystick can go up or down, and right or left\naxiss = [0., 0., 0., 0., 0., 0.] 
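The two-pass loop above first finds one maximum shared across all 64 activation maps and only then scales each zero-shifted map by it, so brightness stays comparable between filters instead of each map saturating on its own. The same scheme on toy data:

import numpy as np

maps = [np.random.rand(4, 4) * s for s in (1.0, 5.0)]  # toy activation maps
shifted = [m - m.min() for m in maps]                  # per-map zero minimum
gmax = max(m.max() for m in shifted)                   # shared maximum
normed = [m / (gmax + 1e-9) for m in shifted]
print([float(m.max()) for m in normed])                # only one map reaches ~1.0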
#\n\ndef getJS(name = ''):\n global buttons\n #Retrieve any events\n for event in pygame.event.get():\n if event.type == pygame.JOYAXISMOTION: #Analog sticks\n axiss[event.axis] = round(event.value, 2) #Store them in the list\n elif event.type == pygame.JOYBUTTONDOWN: #Button pressed\n #print(event.dict, event.joy, event.button, 'PRESSED')\n for x,(key,val) in enumerate(buttons.items()):\n if x < 10:\n if controller.get_button(x):\n buttons[key] = 1 #When button pressed, it changes the value to 1\n elif event.type == pygame.JOYBUTTONUP:\n #print(event.dict, event.joy, event.button, 'RELEASED')\n for x,(key,val) in enumerate(buttons.items()):\n if x < 10:\n if event.button == x:\n buttons[key] = 0 #When button is released it changes the value to 0\n\n #To remove element 2 since axis numbers are 0 1 3 4\n buttons['axis1'], buttons['axis2'], buttons['axis3'], buttons['axis4'] = [axiss[0], axiss[1], axiss[3], axiss[4]]\n\n if name == '':\n return buttons #Return all the bottoms\n else:\n return buttons[name] #Return the particular key we asked for\n\ndef main():\n #print(getJS()) #Get all values\n #sleep(0.05)\n print(getJS('share')) #Get only share button values\n sleep(0.05)\n\nif __name__ == '__main__':\n while True:\n main()\n","sub_path":"JoystickModule.py","file_name":"JoystickModule.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"145872377","text":"from train_nf import train_nf\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy.stats import multivariate_normal\nimport os, sys\nfrom efn_util import get_flowdict, print_flowdict, drawEtas, drawPoissonRates, drawPoissonCounts\nfrom plot_utils import plotContourTruncatedNormal\nos.chdir('../');\n\nexp_fam = 'prp_tn';\nD = int(sys.argv[1]);\nfully_connected_layers = int(sys.argv[2]);\nplanar_layers = int(sys.argv[3]);\nspinner_layers = int(sys.argv[4]);\nnonlin_spinner_layers = int(sys.argv[5]);\n\nflow_dict = get_flowdict(fully_connected_layers, planar_layers, spinner_layers, nonlin_spinner_layers);\nflow_ids = flow_dict['flow_ids'];\nflow_repeats = flow_dict['flow_repeats'];\nprint_flowdict(flow_dict);\n\ncost_type = 'KL';\nM_eta = 100;\nlr_order = -3;\nrandom_seed = 0;\nmax_iters = 20000;\ncheck_rate = 100;\n\nif (D==2):\n\tmu = np.array([[1, 5]]);\n\tSigma = np.expand_dims(np.array([[1, .5], [.5, 1]]), 0);\n\tnp.random.seed(0);\n\tN = 5;\n\tz = np.array([5, 1]);\n\tx = drawPoissonCounts(z, N);\n\txs = np.expand_dims(x, 0);\n\txlim = 10;\n\tylim = 10;\n\tplotContourTruncatedNormal(mu[0], Sigma[0], xlim, ylim, 100);\n\tplt.figure();\n\tprint(x);\n\tH, xedges, yedges = np.histogram2d(x[0], x[1], bins=8, range=[[0,xlim],[0,ylim]], normed=True)\n\tplt.imshow(H.T, origin='lower', interpolation='none');\n\tplt.show();\nelse:\n\traise NotImplementedError;\n\n\nparams = {'mu':mu, 'Sigma':Sigma, 'x':xs, 'lambda':z, 'D':D};\n\nlog_P, X, R2s, KLs, it = train_nf(exp_fam, params, flow_dict, cost_type, \\\n\t\t M_eta, lr_order, random_seed, max_iters, check_rate);\n","sub_path":"efn/validation/test_flow_prp_compatability.py","file_name":"test_flow_prp_compatability.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"290753358","text":"from __future__ import division\nfrom __future__ import print_function\nimport argparse, chainer, time, sys\nimport numpy as np\nimport chainer.functions as F\nfrom chainer import cuda\nfrom model import 
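One addition worth considering for `getJS`: analog sticks rarely rest at exactly 0.0, so a small deadzone keeps a released stick from drifting the robot. A hypothetical helper, not part of the original module, that could filter the axis values before they are returned:

def deadzone(value, threshold=0.05):
    # Treat small stick deflections around zero as zero.
    return 0.0 if abs(value) < threshold else value

print(deadzone(0.03), deadzone(-0.42))  # 0.0 -0.42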
Model\nfrom aae.optim import Optimizer, GradientClipping\nfrom aae.utils import onehot, printr, clear_console\nfrom aae.dataset.semi_supervised import Dataset\nimport aae.sampler as sampler\n\ndef main():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"--batchsize\", \"-b\", type=int, default=64)\n\tparser.add_argument(\"--total-epochs\", \"-e\", type=int, default=5000)\n\tparser.add_argument(\"--num-labeled-data\", \"-nl\", type=int, default=100)\n\tparser.add_argument(\"--gpu-device\", \"-g\", type=int, default=0)\n\tparser.add_argument(\"--grad-clip\", \"-gc\", type=float, default=5)\n\tparser.add_argument(\"--learning-rate\", \"-lr\", type=float, default=0.0001)\n\tparser.add_argument(\"--momentum\", \"-mo\", type=float, default=0.1)\n\tparser.add_argument(\"--optimizer\", \"-opt\", type=str, default=\"adam\")\n\tparser.add_argument(\"--seed\", type=int, default=0)\n\tparser.add_argument(\"--model\", \"-m\", type=str, default=\"model.hdf5\")\n\targs = parser.parse_args()\n\n\tnp.random.seed(args.seed)\n\n\tmodel = Model()\n\tmodel.load(args.model)\n\n\tmnist_train, mnist_test = chainer.datasets.get_mnist()\n\timages_train, labels_train = mnist_train._datasets\n\timages_test, labels_test = mnist_test._datasets\n\n\t# normalize\n\timages_train = (images_train - 0.5) * 2\n\timages_test = (images_test - 0.5) * 2\n\n\tdataset = Dataset(train=(images_train, labels_train), \n\t\t\t\t\t test=(images_test, labels_test), \n\t\t\t\t\t num_labeled_data=args.num_labeled_data, \n\t\t\t\t\t num_classes=model.ndim_y)\n\tprint(\"#labeled:\t{}\".format(dataset.get_num_labeled_data()))\n\tprint(\"#unlabeled:\t{}\".format(dataset.get_num_unlabeled_data()))\n\t_, labels = dataset.get_labeled_data()\n\tprint(\"labeled data:\", labels)\n\n\ttotal_iterations_train = len(images_train) // args.batchsize\n\n\t# optimizers\n\toptimizer_encoder = Optimizer(args.optimizer, args.learning_rate, args.momentum)\n\toptimizer_encoder.setup(model.encoder)\n\tif args.grad_clip > 0:\n\t\toptimizer_encoder.add_hook(GradientClipping(args.grad_clip))\n\n\toptimizer_decoder = Optimizer(args.optimizer, args.learning_rate, args.momentum)\n\toptimizer_decoder.setup(model.decoder)\n\tif args.grad_clip > 0:\n\t\toptimizer_decoder.add_hook(GradientClipping(args.grad_clip))\n\n\toptimizer_discriminator_z = Optimizer(args.optimizer, args.learning_rate, args.momentum)\n\toptimizer_discriminator_z.setup(model.discriminator_z)\n\tif args.grad_clip > 0:\n\t\toptimizer_discriminator_z.add_hook(GradientClipping(args.grad_clip))\n\n\toptimizer_discriminator_y = Optimizer(args.optimizer, args.learning_rate, args.momentum)\n\toptimizer_discriminator_y.setup(model.discriminator_y)\n\tif args.grad_clip > 0:\n\t\toptimizer_discriminator_y.add_hook(GradientClipping(args.grad_clip))\n\n\toptimizer_cluster_head = Optimizer(args.optimizer, args.learning_rate, args.momentum)\n\toptimizer_cluster_head.setup(model.cluster_head)\n\tif args.grad_clip > 0:\n\t\toptimizer_cluster_head.add_hook(GradientClipping(args.grad_clip))\n\n\tusing_gpu = False\n\tif args.gpu_device >= 0:\n\t\tcuda.get_device(args.gpu_device).use()\n\t\tmodel.to_gpu()\n\t\tusing_gpu = True\n\txp = model.xp\n\n\t# 0 -> true sample\n\t# 1 -> generated sample\n\tclass_true = np.zeros(args.batchsize, dtype=np.int32)\n\tclass_fake = np.ones(args.batchsize, dtype=np.int32)\n\tif using_gpu:\n\t\tclass_true = cuda.to_gpu(class_true)\n\t\tclass_fake = cuda.to_gpu(class_fake)\n\n\ttraining_start_time = time.time()\n\tfor epoch in range(args.total_epochs):\n\n\t\tsum_loss_generator 
\t\t= 0\n\t\tsum_loss_discriminator \t= 0\n\t\tsum_loss_autoencoder \t= 0\n\t\tsum_loss_supervised \t= 0\n\t\tsum_loss_cluster_head \t= 0\n\t\tsum_discriminator_z_confidence_true = 0\n\t\tsum_discriminator_z_confidence_fake = 0\n\t\tsum_discriminator_y_confidence_true = 0\n\t\tsum_discriminator_y_confidence_fake = 0\n\n\t\tepoch_start_time = time.time()\n\t\tdataset.shuffle()\n\n\t\t# training\n\t\tfor itr in range(total_iterations_train):\n\t\t\t# update model parameters\n\t\t\twith chainer.using_config(\"train\", True):\n\t\t\t\t# sample minibatch\n\t\t\t\tx_u = dataset.sample_unlabeled_minibatch(args.batchsize, gpu=using_gpu)\n\t\t\t\tx_l, y_l, _ = dataset.sample_labeled_minibatch(args.batchsize, gpu=using_gpu)\n\t\t\t\t\n\t\t\t\t### reconstruction phase ###\n\t\t\t\tif True:\n\t\t\t\t\ty_onehot_u, z_u = model.encode_x_yz(x_u, apply_softmax_y=True)\n\t\t\t\t\trepr_u = model.encode_yz_representation(y_onehot_u, z_u)\n\t\t\t\t\tx_reconstruction_u = model.decode_representation_x(repr_u)\n\t\t\t\t\tloss_reconstruction_u = F.mean_squared_error(x_u, x_reconstruction_u)\n\n\t\t\t\t\ty_onehot_l, z_l = model.encode_x_yz(x_l, apply_softmax_y=True)\n\t\t\t\t\trepr_l = model.encode_yz_representation(y_onehot_l, z_l)\n\t\t\t\t\tx_reconstruction_l = model.decode_representation_x(repr_l)\n\t\t\t\t\tloss_reconstruction_l = F.mean_squared_error(x_l, x_reconstruction_l)\n\n\t\t\t\t\tloss_reconstruction = loss_reconstruction_u + loss_reconstruction_l\n\n\t\t\t\t\tmodel.cleargrads()\n\t\t\t\t\tloss_reconstruction.backward()\n\t\t\t\t\toptimizer_encoder.update()\n\t\t\t\t\toptimizer_cluster_head.update()\n\t\t\t\t\toptimizer_decoder.update()\n\n\t\t\t\t### adversarial phase ###\n\t\t\t\tif True:\n\t\t\t\t\ty_onehot_fake_u, z_fake_u = model.encode_x_yz(x_u, apply_softmax_y=True)\n\n\t\t\t\t\tz_true = sampler.gaussian(args.batchsize, model.ndim_z, mean=0, var=1)\n\t\t\t\t\ty_onehot_true = sampler.onehot_categorical(args.batchsize, model.ndim_y)\n\t\t\t\t\tif using_gpu:\n\t\t\t\t\t\tz_true = cuda.to_gpu(z_true)\n\t\t\t\t\t\ty_onehot_true = cuda.to_gpu(y_onehot_true)\n\n\t\t\t\t\tdz_true = model.discriminate_z(z_true, apply_softmax=False)\n\t\t\t\t\tdz_fake = model.discriminate_z(z_fake_u, apply_softmax=False)\n\t\t\t\t\tdy_true = model.discriminate_y(y_onehot_true, apply_softmax=False)\n\t\t\t\t\tdy_fake = model.discriminate_y(y_onehot_fake_u, apply_softmax=False)\n\n\t\t\t\t\tdiscriminator_z_confidence_true = float(xp.mean(F.softmax(dz_true).data[:, 0]))\n\t\t\t\t\tdiscriminator_z_confidence_fake = float(xp.mean(F.softmax(dz_fake).data[:, 1]))\n\t\t\t\t\tdiscriminator_y_confidence_true = float(xp.mean(F.softmax(dy_true).data[:, 0]))\n\t\t\t\t\tdiscriminator_y_confidence_fake = float(xp.mean(F.softmax(dy_fake).data[:, 1]))\n\n\t\t\t\t\tloss_discriminator_z = F.softmax_cross_entropy(dz_true, class_true) + F.softmax_cross_entropy(dz_fake, class_fake)\n\t\t\t\t\tloss_discriminator_y = F.softmax_cross_entropy(dy_true, class_true) + F.softmax_cross_entropy(dy_fake, class_fake)\n\t\t\t\t\tloss_discriminator = loss_discriminator_z + loss_discriminator_y\n\n\t\t\t\t\tmodel.cleargrads()\n\t\t\t\t\tloss_discriminator.backward()\n\t\t\t\t\toptimizer_discriminator_z.update()\n\t\t\t\t\toptimizer_discriminator_y.update()\n\n\t\t\t\t### generator phase ###\n\t\t\t\tif True:\n\t\t\t\t\ty_onehot_fake_u, z_fake_u = model.encode_x_yz(x_u, apply_softmax_y=True)\n\n\t\t\t\t\tdz_fake = model.discriminate_z(z_fake_u, apply_softmax=False)\n\t\t\t\t\tdy_fake = model.discriminate_y(y_onehot_fake_u, 
apply_softmax=False)\n\n\t\t\t\t\tloss_generator = F.softmax_cross_entropy(dz_fake, class_true) + F.softmax_cross_entropy(dy_fake, class_true)\n\n\t\t\t\t\tmodel.cleargrads()\n\t\t\t\t\tloss_generator.backward()\n\t\t\t\t\toptimizer_encoder.update()\n\n\t\t\t\t### supervised phase ###\n\t\t\t\tif True:\n\t\t\t\t\tlogit_l, _ = model.encode_x_yz(x_l, apply_softmax_y=False)\n\t\t\t\t\tloss_supervised = F.softmax_cross_entropy(logit_l, y_l)\n\n\t\t\t\t\tmodel.cleargrads()\n\t\t\t\t\tloss_supervised.backward()\n\t\t\t\t\toptimizer_encoder.update()\n\n\t\t\t\t### additional cost ###\n\t\t\t\tif True:\n\t\t\t\t\tdistance = model.compute_distance_of_cluster_heads()\n\t\t\t\t\tloss_cluster_head = -F.sum(distance)\n\n\t\t\t\t\tmodel.cleargrads()\n\t\t\t\t\tloss_cluster_head.backward()\n\t\t\t\t\toptimizer_cluster_head.update()\n\n\t\t\t\tsum_loss_discriminator \t+= float(loss_discriminator.data)\n\t\t\t\tsum_loss_supervised \t+= float(loss_supervised.data)\n\t\t\t\tsum_loss_generator \t\t+= float(loss_generator.data)\n\t\t\t\tsum_loss_autoencoder \t+= float(loss_reconstruction.data)\n\t\t\t\tsum_loss_cluster_head\t+= float(model.nCr(model.ndim_y, 2) * model.cluster_head_distance_threshold + loss_cluster_head.data)\n\t\t\t\tsum_discriminator_z_confidence_true += discriminator_z_confidence_true\n\t\t\t\tsum_discriminator_z_confidence_fake += discriminator_z_confidence_fake\n\t\t\t\tsum_discriminator_y_confidence_true += discriminator_y_confidence_true\n\t\t\t\tsum_discriminator_y_confidence_fake += discriminator_y_confidence_fake\n\n\t\t\tprintr("Training ... {:3.0f}% ({}/{})".format((itr + 1) / total_iterations_train * 100, itr + 1, total_iterations_train))\n\n\t\tmodel.save(args.model)\n\n\t\tlabeled_iter_train = dataset.get_iterator(args.batchsize * 20, train=True, labeled=True, gpu=using_gpu)\n\t\tunlabeled_iter_train = dataset.get_iterator(args.batchsize * 20, train=True, unlabeled=True, gpu=using_gpu)\n\t\taverage_accuracy_l = 0\n\t\taverage_accuracy_u = 0\n\t\tfor x_l, true_label in labeled_iter_train:\n\t\t\twith chainer.no_backprop_mode(), chainer.using_config("train", False):\n\t\t\t\ty_onehot_l, _ = model.encode_x_yz(x_l, apply_softmax_y=True)\n\t\t\t\taccuracy = F.accuracy(y_onehot_l, true_label)\n\t\t\t\taverage_accuracy_l += float(accuracy.data)\n\n\t\tfor x_u, true_label in unlabeled_iter_train:\n\t\t\twith chainer.no_backprop_mode(), chainer.using_config("train", False):\n\t\t\t\ty_onehot_u, _ = model.encode_x_yz(x_u, apply_softmax_y=True)\n\t\t\t\taccuracy = F.accuracy(y_onehot_u, true_label)\n\t\t\t\taverage_accuracy_u += float(accuracy.data)\n\n\t\taverage_accuracy_l /= labeled_iter_train.get_total_iterations()\n\t\taverage_accuracy_u /= unlabeled_iter_train.get_total_iterations()\n\t\t\t\n\t\tclear_console()\n\t\tprint("Epoch {} done in {} sec - loss: g={:.5g}, d={:.5g}, a={:.5g}, s={:.5g}, c={:.5g} - disc_z: true={:.1f}%, fake={:.1f}% - disc_y: true={:.1f}%, fake={:.1f}% - acc: l={:.2f}%, u={:.2f}% - total {} min".format(\n\t\t\tepoch + 1, int(time.time() - epoch_start_time), \n\t\t\tsum_loss_generator / total_iterations_train, \n\t\t\tsum_loss_discriminator / total_iterations_train, \n\t\t\tsum_loss_autoencoder / total_iterations_train, \n\t\t\tsum_loss_supervised / total_iterations_train, \n\t\t\tsum_loss_cluster_head / total_iterations_train, \n\t\t\tsum_discriminator_z_confidence_true / total_iterations_train * 100, \n\t\t\tsum_discriminator_z_confidence_fake / total_iterations_train * 100, \n\t\t\tsum_discriminator_y_confidence_true / total_iterations_train * 
100, \n\t\t\tsum_discriminator_y_confidence_fake / total_iterations_train * 100, \n\t\t\taverage_accuracy_l * 100,\n\t\t\taverage_accuracy_u * 100,\n\t\t\tint((time.time() - training_start_time) // 60)))\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"AAE/Chainer implementation/chainer_aae_2/run/semi-supervised/dim_reduction/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"275282584","text":"S = input()\nT = input()\n\nans = []\nfor l in range(len(S) - len(T) + 1):\n U = S[l: l + len(T)]\n for u, t in zip(U, T):\n if u == '?':\n continue\n if u != t:\n break\n else:\n ans.append((S[:l] + T + S[l + len(T):]).replace('?', 'a'))\n\nans.sort()\nprint(ans[0] if len(ans) > 0 else 'UNRESTORABLE')\n","sub_path":"AtCoder/abc/076c_4.py","file_name":"076c_4.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"157573543","text":"import os\nimport csv\n\n\n# 当前路径和项目root路径, 可以根据需求改变../..\nthis_file_path = os.path.split(os.path.realpath(__file__))[0]\nroot_path = os.path.abspath(os.path.join(this_file_path, \"../..\"))\n\n# 创建rules存储文件\nrules_dir = 'data/2_concept_rules'\nrules_dir = os.path.join(root_path, rules_dir)\n\nrules_file_name = 'rules'\nrules_confirmed_file_name = 'rules_confirmed'\nrules_todo_file_name = 'rules_todo'\nrules_removed_file_name = 'rules_removed'\n\nrules_file_path = os.path.join(rules_dir, rules_file_name)\nrules_confirmed_file_path = os.path.join(rules_dir, rules_confirmed_file_name)\nrules_todo_file_path = os.path.join(rules_dir, rules_todo_file_name)\nrules_removed_file_path = os.path.join(rules_dir, rules_removed_file_name)\n\nrules_file = open(rules_file_path)\nrules_removed_file = open(rules_removed_file_path, 'w')\nrules_confirmed_file = open(rules_confirmed_file_path, 'w')\nrules_todo_file = open(rules_todo_file_path, 'w')\n\nall_lines = csv.reader(rules_file)\nrules_removed_file_writer = csv.writer(rules_removed_file)\nrules_confirmed_file_writer = csv.writer(rules_confirmed_file)\nrules_todo_file_writer = csv.writer(rules_todo_file)\n\nremove_list = ['人口', '面积']\nconfirm_list = []\n\ncount = 0\nfor line in all_lines:\n # print(line)\n if line[2] in remove_list:\n rules_removed_file_writer.writerow(line)\n count += 1\n\nrules_removed_file.close()\nrules_todo_file.close()\nrules_confirmed_file.close()\nprint(count)\n","sub_path":"modules/learning/concept_form_rules/2_rule_clean.py","file_name":"2_rule_clean.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"43435770","text":"from CybORG import CybORG\nimport inspect\n\n\n# Set up CybORG\nprint(\"Setup\")\npath = str(inspect.getfile(CybORG))\npath = path[:-10] + '/Shared/Scenarios/Scenario1KillchainBlue.yaml' # Change this to pick your agents\ncyborg = CybORG(path, 'sim')\n\nfor i in range(1):\n print(f\"Game: {i}\")\n cyborg.start(50)\n cyborg.reset()\n\n","sub_path":"CybORG/CybORG/Agents/_Demo_Agents.py","file_name":"_Demo_Agents.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"136963214","text":"#Importamos las librerias necesarias para el ejemplo Gym, Numpy y keras que incluye tensorflow\nimport gym #la libreria de los propios juegos que usaremos para entrenar\nimport random #Numeros 
Aleatorios\nimport numpy as np #Matematico\nfrom keras.models import Sequential #Modelo de Keras\nfrom keras.layers import Dense #Capas de Keras\nfrom keras.optimizers import Adam #Optimizacion de Keras\n\nenv = gym.make('CartPole-v1') #creamos nuestro entorno de trabajo el juego que simularemos aunque no sea de atari cunple con el objetivo\nenv.reset()\ngoal_steps = 500 #definimos el numero de pasos para el entrenamiento, los episodios que tendra para entrenar mientras mas episodios mejor jugara la AI \nscore_requirement = 60 #puntuacion requerida para que pase el siguiente episodio \ninitial_games = 10000 #Entrenamiento inicial\n\n#Función que ejecuta un bucle para hacer varias acciones para jugar el \n#juego.Por eso, intentar jugaremos hasta 500 pasos como máximo como anteriormente especificamos y donde podriamos cambiar la cifra\ndef play_a_random_game_first():\n try:\n for step_index in range(goal_steps):\n #env.render() #Para representar el juego\n action = env.action_space.sample() #Elegimos acción al azar\n #Acción aleatoria a través de la función que elige los \n #los resultado del siguiente paso, según la acción pasada como\n #parametro\n observation, reward, done, info = env.step(action)\n print("Paso {}:".format(step_index)) #el paso del que va del paso 1 al paso 500\n print("Acción: {}".format(action)) #la accion que realizo en dicho paso/episodio\n print("Observacion: {}".format(observation)) #las observaciones que tuvo\n print("Recompensa: {}".format(reward))#recompensa dada o negada\n print("Done: {}".format(done))#verdadero o falso si termino de entrenar los 500 pasos\n print("Info: {}".format(info))#la nformacion que recolecto\n if done:#Si juego completado\n break\n finally:\n env.reset()\n\nplay_a_random_game_first()\n#Preparando datos de Entrenamiento\ndef model_data_preparation():\n #inicializamos los arrays con los datos de entrenamiento y las puntuaciones que llevara la AI\n training_data = [] \n accepted_scores = [] \n #Jugamos 10000 veces para obtener unos datos representativos suficientes para que jugue decentemente\n for game_index in range(initial_games):\n score = 0 #inicializamos variables\n game_memory = []\n previous_observation = []\n #inidicamos que se ejeccute 500 veces\n for step_index in range(goal_steps):\n action = random.randrange(0, 2)#Acción aleatoria.Iz=0 y De=1\n observation, reward, done, info = env.step(action)\n #almacenamos puntuacion\n if len(previous_observation) > 0:\n game_memory.append([previous_observation, action])\n \n previous_observation = observation\n score += reward\n if done:\n break\n \n if score >= score_requirement:\n accepted_scores.append(score)\n for data in game_memory:\n if data[1] == 1:\n output = [0, 1]\n elif data[1] == 0:\n output = [1, 0]\n training_data.append([data[0], output])\n \n #resteamos entorno y lo mostramos por pantalla\n env.reset()\n\n print(accepted_scores)\n \n return training_data\n\ntraining_data = model_data_preparation()\n\n#Con esta función contruimos nuestros mmodelo.Nuestra red neuronal \ndef build_model(input_size, output_size):\n model = Sequential()\n model.add(Dense(128, input_dim=input_size, activation='relu'))\n model.add(Dense(52, activation='relu'))\n model.add(Dense(output_size, activation='linear'))\n model.compile(loss='mse', optimizer=Adam())\n\n return model\n \n#creamos la función que entrenará nuestro modelo\ndef train_model(training_data):\n X = np.array([i[0] for i in training_data]).reshape(-1, len(training_data[0][0]))\n y = np.array([i[1] for i in 
training_data]).reshape(-1, len(training_data[0][1]))\n model = build_model(input_size=len(X[0]), output_size=len(y[0]))\n \n model.fit(X, y, epochs=10)\n return model\n \ntrained_model = train_model(training_data)\n\n\nscores = []#inicializamos puntuaciones y array de elecciones\nchoices = []\nfor each_game in range(1):#jugamos 100 partidas\n score = 0\n prev_obs = []\n for step_index in range(goal_steps):#Jugamos 500 pasos por partida\n \n env.render()\n if len(prev_obs)==0:\n action = random.randrange(0,2)#en el primer paso elegimos movimiento al azar\n else:\n #A partir del segundo paso conocemos el estado actual del juego.\n #Entonces, tomaremos esa observación y se la daremos a nuestro \n #modelo para predecir qué acción debemos tomar. \n action = np.argmax(trained_model.predict(prev_obs.reshape(-1, len(prev_obs)))[0])\n #guardamos elección\n choices.append(action)\n new_observation, reward, done, info = env.step(action)\n prev_obs = new_observation\n score+=reward\n if done:\n break\n\n env.reset()\n scores.append(score)\n\nprint(scores)\nprint('Average Score:',sum(scores)/len(scores))\nprint('choice 1:{} choice 0:{}'.format(choices.count(1)/len(choices),choices.count(0)/len(choices)))","sub_path":"gym-demo.py","file_name":"gym-demo.py","file_ext":"py","file_size_in_byte":5513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"24750458","text":"from matplotlib import pyplot as plt\nfrom matplotlib.widgets import RectangleSelector\nimport numpy as np\n\nclass ROIfigure():\n\n def __init__(self,img,coords=None):\n self.img=img\n self.coords=coords\n self.fig = plt.figure()\n self.ax = self.fig.add_subplot(111)\n self.ax.imshow(img)\n self.rec = self.CreateRectangle()\n self.rec.to_draw.set_visible(True)\n plt.show()\n\n def CreateRectangle(self):\n rec = RectangleSelector(self.ax, self.rectangle_callback, drawtype= 'box'\n , useblit= False, button= [1,3], minspanx=5, minspany=5,\n spancoords='data', interactive = True)\n return rec\n\n def rectangle_callback(self,eclick,erelease):\n self.x1, self.y1 = eclick.xdata, eclick.ydata\n self.x2, self.y2 = erelease.xdata, erelease.ydata\n self.width = int(self.x2 - self.x1)\n self.height = int(self.y2 - self.y1)\n self.xoff = int(self.x1)\n self.yoff = int(self.y1)\n self.rec.extents = (self.xoff, self.xoff + self.width, self.yoff, self.yoff + self.height)\n self.coords=(self.xoff,self.yoff,self.width,self.height)\n\n\ndef crop_box(frame, crop_coords=None):\n if crop_coords is None:\n crop=ROIfigure(frame, coords=crop_coords)\n crop_coords = crop.coords\n\n cropped_frame = frame[crop_coords[1]:crop_coords[1]+crop_coords[3], crop_coords[0]:crop_coords[0]+crop_coords[2],:]\n mask_img = np.ones(np.shape(frame))\n return cropped_frame, mask_img, crop_coords\n\n\n\n","sub_path":"video/crop_box.py","file_name":"crop_box.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"160915577","text":"#! /usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright (C) 2011 ~ 2012 Deepin, Inc.\n# 2011 ~ 2012 Wang Yong\n# \n# Author: Wang Yong \n# Maintainer: Wang Yong \n# \n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\nfrom PyQt5.QtCore import pyqtSlot, pyqtProperty, QObject\nfrom constant import CONFIG_DIR\nimport os\nimport sqlite3\nfrom deepin_utils.file import touch_file\n\nclass Database(QObject):\n def __init__(self):\n QObject.__init__(self)\n self.video_db_path = os.path.join(CONFIG_DIR, \"video_db\")\n touch_file(self.video_db_path)\n self.video_db_connect = sqlite3.connect(self.video_db_path)\n self.video_db_cursor = self.video_db_connect.cursor()\n \n self.video_db_cursor.execute(\n \"CREATE TABLE IF NOT EXISTS settings(key PRIMARY KEY NOT NULL, value)\"\n )\n self.video_db_cursor.execute(\n \"CREATE TABLE IF NOT EXISTS videos(video_path PRIMARY KEY NOT NULL, video_position)\"\n )\n \n @pyqtSlot(str, int, result=bool) \n def record_video_position(self, video_path, video_position):\n self.video_db_cursor.execute(\n \"INSERT OR REPLACE INTO videos VALUES(?, ?)\", \n (str(video_path), str(video_position))\n )\n self.video_db_connect.commit()\n \n return True\n \n @pyqtSlot(str, result=int)\n def fetch_video_position(self, video_path):\n self.video_db_cursor.execute(\n \"SELECT video_position FROM videos WHERE video_path=?\" , [video_path]\n )\n results = self.video_db_cursor.fetchall()\n if len(results) > 0:\n return int(results[0][0])\n else:\n return 0\n \n def getValue(self, key):\n self.video_db_cursor.execute(\n \"SELECT value FROM settings WHERE key=?\", (key,)\n )\n result = self.video_db_cursor.fetchone()\n \n return result and result[0]\n \n def setValue(self, key, value):\n self.video_db_cursor.execute(\n \"INSERT OR REPLACE INTO settings VALUES(?, ?)\", (key, value)\n )\n self.video_db_connect.commit()\n \n @pyqtProperty(str)\n def playlist_local(self):\n return self.getValue(\"playlist_local\") or \"\"\n \n @playlist_local.setter\n def playlist_local(self, value):\n self.setValue(\"playlist_local\", value)\n","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":3001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"229365619","text":"import bpy\nimport os\nfrom ..main.common import imageFromPath\n\nclass MeshDecalEmissive:\n def __init__(self, BasePath,image_format):\n self.BasePath = BasePath\n self.image_format = image_format\n def create(self,DecalEmissive,Mat):\n\n CurMat = Mat.node_tree\n CurMat.nodes['Principled BSDF'].inputs['Specular'].default_value = 0\n\n if DecalEmissive.get(\"DiffuseColor2\"):\n dCol2 = CurMat.nodes.new(\"ShaderNodeRGB\")\n dCol2.location = (-450,200)\n dCol2.hide = True\n dCol2.label = \"DiffuseColor2\"\n dCol2.outputs[0].default_value = 
(float(DecalEmissive[\"DiffuseColor2\"][\"Red\"])/255,float(DecalEmissive[\"DiffuseColor2\"][\"Green\"])/255,float(DecalEmissive[\"DiffuseColor2\"][\"Blue\"])/255,float(DecalEmissive[\"DiffuseColor2\"][\"Alpha\"])/255)\n \n CurMat.links.new(dCol2.outputs[0],CurMat.nodes['Principled BSDF'].inputs['Base Color'])\n\n if DecalEmissive.get(\"DiffuseAlpha\"):\n aThreshold = CurMat.nodes.new(\"ShaderNodeValue\")\n aThreshold.location = (-300,0)\n aThreshold.outputs[0].default_value = float(DecalEmissive[\"DiffuseAlpha\"])\n aThreshold.hide = True\n aThreshold.label = \"DiffuseAlpha\"\n\n if DecalEmissive.get(\"DiffuseColor\"):\n emColor = CurMat.nodes.new(\"ShaderNodeRGB\")\n emColor.location = (-800,-100)\n emColor.hide = True\n emColor.label = \"DiffuseColor\"\n emColor.outputs[0].default_value = (float(DecalEmissive[\"DiffuseColor\"][\"Red\"])/255,float(DecalEmissive[\"DiffuseColor\"][\"Green\"])/255,float(DecalEmissive[\"DiffuseColor\"][\"Blue\"])/255,float(DecalEmissive[\"DiffuseColor\"][\"Alpha\"])/255)\n\n if DecalEmissive.get(\"DiffuseTexture\"):\n emTeximg = imageFromPath(self.BasePath + DecalEmissive[\"DiffuseTexture\"],self.image_format)\n \n emTexNode = CurMat.nodes.new(\"ShaderNodeTexImage\")\n emTexNode.location = (-800,200)\n emTexNode.image = emTeximg\n emTexNode.label = \"DiffuseTexture\"\n\n mulNode = CurMat.nodes.new(\"ShaderNodeMixRGB\")\n mulNode.inputs[0].default_value = 1\n mulNode.blend_type = 'MULTIPLY'\n mulNode.location = (-550,50)\n\n CurMat.links.new(emColor.outputs[0],mulNode.inputs[1])\n CurMat.links.new(emTexNode.outputs[0],mulNode.inputs[2])\n CurMat.links.new(mulNode.outputs[0],CurMat.nodes['Principled BSDF'].inputs['Emission'])\n\n CurMat.nodes['Principled BSDF'].inputs['Emission Strength'].default_value = DecalEmissive[\"EmissiveEV\"]","sub_path":"i_scene_cp77_gltf/material_types/meshdecalemissive.py","file_name":"meshdecalemissive.py","file_ext":"py","file_size_in_byte":2592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"293216938","text":"import src.class_vessel_coordinator as vc\nimport src.class_draggable_rectangle as drect\nimport src.common as common\nimport shapely.geometry as sgeom\nimport pickle\nimport matplotlib.pyplot as plt, cartopy\nimport src.class_time_keeper as tk\nimport sys\ncartopy.config['pre_existing_data_dir ']='\\\\cache'\n\n# test plot\n# fig2,ax2= plt.subplots(subplot_kw={'projection': cartopy.crs.PlateCarree()}, figsize=(30, 30))\n\n# region Initial parameters\nmy_time_keeper = tk.timekeeper()\ngrid_block_size_degrees = 5\nscriptfilepath = common.get_calling_script_directory_path(sys)\nlogFilePath = scriptfilepath + r'\\geometric_operation_log.txt'\nvc_pickle_file = scriptfilepath + r'\\\\cache\\\\vc_'+ str(grid_block_size_degrees) + '_degree.pickle'\n\n# Configure cache parameters\nrecreate_cache = False\nsave_vc = False\nload_vc_from_file = True\n\n\n#endregion\n\n#region Events\ndef onclick(event):\n print('%s click: button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %\n ('double' if event.dblclick else 'single', event.button,\n event.x, event.y, event.xdata, event.ydata))\n\n#endregion\nif __name__ == '__main__':\n\n # region Initialize all class objects\n my_time_keeper.start_timing_event(event_name='main')\n\n if recreate_cache == True:\n # Initialize vessel coordinator - a complex class that initializes the earth and map artist\n my_vc = vc.Vessel_Coordinator(grid_block_size_degrees=grid_block_size_degrees,recreate_cache_boolean=True)\n # else:\n # my_vc = 
vc.Vessel_Coordinator(grid_block_size_degrees=grid_block_size_degrees,recreate_cache_boolean=False)\n\n\n if save_vc:\n # region Save to pickle operation\n my_time_keeper.start_timing_event(event_name='Save vc to pickle file')\n with open(vc_pickle_file, 'wb') as handle:\n pickle.dump(my_vc, handle, protocol=pickle.HIGHEST_PROTOCOL)\n my_time_keeper.stop_timing_event(event_name='Save vc to pickle file')\n # endregion\n\n if load_vc_from_file:\n my_time_keeper.start_timing_event(event_name='Loading vc from pickle file')\n with open(vc_pickle_file, 'rb') as handle:\n my_vc = pickle.load(handle)\n my_time_keeper.stop_timing_event(event_name='Loading vc from pickle file')\n #endregion\n\n #region Connect all event functions to figure canvas\n gl = my_vc.my_map_artist.ax.gridlines(crs=cartopy.crs.PlateCarree(), linewidth=0.5, color='gray', alpha=0.8,\n linestyle='-', draw_labels=True)\n drect.fig = my_vc.my_map_artist.fig\n cid = my_vc.my_map_artist.fig.canvas.mpl_connect('button_press_event', onclick)\n\n my_time_keeper.stop_timing_event(event_name='main')\n\n vessel_optimized_track, exit_code = my_vc.get_optimal_route((-15,-36),(40,6))\n print(exit_code)\n my_vc.my_map_artist.plot_vessel_track(vessel_optimized_track,'C Valentine')\n # vessel_optimized_track = my_vc.get_optimal_route((20, 140), (-20, -90))\n # my_vc.my_map_artist.plot_vessel_track(vessel_optimized_track, 'Minerva Pisces')\n # vessel_optimized_track = my_vc.get_optimal_route((0, 85), (44, 156))\n # my_vc.my_map_artist.plot_vessel_track(vessel_optimized_track, 'Long Island')\n # my_vc.my_map_artist.save_plot(scriptfilepath + r'/vessel-route.png')\n my_vc.my_map_artist.display()\n\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"261361378","text":"#Author: Noah Leuthaeuser\n#Referenced this Bokeh tutorial heavily: https://towardsdatascience.com/data-visualization-with-bokeh-in-python-part-one-getting-started-a11655a467d4\n\nfrom bokeh.io import output_file, show\nfrom bokeh.layouts import widgetbox, layout, row, column\nfrom bokeh.models.widgets import Select, DataTable, TableColumn, Div\nfrom bokeh.plotting import figure\nfrom bokeh.io import show, output_notebook\nfrom bokeh.models import ColumnDataSource, NumeralTickFormatter, HoverTool\nfrom bokeh.plotting import figure, curdoc\nfrom bokeh.palettes import Spectral11\nfrom bokeh.transform import factor_cmap\nfrom bokeh.server.server import Server\nfrom bokeh.core.properties import value\nfrom math import pi\nimport pandas as pd\nimport numpy as np\n\n#slice datasets for names and generate list of pitches for that pitcher\ndef make_dataset(name):\n dfPlot = dfOut.loc[dfOut[\"mlb_name\"] == name].drop_duplicates()\n dfTable = dfStats.loc[dfStats[\"mlb_name\"]== name]\n name = np.array_str(dfPlot[\"mlb_name\"].unique())[2:-2]\n pitches = dfPlot[\"pitch_type\"].unique()\n source = ColumnDataSource(dfPlot)\n stats_source = ColumnDataSource(dfTable)\n return source,pitches, stats_source \n\n#create plot of pitch quality for each pitch type\ndef make_plot_qual(data, name, pitches,all_pitches): \n \n p = figure(x_range=pitches, plot_height=250,plot_width = 400, toolbar_location=None, title= name, \n y_axis_label = \"Pitch Quality\", x_axis_label = \"Pitch Type\")\n\n p.vbar(x='pitch_type', top='quality', width=0.9, source=data,line_color='white',\n fill_color=factor_cmap('pitch_type', palette=Spectral11, factors=all_pitches))\n\n 
pitch_names = {\"FA\": \"Fastball\",\"FF\":\"Four-seam Fastball\", \"FT\": \"Two-seam Fastball\",\n \"FC\": \"Cutter\", \"FS\":\"Sinking Fastball\",\"FO\":\"Pitch Out\",\"SI\":\"Sinker\",\"SL\":\"Slider\",\n \"CU\":\"Curveball\",\"KC\":\"Knuckle-curve\",\"EP\":\"Eephus\",\"CH\":\"Changeup\",\"SC\":\"Screwball\",\n \"KN\":\"Knuckleball\"}\n\n hover = HoverTool(tooltips=[(\"Num pitches\", \"@pitch_count\"),(\"Quality\",\"@quality{0.000}\"),\n (\"Avg quality\",\"@avg_qual{0.000}\"),(\"Pitch name\",\"@pitch_name\")])\n p.add_tools(hover)\n return p\n\n#create stacked plot of pitch result breakdown \ndef make_plot_breakdown(data,pitches):\n pcts = [\"ball_pct\",\"foul_pct\",\"strike_pct\",\"in_play_pct\"]\n headers = [\"Ball %\",\"Foul %\",\"Strike %\",\"In Play %\"]\n colors = [\"#c9d9d3\", \"#718dbf\", \"#e84d60\",\"#99d594\"]\n \n hover = HoverTool(tooltips=[(\"Num pitches\", \"@pitch_count\")])\n\n p = figure(x_range=pitches,y_range = (-.2,1), plot_height=250,plot_width = 400, title=\"Breakdown\",\n toolbar_location=None, tools=\"\")\n p.vbar_stack(pcts, x='pitch_type', width=0.9, source=data,color = colors, \n legend = [value(x) for x in headers])\n\n p.legend.location = 'bottom_right'\n p.legend.orientation = 'horizontal'\n p.legend.padding= 2\n p.yaxis[0].formatter = NumeralTickFormatter(format=\"0.0%\") \n p.add_tools(hover)\n return p\n\n#create data table to display pitcher stats from 2017 \ndef make_table(data, name):\n columns = [\n TableColumn(field=\"teamID\", title=\"Team\"),\n TableColumn(field=\"ERA\", title=\"ERA\"),\n TableColumn(field=\"IP\", title=\"IP\"),\n TableColumn(field = \"SO\", title = \"SO\"),\n TableColumn(field = \"HR\", title = \"HR\"),\n TableColumn(field = \"WHIP\", title = \"WHIP\")\n ]\n #Deserialization error that prints when changing the pitcher is a known issue in Bokeh\n #doesn't effect performance of the display\n #https://github.com/bokeh/bokeh/issues/7417\n data_table = DataTable(source=data, columns=columns, width=300, height=200, index_position = None)\n return data_table\n\n#update callback for first pitcher section \ndef update(attr,old,new): \n name = select.value\n new_src,pitches, stats_src = make_dataset(name)\n p1_qual.x_range.factors = list(pitches)\n p1_qual.title.text = name\n p1_break.x_range.factors = list(pitches)\n data.data.update(new_src.data)\n stats.data.update(stats_src.data)\n\n#update callback for second pitcher section \ndef update2(attr,old,new): \n name = select2.value\n new_src,pitches, stats_src = make_dataset(name)\n p2_qual.x_range.factors = list(pitches)\n p2_break.x_range.factors = list(pitches)\n p2_qual.title.text = name\n data2.data.update(new_src.data)\n stats2.data.update(stats_src.data)\n\n#read datasets and create list of all pitch types\ndfOut = pd.read_csv(\"data/pitch_quality.csv\")\ndfStats = pd.read_csv(\"data/pitcher_stats.csv\")\nall_pitches = dfOut[\"pitch_type\"].unique()\n\n#initialize names for first view\nname = \"Chris Sale\"\nname2 = \"Zack Greinke\"\n\n#generate initial datasets\ndata, pitches, stats = make_dataset(name)\ndata2,pitches2, stats2 = make_dataset(name2)\n\n#make plots and tables \np1_qual = make_plot_qual(data,name,pitches,all_pitches)\np2_qual = make_plot_qual(data2,name2,pitches2,all_pitches)\np1_break = make_plot_breakdown(data,pitches)\np2_break = make_plot_breakdown(data2,pitches2)\ndata_table1 = make_table(stats,name)\ndata_table2 = make_table(stats2,name2)\n\n#create list of pitchers for dropdown menu \npitchers = 
dfOut[\"mlb_name\"].drop_duplicates().tolist()\npitchers.sort(key=str.lower)\n\n#dropdown menu initializations\nselect = Select(title=\"Pitcher:\", value=\"\", options= pitchers)\nselect.on_change('value',update)\n\nselect2 = Select(title=\"Pitcher:\", value=\"\", options= pitchers)\nselect2.on_change('value',update2)\n\n#title divs for tables\ndiv = Div(text = \"2017 Stats\",height = 10, width = 300)\ndiv2 = Div(text = \"2017 Stats\",height = 10, width = 300)\n\n#format layout and output\nlayout = layout([[column(widgetbox(select), row(p1_qual,p1_break,column(div,data_table1)))], \n [column(widgetbox(select2), row(p2_qual,p2_break,column(div2,data_table2)))]])\n\ncurdoc().add_root(layout)\n\n\n","sub_path":"pitch_quality.py","file_name":"pitch_quality.py","file_ext":"py","file_size_in_byte":5798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"53199355","text":"#enconding=utf8\nimport tornado.ioloop\nimport tornado.httpserver\nimport json\nimport tornado.web\nimport pymongo as mo\nimport logging,signal\nimport time,os\n\nfrom tornado.options import define, options\n\ndefine(\"port\", default=8000, help=\"run on the given port\", type=int)\ndefine('service', default=\"\", help=\"Server name\", type=str)\nMAX_WAIT_SECONDS_BEFORE_SHUTDOWN = 1\nserver = None\n\nconnection=mo.MongoClient(\"112.124.11.214\",27017)\ndb=connection[\"wedding\"]\ndb.authenticate(\"wedding\",\"wedding123\")\n\ndef sig_handler(sig, frame):\n logging.warning('Caught Signal: %s', sig)\n tornado.ioloop.IOLoop.instance().add_callback(shutdown)\n\ndef shutdown():\n \"\"\"shutdown \"\"\"\n global server\n logging.info('Stopping HttpServer..')\n server.stop()\n \n logging.info('IOLoop Will be Terminate in %s Seconds...', MAX_WAIT_SECONDS_BEFORE_SHUTDOWN)\n instance = tornado.ioloop.IOLoop.instance()\n deadline = time.time() + MAX_WAIT_SECONDS_BEFORE_SHUTDOWN\n \n def terminate():\n now = time.time()\n if now < deadline and (instance._callbacks or instance._timeouts):\n instance.add_timeout(now + 1, terminate)\n else:\n instance.stop()\n logging.info('Shutdown...')\n\n terminate() \n\n\n#class IndexHandler(tornado.web.RequestHandler):\n\nclass IndexHandler(tornado.web.RequestHandler):\n def get(self):\n greeting = self.get_argument('greeting', 'Hello')\n self.write(greeting + ', tornado!')\n\ndef wedding(port, start_ioloop=True):\n \"\"\"\n Run server on the specified port. 
If start_ioloop is True (default),\n the tornado IOLoop will be started immediately.\n \"\"\"\n global server\n app = tornado.web.Application([\n (r\"/\", IndexHandler),\n ])\n\n #app.config = config\n server = tornado.httpserver.HTTPServer(app, xheaders=True)\n server.bind(port)\n \n # signal register\n signal.signal(signal.SIGINT, sig_handler)\n signal.signal(signal.SIGTERM, sig_handler)\n\n server.start()\n ioloop = tornado.ioloop.IOLoop.instance()\n if start_ioloop:\n ioloop.start()\n\n\nif __name__ == \"__main__\":\n tornado.options.parse_command_line()\n logging.info(\"Starting Tornado service on port %d\" % options.port)\n wedding(options.port)\n #tornado.ioloop.IOLoop.instance().start()","sub_path":"wedding_server.py","file_name":"wedding_server.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"588654245","text":"\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\"\"\"\n\n\nclass Solution:\n # @param {TreeNode} root the root of the binary tree\n # @return {List[str]} all root-to-leaf paths\n def binaryTreePaths(self, root):\n # Write your code here\n self.res = []\n self.helper(root, [])\n return self.res\n\n # 这道题的特殊之处在于是在node为叶子是添加结果并返回,而不是在node为空时。\n # return: None\n def helper(self, root, path):\n if not root:\n return\n\n if root.left is None and root.right is None:\n path.append(str(root.val))\n self.res.append('->'.join(path))\n path.pop()\n return\n\n path.append(str(root.val))\n self.helper(root.left, path)\n self.helper(root.right, path)\n path.pop()\n","sub_path":"lintcode/树/binary-tree-paths.py","file_name":"binary-tree-paths.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"187122429","text":"from recommender.core.association_rules.arules_utils import read_rules, create_transactions, find_matches\nfrom recommender.core.utils.export_import_tools import import_dic, download_name\n\n#create_transactions(threshold=5)\nrules = read_rules(\"data/association_rules/rules_0.00012.csv\")\nentry = \"0439136369,0439064872,0747545111,0440224764,0613496744,0312282540, 044021145X\"\n\nbooks = import_dic(\"data/association_rules/isbn_to_books\")\n\nf = lambda x: books[x]\n\nprint(\"[Association Rules] Recommendations\")\nprint(\"--- You like: \")\n\nfor x in entry.split(','):\n try:\n print(x, f(x))\n except KeyError:\n print(\"Using download \", x, download_name(x))\n\nprint(\"--- Then you may like:\")\n\nresults = find_matches(rules, entry, query_type=\"ain\")\n\nfor result in results:\n if result[5] == \"in\":\n print(\"Book %s is recommended by: %s with support %0.2f confidence %0.2f and lift %0.2f\"%(result[1],\n result[6],\n result[2],\n result[3],\n result[4]))\n else:\n print(\"Book %s is recommended by: %s with support %0.2f confidence %0.2f and lift %0.2f\" % (result[1],\n result[6],\n result[2],\n result[3],\n result[4]))\n\n","sub_path":"recommender/test/test_creation_rules.py","file_name":"test_creation_rules.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"630408873","text":"#! 
usr/bin/env python\n\n\"\"\"Only for Soft-Q Learned policies\n\nhttps://arxiv.org/pdf/1803.06773.pdf\n\n\nneed to have saved replay buffer to begin with\nthat means that in train_algo.py need to modify\nrun_experiment function and set save_full_state=True\nfor one of the parameters in the SQL algorithm\n\n\n--env Hex-v1 -n com_test --snapshot1 data/local/hex-v1/exp5/low-1-sql-Random-01/params.pkl --snapshot2 data/local/hex-v1/exp5/low-1-sql-Random-00/params.pkl\n--env Hex-v1 -n com_test --snapshot1 data/local/hex-v1/exp5-1/low-1-sql-Random-00/params.pkl --snapshot2 data/local/hex-v1/exp5-2/low-1-sql-Random-00/params.pkl\n\n--env 1 -n com_test --snapshot1 data/local/1l/test/1l-test-00/params.pkl --snapshot2 data/local/1l/test2/1l-test2-00/params.pkl\n\n\"\"\"\n\nfrom schema.algos2.sql.sql_kernel import adaptive_isotropic_gaussian_kernel\nfrom schema.algos2.sql.sql_instrument import run_sql_experiment\nfrom schema.launch_exp.instrument import VariantGenerator\nfrom schema.algos2.qv_funcs.value_functions import SumQFunction\nfrom schema.algos2.replay_buff.replay_buffer import UnionBuffer\nfrom schema.algos2.policies.policies import StochasticNNPolicy\nfrom schema.utils.utils import timestamp, PROJECT_PATH\nfrom schema.algos2.envs.base.normalized_env import normalize\nfrom schema.algos2.sampler.sampler import DummySampler\nfrom schema.algos2.sql.sql import SQLAlgorithm\nfrom schema.algos2.envs.base.gym_env import GymEnv\nimport numpy as np\nfrom schema.utils import tf_utils\nimport tensorflow as tf\nimport argparse\nimport joblib\nimport os\n\nSHARED_PARAMS = {\n 'seed': 1, # [1, 2, 3]\n 'policy_lr': 3E-4,\n 'qf_lr': 3E-4,\n 'discount': 0.99,\n 'layer_size': 128,\n 'batch_size': 128,\n 'max_pool_size': 1E6,\n 'n_train_repeat': 1,\n 'epoch_length': 2, # 1000\n 'kernel_particles': 16,\n 'kernel_update_ratio': 0.5,\n 'value_n_particles': 16,\n 'td_target_update_interval': 1000,\n 'snapshot_mode': 'last',\n 'snapshot_gap': 100,\n}\nENV_PARAMS = { # Envs for Hex see __init__ in envs dir\n 'Hex-v1': { # 3 DoF\n 'prefix': 'hex-v1',\n 'env_name': 'Hex-v1',\n 'max_path_length': 2, # 1000\n 'n_epochs': 1, # 500\n 'reward_scale': 30,\n 'legs': 1,\n },\n 'HiHex-v1': { # 3 DoF\n 'prefix': 'hex-v1',\n 'env_name': 'HiHex-v1',\n 'max_path_length': 2, # 1000\n 'n_epochs': 1, # 500\n 'reward_scale': 30,\n 'legs': 1,\n },\n 'OtHex-v1': { # 3 DoF\n 'prefix': 'hex-v1',\n 'env_name': 'OtHex-v1',\n 'max_path_length': 2, # 1000\n 'n_epochs': 1, # 500\n 'reward_scale': 30,\n 'legs': 1,\n }\n}\n\nfrom schema.algos2.envs.__init__ import init_hex\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--env', type=str, default='Hex-v1')\n parser.add_argument('--exp_name', '-n', type=str, default=timestamp())\n parser.add_argument('--mode', type=str, default='local')\n parser.add_argument('--log_dir', type=str, default=None)\n parser.add_argument('--snapshot1', type=str, default='')\n parser.add_argument('--snapshot2', type=str, default='')\n args = parser.parse_args()\n\n return args\n\n\ndef get_variants(args):\n env_params = ENV_PARAMS[args.env]\n params = SHARED_PARAMS\n params.update(env_params)\n\n vg = VariantGenerator()\n for key, val in params.items():\n if isinstance(val, list):\n vg.add(key, val)\n else:\n vg.add(key, [val])\n\n vg.add('snapshot1', (args.snapshot1, ))\n vg.add('snapshot2', (args.snapshot2, ))\n\n return vg\n\ndef load_buffer_and_qf(filename, graph):\n \"\"\"\n if graph == 'policy1':\n graph_1 = tf.Graph()\n with graph_1.as_default():\n init_graph_1 = 
tf.global_variables_initializer()\n with tf.Session(graph=graph_1) as sess_1:\n sess_1.run(init_graph_1)\n data1 = joblib.load(os.path.join(PROJECT_PATH, filename))\n if graph == 'policy2':\n graph_2 = tf.Graph()\n with graph_2.as_default():\n init_graph_2 = tf.global_variables_initializer()\n with tf.Session(graph=graph_2) as sess_2:\n sess_2.run(init_graph_2)\n data2 = joblib.load(os.path.join(PROJECT_PATH, filename))\n if graph == 'policy1':\n data = data1\n if graph == 'policy2':\n data = data2\n \"\"\"\n # with tf.Graph().as_default():\n with tf_utils.get_default_session().as_default():\n # with tf_utils.variable_scope(scope): # reuse=tf.AUTO_REUSE by default in tf_utils\n\n data = joblib.load(os.path.join(PROJECT_PATH, filename))\n\n return data['replay_buffer'], data['qf']\n\n\ndef run_experiment(variant):\n init_hex(\n lw_policy=None,\n observation_dim=168,\n leg_indices='0',\n other_policy='Random',\n control_lv='low',\n switch=False,\n output_dim=3)\n # TODO: shouldn't need to provide log_dir, bug\n env = normalize(GymEnv(variant['env_name'], log_dir=PROJECT_PATH + '/data'))\n ac_dim = np.int64(env._wrapped_env.env.input_dim)\n\n # TODO: check if change order if that changes values. It shouldn't\n buffer1, qf1 = load_buffer_and_qf(variant['snapshot1'], 'policy1')\n buffer2, qf2 = load_buffer_and_qf(variant['snapshot2'], 'policy2')\n\n sampler = DummySampler(\n batch_size=variant['batch_size'],\n max_path_length=variant['max_path_length'])\n\n buffer = UnionBuffer(buffers=(buffer1, buffer2))\n\n qf = SumQFunction(env.spec, q_functions=(qf1, qf2), ac_dim=ac_dim)\n\n M = variant['layer_size']\n scope_n = get_fn(args, variant) + args.exp_name\n\n policy = StochasticNNPolicy(env_spec=env.spec, hidden_layer_sizes=(M, M), ac_dim=ac_dim, name=scope_n + '_policy')\n\n algorithm = SQLAlgorithm(\n args=args,\n epoch_length=variant['epoch_length'],\n n_epochs=variant['n_epochs'],\n n_train_repeat=variant['n_train_repeat'],\n eval_render=False,\n eval_n_episodes=1,\n sampler=sampler,\n env=env,\n pool=buffer,\n qf=qf,\n policy=policy,\n kernel_fn=adaptive_isotropic_gaussian_kernel,\n kernel_n_particles=variant['kernel_particles'],\n kernel_update_ratio=variant['kernel_update_ratio'],\n value_n_particles=variant['value_n_particles'],\n td_target_update_interval=variant['td_target_update_interval'],\n qf_lr=variant['qf_lr'],\n policy_lr=variant['policy_lr'],\n discount=variant['discount'],\n reward_scale=variant['reward_scale'],\n save_full_state=True,\n ac_dim=ac_dim,\n save_only_policy=False,\n high_lv_control=False,\n train_policy=True,\n train_qf=False,\n use_saved_qf=True)\n\n algorithm.train()\n\n\ndef launch_experiments(variant_generator, args):\n variants = variant_generator.variants()\n print('Launching {} experiments.'.format(len(variants)))\n\n for i, variant in enumerate(variants):\n full_experiment_name = variant['prefix']\n full_experiment_name += '-' + args.exp_name + '-' + str(i).zfill(2)\n\n run_sql_experiment(\n run_experiment,\n mode=args.mode,\n variant=variant,\n exp_prefix=variant['prefix'] + '/' + args.exp_name,\n exp_name=full_experiment_name,\n n_parallel=1,\n seed=variant['seed'],\n terminate_machine=True,\n log_dir=args.log_dir,\n snapshot_mode=variant['snapshot_mode'],\n snapshot_gap=variant['snapshot_gap'],\n sync_s3_pkl=True)\n\n\ndef get_fn(args, variant, i=0):\n q = i\n i = '' if i == 0 else i\n legs = len(list(set([int(k) for k in str(args.leg_indices[0])])))\n other_policy = 'Policy' if '/' in args.other_policy else args.other_policy\n full_experiment_name = 
args.control_lv \\\n + '-' + str(legs) \\\n + '-' + args.alg \\\n + '-' + other_policy \\\n + '-' + str(i).zfill(2)\n while os.path.isdir(os.path.abspath(__file__ + '../../..')\n + '/data/local/'\n + variant['prefix']\n + '/' + args.exp_name\n + '/' + full_experiment_name):\n q += 1\n full_experiment_name = args.control_lv \\\n + '-' + str(legs) \\\n + '-' + args.alg \\\n + '-' + other_policy \\\n + '-' + str(q).zfill(2)\n return full_experiment_name\n\nif __name__ == '__main__':\n args = parse_args()\n variant_generator = get_variants(args)\n # variants = variant_generator.variants()\n # for i, variant in enumerate(variants):\n # run_experiment(variant)\n launch_experiments(variant_generator, args)","sub_path":"abstract_x/combine_policies.py","file_name":"combine_policies.py","file_ext":"py","file_size_in_byte":8803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"340898863","text":"import sys\n\ncodon = {\"TTT\":\"F\", \"TTC\":\"F\", \"TTA\":\"L\", \"TTG\":\"L\",\n \"TCT\":\"S\", \"TCC\":\"S\", \"TCA\":\"S\", \"TCG\":\"S\",\n \"TAT\":\"Y\", \"TAC\":\"Y\", \"TAA\":\"*\", \"TAG\":\"*\",\n \"TGT\":\"C\", \"TGC\":\"C\", \"TGA\":\"*\", \"TGG\":\"W\",\n \"CTT\":\"L\", \"CTC\":\"L\", \"CTA\":\"L\", \"CTG\":\"L\",\n \"CCT\":\"P\", \"CCC\":\"P\", \"CCA\":\"P\", \"CCG\":\"P\",\n \"CAT\":\"H\", \"CAC\":\"H\", \"CAA\":\"Q\", \"CAG\":\"Q\",\n \"CGT\":\"R\", \"CGC\":\"R\", \"CGA\":\"R\", \"CGG\":\"R\",\n \"ATT\":\"I\", \"ATC\":\"I\", \"ATA\":\"I\", \"ATG\":\"M\",\n \"ACT\":\"T\", \"ACC\":\"T\", \"ACA\":\"T\", \"ACG\":\"T\",\n \"AAT\":\"N\", \"AAC\":\"N\", \"AAA\":\"K\", \"AAG\":\"K\",\n \"AGT\":\"S\", \"AGC\":\"S\", \"AGA\":\"R\", \"AGG\":\"R\",\n \"GTT\":\"V\", \"GTC\":\"V\", \"GTA\":\"V\", \"GTG\":\"V\",\n \"GCT\":\"A\", \"GCC\":\"A\", \"GCA\":\"A\", \"GCG\":\"A\",\n \"GAT\":\"D\", \"GAC\":\"D\", \"GAA\":\"E\", \"GAG\":\"E\",\n \"GGT\":\"G\", \"GGC\":\"G\", \"GGA\":\"G\", \"GGG\":\"G\"}\n\ndef readseqs(f):\n \"\"\"Read sequences from a FASTA-formatted file and return as a\n dictionary.\"\"\"\n seqs = {}\n for line in f:\n if line[0] == \">\":\n current = line.split()[0].strip()\n seqs[current] = \"\"\n else:\n seqs[current] += line.strip().upper()\n return seqs\n\ndef revcomp(seq):\n \"\"\"Returns the reverse complement of sequence seq.\"\"\"\n comp = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}\n res = \"\"\n for c in seq:\n res += comp.get(c, \"N\")\n return res[::-1]\n\ndef translate(seq, shift):\n \"\"\"Returns translation of sequence seq, in frame shift shift.\"\"\"\n end = (len(seq) - shift) % 3\n if end > 0:\n seq = seq[shift:-end]\n else:\n seq = seq[shift:]\n\n res = \"\"\n for i in range(0, len(seq) - 1, 3):\n res += codon.get(seq[i:(i + 3)], \"X\")\n\n return res\n\nif __name__ == \"__main__\":\n try:\n infile = sys.argv[1]\n except:\n infile = input(\"Input file: \")\n with open(infile) as f:\n seqs = readseqs(f)\n\n for (key, seq) in seqs.items():\n print(key)\n\n frames = []\n for shift in [0, 1, 2]:\n frames.append(max(translate(seq, shift).split(\"*\"), key=len))\n frames.append(max(translate(revcomp(seq), shift).split(\"*\"), key=len))\n\n print(max(frames, key=len))\n","sub_path":"a2/translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"30066273","text":"# -*- coding: utf-8 -*-\nimport networkx as nx\nimport random\n\n\n\n# Testni graf\nmGraph = nx.gn_graph(10)\nfor edge in mGraph.edges():\n print 
(edge)\n\n\n\n## 1: Funkcija za iskanje števila povezav za vsak node\ndef getNodeInputAndOutputCount(g):\n #Inicializacija slovarjev\n countedIn = {}\n countedOut = {}\n #Za vsak rob - \n for edge in g.edges_iter():\n #če krajišča še ni v števcih vhoda ali izhoda, ga dodaj in nastavi na 0\n if not edge[0] in countedOut:\n countedOut[edge[0]] = 0\n if not edge[1] in countedOut:\n countedOut[edge[1]] = 0\n if not edge[0] in countedIn:\n countedIn[edge[0]] = 0\n if not edge[1] in countedIn:\n countedIn[edge[1]] = 0\n\n #Krajišču izvora roba prištej ena v števcu izhodov \n countedOut[edge[0]] = countedOut[edge[0]] + 1\n #Krajišču ponora roba prištej ena v števcu vhodov\n countedIn[edge[1]] = countedIn[edge[1]] + 1\n #Vrni oba slovarja v obliki {Krajišče:Števec, ...}\n return (countedIn,countedOut) \n\nprint (\"\")\nprint (\"\")\nprint (\"1: IN AND OUT DEGREES OF NODES\")\nprint (\"\")\n#Uporabi funkcijo in poberi rezultate\n(inConnections,outConnections) = getNodeInputAndOutputCount(mGraph)\n#Za vsako krajišče izpiši identifikator krajišča in pripadajoče število vhodov in izhodov\nfor node in mGraph.nodes():\n print (\"Node\", node)\n print (\"In-degree:\", inConnections[node],\", Out-degree:\",outConnections[node])\n\n\n## 2: Funkcija za iskanje 3 verig\ndef getThreeChainCount(g):\n #Inicializacija treh slovarjev za števce, vsakemu krajišču se šteje kolikokrat je A, kolikokrat B in kolikokrat C\n countedA = {}\n countedB = {}\n countedC = {}\n \n #Nastavi začetne vrednosti\n for node in g.nodes():\n countedA[node]=0\n countedB[node]=0\n countedC[node]=0\n\n #Za vsako krajišče\n for node in g.nodes():\n #Za vsak rob IZ tega krajišča\n for edge_1 in g.edges(node):\n #Če ne obstaja povratna povezava (A-B,B-A)\n if not (edge_1[1],edge_1[0]) in g.edges(node):\n #Za vsak rob iz krajišča na koncu drugega roba (B-C)\n for edge_2 in g.edges(edge_1[1]):\n #Če ne obstaja povezava C-A in če ne obstaja povezava A-C (imamo usmerjene grafe)\n if not (edge_2[1],node) in g.edges() and not (node, edge_2[1]) in g.edges():\n #Prvo krajišče je nastopalo kot krajišče A, krajišče na koncu roba 1 ko krajišče B in krajišče na koncu drugega roba kot krajišče C. Prištej števce, kot je prav.\n countedA[node]=countedA[node]+1\n countedB[edge_1[1]]=countedB[edge_1[1]]+1\n countedC[edge_2[1]]=countedC[edge_2[1]]+1\n #Vrni števce\n return (countedA,countedB,countedC)\n\nprint (\"\")\nprint (\"\")\nprint (\"2: THREE PART CHAIN PARTICIPATION\")\nprint (\"\")\n#Uporabi funkcijo za pridobitev števcev\n(cA,cB,cC) = getThreeChainCount(mGraph)\n#Za vsako krajišče izpiši krajišče ter kolikokrat nastopa v kateri vlogi\nfor node in mGraph.nodes():\n print (\"Node\", node)\n print (\"A:\", cA[node],\"B:\", cB[node], \"C:\", cC[node])\n\n\n## 3_1: Iskanje podomrežij\n#Rekurzivna funkcija za iskanje podomrežij, za začetek glej findSubGraphs(g)\ndef findSubConnections(g, node, subGraph, usedNodes):\n #Dodaj to krajišče med uporavljena\n usedNodes.append(node)\n #Za vsak rob iz tega krajišča\n for edge in g.edges(node):\n #Dodaj ta rob podgrafu\n subGraph.add_edge(*edge)\n #Če konec roba še ni obiskano krajišče\n if not edge[1] in usedNodes:\n #Poišči vse povezave iz tega krajišča. Zadeva se ustavi, ko krajišče nima izhodnih povezav - je robno.\n findSubConnections(g,edge[1],subGraph, usedNodes)\n \ndef findSubGraphs(g):\n #Inicializacija slovarja za shranjevanje podgrafov ter vrste za shranjevanje že obiskanih vozlišč, da se v primeru cikla ne... 
no, zaciklamo\n arrayDict = {}\n usedNodes = []\n #Za vsako krajišče\n for node in g.nodes():\n #Dodaj ga med uporabljene\n usedNodes.append(node)\n #Ustvari prazen podgraf\n sg = nx.DiGraph()\n #Podgrafu dodaj vse povezave, ki so dosegljive s tega krajišča - rekurzivno, glej funkcijo. Argumenti so graf, trenutno krajišče, podgraf ki se mora zapolniti, ter spremenljivka uporabljenih krajišč\n findSubConnections(g,node,sg,usedNodes)\n #Če je število krajišč v podgrafu različno od celotnega števila krajišč (torej manjše), ga dodaj v slovar podgrafov. Če je enako nas ne zanima.\n if len(sg.nodes()) != len(g.nodes()):\n arrayDict[node]=sg\n #Vrni slovar podgrafov\n return arrayDict\n\nprint (\"\")\nprint (\"\")\nprint (\"3: SUBGRAPHS THAT DO NOT INCLUDE ALL NODES\")\nprint (\"\")\n#Uporabi funkcijo\nsubGraphDictionary = findSubGraphs(mGraph)\n#Za vsak vnos v slovarju izpiši začetno krajišče ter krajišča celotnega podgrafa\nfor node,subGraph in subGraphDictionary.iteritems():\n print (\"Starting at node\", node,\":\",subGraph.nodes())\n\n\n## 4: Izračun PageRank-a\ndef getPageRank(g, possibility, stepMultiplier):\n #Inicializacija števca obiskov, nastavljenega na 0 za vsako krajišče\n counter = {}\n for node in g.nodes():\n counter[node]=0\n #Naključno krajišče. Random.sample(populacija,število primerkov), vrne [krajišče], [0] spremeni to v krajišče.\n currentNode = random.sample(g.nodes(), 1)[0]\n #To krajišče je bilo obiskano, prištej mu en obisk\n counter[currentNode]= counter[currentNode]+1\n #Za število definiranih korakov\n for i in range(0,stepMultiplier*len(g.nodes())):\n #Če je poljubna vrednost večja ali enaka damferju (pri possibility=0.1 je možnost da bo večja 90%)\n if random.random()>=possibility:\n #Vsa sosedna krajišča spakiraj v eno vrsto\n neighbours = []\n for edge in g.edges(currentNode):\n #Sosednje krajišče je krajišče na koncu roba\n neighbours.append(edge[1])\n #Če sploh je kak sosed\n if len(neighbours)>0:\n #Izberi naključnega in mu prištej obisk\n currentNode = random.sample(neighbours, 1)[0]\n counter[currentNode]= counter[currentNode]+1\n else:\n #Če ni soseda se teleportiraj na poljubno mesto na grafu in mu prištej obisk\n currentNode = random.sample(g.nodes(), 1)[0]\n counter[currentNode]= counter[currentNode]+1\n else:\n #Če poljubna vrednost ni večja, se teleportiraj in krajišču prištej obisk\n currentNode = random.sample(g.nodes(), 1)[0]\n counter[currentNode]= counter[currentNode]+1\n #Vrni slovar {Krajišče:število obiskov}\n return counter\n\n\nprint (\"\")\nprint (\"\")\nprint (\"4: PAGE RANK CALCULATIONS\")\nprint (\"\")\n#Uporabi funkcijo\nPageRank = getPageRank(mGraph,0.1,20)\n#Izpiši celotno število korakov\nprint (\"Total steps:\", 20*len(mGraph.nodes()))\n#Izpiši krajišče: število obiskov\nfor key,value in PageRank.iteritems():\n print (str(key)+\":\", \"Visited:\", value)\n","sub_path":"Seminarska naloga/Main1.py","file_name":"Main1.py","file_ext":"py","file_size_in_byte":7226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"30248927","text":"#!/usr/bin/env python3\n\nfrom urllib.request import urlopen\nfrom urllib.parse import urljoin, urlsplit, urlunsplit\nfrom lxml import etree\n\n\ndef del_whitespace(s):\n return ''.join(s.split())\n\nif __name__ == '__main__':\n base_url = 'https://repka.ua/products/smartfony/?view=grid&sf=CATALOG_PRICE_4&so=desc&PAGEN_1={0}'\n product_xpath = (\"//div[@class='catalog-product-name']/a/text()\"\n \"|\"\n 
\"//div[@class='catalog-product-images']/a/img[1]/@src\"\n \"|\"\n \"//div[@class='price-uah']/span/text()\"\n )\n products = []\n for i in range(1, 11):\n url = base_url.format(i)\n tree = etree.parse(urlopen(url), etree.HTMLParser())\n raw_data = tree.xpath(product_xpath)\n products += [\n (name.strip(), urljoin(url, image), del_whitespace(price))\n for name, image, price in zip(*[iter(raw_data)] * 3)\n ]\n if len(products) >= 20:\n break\n\n data_file = 'repka.xml'\n with etree.xmlfile(data_file, encoding='utf-8') as xml_file:\n xml_file.write_declaration()\n with xml_file.element('products'):\n for name, image, price in products:\n xml_file.write(etree.Element('product', name=name, image=image, price=price))","sub_path":"databases/semester-5/lab_1/fetch_repka.py","file_name":"fetch_repka.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"596746377","text":"from classes.Helpers import getInputFiles, printVerbose, debugPrintInputInfo\nfrom util.updateGroups import update_groups\n\n\ndef handle_groups_file_update(outdir, groupsfile, clustering_groups_files_uncount):\n \"\"\"Checks if the user specified groups file exists, and updates the groupings with clustering data.\n Returns a list of the most up to date groups files.\n :param outdir: Filepath to the directory where outputfiles will be written.\n :param groupsfile: Optional filepath to the .groups file, or a folder of .groups files to use as a reference.\n :param clustering_groups_files_uncount: The output groups file from clustering, with trailing replication counts\n removed from sequence names. Names in this file should match those used in the user-specified groups file\n groupsfile.\n :return: A list of filenames pointing to the most up to date groups files.\n \"\"\"\n most_recent_groups_files = clustering_groups_files_uncount\n if groupsfile:\n # Try to grab groups files\n user_specified_groups_files = getInputFiles(groupsfile, critical=False)\n # If we have files at the given location\n if len(user_specified_groups_files) != 0:\n most_recent_groups_files = user_specified_groups_files\n printVerbose(\"Updating .groups files with clustering data\")\n debugPrintInputInfo(most_recent_groups_files, \"used as groups references\")\n update_groups(most_recent_groups_files, clustering_groups_files_uncount, outdir, \"postcluster\")\n printVerbose(\"Done updating .groups files.\")\n most_recent_groups_files = getInputFiles(outdir, \"postcluster*.groups\")\n else:\n printVerbose(\"No name files provided, assuming singletons...\\n\")\n return most_recent_groups_files\n","sub_path":"src/ARMS/cluster/Cluster_Helpers.py","file_name":"Cluster_Helpers.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"96222925","text":"\"\"\"\n该模块定义了用户自定义类,用于临时和使用序列化永久存储\n用户的个人配置,如运行Kubernetes master的主机等\n\"\"\"\nimport shelve\n\nclass UserConfig():\n\tdef __init__(self, configname, hostaddress,\n\t\t\tclustercount):\n\t\tself.configname = configname\n\t\tself.hostaddress = hostaddress\n\t\tself.clustercount = clustercount\n\ndef serializedAndSave(userconfig):\n\tassert(type(userconfig) == UserConfig)\n\t\n\tdb = shelve.open(\"userconfigdb\")\n\tdb[userconfig.configname] = userconfig\n\tdb.close()\n\ndef getAndRestoreUserconfig(name):\n\t\n\tdb = shelve.open(\"userconfigdb\")\n\treturn 
db.get(name)\n\n","sub_path":"userconfig.py","file_name":"userconfig.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"218321089","text":"from typing import List\nfrom Leetcode.utils import perf\n\n\nimport collections\n\n\nclass LightBeams:\n\n def __init__(self):\n self.diag_incr = collections.defaultdict(int) # slope is 1\n self.diag_decr = collections.defaultdict(int) # slope is -1\n self.vert = collections.defaultdict(int)\n self.horiz = collections.defaultdict(int)\n\n def add(self, ri, ci):\n self.diag_incr[ri - ci] += 1\n self.diag_decr[ri + ci] += 1\n self.vert[ci] += 1\n self.horiz[ri] += 1\n\n def remove(self, ri, ci):\n self.diag_incr[ri - ci] -= 1\n self.diag_decr[ri + ci] -= 1\n self.vert[ci] -= 1\n self.horiz[ri] -= 1\n\n def is_illuminated(self, ri, ci):\n return (\n self.vert[ci] > 0\n or self.horiz[ri] > 0\n or self.diag_incr[ri - ci] > 0\n or self.diag_decr[ri + ci] > 0\n )\n\n\nclass Solution:\n # Key Insights:\n # 1) Don't use a 2D array for the grid because\n # 2) You can decrease the computations if you don't have check that neighbors are in-bounds\n # 3) Therefore, use a set for the grid\n\n # tc: O(N + Q)\n # sc: O(N)\n @perf\n def gridIllumination(self, N: int, lamps: List[List[int]],\n queries: List[List[int]]) -> List[int]:\n N = N\n lamps_unique = set()\n light_beams = LightBeams()\n ans = []\n\n for lamp in map(tuple, lamps):\n if lamp not in lamps_unique:\n lamps_unique.add(lamp)\n light_beams.add(*lamp)\n\n for ri, ci in queries:\n if light_beams.is_illuminated(ri, ci):\n ans.append(1)\n self.turn_off_region(ri, ci, lamps_unique, light_beams)\n else:\n ans.append(0)\n\n return ans\n\n def region(self, ri, ci):\n for nri in range(ri-1, ri+2):\n for nci in range(ci-1, ci+2):\n yield nri, nci\n\n def turn_off_region(self, ri, ci, lamps, light_beams):\n for point in self.region(ri, ci):\n if point in lamps:\n lamps.remove(point)\n light_beams.remove(*point)\n","sub_path":"Leetcode/grid_illumination/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"383862269","text":"'''\nYou are given a perfect binary tree where all leaves are on the same level, \nand every parent has two children. The binary tree has the following definition:\nstruct Node {\n int val;\n Node *left;\n Node *right;\n Node *next;\n}\nPopulate each next pointer to point to its next right node. If there is no next right node, \nthe next pointer should be set to NULL.\nInitially, all next pointers are set to NULL.\n\nFollow up:\n- You may only use constant extra space.\n- Recursive approach is fine, you may assume implicit stack space does not count as extra space for this problem.\n\nExample 1:\nInput: root = [1,2,3,4,5,6,7]\nOutput: [1,#,2,3,#,4,5,6,7,#]\nExplanation: Given the above perfect binary tree (Figure A), \nyour function should populate each next pointer to point to \nits next right node, just like in Figure B. 
The serialized output \nis in level order as connected by the next pointers, \nwith '#' signifying the end of each level.\n\nConstraints:\n- The number of nodes in the given tree is less than 4096.\n- -1000 <= node.val <= 1000\n'''\n\n\"\"\"\n# Definition for a Node.\nclass Node:\n    def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):\n        self.val = val\n        self.left = left\n        self.right = right\n        self.next = next\n\"\"\"\nfrom collections import deque\nclass Solution:\n    def connect(self, root: 'Node') -> 'Node':\n        '''\n        A solution using O(n) extra space.\n        '''\n        if not root:\n            return\n        \n        q = deque()\n        q.append(root)\n        while q:\n            # Pop one full level, linking its nodes left to right and queueing their children\n            level_len = len(q)\n            prev = None\n            for _ in range(level_len):\n                node = q.popleft()\n                if prev:\n                    prev.next = node\n                prev = node\n                if node.left:\n                    q.append(node.left)\n                if node.right:\n                    q.append(node.right)\n        \n        return root\n\n    def connect(self, root: 'Node') -> 'Node':\n        '''\n        Solve it recursively.\n        '''\n        if not root:\n            return\n        \n        def helper(node):\n            if not node.left: # if it is a leaf node\n                return\n            node.left.next = node.right # connect two children\n            if node.next: # if grandchildren exist.\n                node.right.next = node.next.left # connect two grandchildren\n            helper(node.left)\n            helper(node.right)\n        \n        helper(root)\n        return root","sub_path":"Leetcode/Intermediate/Tree and graph/116_Populating_Next_Right_Pointers_in_Each_Node.py","file_name":"116_Populating_Next_Right_Pointers_in_Each_Node.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"238729608","text":"\"\"\"\nA static parser for a java test class\n\"\"\"\nimport plyj.parser as plyj\nimport plyj.model as model\n\nclass JavaTestClassFileParser(object):\n    parser = plyj.Parser()\n\n    @classmethod\n    def parse_and_return_methodnameTannotations(cls, java_test_file_path):\n        with open(java_test_file_path, 'r') as tfile:\n            tree = cls.parser.parse_file(tfile)\n        for type_decl in tree.type_declarations:\n            methodnameTannotations = [] # (method_name, ['Test', 'Deprecated'])\n            for method_decl in [decl for decl in type_decl.body if type(decl) is model.MethodDeclaration]:\n                annotations = []\n                for modifier in method_decl.modifiers:\n                    if type(modifier) is model.Annotation:\n                        annotations.append(modifier.name.value)\n                methodnameTannotations.append((method_decl.name, annotations))\n            return methodnameTannotations","sub_path":"JavaTracerUtils/java_testclass_parser.py","file_name":"java_testclass_parser.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"304470499","text":"\"\"\"\nMask R-CNN\nConfigurations and data loading code for MS COCO.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport os\nimport sys\nimport numpy as np\nimport math\nfrom PythonAPI.pycocotools.coco import COCO\nfrom PythonAPI.pycocotools import mask as maskUtils\nfrom extract_category_imgIds import get_xy_exclude_img_ids as get_xy_labels_data\n\n# Root directory of the project\nROOT_DIR = os.path.abspath(\"./\")\nsys.path.append(ROOT_DIR)  # To find local version of the library\nfrom mrcnn.config import Config\nfrom mrcnn import model as modellib, utils\n\n\n# COCO Dataset\ndataset = 'coco_datasets'\nDEFAULT_DATASET_YEAR = \"2014\"\n\n# Constants\nlabel = 'bus'\nnetwork_labels = [label]\nlabel_size = 500\nimages_per_weight = 10\n\n\nclass CocoConfig(Config):\n    # Give the configuration a recognizable name\n    NAME = \"coco\"\n\n    # We use a GPU with 12GB memory, which can fit two images.\n    # Adjust down if you use a smaller GPU.\n    IMAGES_PER_GPU = 1\n\n    # Number of classes (including background)\n    NUM_CLASSES = 1 + 80  # Background + category\n\n    # Number of training steps per epoch\n    STEPS_PER_EPOCH = images_per_weight\n\n    VALIDATION_STEPS = 20\n\n    # Adjust learning rate if needed\n    LEARNING_RATE = 0.0001\n\n    # Skip detections with < 70% confidence\n    DETECTION_MIN_CONFIDENCE = 0.7\n\n    DEFAULT_LOGS_DIR = '../drive/My Drive/mrcnn_bus_init_weights/logs'\n\n\nclass CocoDataset(utils.Dataset):\n    def load_coco(self, dataset_dir, subset, network_cats, year=DEFAULT_DATASET_YEAR, class_ids=None,\n                  return_subset_size=False):\n        \"\"\"Load a subset of the COCO dataset.\"\"\"\n        coco = COCO(\"../drive/My Drive/{}/annotations/instances_{}{}.json\".format(dataset_dir, subset, year))\n\n        # Load all classes or a subset?\n        if not class_ids:\n            # All classes\n            class_ids = sorted(coco.getCatIds())\n\n        # Add classes\n        for i in class_ids:\n            self.add_class(\"coco\", i, coco.loadCats(i)[0][\"name\"])\n\n        selected_class_ids = coco.getCatIds(catNms=network_cats)\n        image_ids = []\n        if subset == 'train':\n            cat_id = selected_class_ids[0]\n            cat_img_ids = (get_xy_labels_data(coco, cat_id, label_size))[0]\n            image_ids.extend(cat_img_ids)\n        else:\n            for cat_id in selected_class_ids:\n                image_ids.extend(list(coco.getImgIds(catIds=[cat_id])))\n            # Remove duplicates\n            image_ids = list(set(image_ids))\n\n        print(subset, 'size:', len(image_ids))\n\n        # Add images\n        for i in image_ids:\n            self.add_image(\n                \"coco\", image_id=i,\n                # path=os.path.join(image_dir, coco.imgs[i]['file_name']),\n                path=coco.imgs[i]['coco_url'],\n                width=coco.imgs[i][\"width\"],\n                height=coco.imgs[i][\"height\"],\n                annotations=coco.loadAnns(coco.getAnnIds(imgIds=[i], catIds=selected_class_ids, iscrowd=None)))\n\n        if return_subset_size:\n            return len(image_ids)\n\n    def load_mask(self, image_id):\n        \"\"\"Load instance masks for the given image.\n\n        Different datasets use different ways to store masks. This\n        function converts the different mask format to one format\n        in the form of a bitmap [height, width, instances].\n\n        Returns:\n        masks: A bool array of shape [height, width, instance count] with\n        one mask per instance.\n        class_ids: a 1D array of class IDs of the instance masks.\n        \"\"\"\n        # If not a COCO image, delegate to parent class.\n        image_info = self.image_info[image_id]\n        if image_info[\"source\"] != \"coco\":\n            return super(CocoDataset, self).load_mask(image_id)\n\n        instance_masks = []\n        class_ids = []\n        annotations = self.image_info[image_id][\"annotations\"]\n        # Build mask of shape [height, width, instance_count] and list\n        # of class IDs that correspond to each channel of the mask.\n        for annotation in annotations:\n            class_id = self.map_source_class_id(\n                \"coco.{}\".format(annotation['category_id']))\n            if class_id:\n                m = self.annToMask(annotation, image_info[\"height\"],\n                                   image_info[\"width\"])\n                # Some objects are so small that they're less than 1 pixel area\n                # and end up rounded out. Skip those objects.\n                if m.max() < 1:\n                    continue\n                # Is it a crowd? If so, use a negative class ID.\n                if annotation['iscrowd']:\n                    # Use negative class ID for crowds\n                    class_id *= -1\n                    # For crowd masks, annToMask() sometimes returns a mask\n                    # smaller than the given dimensions. If so, resize it.\n                    if m.shape[0] != image_info[\"height\"] or m.shape[1] != image_info[\"width\"]:\n                        m = np.ones([image_info[\"height\"], image_info[\"width\"]], dtype=bool)\n                instance_masks.append(m)\n                class_ids.append(class_id)\n\n        # Pack instance masks into an array\n        if class_ids:\n            mask = np.stack(instance_masks, axis=2).astype(bool)\n            class_ids = np.array(class_ids, dtype=np.int32)\n            return mask, class_ids\n        else:\n            # Call super class to return an empty mask\n            return super(CocoDataset, self).load_mask(image_id)\n\n    def image_reference(self, image_id):\n        \"\"\"Return a link to the image in the COCO Website.\"\"\"\n        info = self.image_info[image_id]\n        if info[\"source\"] == \"coco\":\n            return \"http://cocodataset.org/#explore?id={}\".format(info[\"id\"])\n        else:\n            super(CocoDataset, self).image_reference(image_id)\n\n    # The following two functions are from pycocotools with a few changes.\n\n    def annToRLE(self, ann, height, width):\n        \"\"\"\n        Convert annotation which can be polygons, uncompressed RLE to RLE.\n        :return: binary mask (numpy 2D array)\n        \"\"\"\n        segm = ann['segmentation']\n        if isinstance(segm, list):\n            # polygon -- a single object might consist of multiple parts\n            # we merge all parts into one mask rle code\n            rles = maskUtils.frPyObjects(segm, height, width)\n            rle = maskUtils.merge(rles)\n        elif isinstance(segm['counts'], list):\n            # uncompressed RLE\n            rle = maskUtils.frPyObjects(segm, height, width)\n        else:\n            # rle\n            rle = ann['segmentation']\n        return rle\n\n    def annToMask(self, ann, height, width):\n        \"\"\"\n        Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.\n        :return: binary mask (numpy 2D array)\n        \"\"\"\n        rle = self.annToRLE(ann, height, width)\n        m = maskUtils.decode(rle)\n        return m\n\n\nif __name__ == '__main__':\n    # Configurations\n    config = CocoConfig()\n    config.display()\n\n    # Create model\n    model = modellib.MaskRCNN(mode=\"training\", config=config, model_dir=config.DEFAULT_LOGS_DIR)\n\n    # Training dataset\n    dataset_train = CocoDataset()\n    train_subset_size = dataset_train.load_coco(dataset, \"train\", network_labels, return_subset_size=True)\n    dataset_train.prepare()\n\n    # Validation dataset\n    dataset_val = CocoDataset()\n    dataset_val.load_coco(dataset, \"val\", network_labels)\n    dataset_val.prepare()\n\n    # Training: fine tune all layers\n    total_epochs = int(math.ceil(train_subset_size / images_per_weight))\n    print('Training on coco', network_labels, 'data')\n    model.train(dataset_train, dataset_val,\n                learning_rate=config.LEARNING_RATE,\n                epochs=total_epochs,\n                layers='all')\n\n    print('\\nFinish process.')\n","sub_path":"get_random_init_w.py","file_name":"get_random_init_w.py","file_ext":"py","file_size_in_byte":7863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
+{"seq_id":"64317539","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('ai', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='ailog',\n            name='contextual',\n            field=models.CharField(choices=[('active', 'The item is active'), ('success', 'Something proceeds successfully'), ('warning', 'Giving user a warning'), ('danger', 'The operation is dangerous'), ('info', 'Providing some information'), ('', 'There is nothing happening')], max_length=10, default=''),\n            preserve_default=True,\n        ),\n        migrations.AlterField(\n            model_name='ai',\n            name='inst_file',\n            
field=models.FileField(upload_to='ai/inst_pool/%Y-%m-%d'),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='ai',\n name='src_file',\n field=models.FileField(upload_to='ai/src_pool/%Y-%m-%d'),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='ai',\n name='version',\n field=models.PositiveIntegerField(default=0),\n preserve_default=True,\n ),\n ]\n","sub_path":"ai/migrations/0002_auto_20150425_2231.py","file_name":"0002_auto_20150425_2231.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"93685737","text":"def progress_bar(progress, characters_count = 20,\n erase_line = True,\n empty_bar = '.', filled_bar = '=', filled_edge = '>',\n prefix = '', postfix = '',\n add_space_around = True):\n \"\"\"\n Prints progress bar.\n :param progress: percentage (0..1) of progress, or int number of characters filled in progress bar.\n :param characters_count: length of the bar in characters.\n :param erase_line: preform return carriage.\n :param empty_bar: empty bar character.\n :param filled_bar: progress character.\n :param filled_edge: progress character on the borderline between progressed and empty,\n set to None to disable.\n :param prefix: progress bar prefix.\n :param postfix: progress bar postfix.\n :param add_space_around: add space after prefix and before postfix.\n :return:\n \"\"\"\n\n space_characters = ' \\t\\n'\n if add_space_around:\n if len(prefix) > 0 and prefix[-1] not in space_characters:\n prefix += ' '\n\n if len(postfix) > 0 and postfix[0] not in space_characters:\n postfix = ' ' + postfix\n\n if erase_line:\n print('\\r', end = '')\n\n progress_num = int(characters_count * progress)\n if filled_edge is None:\n print(prefix + filled_bar * progress_num + empty_bar * (characters_count - progress_num) + postfix, end = '')\n else:\n bar_str = prefix + filled_bar * progress_num\n bar_str += filled_edge * min(characters_count - progress_num, 1)\n bar_str += empty_bar * (characters_count - progress_num - 1)\n bar_str += postfix\n\n print(bar_str, end = '')\n\n","sub_path":"utils/print_tools.py","file_name":"print_tools.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"487936966","text":"class Color(object):\n \"\"\"\n utility to return ansi colored text.\n \"\"\"\n\n colors = {\n 'black': 30,\n 'red': 31,\n 'green': 32,\n 'yellow': 33,\n 'blue': 34,\n 'magenta': 35,\n 'cyan': 36,\n 'white': 37,\n 'bgred': 41,\n 'bggrey': 100\n }\n\n prefix = '\\033['\n\n suffix = '\\033[0m'\n\n def colored(self, text, color=None):\n if color not in self.colors:\n color = 'white'\n\n clr = self.colors[color]\n return (self.prefix+'%dm%s'+self.suffix) % (clr, text)\n\n\ncolored = Color().colored\n\nimport logging\nfrom logging import Formatter, getLogger, StreamHandler\n\n\nclass ColoredFormatter(Formatter):\n\n def format(self, record):\n\n message = record.getMessage()\n\n mapping = {\n 'INFO': 'cyan',\n 'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'bgred',\n 'DEBUG': 'bggrey',\n 'SUCCESS': 'green'\n }\n\n clr = mapping.get(record.levelname, 'white')\n\n return colored(record.levelname, clr) + ': ' + message\n\nlogger = logging.getLogger(__name__)\nhandler = StreamHandler()\nformatter = ColoredFormatter()\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\n# set success level\nlogging.SUCCESS = 25 # between WARNING and 
INFO\nlogging.addLevelName(logging.SUCCESS, 'SUCCESS')\nsetattr(logger, 'success', lambda message, *args: logger._log(logging.SUCCESS, message, args))\n\nif __name__ == '__main__':\n logger.setLevel(logging.DEBUG)\n logger.info('info')\n logger.success('success')\n logger.debug('debug')\n logger.warning('warning')\n logger.error('error')\n logger.critical('critical')\n\n","sub_path":"all-gists/5635505/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"490048424","text":"\nstringLength = 343\ndarkLength = 0\nfarLeftLength = 12\ncameraLength = 10\nleftLength = 90\ncenterLength = 58\nrightLength = 140\nfarRightLength = 33\n\ndefaultConfig = {\n \"holiday\": \"Dark\",\n}\n\nfrom ha import *\nfrom ha.interfaces.neopixelInterface import *\nfrom ha.interfaces.fileInterface import *\nfrom ha.controls.holidayLightControl import *\nfrom ha.rest.restServer import *\n\n# colors\nlightblue = color(0,0,190)\ndarkblue = color(0,0,31)\n\n# dictionary of patterns\npatterns = {\"On\": [Segment(0, stringLength, [on])],\n \"Off\": [Segment(0, stringLength, [off])],\n \"Dark\": [Segment(0, stringLength, [off])],\n \"White\": [Segment(0, stringLength, [white])],\n \"Pink\": [Segment(0, stringLength, [pink])],\n \"Red\": [Segment(0, stringLength, [red])],\n \"Orange\": [Segment(0, stringLength, [orange])],\n \"Yellow\": [Segment(0, stringLength, [yellow])],\n \"Green\": [Segment(0, stringLength, [green])],\n \"Blue\": [Segment(0, stringLength, [blue])],\n \"Purple\": [Segment(0, stringLength, [purple])],\n \"Cyan\": [Segment(0, stringLength, [cyan])],\n \"Magenta\": [Segment(0, stringLength, [magenta])],\n \"Rust\": [Segment(0, stringLength, [rust])],\n \"Indigo\": [Segment(0, stringLength, [indigo])],\n \"Christmas\": [Segment(0, farLeftLength, 4*[green]+4*[white]+4*[red]),\n Segment(0, cameraLength, [off]),\n Segment(0, leftLength, 4*[green]+4*[white]+4*[red]),\n Segment(0, centerLength, [white]),\n Segment(0, rightLength+farRightLength, 4*[green]+4*[white]+4*[red]),\n ],\n \"Christmas color\": [Segment(0, farLeftLength, [red]+[green]+[purple]+[blue]+[yellow]),\n Segment(0, cameraLength, [off]),\n Segment(0, leftLength, [red]+[green]+[purple]+[blue]+[yellow]),\n Segment(0, centerLength, [red]+[green]+[purple]+[blue]+[yellow]),\n Segment(0, rightLength+farRightLength, [red]+[green]+[purple]+[blue]+[yellow]),\n ],\n \"Christmas sparkle\": [Segment(0, farLeftLength, 5*[green]+2*[white]+5*[red], CrawlAnimation(direction=1)),\n Segment(0, cameraLength, [off]),\n Segment(0, leftLength, 5*[green]+2*[white]+5*[red], CrawlAnimation(direction=1)),\n Segment(0, centerLength, [white], SparkleAnimation(rate=2, factor=.7)),\n Segment(0, rightLength+farRightLength, 5*[green]+2*[white]+5*[red], CrawlAnimation(direction=0)),\n ],\n \"New years eve\": [Segment(0, farLeftLength, 2*[red]+2*[yellow]+2*[green]+2*[orange]+2*[blue]+2*[white], SparkleAnimation(rate=1, factor=.7)),\n Segment(0, cameraLength, [off]),\n Segment(0, leftLength, 2*[red]+2*[yellow]+2*[green]+2*[orange]+2*[blue]+2*[white], SparkleAnimation(rate=1, factor=.7)),\n Segment(0, centerLength, [white], SparkleAnimation(rate=1, factor=.7)),\n Segment(0, rightLength+farRightLength, 2*[red]+2*[yellow]+2*[green]+2*[orange]+2*[blue]+2*[white], SparkleAnimation(rate=2, factor=.7)),\n ],\n \"Hanukkah\": [Segment(0, stringLength, 7*[blue]+3*[white])],\n \"St. 
Andrews day\": [Segment(0, stringLength, 7*[lightblue]+2*[darkblue]+1*[white]+1*[darkblue]+1*[white]+2*[darkblue])],\n \"October\": [Segment(0, stringLength, 3*[orange]+3*[rust]+3*[purple])],\n \"Halloween\": [Segment(0, stringLength, 5*[orange]+3*[rust]+2*[purple])],\n \"Spooky\": [Segment(0, farLeftLength, [orange]),\n Segment(0, cameraLength, [off]),\n Segment(0, leftLength, 5*[orange]+3*[rust]+2*[purple]),\n Segment(0, centerLength, [orange]),\n Segment(0, rightLength, 5*[orange]+3*[rust]+2*[purple], FlickerAnimation(rate=1)),\n Segment(0, farRightLength, 5*[orange]+3*[rust]+2*[purple], FlickerAnimation()),\n ],\n \"Election day\": [Segment(0, farLeftLength, 10*[red]+10*[white]+10*[blue]),\n Segment(0, cameraLength, [off]),\n Segment(0, leftLength, 10*[red]+10*[white]+10*[blue], CrawlAnimation(direction=1)),\n Segment(0, centerLength, [white], SparkleAnimation(rate=1)),\n Segment(0, rightLength+farRightLength, 10*[red]+10*[white]+10*[blue], CrawlAnimation(direction=0)),\n ],\n \"Inauguration day\": [Segment(0, farLeftLength, 10*[red]+10*[white]+10*[blue]),\n Segment(0, cameraLength, [off]),\n Segment(0, leftLength, 10*[red]+10*[white]+10*[blue], CrawlAnimation(direction=1)),\n Segment(0, centerLength, [white], SparkleAnimation(rate=1)),\n Segment(0, rightLength+farRightLength, 10*[red]+10*[white]+10*[blue], CrawlAnimation(direction=0)),\n ],\n \"Valentines day\": [Segment(0, stringLength, 1*[white]+2*[pink]+5*[red]+2*[pink])],\n \"St Patricks day\": [Segment(0, stringLength, [green])],\n \"May day\": [Segment(0, stringLength, 2*[green]+2*[yellow])],\n \"Mardi gras\": [Segment(0, stringLength, 3*[purple]+3*[yellow]+3*[green], SparkleAnimation(rate=5))],\n \"Presidents day\": [Segment(0, stringLength, 3*[red]+3*[white]+3*[blue])],\n \"Memorial day\": [Segment(0, stringLength, 3*[red]+3*[white]+3*[blue])],\n \"4th of July\": [Segment(0, farLeftLength, 10*[red]+10*[white]+10*[blue]),\n Segment(0, cameraLength, [off]),\n Segment(0, leftLength, 10*[red]+10*[white]+10*[blue]),\n Segment(0, centerLength, [red]+[white]+[blue], SparkleAnimation(rate=1)),\n Segment(0, rightLength+farRightLength, 10*[red]+10*[white]+10*[blue]),\n ],\n \"Bastille day\": [Segment(0, stringLength, 10*[red]+10*[white]+10*[blue])],\n \"Cinco de Mayo\": [Segment(0, stringLength, 10*[green]+10*[white]+10*[red])],\n \"Juneteenth\": [Segment(0, stringLength, 10*[red]+10*[yellow]+10*[green])],\n \"Easter\": [Segment(0, stringLength, [yellow]+[blue]+[green]+[cyan]+[magenta])],\n \"Sweden day\": [Segment(0, stringLength, 5*[blue]+5*[yellow])],\n \"Canada day\": [Segment(0, stringLength, 5*[red]+5*[white])],\n \"Fall\": [Segment(0, stringLength, 5*[red]+5*[orange]+5*[rust]+5*[orange])],\n \"Gay pride\": [Segment(0, stringLength, [pink]+[red]+[orange]+[yellow]+[green]+[blue]+[purple], SparkleAnimation(rate=3))],\n \"Holi\": [Segment(0, stringLength, [red]+[yellow]+[blue]+[green]+[orange]+[purple]+[pink]+[magenta])],\n \"Columbus day\": [Segment(0, stringLength, [green]+[white]+[red])],\n \"MLK day\": [Segment(0, stringLength, [white]+[red]+[yellow]+[rust])],\n \"Spectrum\": [Segment(0, stringLength, [red]+[orange]+[yellow]+[green]+[blue]+[purple], CrawlAnimation(direction=1))],\n \"Rabbit\": [Segment(0, stringLength, 3*[rust]+3*[red]+3*[orange]+3*[yellow]+3*[white]+3*[yellow]+3*[orange]+3*[red]+3*[rust]+73*[off],\n CrawlAnimation(rate=1))],\n \"Random\": [Segment(0, stringLength, [off], RandomColorAnimation(rate=5))],\n }\n\nif __name__ == \"__main__\":\n stateChangeEvent = threading.Event()\n\n # Interfaces\n neopixelInterface 
= NeopixelInterface(\"neopixelInterface\", None, length=stringLength)\n configData = FileInterface(\"configData\", fileName=stateDir+\"lights.conf\", initialState=defaultConfig)\n\n # Persistent config data\n holiday = MultiControl(\"holiday\", configData, \"holiday\", values=sorted(list(patterns.keys())),\n group=[\"Lights\", \"Holiday\"], label=\"Holiday colors\")\n\n holidayLights = HolidayLightControl(\"holidayLights\", neopixelInterface, patterns, holiday, type=\"light\",\n group=[\"Lights\", \"Holiday\"], label=\"Holiday lights\")\n\n# Tasks\n # # 2020\n # holidayTasks = [\n # Task(\"offTask\", SchedTime( hour=12, minute=0), holiday, \"Off\"),\n # Task(\"valentinesTask\", SchedTime( month=Feb, day=14, hour=12, minute=0), holiday, \"Valentines day\"),\n # Task(\"presidentsTask\", SchedTime(year=2020, month=Feb, day=17, hour=12, minute=0), holiday, \"Presidents day\"),\n # Task(\"mardigrasTask\", SchedTime(year=2020, month=Feb, day=[22,23,24,25], hour=12, minute=0), holiday, \"Mardi gras\"),\n # Task(\"stpatricksTask\", SchedTime( month=Mar, day=17, hour=12, minute=0), holiday, \"St Patricks day\"),\n # Task(\"easterTask\", SchedTime(year=2020, month=Apr, day=[11,12], hour=12, minute=0), holiday, \"Easter\"),\n # Task(\"maydayTask\", SchedTime( month=May, day=1, hour=12, minute=0), holiday, \"May day\"),\n # Task(\"cincodemayoTask\", SchedTime( month=May, day=5, hour=12, minute=0), holiday, \"Cinco de Mayo\"),\n # Task(\"swedenTask\", SchedTime( month=Jun, day=6, hour=12, minute=0), holiday, \"Sweden day\"),\n # Task(\"prideTask\", SchedTime(year=2020, month=Jun, day=12, hour=12, minute=0), holiday, \"Pride day\"),\n # Task(\"flagTask\", SchedTime( month=Jun, day=14, hour=12, minute=0), holiday, \"Flag day\"),\n # Task(\"canadaTask\", SchedTime( month=Jul, day=1, hour=12, minute=0), holiday, \"Canada day\"),\n # Task(\"july3Task\", SchedTime( month=Jul, day=3, hour=12, minute=0), holiday, \"Presidents day\"),\n # Task(\"july4Task\", SchedTime( month=Jul, day=4, hour=12, minute=0), holiday, \"4th of July\"),\n # Task(\"bastilleTask\", SchedTime( month=Jul, day=14, hour=12, minute=0), holiday, \"Bastille day\"),\n # Task(\"fallTask\", SchedTime( month=Sep, day=21, hour=12, minute=0), holiday, \"Fall\"),\n # Task(\"halloweenTask\", SchedTime(year=2020, month=Oct, day=[25,26,27,28,29,30], hour=12, minute=0), holiday, \"Halloween\"),\n # Task(\"spookyTask\", SchedTime( month=Oct, day=31, hour=12, minute=0), holiday, \"Spooky\"),\n # Task(\"electionTask\", SchedTime(year=2020, month=Nov, day=3, hour=12, minute=0), holiday, \"Election day\"),\n # Task(\"thanksgivingTask\", SchedTime(year=2020, month=Nov, day=26, hour=12, minute=0), holiday, \"Fall\"),\n # Task(\"christmasTaskNov\", SchedTime( month=Nov, day=[28,29,30], hour=12, minute=0), holiday, \"Christmas\"),\n # Task(\"christmasTask\", SchedTime( month=Dec, hour=12, minute=0), holiday, \"Christmas\"),\n # Task(\"hanukkahTask\", SchedTime(year=2020, month=Dec, day=10, hour=12, minute=0), holiday, \"Hanukkah\"),\n # Task(\"christmasSparkleTask\", SchedTime( month=Dec, day=[24,25], hour=12, minute=0), holiday, \"Christmas sparkle\"),\n # Task(\"newYearsEveTask\", SchedTime( month=Dec, day=31, hour=12, minute=0), holiday, \"New years eve\"),\n # ]\n\n # 2021\n # holidayTasks = [\n # Task(\"darkTask\", SchedTime( hour=12, minute=0), holiday, \"Dark\", group=\"Holiday\"),\n # Task(\"inaugurationTask\", SchedTime(year=2021, month=Jan, day=20, hour=12, minute=0), holiday, \"Inauguration day\", group=\"Holiday\"),\n # Task(\"valentinesTask\", 
SchedTime( month=Feb, day=14, hour=12, minute=0), holiday, \"Valentines day\", group=\"Holiday\"),\n # Task(\"presidentsTask\", SchedTime(year=2021, month=Feb, day=15, hour=12, minute=0), holiday, \"Presidents day\", group=\"Holiday\"),\n # Task(\"mardigrasTask\", SchedTime(year=2021, month=Feb, day=16, hour=12, minute=0), holiday, \"Mardi gras\", group=\"Holiday\"),\n # Task(\"stpatricksTask\", SchedTime( month=Mar, day=17, hour=12, minute=0), holiday, \"St Patricks day\", group=\"Holiday\"),\n # Task(\"easterTask\", SchedTime(year=2021, month=Apr, day=[3,4], hour=12, minute=0), holiday, \"Easter\", group=\"Holiday\"),\n # Task(\"maydayTask\", SchedTime( month=May, day=1, hour=12, minute=0), holiday, \"May day\", group=\"Holiday\"),\n # Task(\"cincodemayoTask\", SchedTime( month=May, day=5, hour=12, minute=0), holiday, \"Cinco de Mayo\", group=\"Holiday\"),\n # Task(\"memorialdayTask\", SchedTime(year=2021, month=May, day=[30,31], hour=12, minute=0), holiday, \"Memorial day\", group=\"Holiday\"),\n # Task(\"swedenTask\", SchedTime( month=Jun, day=6, hour=12, minute=0), holiday, \"Sweden day\", group=\"Holiday\"),\n # Task(\"flagTask\", SchedTime( month=Jun, day=14, hour=12, minute=0), holiday, \"Flag day\", group=\"Holiday\"),\n # Task(\"juneteenthTask\", SchedTime( month=Jun, day=19, hour=12, minute=0), holiday, \"Juneteenth\", group=\"Holiday\"),\n # Task(\"canadaTask\", SchedTime( month=Jul, day=1, hour=12, minute=0), holiday, \"Canada day\", group=\"Holiday\"),\n # Task(\"july3Task\", SchedTime( month=Jul, day=3, hour=12, minute=0), holiday, \"Presidents day\", group=\"Holiday\"),\n # Task(\"july4Task\", SchedTime( month=Jul, day=4, hour=12, minute=0), holiday, \"4th of July\", group=\"Holiday\"),\n # Task(\"bastilleTask\", SchedTime( month=Jul, day=14, hour=12, minute=0), holiday, \"Bastille day\", group=\"Holiday\"),\n # Task(\"fallTask\", SchedTime( month=Sep, day=21, hour=12, minute=0), holiday, \"Fall\", group=\"Holiday\"),\n # Task(\"octoberTask\", SchedTime(year=2021, month=Oct, hour=12, minute=0), holiday, \"October\", group=\"Holiday\"),\n # Task(\"halloweenTask\", SchedTime(year=2021, month=Oct, day=[25,26,27,28,29,30], hour=12, minute=0), holiday, \"Halloween\", group=\"Holiday\"),\n # Task(\"spookyTask\", SchedTime( month=Oct, day=31, hour=12, minute=0), holiday, \"Spooky\", group=\"Holiday\"),\n # Task(\"veteransTask\", SchedTime(year=2021, month=Nov, day=11, hour=12, minute=0), holiday, \"Presidents day\", group=\"Holiday\"),\n # Task(\"thanksgivingTask\", SchedTime(year=2021, month=Nov, day=25, hour=12, minute=0), holiday, \"Fall\", group=\"Holiday\"),\n # Task(\"christmasTaskNov\", SchedTime( month=Nov, day=[28,29,30], hour=12, minute=0), holiday, \"Christmas\", group=\"Holiday\"),\n # Task(\"hanukkahTask\", SchedTime(year=2021, month=Nov, day=28, hour=12, minute=0), holiday, \"Hanukkah\", group=\"Holiday\"),\n # Task(\"stAndrewsTask\", SchedTime(year=2021, month=Nov, day=30, hour=12, minute=0), holiday, \"St. 
Andrews day\", group=\"Holiday\"),\n # Task(\"christmasTask\", SchedTime( month=Dec, hour=12, minute=0), holiday, \"Christmas color\", group=\"Holiday\"),\n # Task(\"christmasSparkleTask\", SchedTime( month=Dec, day=[24,25], hour=12, minute=0), holiday, \"Christmas sparkle\", group=\"Holiday\"),\n # Task(\"newYearsEveTask\", SchedTime( month=Dec, day=31, hour=12, minute=0), holiday, \"New years eve\", group=\"Holiday\"),\n # ]\n\n # 2022\n holidayTasks = [\n Task(\"darkTask\", SchedTime( hour=12, minute=0), holiday, \"Dark\", group=\"Holiday\"),\n Task(\"valentinesTask\", SchedTime( month=Feb, day=[13,14], hour=12, minute=0), holiday, \"Valentines day\", group=\"Holiday\"),\n Task(\"presidentsTask\", SchedTime(year=2022, month=Feb, day=21, hour=12, minute=0), holiday, \"Presidents day\", group=\"Holiday\"),\n Task(\"mardigrasTask\", SchedTime(year=2022, month=Mar, day=1, hour=12, minute=0), holiday, \"Mardi gras\", group=\"Holiday\"),\n Task(\"stpatricksTask\", SchedTime( month=Mar, day=17, hour=12, minute=0), holiday, \"St Patricks day\", group=\"Holiday\"),\n Task(\"easterTask\", SchedTime(year=2022, month=Apr, day=17, hour=12, minute=0), holiday, \"Easter\", group=\"Holiday\"),\n Task(\"maydayTask\", SchedTime( month=May, day=1, hour=12, minute=0), holiday, \"Easter\", group=\"Holiday\"),\n Task(\"cincodemayoTask\", SchedTime( month=May, day=5, hour=12, minute=0), holiday, \"Cinco de Mayo\", group=\"Holiday\"),\n Task(\"memorialdayTask\", SchedTime(year=2022, month=May, day=[29,30], hour=12, minute=0), holiday, \"Memorial day\", group=\"Holiday\"),\n Task(\"swedenTask\", SchedTime( month=Jun, day=6, hour=12, minute=0), holiday, \"Sweden day\", group=\"Holiday\"),\n Task(\"flagTask\", SchedTime( month=Jun, day=14, hour=12, minute=0), holiday, \"Flag day\", group=\"Holiday\"),\n Task(\"juneteenthTask\", SchedTime( month=Jun, day=19, hour=12, minute=0), holiday, \"Juneteenth\", group=\"Holiday\"),\n Task(\"canadaTask\", SchedTime( month=Jul, day=1, hour=12, minute=0), holiday, \"Canada day\", group=\"Holiday\"),\n Task(\"july3Task\", SchedTime( month=Jul, day=3, hour=12, minute=0), holiday, \"Presidents day\", group=\"Holiday\"),\n Task(\"july4Task\", SchedTime( month=Jul, day=4, hour=12, minute=0), holiday, \"4th of July\", group=\"Holiday\"),\n Task(\"bastilleTask\", SchedTime( month=Jul, day=14, hour=12, minute=0), holiday, \"Bastille day\", group=\"Holiday\"),\n Task(\"fallTask\", SchedTime( month=Sep, day=21, hour=12, minute=0), holiday, \"Fall\", group=\"Holiday\"),\n Task(\"octoberTask\", SchedTime(year=2022, month=Oct, hour=12, minute=0), holiday, \"October\", group=\"Holiday\"),\n Task(\"halloweenTask\", SchedTime(year=2022, month=Oct, day=[29,30], hour=12, minute=0), holiday, \"Halloween\", group=\"Holiday\"),\n Task(\"spookyTask\", SchedTime( month=Oct, day=31, hour=12, minute=0), holiday, \"Spooky\", group=\"Holiday\"),\n Task(\"veteransTask\", SchedTime( month=Nov, day=11, hour=12, minute=0), holiday, \"Presidents day\", group=\"Holiday\"),\n Task(\"thanksgivingTask\", SchedTime(year=2022, month=Nov, day=24, hour=12, minute=0), holiday, \"Fall\", group=\"Holiday\"),\n Task(\"christmasTaskNov\", SchedTime( month=Nov, day=[26,27,28,29,30], hour=12, minute=0), holiday, \"Christmas\", group=\"Holiday\"),\n Task(\"hanukkahTask\", SchedTime(year=2022, month=Dec, day=18, hour=12, minute=0), holiday, \"Hanukkah\", group=\"Holiday\"),\n Task(\"christmasTask\", SchedTime( month=Dec, hour=12, minute=0), holiday, \"Christmas color\", group=\"Holiday\"),\n Task(\"christmasSparkleTask\", 
SchedTime( month=Dec, day=[24,25], hour=12, minute=0), holiday, \"Christmas sparkle\", group=\"Holiday\"),\n Task(\"newYearsEveTask\", SchedTime( month=Dec, day=31, hour=12, minute=0), holiday, \"New years eve\", group=\"Holiday\"),\n ]\n\n # Schedule\n schedule = Schedule(\"schedule\", tasks=holidayTasks)\n\n # Resources\n resources = Collection(\"resources\", resources=[holidayLights, holiday]+holidayTasks, event=stateChangeEvent)\n restServer = RestServer(\"holiday\", resources, event=stateChangeEvent, label=\"Holiday lights\")\n\n # Start interfaces\n configData.start()\n neopixelInterface.start()\n schedule.start()\n restServer.start()\n","sub_path":"holidayApp.py","file_name":"holidayApp.py","file_ext":"py","file_size_in_byte":19975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"29815085","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Mar 29 13:48:47 2020\r\n\r\n@author: tvo\r\n\r\n\r\nTesting with Parallel comptuting \r\n\"\"\"\r\ndef reduce_mem_usage(df, verbose=True):\r\n '''\r\n Function to reduce memory usage of the dataframe,\r\n by reducing type of data\r\n \r\n Example:\r\n out_value_modi = reduce_mem_usage(out_value_modi)\r\n '''\r\n import numpy as np\r\n \r\n \r\n numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']\r\n start_mem = df.memory_usage().sum() / 1024**2\r\n for col in df.columns:\r\n col_type = df[col].dtypes\r\n if col_type in numerics:\r\n c_min = df[col].min()\r\n c_max = df[col].max()\r\n if str(col_type)[:3] == 'int':\r\n if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:\r\n df[col] = df[col].astype(np.int8)\r\n elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:\r\n df[col] = df[col].astype(np.int16)\r\n elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:\r\n df[col] = df[col].astype(np.int32)\r\n elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:\r\n df[col] = df[col].astype(np.int64)\r\n else:\r\n if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:\r\n df[col] = df[col].astype(np.float16)\r\n elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:\r\n df[col] = df[col].astype(np.float32)\r\n else:\r\n df[col] = df[col].astype(np.float64)\r\n\r\n end_mem = df.memory_usage().sum() / 1024**2\r\n #print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))\r\n #print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))\r\n\r\n return df\r\n\r\ndef cal_linear_alg_lstsq(df_landcover, df_lst, df_emiswb, df_emis_repre, row, col, kernel_l,moving_pixel=4):\r\n '''\r\n This function :\r\n - convert to fraction map for each land cover \r\n There will be 8 matrix corresponsing to 8 land cover class \r\n - calculate using linear algrabra \r\n \r\n Input:\r\n df_landcover: pandas Dataframe holds fraction values of each land cover class\r\n df_lst: pandas Dataframe holds ECOSTRESS LST values per each pixel\r\n df_emiswb: pandas Dataframe holds ECOSTRESS EmisWB values per each pixel\r\n df_emis_repre: the representative Emis values for each land cover class\r\n row: number of row of the whole image\r\n col: number of col of the whole image\r\n kerner_list: list of kernel window size (e.g. 
[10,20,30,40])\r\n bounds: upper and lower bound values for constaint optimization, optional, default is None\r\n which means no bounds\r\n type: measured using \"radiance\" or \"emissivity\" functions\r\n radiance: define whether calculating using \"radiance\" or not, by default is False which means\r\n calculating using \"emissivity\" function\r\n \r\n Output:\r\n out_value: pandas Dataframe contains the output of Linear Algebra for each pixel\r\n columns=['value','nrow','ncol','indep_value','out_value']:\r\n 'value': list of fraction of land cover properties, extracting from coeff_matrix_df\r\n 'nrow','ncol': indexes of row and col\r\n 'indep_value': ECOSTRESS LST and Emissivity values, extracting from indep_matrix_df\r\n 'out_value': list of temperture of land cover properties, as results of Linear Algebra \r\n \r\n \r\n Example:\r\n # Kernel Test:\r\n row = 47\r\n col = 54\r\n \r\n # Staten Island\r\n row = 303\r\n col = 243\r\n \r\n # Staten Island (490 m resolution)\r\n row = 43\r\n col = 34\r\n \r\n \r\n # Testing with different kernel sizes\r\n\r\n # Emissivity function\r\n coeff_df_matrix_list, indep_df_matrix_list = cal_linear_alg_lstsq(df_landcover_concat, df_lst, df_emiswb, df_emis_repre, \r\n row, col, kernel_l=[25], moving_pixel = 5)\r\n \r\n \r\n\r\n \r\n '''\r\n import numpy as np\r\n import matplotlib.pyplot as plt\r\n import numpy.linalg as la\r\n import pandas as pd\r\n from scipy import linalg\r\n from scipy.optimize import lsq_linear\r\n import time\r\n \r\n # Starting time\r\n start_time = time.time()\r\n\r\n \r\n\r\n \r\n # Create an empy 3D array-like e.g. numpy array or list which holds\r\n # 8 fraction map\r\n fraction_map = np.empty((8,row,col),dtype=np.float64) \r\n indices_map = np.empty((8,row,col),dtype=np.float64) \r\n \r\n \r\n # Groupping the dataframe by Class, thus results in 8 classes\r\n df_grp = df_landcover.groupby('Class')\r\n \r\n # Looping over 8 land cover class\r\n for i in range(9): \r\n # Pass the i=0 as the class code starts from 1\r\n if i == 0:\r\n pass\r\n else:\r\n fraction_map[i-1] = df_grp.get_group(i).fraction.values.reshape(row,col)\r\n indices_map[i-1] = df_grp.get_group(i).Value.values.reshape(row,col)\r\n \r\n \r\n \r\n\r\n\r\n \r\n # Reading df_lst contains LST value for each pixel and assign as independent value\r\n indepent_matrix = df_lst.MEAN.values.reshape(row,col)\r\n emis_matrix = df_emiswb.MEAN.values.reshape(row,col)\r\n \r\n\r\n \r\n '''\r\n Trying with a new approach\r\n from_dict function: 03.28.2020\r\n '''\r\n \r\n # New version: from_dict function: 03.28.2020\r\n # Create an empty pandas Dataframe with columns = 'value','nrow','ncol'\r\n # with the purpose of indexing each pixel with old row and column indexes\r\n \r\n coeff_matrix_df_dict = {}\r\n indep_matrix_df_dict = {}\r\n \r\n j = 0\r\n for nrow in range(row):\r\n for ncol in range(col): \r\n # Ingnoring NoData value\r\n #if fraction_map[:,nrow,ncol].mean() == -9999:\r\n if fraction_map[:,nrow,ncol].mean() == -9999 or indepent_matrix[nrow,ncol].mean() == 0 or emis_matrix[nrow,ncol].mean() == 0:\r\n pass\r\n else:\r\n \r\n for i in range(8):\r\n coeff_matrix_df_dict[j] = {'index':indices_map[:,nrow,ncol][i],\r\n 'class':i,\r\n 'value_fraction':fraction_map[:,nrow,ncol][i], # value of fraction fj\r\n 'nrow':nrow,\r\n 'ncol':ncol,\r\n 'indep_value':indepent_matrix[nrow,ncol],\r\n 'value_emis':list(df_emis_repre[\"Emis\"].values)[i],\r\n 'value_emis_sum':emis_matrix[nrow,ncol],\r\n 'out_value':np.nan,\r\n 'residual':np.nan}\r\n indep_matrix_df_dict[j] = 
{'index':indices_map[:,nrow,ncol][i],\r\n 'class':i,\r\n 'value_lst':indepent_matrix[nrow,ncol],\r\n 'value_emis_sum':emis_matrix[nrow,ncol],\r\n 'nrow':nrow,\r\n 'ncol':ncol}\r\n \r\n j = j + 1\r\n print(nrow,ncol)\r\n \r\n print('Fishiing wrapping the dataframe ..... ')\r\n \r\n\r\n \r\n coeff_matrix_df = pd.DataFrame.from_dict(coeff_matrix_df_dict,'index')\r\n indep_matrix_df = pd.DataFrame.from_dict(indep_matrix_df_dict,'index')\r\n \r\n print('Length of coeff_matrix_df '+str(len(coeff_matrix_df)))\r\n # New version: from_dict function\r\n \r\n \r\n coeff_df = []\r\n indep_df = []\r\n\r\n # Testing with jumping kernel windows, e.g. it is not neccessary to moving every 1 pixel but instead of moving every \r\n # 4 pixels. Doing so would speed up much time calculation especially when we consider the whole NYC domain. \r\n \r\n coeff_df_matrix_list = []\r\n indep_df_matrix_list = []\r\n \r\n start_time = time.time()\r\n\r\n nrow = -moving_pixel # Starting from nrow index -movingpixe, so it will make up with moving 4 pixels\r\n \r\n count = 0 # Set counter of kernel\r\n for kernel in kernel_l:\r\n while nrow < row:\r\n nrow = nrow + moving_pixel\r\n \r\n \r\n ncol = -moving_pixel # Starting from nrow index -movingpixel\r\n while ncol < col:\r\n ncol = ncol + moving_pixel\r\n \r\n \r\n # Applying linear algebra function for each kernel window:\r\n # Can consider parallel from this step for each kernel\r\n \r\n # Extracting coeff_matrix values for each kernel window and assign it to a new dataframe\r\n coeff_df = coeff_matrix_df.loc[(coeff_matrix_df['nrow'] >= nrow) & \r\n (coeff_matrix_df['nrow'] < nrow + kernel) & \r\n (coeff_matrix_df['ncol'] >= ncol) & \r\n (coeff_matrix_df['ncol'] < ncol + kernel)]\r\n \r\n # Extracting independent values for each kernel window and assign it to a new dataframe\r\n indep_df = indep_matrix_df.loc[(indep_matrix_df['nrow'] >= nrow) & \r\n (indep_matrix_df['nrow'] < nrow + kernel) & \r\n (indep_matrix_df['ncol'] >= ncol) & \r\n (indep_matrix_df['ncol'] < ncol + kernel)]\r\n \r\n \r\n # Ignoring kernel windows does not have the same size with kernel*kernel\r\n # It could happend when moving window close to the edge\r\n if len(coeff_df) < 9*8: # As we consider 8 elements of land cover class \r\n pass\r\n else:\r\n # Insert column of kernel index \r\n # coeff_df.insert(len(coeff_df.columns),column='count',value=count)\r\n \r\n # Append current coeff_df and indep_df \r\n coeff_df_matrix_list.append(reduce_mem_usage(coeff_df))\r\n indep_df_matrix_list.append(reduce_mem_usage(indep_df))\r\n \r\n count = count + 1\r\n print(count)\r\n \r\n print(time.time() - start_time)\r\n \r\n return coeff_df_matrix_list, indep_df_matrix_list\r\n\r\n\r\n\r\ndef least_sq(coeff_df, indep_df, radiance, bounds, _type):\r\n '''\r\n Function to apply Least-squared solutions for Linear Algebra equation.\r\n The func is to apply for each kernel \r\n \r\n '''\r\n import numpy as np\r\n import matplotlib.pyplot as plt\r\n import numpy.linalg as la\r\n import pandas as pd\r\n from scipy import linalg\r\n from scipy.optimize import lsq_linear\r\n import time\r\n \r\n # Starting time\r\n start_time = time.time()\r\n \r\n # Test .apply method: 03.28.2020\r\n if radiance is True:\r\n # Coefficient matrix values \r\n coeff_df_element = coeff_df.groupby('index')['value_fraction'].apply(lambda x: x.values).to_numpy()\r\n \r\n # Independent values\r\n indep_df_element = np.array(list(map(lambda x:pow(x,4),\r\n indep_df.groupby('index')[\"value_lst\"].apply(lambda x: x.values[0]).to_numpy())))\r\n 
\r\n else:\r\n coeff_df_grp = coeff_df.groupby('index')\r\n # LST^4:\r\n lst4 = np.array(list(map(lambda x:pow(x,4),\r\n indep_df.groupby('index')[\"value_lst\"].apply(lambda x: x.values[0]).to_numpy())))\r\n # emissivity:\r\n emis_sum = indep_df.groupby('index')[\"value_emis_sum\"].apply(lambda x: x.values[0]).to_numpy()\r\n \r\n # Independent values: Element-wise multiplication \r\n indep_df_element = [a * b for a, b in zip(lst4,emis_sum)]\r\n\r\n # Coefficiene matrix values: fraction i * emis i \r\n coeff_df_element = list(coeff_df_grp[\"value_fraction\"].apply(lambda x: x.values) * coeff_df_grp[\"value_emis\"].apply(lambda x: x.values))\r\n \r\n # Applying Least-square solutions with bounds or without bounds for Li\r\n # Linera Algebra equations:\r\n\r\n\r\n # Applying optimze function: Testing with Scipy package \r\n if bounds is not None:\r\n res = lsq_linear(coeff_df_element,np.array(indep_df_element).reshape(len(indep_df_element),)\r\n , bounds=bounds)\r\n \r\n else:\r\n res = lsq_linear(coeff_df_element,np.array(indep_df_element).reshape(len(indep_df_element),))\r\n \r\n\r\n \r\n # New Version: 03.28.2020\r\n # Adding values of x to column 'out_value':\r\n if radiance is True:\r\n # Solution: x = 4sqrt(x)\r\n\r\n coeff_df['out_value'] = np.array([res.x**(1/4)]*int(len(coeff_df)/8)).flatten()\r\n #coeff_df['residual'] = np.array([res.fun**(1/4)]*int(len(coeff_df)/8)).flatten()\r\n\r\n else:\r\n\r\n\r\n coeff_df['out_value'] = np.array([res.x**(1/4)]*int(len(coeff_df)/8)).flatten()\r\n \r\n coeff_df['residual'] = np.array([res.fun]*8).flatten()\r\n \r\n \r\n \r\n \r\n\r\n \r\n # Adding type colum such as radiance or temperature\r\n coeff_df.insert(len(coeff_df.columns),column='type',value=_type)\r\n\r\n \r\n \r\n #print('Processing time'+str(end_time))\r\n \r\n \r\n return coeff_df\r\n\r\n\r\n\r\n\r\ndef collect_result(result):\r\n global results\r\n \r\n results.append(result)\r\n\r\ndef split_df(df, max_rows):\r\n '''\r\n Splitting up the dataframe to multi parts with defined maximum rows\r\n '''\r\n max_rows = max_rows\r\n dataframes = []\r\n while len(df) > max_rows:\r\n top = df[:max_rows]\r\n dataframes.append(top)\r\n df = df[max_rows:]\r\n else:\r\n dataframes.append(df)\r\n \r\n return dataframes\r\n\r\ndef byHDF(dfs):\r\n '''\r\n Function to creat a 'store' to store the big data under .hdf file and \r\n call it when we need to use\r\n '''\r\n store=pd.HDFStore('df_all.h5')\r\n for df in dfs:\r\n store.append('df',df,data_columns=list('0123'))\r\n #del dfs\r\n df=store.select('df')\r\n store.close()\r\n os.remove('df_all.h5')\r\n return df\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n#def parallel():\r\n '''\r\n To apply this script, be sure we have:\r\n df_landcover_concat\r\n df_emiswb\r\n df_lst\r\n df_emis_repre\r\n \r\n \r\n '''\r\n \r\n \r\n from multiprocessing import Pool\r\n import multiprocessing as mp\r\n import pandas as pd\r\n import time\r\n import numpy as np\r\n import gc\r\n import os\r\n from linearalg_np_test_ver01 import dbf_to_df_date, emis_pure_pixel\r\n \r\n start_time = time.time()\r\n\r\n \r\n date_code = '2019-07-26T222034UTC/'\r\n #date_code = '2019-09-21T063150UTC/'\r\n # Linux: \r\n path = '/nas/rhome/tvo/py_code/aes509/data/whole_nyc_70m/'\r\n path_sub = '/nas/rhome/tvo/py_code/aes509/data/whole_nyc_70m/'+date_code # certain date \r\n\r\n \r\n # Step 0: Load required files\r\n df_landcover_concat = pd.read_pickle(path+'df_landcover_concat')\r\n df_emiswb = pd.read_pickle(path_sub+'df_emiswb')\r\n df_lst = pd.read_pickle(path_sub+'df_lst')\r\n 
df_emis_repre = pd.read_pickle(path_sub+'df_emis_repre')\r\n\r\n # Step 1: Splitting the data by kernel\r\n # Whole NYC\r\n row = 884\r\n col = 786\r\n \r\n # Set bounds\r\n bound_min = 295\r\n bound_max = 310\r\n \r\n\r\n \r\n kernel_list=[[10]]\r\n moving_pixel = [5]\r\n \r\n \r\n\r\n path = path_sub\r\n \r\n # Paralleling new version version\r\n pool = mp.Pool(mp.cpu_count())\r\n \r\n \r\n df_landcover_concat = [df_landcover_concat]*len(kernel_list)\r\n df_lst = [df_lst]*len(kernel_list)\r\n #df_emiswb = [df_emiswb]*3\r\n #df_emis_repre = [df_emis_repre]*3\r\n results = []\r\n results_total = []\r\n\r\n for i in range(len(kernel_list)):\r\n \r\n # Trying parallel in parallel \r\n pool.apply_async(cal_linear_alg_lstsq, \r\n args=(df_landcover_concat[i], # Converting dtype to float64 \r\n df_lst[i], # Converting dtype to float64 \r\n df_emiswb,\r\n df_emis_repre,\r\n row, \r\n col, \r\n kernel_list[i],\r\n moving_pixel[i]), \r\n callback=collect_result)\r\n \r\n \r\n # Close Pool and let all processes complete\r\n pool.close()\r\n pool.join()# postpones the execution of next line of code until all processes in the queue are done.\r\n \r\n # Fisnhing first paralelling:\r\n print('Fishning first para mission....')\r\n \r\n # Clean up memory\r\n gc.collect()\r\n print('Length of results '+str(len(results)))\r\n \r\n # Assign results varaible to new variable,as it will be defined empty inside next loop\r\n results_total = results\r\n \r\n # Delete the current results \r\n del results\r\n \r\n # Collect results: \r\n # Running paralleing for each case each kernel step by step\r\n for i in range(len(results_total)):\r\n print('Running for Kernel '+str(kernel_list[i])+' and Moving Step '+str(moving_pixel[i]))\r\n \r\n coeff_df_matrix_list = results_total[i][0]\r\n #print(coeff_df_matrix_list)\r\n print(len(coeff_df_matrix_list))\r\n indep_df_matrix_list = results_total[i][1]\r\n \r\n \r\n coeff_df_matrix_list = [reduce_mem_usage(coeff_df_matrix) for coeff_df_matrix in coeff_df_matrix_list]\r\n indep_df_matrix_list = [reduce_mem_usage(indep_df_matrix) for indep_df_matrix in indep_df_matrix_list]\r\n \r\n \r\n # Merge the coeef_df_matrix and indep_df_matrix element-wise\r\n data = [[coeff_df_matrix, indep_df_matrix] for coeff_df_matrix, indep_df_matrix in zip(coeff_df_matrix_list,indep_df_matrix_list)]\r\n \r\n n_kernel = len(coeff_df_matrix_list)\r\n # Delete coeff_df and indep_df\r\n del coeff_df_matrix_list\r\n del indep_df_matrix_list\r\n gc.collect()\r\n \r\n # Step2: Applying parallelizing\r\n pool = mp.Pool(mp.cpu_count())\r\n \r\n results = []\r\n print('Numer of kernel for processing '+str(n_kernel))\r\n \r\n print('Starting to parallelizing second time......')\r\n \r\n \r\n \r\n # Use loop to parallelize\r\n \r\n \r\n for coeff_df, indep_df in data:\r\n #print(coeff_df.astype(np.float64).info())\r\n \r\n pool.apply_async(least_sq, \r\n args=(coeff_df.astype(np.float64), # Converting dtype to float64 \r\n indep_df.astype(np.float64), # Converting dtype to float64 \r\n False, (bound_min**4,bound_max**4), 'Emis'), \r\n callback=collect_result)\r\n \r\n \r\n # Close Pool and let all processes complete\r\n pool.close()\r\n pool.join()# postpones the execution of next line of code until all processes in the queue are done.\r\n \r\n # Sort results \r\n # out_value_list = pd.concat(results)\r\n \r\n print('Finishing parallelizing in '+str(time.time() - start_time)+' seconds')\r\n \r\n \r\n # Splitting up the dataframe\r\n try:\r\n out_value_list_split = 
split_df(pd.concat(results),max_rows=1000000)\r\n \r\n for _,frame in enumerate(out_value_list_split):\r\n try:\r\n os.mkdir(path+'out_value_list_kernel_'+str(kernel_list[i][0])+'_moving_'+str(moving_pixel[i])+'/')\r\n except:\r\n pass\r\n \r\n frame.to_csv(path+'out_value_list_kernel_'+str(kernel_list[i][0])+'_moving_'+str(moving_pixel[i])+'/out_value_list_'+str(_) + '.csv', index=False)\r\n \r\n \r\n # In case encounter memory issue, split the results list to multi lists \r\n except:\r\n# out_value_list_split = []\r\n# for j in np.arange(0,len(results),10):\r\n# out_value_list = split_df(pd.concat(results[j:j+10]), max_rows=1000000)\r\n# out_value_list_split.append(out_value_list)\r\n \r\n \r\n \r\n for k in range(len(results)):\r\n try:\r\n os.mkdir(path+'out_value_list_kernel_'+str(kernel_list[i][0])+'_moving_'+\r\n str(moving_pixel[i])+'/')\r\n except:\r\n pass\r\n \r\n results[k].to_csv(path+'out_value_list_kernel_'+str(kernel_list[i][0])+'_moving_'+\r\n str(moving_pixel[i])+'/out_value_list_'+str(k)+ \r\n '.csv', \r\n index=False)\r\n \r\n # Delete the previous element to have more empty RAM memo\r\n \r\n gc.collect()\r\n \r\n\r\n \r\n \r\n\r\n\r\n\r\n\r\n ","sub_path":"linearalg_np_test_ver02_linux.py","file_name":"linearalg_np_test_ver02_linux.py","file_ext":"py","file_size_in_byte":21374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"107056238","text":"import json\nimport requests\nfrom flask import Flask, request\nfrom utils import config_map, secrets\nfrom utils import respond_with_json\n\napp = Flask(__name__)\n\nport = int(config_map['yelp-port'])\ndef yelp_routes(app, config_map, secrets):\n\n cache = {}\n\n @app.route('/yelp/_restaurants_search')\n def yelp_restaurant_info():\n assert 'name' in request.args and 'latitude' in request.args and 'longitude' in request.args\n js_result = search_restaurants(request.args.get('name'), request.args.get('latitude'), request.args.get('longitude'), \"1\")\n assert 'businesses' in js_result, \"Request result: \" + js_result\n if len(js_result['businesses']) <= 0:\n return json.dumps(\n {\"error\": \"Could not find: {}, lat: {}, lon: {}\".format(request.args.get('name'),\n request.args.get('latitude'),\n request.args.get('longitude'))}), 404\n else:\n id = js_result['businesses'][0]['id']\n result = get_restaurant(id)\n assert 'id' in result and 'name' in result, \"Invalid yelp business id: {}\".format(id)\n return respond_with_json(result)\n\n def request_from_yelp(endpoint, query_params):\n assert endpoint and endpoint[0] != '/'\n headers = {'Authorization': 'Bearer {}'.format(secrets['yelp-api-key']), 'accept': 'application/json'}\n url = \"{}{}?{}\".format(config_map['yelp-base-url'], endpoint, \"&\".join(query_params))\n response = requests.get(url, headers=headers)\n response.raise_for_status()\n return response\n\n def get_restaurant(id):\n key = id\n if key in cache:\n return cache[key]\n else:\n resp = request_from_yelp('businesses/'+id, query_params=[])\n cache[key] = json.loads(resp.content)\n return cache[key]\n\n def search_restaurants(name, lat, lon, limit):\n key = name, lat, lon, limit\n if key in cache:\n return cache[key]\n else:\n resp = request_from_yelp(endpoint='businesses/search', query_params=['term=restaurants {}'.format(name),\n 'longitude='+lon,\n 'latitude='+lat,\n 'limit='+limit])\n cache[key] = json.loads(resp.content)\n return cache[key]\n\n def get_reviews(restaurant):\n cache_id = restaurant['id']\n response = 
request_from_yelp('{}/{}/{}'.format('businesses', restaurant['id'], 'reviews'), [])\n resource = json.loads(response.content)\n if cache_id not in cache:\n cache[cache_id] = resource\n return resource\n else:\n return cache[cache_id]\n\n\nyelp_routes(app, config_map, secrets)\n\napp.run(port=port, host='0.0.0.0', debug=True)","sub_path":"yelp_routes.py","file_name":"yelp_routes.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"65967079","text":"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Callable, Dict, Mapping, Optional, Sequence, Tuple, Type, Union\n\nimport torch\nfrom pytorch_lightning.utilities import rank_zero_warn\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.optim.lr_scheduler import _LRScheduler\nfrom torchmetrics import Accuracy, Metric\n\nfrom flash.core.data.data_source import DefaultDataKeys\nfrom flash.core.model import Task\nfrom flash.core.registry import FlashRegistry\nfrom flash.core.utilities.imports import _IMAGE_AVAILABLE\nfrom flash.core.utilities.isinstance import _isinstance\nfrom flash.image.classification.data import ImageClassificationPreprocess\n\nif _IMAGE_AVAILABLE:\n from flash.image.classification.backbones import IMAGE_CLASSIFIER_BACKBONES\nelse:\n IMAGE_CLASSIFIER_BACKBONES = FlashRegistry(\"backbones\")\n\n\nclass ImageEmbedder(Task):\n \"\"\"The ``ImageEmbedder`` is a :class:`~flash.Task` for obtaining feature vectors (embeddings) from images. For\n more details, see :ref:`image_embedder`.\n\n Args:\n embedding_dim: Dimension of the embedded vector. ``None`` uses the default from the backbone.\n backbone: A model to use to extract image features, defaults to ``\"swav-imagenet\"``.\n pretrained: Use a pretrained backbone, defaults to ``True``.\n loss_fn: Loss function for training and finetuning, defaults to :func:`torch.nn.functional.cross_entropy`\n optimizer: Optimizer to use for training and finetuning, defaults to :class:`torch.optim.SGD`.\n optimizer_kwargs: Additional kwargs to use when creating the optimizer (if not passed as an instance).\n scheduler: The scheduler or scheduler class to use.\n scheduler_kwargs: Additional kwargs to use when creating the scheduler (if not passed as an instance).\n metrics: Metrics to compute for training and evaluation. Can either be an metric from the `torchmetrics`\n package, a custom metric inherenting from `torchmetrics.Metric`, a callable function or a list/dict\n containing a combination of the aforementioned. In all cases, each metric needs to have the signature\n `metric(preds,target)` and return a single scalar tensor. 
Defaults to :class:`torchmetrics.Accuracy`.\n learning_rate: Learning rate to use for training, defaults to ``1e-3``.\n pooling_fn: Function used to pool image to generate embeddings, defaults to :func:`torch.max`.\n \"\"\"\n\n backbones: FlashRegistry = IMAGE_CLASSIFIER_BACKBONES\n\n required_extras: str = \"image\"\n\n def __init__(\n self,\n embedding_dim: Optional[int] = None,\n backbone: str = \"resnet101\",\n pretrained: bool = True,\n loss_fn: Callable = F.cross_entropy,\n optimizer: Type[torch.optim.Optimizer] = torch.optim.SGD,\n optimizer_kwargs: Optional[Dict[str, Any]] = None,\n scheduler: Optional[Union[Type[_LRScheduler], str, _LRScheduler]] = None,\n scheduler_kwargs: Optional[Dict[str, Any]] = None,\n metrics: Union[Metric, Callable, Mapping, Sequence, None] = (Accuracy()),\n learning_rate: float = 1e-3,\n pooling_fn: Callable = torch.max,\n ):\n super().__init__(\n model=None,\n loss_fn=loss_fn,\n optimizer=optimizer,\n optimizer_kwargs=optimizer_kwargs,\n scheduler=scheduler,\n scheduler_kwargs=scheduler_kwargs,\n metrics=metrics,\n learning_rate=learning_rate,\n preprocess=ImageClassificationPreprocess(),\n )\n\n self.save_hyperparameters()\n self.backbone_name = backbone\n self.embedding_dim = embedding_dim\n assert pooling_fn in [torch.mean, torch.max]\n self.pooling_fn = pooling_fn\n\n self.backbone, num_features = self.backbones.get(backbone)(pretrained=pretrained)\n\n if embedding_dim is None:\n self.head = nn.Identity()\n else:\n self.head = nn.Sequential(\n nn.Flatten(),\n nn.Linear(num_features, embedding_dim),\n )\n rank_zero_warn(\"Adding linear layer on top of backbone. Remember to finetune first before using!\")\n\n def apply_pool(self, x):\n x = self.pooling_fn(x, dim=-1)\n if _isinstance(x, Tuple[torch.Tensor, torch.Tensor]):\n x = x[0]\n x = self.pooling_fn(x, dim=-1)\n if _isinstance(x, Tuple[torch.Tensor, torch.Tensor]):\n x = x[0]\n return x\n\n def forward(self, x) -> torch.Tensor:\n x = self.backbone(x)\n\n # bolts ssl models return lists\n if isinstance(x, tuple):\n x = x[-1]\n\n if x.dim() == 4 and not self.embedding_dim:\n x = self.apply_pool(x)\n\n x = self.head(x)\n return x\n\n def training_step(self, batch: Any, batch_idx: int) -> Any:\n batch = (batch[DefaultDataKeys.INPUT], batch[DefaultDataKeys.TARGET])\n return super().training_step(batch, batch_idx)\n\n def validation_step(self, batch: Any, batch_idx: int) -> Any:\n batch = (batch[DefaultDataKeys.INPUT], batch[DefaultDataKeys.TARGET])\n return super().validation_step(batch, batch_idx)\n\n def test_step(self, batch: Any, batch_idx: int) -> Any:\n batch = (batch[DefaultDataKeys.INPUT], batch[DefaultDataKeys.TARGET])\n return super().test_step(batch, batch_idx)\n\n def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any:\n batch = batch[DefaultDataKeys.INPUT]\n return super().predict_step(batch, batch_idx, dataloader_idx=dataloader_idx)\n","sub_path":"flash/image/embedding/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"421800873","text":"# Lê nome e peso de várias pessoas e guardar em uma lista\n\npessoas = list()\ndado = []\ncont = 0\nmaior = menor = 0\nwhile True:\n dado.append(str(input(\"Digite o nome: \").title()))\n dado.append(int(input(\"Peso: [Kg] \")))\n if len(pessoas) == 0:\n maior = menor = dado[1]\n else:\n if dado[1] > maior:\n maior = dado[1]\n if pessoas[cont][1] < menor:\n menor = dado[1]\n pessoas.append(dado[:])\n 
dado.clear()\n    conti = str(input(\"Quer continuar? [S/N]: \")) \n    if conti in \"Nn\":\n        break\nprint(\"~\" * 45)\nprint(f\"Foram cadastradas {len(pessoas)} pessoas\")\nprint(f\"O maior peso foi {maior}KG de:\")\nfor p in pessoas:\n    if p[1] == maior:\n        print(f\"{p[0]}\", end=\" \")\nprint(f\"\\nO menor peso foi {menor}KG de: \")\nfor p in pessoas:\n    if p[1] == menor:\n        print(f\"{p[0]}\", end=\" \")\n    ","sub_path":"Mundo 3/Exercícios/ex_084.py","file_name":"ex_084.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"150942149","text":"from reformat_gherkin.options import (\n    AlignmentMode,\n    NewlineMode,\n    Options,\n    TagLineMode,\n    WriteBackMode,\n)\n\n\ndef make_options(\n    *, step_keyword_alignment=AlignmentMode.NONE, tag_line_mode=TagLineMode.MULTILINE\n):\n    return Options(\n        write_back=WriteBackMode.CHECK,\n        step_keyword_alignment=step_keyword_alignment,\n        newline=NewlineMode.KEEP,\n        tag_line_mode=tag_line_mode,\n        fast=False,\n    )\n\n\nOPTIONS = [\n    make_options(step_keyword_alignment=alignment_mode)\n    for alignment_mode in AlignmentMode\n]\n\nFILENAME_OPTION_MAP = {\n    \"expected_default\": make_options(step_keyword_alignment=AlignmentMode.NONE),\n    \"expected_left_aligned\": make_options(step_keyword_alignment=AlignmentMode.LEFT),\n    \"expected_right_aligned\": make_options(step_keyword_alignment=AlignmentMode.RIGHT),\n    \"expected_default_line_tags\": make_options(tag_line_mode=TagLineMode.SINGLELINE),\n    \"expected_singleline_line_tags\": make_options(tag_line_mode=TagLineMode.SINGLELINE),\n    \"expected_multiline_line_tags\": make_options(tag_line_mode=TagLineMode.MULTILINE),\n    \"expected_custom_line_tags\": make_options(tag_line_mode=TagLineMode.CUSTOM),\n}\n\n\ndef get_content(dir_name):\n    with open(f\"tests/data/valid/{dir_name}/input.feature\") as f:\n        return f.read()\n\n\ndef dump_to_stderr(*output: str) -> str:\n    return \"\\n\" + \"\\n\".join(output) + \"\\n\"\n","sub_path":"tests/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"395579990","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom BudejieItem.items import BudejieItem\n\nclass BudejieSpider(scrapy.Spider):\n    name = 'budejie'\n    allowed_domains = ['budejie.com/text']\n    start_urls = ['http://budejie.com/text/']\n\n    def parse(self, response):\n        lies = response.css('div.j-r-list >ul >li')\n        for li in lies:\n            username = li.css('a.u-user-name::text').extract_first()\n            content = '\\n'.join(li.css('div.j-r-list-c-desc a::text').extract())\n            yield BudejieItem(username=username, content=content)\n        next_page = response.css('div.j-page a.pagenxt::attr(href)').extract_first()\n        if next_page is not None and int(next_page)<5:\n            next_page = response.urljoin(next_page)\n            print(next_page)\n            yield scrapy.Request(next_page,dont_filter=\"true\")\n","sub_path":"BudejieItem/BudejieItem/spiders/budejie.py","file_name":"budejie.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"210672675","text":"for case in range(1, int(input()) + 1):\n    board = []\n    max_len = 0\n    for i in range(5):\n        word_list = [i for i in input()]\n        if len(word_list) > max_len:\n            max_len = len(word_list)\n        board.append(word_list)\n    for i in range(5):\n        board[i] += ['#'] * (max_len - len(board[i]))\n    result = ''\n    for i in range(max_len):\n        for j in range(5):\n            if board[j][i] == '#':\n                continue\n            result += board[j][i]\n    print(f'#{case} {result}')\n    ","sub_path":"Python/SWEA/D3/5356_의석이의 세로로 말해요.py","file_name":"5356_의석이의 세로로 말해요.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"100577050","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\nc_0 = tf.constant(0, name=\"c\")  # => operation named \"c\"\n\n# Already-used names will be \"uniquified\".\nc_1 = tf.constant(2, name=\"c\")  # => operation named \"c_1\"\n\n# Name scopes add a prefix to all operations created in the same context.\nwith tf.name_scope(\"outer\"):\n  c_2 = tf.constant(2, name=\"c\")  # => operation named \"outer/c\"\n\n  # Name scopes nest like paths in a hierarchical file system.\n  with tf.name_scope(\"inner\"):\n    c_3 = tf.constant(3, name=\"c\")  # => operation named \"outer/inner/c\"\n\n  # Exiting a name scope context will return to the previous prefix.\n  c_4 = tf.constant(4, name=\"c\")  # => operation named \"outer/c_1\"\n\n  # Already-used name scopes will be \"uniquified\".\n  with tf.name_scope(\"inner\"):\n    c_5 = tf.constant(5, name=\"c\")  # => operation named \"outer/inner_1/c\"\n\n\nx = tf.constant([[37.0, -23.0], [1.0, 4.0]])\nw = tf.Variable(tf.random_uniform([2, 2]))\ny = tf.matmul(x, w)\noutput = tf.nn.softmax(y)\ninit_op = w.initializer\n\nwith tf.Session() as sess:\n  # Run the initializer on `w`.\n  sess.run(init_op)\n\n  # Evaluate `output`. `sess.run(output)` will return a NumPy array containing\n  # the result of the computation.\n  print(sess.run(output))\n\n  # Evaluate `y` and `output`. Note that `y` will only be computed once, and its\n  # result used both to return `y_val` and as an input to the `tf.nn.softmax()`\n  # op. Both `y_val` and `output_val` will be NumPy arrays.\n  y_val, output_val = sess.run([y, output])\n","sub_path":"docs/MachineLearning/TensorFlow/office/TensorFlow指南/3_低阶API/Graph.py","file_name":"Graph.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"373092218","text":"finaliza = True\nlimitePessoas = 2\naNome = []\naSalario = []\ndef testeMenu(arg):\n    if arg == 0:\n        print(\"Finalizar.\")\n        finalizaPrograma()\n        global finaliza\n        finaliza = False\n    elif arg == 1:\n        print(\"Cadastrar.\")\n        cadastra()\n    elif arg == 2:\n        print(\"Lista.\")\n        lista()\n    elif arg == 3:\n        print(\"Mostra salário de uma pessoa.\")\n    elif arg == 4:\n        print(\"Mostra o nome das pessoas que ganham acima da média.\")\n    elif arg == 5:\n        print(\"Aplique percentual de aumento a todos.\")\n    elif arg == 6:\n        print(\"Aplique percentual de aumento somente para aqueles que ganham abaixo da média.\")\n    else: print(\"\\nValor incorreto.\")\n\ndef finalizaPrograma():\n    print(\"Entrou finaliza\")\n\ndef digitaSalario():\n    print(\"Entrou digitaSalario\")\n    try:\n        salario = float(input(\"\\nDigite o salário deste funcionário: R$ \"))\n    except:\n        print(\"Erro, tente novamente.\")\n        return digitaSalario()\n    return salario\n\ndef cadastra():\n    global aNome\n    global aSalario\n    global limitePessoas\n    if len(aNome) < limitePessoas:\n        try:\n            nome = str(input(\"\\nDigite um nome: \"))\n        except:\n            print(\"Erro, tente novamente.\")\n            return cadastra();\n        aNome += [nome]\n        aSalario += [digitaSalario()]\n\n    else: print(\"Lista de pessoas cheia!\")\n\n\ndef lista():\n    global aNome\n    print(aNome)\n    global aSalario\n    print(aSalario)\n\ndef escolhaMenu():\n    try:\n        valor = int(input(\"\\nDigite: \"))\n    except:\n        print(\"Erro.\")\n        return escolhaMenu();\n    return valor;\n\nwhile finaliza:\n    menu = \"\\nMenu\"\n    menu += \"\\n-------------\"\n    menu += \"\\n0- Finaliza\"\n    menu += \"\\n1- Cadastra\"\n    menu += \"\\n2- Lista\"\n    menu += \"\\n3- Mostra salario de uma pessoa\"\n    menu += \"\\n4- Mostra o nome das pessoas que ganham acima da media\"\n    menu += \"\\n5- Aplique um percentual de aumento a todos\"\n    menu += \"\\n6- Aplique um percentual de aumento somente\\npara aqueles que ganham abaixo da média\"\n    print(menu)\n    testeMenu(escolhaMenu())\n","sub_path":"exercicio1.py","file_name":"exercicio1.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"545272930","text":"import socket\nimport logging\nimport random\nimport time\nfrom pynput import keyboard\n\nprogram_running = True\n\n# Initialize the Logger\nlogging.basicConfig(filename = 'Log.log', level = logging.DEBUG, format = '%(asctime)s : %(levelname)s : Client : %(message)s')\n\n# sendMessage\ndef sendMessage(data, sock, serverAddress):\n\n    try:\n        sock.sendto(data.encode('utf-8'), serverAddress)\n\n        logging.debug(\"Message has been sent to Server {} : {}\".format(serverAddress, data))\n\n        data, serverAddress = sock.recvfrom(1024)\n        text = data.decode('utf-8')\n\n        if text != '-1':\n            # print received message\n            print(\"The message '{}' reached the server\".format(text))\n            logging.debug(\"The message '{}' reached {}\".format(text, serverAddress))\n\n    except:\n        logging.error(\"The message '{}' didn't reach {}\".format(data, serverAddress))\n        if data != '-1':\n            print(\"The message '{}' didn't reach the server!!\".format(data))\n        else:\n            print(\"The server is still working!!\")\n            logging.critical(\"The 
server is still working!!\")\n# randomNum\ndef randomNum():\n return str(random.randint(0,1000000))\n\n # getSock\ndef getSock():\n # Initialize socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.settimeout(5)\n\n return sock\n\n# getAddress\ndef getAddress():\n IP = '127.0.0.1'\n port = 5002\n\n return (IP, port)\n\n# Exit\ndef exit(sock, serverAddress):\n sendMessage('-1', sock, serverAddress)\n print(\"\\nGood Bye :)\")\n logging.debug(\"Closing Client...\")\n quit()\n\n# On Press\ndef on_press(key):\n global program_running\n logging.debug(\"User Pressed '{}'\".format(key))\n if key == keyboard.Key.esc or key == keyboard.Key.space:\n program_running = False\n return False\n\n# Main\n\nsock = getSock()\nserverAddress = getAddress()\n\nprint(\"Welcome to UDP Client/Server App\\nPress [Esc] / [Space] for Exit\")\ntime.sleep(2)\n\nwith keyboard.Listener(on_press=on_press) as listener:\n while program_running == True:\n data = randomNum()\n sendMessage(data, sock, serverAddress)\n time.sleep(2)\n listener.join()\n\n exit(sock, serverAddress)\n\n\n\n\n\n","sub_path":"UDP_Client.py","file_name":"UDP_Client.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"584725598","text":"def solution(name):\n length=len(name)\n sub = []\n for i in name:\n if ord(i) >= ord(\"N\"):\n sub.append(ord(\"Z\") + 1 - ord(i))\n else:\n sub.append(ord(i) - ord(\"A\"))\n\n print(sub)\n answer=0\n position=0\n\n while True:\n\n answer+=sub[position]\n sub[position]=0\n if sum(sub)==0:\n break\n\n #left\n left_count=0\n left=position\n while True:\n left-=1\n left_count+=1\n if sub[left]!=0:\n break\n\n #right\n right=position\n right_count =0\n while True:\n right+=1\n right_count+=1\n if right>=length:\n right-=1\n right_count-=1\n break\n\n if sub[right]!=0:\n break\n\n if right_count<=left_count:\n answer+=right_count\n position=right\n else:\n answer+=left_count\n position=left\n\n return answer\n\nprint(solution(\"BBBAAAB\"))","sub_path":"Programmers_level2/42860_조이스틱/안홍조_조이스틱.py","file_name":"안홍조_조이스틱.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"282442888","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @date 161228 - Implemented\n\"\"\"\nViews\n\"\"\"\nfrom aiohttp import web\n\n\nasync def get_worker(request):\n \"\"\"\n Test mongodb GET.\n \"\"\"\n db = request.app['db']\n cursor = db.worker.find({})\n if not cursor:\n return web.HTTPNotFound(text='No page named')\n\n docs = await cursor.to_list(None)\n resp = []\n for d in docs:\n resp += [{\n 'worker_id': d['workerID'],\n 'name': d['name'],\n 'team': d['team']\n }]\n\n return web.json_response(resp)\n","sub_path":"website/box/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"76011111","text":"\"\"\"Functions for moving money into, out of, or between wallets.\n\"\"\"\nfrom __future__ import division, print_function, unicode_literals\n\nfrom decimal import Decimal\n\nfrom mangopay.exceptions import APIError\nfrom mangopay.resources import (\n BankAccount, BankWirePayIn, BankWirePayOut, DirectPayIn, SettlementTransfer,\n Transaction, Transfer, Wallet,\n)\nfrom mangopay.utils import Money\nfrom pando.utils import typecheck\n\nfrom liberapay.billing.fees import skim_bank_wire, skim_credit, 
upcharge_card\nfrom liberapay.constants import FEE_PAYOUT_WARN, QUARANTINE\nfrom liberapay.exceptions import (\n NegativeBalance, NotEnoughWithdrawableMoney, PaydayIsRunning,\n FeeExceedsAmount, TransactionFeeTooHigh, TransferError,\n AccountSuspended, Redirect,\n)\nfrom liberapay.models import check_db\nfrom liberapay.models.participant import Participant\nfrom liberapay.models.exchange_route import ExchangeRoute\nfrom liberapay.utils import group_by, NS\n\n\nMoney.__eq__ = lambda a, b: isinstance(b, Money) and a.__dict__ == b.__dict__\nMoney.__repr__ = lambda m: '' % m.__dict__\n\n\nQUARANTINE = '%s days' % QUARANTINE.days\n\n\ndef repr_error(o):\n r = o.ResultCode\n if r == '000000':\n return\n msg = getattr(o, 'ResultMessage', None)\n if msg:\n r += ': ' + msg\n return r\n\n\ndef repr_exception(e):\n if isinstance(e, APIError):\n return '%s %s' % (e.code, e.args[0])\n else:\n return repr(e)\n\n\ndef create_wallet(db, participant):\n w = Wallet()\n w.Owners = [participant.mangopay_user_id]\n w.Description = str(participant.id)\n w.Currency = 'EUR'\n w.save()\n db.run(\"\"\"\n UPDATE participants\n SET mangopay_wallet_id = %s\n WHERE id = %s\n \"\"\", (w.Id, participant.id))\n participant.set_attributes(mangopay_wallet_id=w.Id)\n return w.Id\n\n\ndef test_hook():\n return\n\n\ndef payout(db, participant, amount, ignore_high_fee=False):\n assert amount > 0\n\n if participant.is_suspended:\n raise AccountSuspended()\n\n payday = db.one(\"SELECT * FROM paydays WHERE ts_start > ts_end\")\n if payday:\n raise PaydayIsRunning\n\n route = ExchangeRoute.from_network(participant, 'mango-ba')\n assert route\n ba = BankAccount.get(route.address, user_id=participant.mangopay_user_id)\n\n # Do final calculations\n credit_amount, fee, vat = skim_credit(amount, ba)\n if credit_amount <= 0 and fee > 0:\n raise FeeExceedsAmount\n fee_percent = fee / amount\n if fee_percent > FEE_PAYOUT_WARN and not ignore_high_fee:\n raise TransactionFeeTooHigh(fee_percent, fee, amount)\n\n # Try to dance with MangoPay\n e_id = record_exchange(db, route, -credit_amount, fee, vat, participant, 'pre')\n payout = BankWirePayOut()\n payout.AuthorId = participant.mangopay_user_id\n payout.DebitedFunds = Money(int(amount * 100), 'EUR')\n payout.DebitedWalletId = participant.mangopay_wallet_id\n payout.Fees = Money(int(fee * 100), 'EUR')\n payout.BankAccountId = route.address\n payout.BankWireRef = str(e_id)\n payout.Tag = str(e_id)\n try:\n test_hook()\n payout.save()\n return record_exchange_result(db, e_id, payout.Status.lower(), repr_error(payout), participant)\n except Exception as e:\n error = repr_exception(e)\n return record_exchange_result(db, e_id, 'failed', error, participant)\n\n\ndef charge(db, participant, amount, return_url):\n \"\"\"Charge the participant's credit card.\n\n Amount should be the nominal amount. 
We'll compute fees below this function\n and add it to amount to end up with charge_amount.\n\n \"\"\"\n typecheck(amount, Decimal)\n\n route = ExchangeRoute.from_network(participant, 'mango-cc')\n assert route\n\n charge_amount, fee, vat = upcharge_card(amount)\n amount = charge_amount - fee\n\n e_id = record_exchange(db, route, amount, fee, vat, participant, 'pre')\n payin = DirectPayIn()\n payin.AuthorId = participant.mangopay_user_id\n if not participant.mangopay_wallet_id:\n create_wallet(db, participant)\n payin.CreditedWalletId = participant.mangopay_wallet_id\n payin.DebitedFunds = Money(int(charge_amount * 100), 'EUR')\n payin.CardId = route.address\n payin.SecureModeReturnURL = return_url\n payin.Fees = Money(int(fee * 100), 'EUR')\n payin.Tag = str(e_id)\n try:\n test_hook()\n payin.save()\n except Exception as e:\n error = repr_exception(e)\n return record_exchange_result(db, e_id, 'failed', error, participant)\n\n if payin.SecureModeRedirectURL:\n raise Redirect(payin.SecureModeRedirectURL)\n\n return record_exchange_result(db, e_id, payin.Status.lower(), repr_error(payin), participant)\n\n\ndef payin_bank_wire(db, participant, debit_amount):\n \"\"\"Prepare to receive a bank wire payin.\n\n The amount should be how much the user intends to send, not how much will\n arrive in the wallet.\n \"\"\"\n\n route = ExchangeRoute.from_network(participant, 'mango-bw')\n if not route:\n route = ExchangeRoute.insert(participant, 'mango-bw', 'x')\n\n amount, fee, vat = skim_bank_wire(debit_amount)\n\n e_id = record_exchange(db, route, amount, fee, vat, participant, 'pre')\n payin = BankWirePayIn()\n payin.AuthorId = participant.mangopay_user_id\n if not participant.mangopay_wallet_id:\n create_wallet(db, participant)\n payin.CreditedWalletId = participant.mangopay_wallet_id\n payin.DeclaredDebitedFunds = Money(int(debit_amount * 100), 'EUR')\n payin.DeclaredFees = Money(int(fee * 100), 'EUR')\n payin.Tag = str(e_id)\n try:\n test_hook()\n payin.save()\n except Exception as e:\n error = repr_exception(e)\n return None, record_exchange_result(db, e_id, 'failed', error, participant)\n\n e = record_exchange_result(db, e_id, payin.Status.lower(), repr_error(payin), participant)\n return payin, e\n\n\ndef record_payout_refund(db, payout_refund):\n orig_payout = BankWirePayOut.get(payout_refund.InitialTransactionId)\n e_origin = db.one(\"SELECT * FROM exchanges WHERE id = %s\" % (orig_payout.Tag,))\n e_refund_id = db.one(\"SELECT id FROM exchanges WHERE refund_ref = %s\", (e_origin.id,))\n if e_refund_id:\n # Already recorded\n return e_refund_id\n amount, fee, vat = -e_origin.amount, -e_origin.fee, -e_origin.vat\n assert payout_refund.DebitedFunds == Money(int(amount * 100), 'EUR')\n assert payout_refund.Fees == Money(int(fee * 100), 'EUR')\n route = ExchangeRoute.from_id(e_origin.route)\n participant = Participant.from_id(e_origin.participant)\n return db.one(\"\"\"\n INSERT INTO exchanges\n (amount, fee, vat, participant, status, route, note, refund_ref)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\n RETURNING id\n \"\"\", (amount, fee, vat, participant.id, 'created', route.id, None, e_origin.id))\n\n\ndef record_exchange(db, route, amount, fee, vat, participant, status, error=None):\n \"\"\"Given a Bunch of Stuff, return an int (exchange_id).\n\n Records in the exchanges table have these characteristics:\n\n amount It's negative for credits (representing an outflow from\n Liberapay to you) and positive for charges.\n The sign is how we differentiate the two in, e.g., the\n history page.\n\n fee 
The payment processor's fee. It's always positive.\n\n vat The amount of VAT included in the fee. Always positive.\n\n \"\"\"\n if participant.is_suspended:\n raise AccountSuspended()\n\n with db.get_cursor() as cursor:\n\n e = cursor.one(\"\"\"\n INSERT INTO exchanges\n (amount, fee, vat, participant, status, route, note)\n VALUES (%s, %s, %s, %s, %s, %s, %s)\n RETURNING *\n \"\"\", (amount, fee, vat, participant.id, status, route.id, error))\n\n if status == 'failed':\n propagate_exchange(cursor, participant, e, route, error, 0)\n elif amount < 0:\n amount -= fee\n propagate_exchange(cursor, participant, e, route, '', amount)\n\n return e.id\n\n\ndef record_exchange_result(db, exchange_id, status, error, participant):\n \"\"\"Updates the status of an exchange.\n \"\"\"\n with db.get_cursor() as cursor:\n e = cursor.one(\"\"\"\n UPDATE exchanges e\n SET status=%(status)s\n , note=%(error)s\n WHERE id=%(exchange_id)s\n AND status <> %(status)s\n RETURNING id, amount, fee, vat, participant, recorder, note, status\n , timestamp, refund_ref\n , ( SELECT r.*::exchange_routes\n FROM exchange_routes r\n WHERE r.id = e.route\n ) AS route\n \"\"\", locals())\n if not e:\n return\n assert participant.id == e.participant\n assert isinstance(e.route, ExchangeRoute)\n\n amount = e.amount\n if amount < 0:\n amount = -amount + max(e.fee, 0) if status == 'failed' else 0\n else:\n amount = amount - min(e.fee, 0) if status == 'succeeded' else 0\n propagate_exchange(cursor, participant, e, e.route, error, amount)\n\n return e\n\n\ndef propagate_exchange(cursor, participant, exchange, route, error, amount):\n \"\"\"Propagates an exchange's result to the participant's balance and the\n route's status.\n \"\"\"\n route.update_error(error or '')\n\n new_balance = cursor.one(\"\"\"\n UPDATE participants\n SET balance=(balance + %s)\n WHERE id=%s\n RETURNING balance\n \"\"\", (amount, participant.id))\n\n if amount < 0 and new_balance < 0:\n raise NegativeBalance\n\n if amount < 0:\n bundles = cursor.all(\"\"\"\n LOCK TABLE cash_bundles IN EXCLUSIVE MODE;\n SELECT b.*\n FROM cash_bundles b\n JOIN exchanges e ON e.id = b.origin\n WHERE b.owner = %s\n AND b.ts < now() - INTERVAL %s\n AND b.disputed IS NOT TRUE\n AND b.locked_for IS NULL\n ORDER BY b.owner = e.participant DESC, b.ts\n \"\"\", (participant.id, QUARANTINE))\n withdrawable = sum(b.amount for b in bundles)\n x = -amount\n if x > withdrawable:\n raise NotEnoughWithdrawableMoney(Money(withdrawable, 'EUR'))\n for b in bundles:\n if x >= b.amount:\n cursor.run(\"\"\"\n UPDATE cash_bundles\n SET owner = NULL\n , withdrawal = %s\n WHERE id = %s\n \"\"\", (exchange.id, b.id))\n x -= b.amount\n if x == 0:\n break\n else:\n assert x > 0\n cursor.run(\"\"\"\n INSERT INTO cash_bundles\n (owner, origin, ts, amount, withdrawal)\n VALUES (NULL, %s, %s, %s, %s)\n \"\"\", (b.origin, b.ts, x, exchange.id))\n cursor.run(\"\"\"\n UPDATE cash_bundles\n SET amount = (amount - %s)\n WHERE id = %s\n \"\"\", (x, b.id))\n break\n elif amount > 0 and (exchange.amount < 0 or exchange.refund_ref):\n # failed withdrawal\n orig_exchange_id = exchange.refund_ref or exchange.id\n cursor.run(\"\"\"\n UPDATE cash_bundles b\n SET owner = %(p_id)s\n , withdrawal = NULL\n WHERE withdrawal = %(e_id)s\n \"\"\", dict(p_id=participant.id, e_id=orig_exchange_id))\n elif amount > 0:\n cursor.run(\"\"\"\n INSERT INTO cash_bundles\n (owner, origin, amount, ts)\n VALUES (%s, %s, %s, %s)\n \"\"\", (participant.id, exchange.id, amount, exchange.timestamp))\n\n 
participant.set_attributes(balance=new_balance)\n\n if amount != 0:\n participant.update_giving_and_tippees(cursor)\n merge_cash_bundles(cursor, participant.id)\n\n\ndef transfer(db, tipper, tippee, amount, context, **kw):\n t_id = prepare_transfer(db, tipper, tippee, amount, context, **kw)\n get = lambda id, col: db.one(\"SELECT {0} FROM participants WHERE id = %s\".format(col), (id,))\n tr = Transfer()\n tr.AuthorId = kw.get('tipper_mango_id') or get(tipper, 'mangopay_user_id')\n tr.CreditedUserId = kw.get('tippee_mango_id') or get(tippee, 'mangopay_user_id')\n tr.CreditedWalletId = kw.get('tippee_wallet_id') or get(tippee, 'mangopay_wallet_id')\n if not tr.CreditedWalletId:\n tr.CreditedWalletId = create_wallet(db, Participant.from_id(tippee))\n tr.DebitedFunds = Money(int(amount * 100), 'EUR')\n tr.DebitedWalletId = kw.get('tipper_wallet_id') or get(tipper, 'mangopay_wallet_id')\n tr.Fees = Money(0, 'EUR')\n tr.Tag = str(t_id)\n tr.save()\n return record_transfer_result(db, t_id, tr), t_id\n\n\ndef prepare_transfer(db, tipper, tippee, amount, context, **kw):\n with db.get_cursor() as cursor:\n transfer = cursor.one(\"\"\"\n INSERT INTO transfers\n (tipper, tippee, amount, context, team, invoice, status)\n VALUES (%s, %s, %s, %s, %s, %s, 'pre')\n RETURNING *\n \"\"\", (tipper, tippee, amount, context, kw.get('team'), kw.get('invoice')))\n lock_bundles(cursor, transfer, bundles=kw.get('bundles'))\n return transfer.id\n\n\ndef lock_bundles(cursor, transfer, bundles=None, prefer_bundles_from=-1):\n assert transfer.status == 'pre'\n cursor.run(\"LOCK TABLE cash_bundles IN EXCLUSIVE MODE\")\n tipper, tippee = transfer.tipper, transfer.tippee\n bundles = bundles or cursor.all(\"\"\"\n SELECT b.*\n FROM cash_bundles b\n JOIN exchanges e ON e.id = b.origin\n WHERE b.owner = %(tipper)s\n AND b.withdrawal IS NULL\n AND b.locked_for IS NULL\n ORDER BY b.origin = %(prefer_bundles_from)s DESC\n , e.participant = %(tippee)s DESC\n , b.ts\n \"\"\", locals())\n transferable = sum(b.amount for b in bundles)\n x = transfer.amount\n if x > transferable:\n raise NegativeBalance()\n for b in bundles:\n if x >= b.amount:\n cursor.run(\"\"\"\n UPDATE cash_bundles\n SET locked_for = %s\n WHERE id = %s\n \"\"\", (transfer.id, b.id))\n x -= b.amount\n if x == 0:\n break\n else:\n cursor.run(\"\"\"\n UPDATE cash_bundles\n SET amount = (amount - %s)\n WHERE id = %s;\n\n INSERT INTO cash_bundles\n (owner, origin, amount, ts, locked_for)\n VALUES (%s, %s, %s, %s, %s);\n \"\"\", (x, b.id, transfer.tipper, b.origin, x, b.ts, transfer.id))\n break\n\n\ndef record_transfer_result(db, t_id, tr):\n error = repr_error(tr)\n status = tr.Status.lower()\n assert (not error) ^ (status == 'failed')\n return _record_transfer_result(db, t_id, status, error)\n\n\ndef _record_transfer_result(db, t_id, status, error=None):\n balance = None\n with db.get_cursor() as c:\n tipper, tippee, amount = c.one(\"\"\"\n UPDATE transfers\n SET status = %s\n , error = %s\n WHERE id = %s\n RETURNING tipper, tippee, amount\n \"\"\", (status, error, t_id))\n if status == 'succeeded':\n # Update the balances\n balance = c.one(\"\"\"\n\n UPDATE participants\n SET balance = balance + %(amount)s\n WHERE id = %(tippee)s;\n\n UPDATE participants\n SET balance = balance - %(amount)s\n WHERE id = %(tipper)s\n RETURNING balance;\n\n \"\"\", locals())\n # Transfer the locked bundles to the recipient\n bundles = c.all(\"\"\"\n UPDATE cash_bundles\n SET owner = %s\n , locked_for = NULL\n WHERE owner = %s\n AND locked_for = %s\n RETURNING *\n \"\"\", (tippee, 
tipper, t_id))\n bundles_sum = sum(b.amount for b in bundles)\n assert bundles_sum == amount\n else:\n # Unlock the bundles\n bundles = c.all(\"\"\"\n UPDATE cash_bundles\n SET locked_for = NULL\n WHERE owner = %s\n AND locked_for = %s\n \"\"\", (tipper, t_id))\n if balance is not None:\n merge_cash_bundles(db, tippee)\n return balance\n raise TransferError(error)\n\n\ndef lock_disputed_funds(cursor, exchange, amount):\n \"\"\"Prevent money that is linked to a chargeback from being withdrawn.\n \"\"\"\n if amount != exchange.amount + exchange.fee:\n raise NotImplementedError(\"partial disputes are not implemented\")\n cursor.run(\"LOCK TABLE cash_bundles IN EXCLUSIVE MODE\")\n disputed_bundles = [NS(d._asdict()) for d in cursor.all(\"\"\"\n UPDATE cash_bundles\n SET disputed = true\n WHERE origin = %s\n RETURNING *\n \"\"\", (exchange.id,))]\n disputed_bundles_sum = sum(b.amount for b in disputed_bundles)\n assert disputed_bundles_sum == exchange.amount\n original_owner = exchange.participant\n for b in disputed_bundles:\n if b.owner == original_owner:\n continue\n try_to_swap_bundle(cursor, b, original_owner)\n\n\ndef recover_lost_funds(db, exchange, lost_amount, repudiation_id):\n \"\"\"Recover as much money as possible from a payin which has been reverted.\n \"\"\"\n original_owner = exchange.participant\n # Try (again) to swap the disputed bundles\n with db.get_cursor() as cursor:\n cursor.run(\"LOCK TABLE cash_bundles IN EXCLUSIVE MODE\")\n disputed_bundles = [NS(d._asdict()) for d in cursor.all(\"\"\"\n SELECT *\n FROM cash_bundles\n WHERE origin = %s\n AND disputed = true\n \"\"\", (exchange.id,))]\n bundles_sum = sum(b.amount for b in disputed_bundles)\n assert bundles_sum == lost_amount - exchange.fee\n for b in disputed_bundles:\n if b.owner == original_owner:\n continue\n try_to_swap_bundle(cursor, b, original_owner)\n # Move the funds back to the original wallet\n chargebacks_account = Participant.get_chargebacks_account()\n LiberapayOrg = Participant.from_username('LiberapayOrg')\n assert LiberapayOrg\n grouped = group_by(disputed_bundles, lambda b: (b.owner, b.withdrawal))\n for (owner, withdrawal), bundles in grouped.items():\n assert owner != chargebacks_account.id\n if owner == original_owner:\n continue\n amount = sum(b.amount for b in bundles)\n if owner is None:\n bundles = None\n withdrawer = db.one(\"SELECT participant FROM exchanges WHERE id = %s\", (withdrawal,))\n payer = LiberapayOrg.id\n create_debt(db, withdrawer, payer, amount, exchange.id)\n create_debt(db, original_owner, withdrawer, amount, exchange.id)\n else:\n payer = owner\n create_debt(db, original_owner, payer, amount, exchange.id)\n transfer(db, payer, original_owner, amount, 'chargeback', bundles=bundles)\n # Add a debt for the fee\n create_debt(db, original_owner, LiberapayOrg.id, exchange.fee, exchange.id)\n # Send the funds to the credit wallet\n # We have to do a SettlementTransfer instead of a normal Transfer. 
The amount\n # can't exceed the original payin amount, so we can't settle the fee debt.\n original_owner = Participant.from_id(original_owner)\n t_id = prepare_transfer(\n db, original_owner.id, chargebacks_account.id, exchange.amount,\n 'chargeback', prefer_bundles_from=exchange.id,\n )\n tr = SettlementTransfer()\n tr.AuthorId = original_owner.mangopay_user_id\n tr.CreditedUserId = chargebacks_account.mangopay_user_id\n tr.CreditedWalletId = chargebacks_account.mangopay_wallet_id\n tr.DebitedFunds = Money(int(amount * 100), 'EUR')\n tr.DebitedWalletId = original_owner.mangopay_wallet_id\n tr.Fees = Money(0, 'EUR')\n tr.RepudiationId = repudiation_id\n tr.Tag = str(t_id)\n tr.save()\n return record_transfer_result(db, t_id, tr)\n\n\ndef try_to_swap_bundle(cursor, b, original_owner):\n \"\"\"Attempt to switch a disputed cash bundle with a \"safe\" one.\n \"\"\"\n swappable_origin_bundles = [NS(d._asdict()) for d in cursor.all(\"\"\"\n SELECT *\n FROM cash_bundles\n WHERE owner = %s\n AND disputed IS NOT TRUE\n AND locked_for IS NULL\n ORDER BY ts ASC\n \"\"\", (original_owner,))]\n try_to_swap_bundle_with(cursor, b, swappable_origin_bundles)\n merge_cash_bundles(cursor, original_owner)\n if b.withdrawal:\n withdrawer = cursor.one(\n \"SELECT participant FROM exchanges WHERE id = %s\", (b.withdrawal,)\n )\n swappable_recipient_bundles = [NS(d._asdict()) for d in cursor.all(\"\"\"\n SELECT *\n FROM cash_bundles\n WHERE owner = %s\n AND disputed IS NOT TRUE\n AND locked_for IS NULL\n ORDER BY ts ASC, amount = %s DESC\n \"\"\", (withdrawer, b.amount))]\n # Note: we don't restrict the date in the query above, so a swapped\n # bundle can end up \"withdrawn\" before it was even created\n try_to_swap_bundle_with(cursor, b, swappable_recipient_bundles)\n merge_cash_bundles(cursor, withdrawer)\n else:\n merge_cash_bundles(cursor, b.owner)\n\n\ndef try_to_swap_bundle_with(cursor, b1, swappable_bundles):\n \"\"\"Attempt to switch the disputed cash bundle `b1` with one (or more) from\n the `swappable_bundles` list.\n \"\"\"\n for b2 in swappable_bundles:\n if b2.amount == b1.amount:\n swap_bundles(cursor, b1, b2)\n break\n elif b2.amount > b1.amount:\n # Split the swappable bundle in two, then do the swap\n b3 = split_bundle(cursor, b2, b1.amount)\n swap_bundles(cursor, b1, b3)\n break\n else:\n # Split the disputed bundle in two, then do the swap\n b3 = split_bundle(cursor, b1, b2.amount)\n swap_bundles(cursor, b2, b3)\n\n\ndef split_bundle(cursor, b, amount):\n \"\"\"Cut a bundle in two.\n\n Returns the new second bundle, whose amount is `amount`.\n \"\"\"\n assert b.amount > amount\n assert not b.locked_for\n b.amount = cursor.one(\"\"\"\n UPDATE cash_bundles\n SET amount = (amount - %s)\n WHERE id = %s\n RETURNING amount\n \"\"\", (amount, b.id))\n return NS(cursor.one(\"\"\"\n INSERT INTO cash_bundles\n (owner, origin, amount, ts, withdrawal, disputed)\n VALUES (%s, %s, %s, %s, %s, %s)\n RETURNING *;\n \"\"\", (b.owner, b.origin, amount, b.ts, b.withdrawal, b.disputed))._asdict())\n\n\ndef swap_bundles(cursor, b1, b2):\n \"\"\"Switch the current locations of the two cash bundles `b1` and `b2`.\n \"\"\"\n assert not b1.locked_for\n assert not b2.locked_for\n cursor.run(\"\"\"\n UPDATE cash_bundles\n SET owner = %s\n , withdrawal = %s\n WHERE id = %s;\n UPDATE cash_bundles\n SET owner = %s\n , withdrawal = %s\n WHERE id = %s;\n \"\"\", (b2.owner, b2.withdrawal, b1.id, b1.owner, b1.withdrawal, b2.id))\n b1.owner, b2.owner = b2.owner, b1.owner\n b1.withdrawal, b2.withdrawal = b2.withdrawal, 
b1.withdrawal\n\n\ndef merge_cash_bundles(db, p_id):\n \"\"\"Regroup cash bundles who have the same origin and current location.\n \"\"\"\n return db.one(\"\"\"\n LOCK TABLE cash_bundles IN EXCLUSIVE MODE;\n WITH regroup AS (\n SELECT owner, origin, sum(amount) AS amount, max(ts) AS ts\n FROM cash_bundles\n WHERE owner = %s\n AND disputed IS NOT TRUE\n AND locked_for IS NULL\n GROUP BY owner, origin\n HAVING count(*) > 1\n ),\n inserted AS (\n INSERT INTO cash_bundles\n (owner, origin, amount, ts)\n SELECT owner, origin, amount, ts\n FROM regroup\n RETURNING *\n ),\n deleted AS (\n DELETE\n FROM cash_bundles b\n USING regroup g\n WHERE b.owner = g.owner\n AND b.origin = g.origin\n AND b.disputed IS NOT TRUE\n AND b.locked_for IS NULL\n RETURNING b.*\n )\n SELECT (SELECT json_agg(d) FROM deleted d) AS before\n , (SELECT json_agg(i) FROM inserted i) AS after\n \"\"\", (p_id,))\n\n\ndef create_debt(db, debtor, creditor, amount, origin):\n return db.one(\"\"\"\n INSERT INTO debts\n (debtor, creditor, amount, status, origin)\n VALUES (%s, %s, %s, 'due', %s)\n RETURNING *\n \"\"\", (debtor, creditor, amount, origin))\n\n\ndef sync_with_mangopay(db):\n \"\"\"We can get out of sync with MangoPay if record_exchange_result wasn't\n completed. This is where we fix that.\n \"\"\"\n check_db(db)\n\n exchanges = db.all(\"SELECT * FROM exchanges WHERE status = 'pre'\")\n for e in exchanges:\n p = Participant.from_id(e.participant)\n transactions = Transaction.all(user_id=p.mangopay_user_id)\n transactions = [x for x in transactions if x.Tag == str(e.id)]\n assert len(transactions) < 2\n if transactions:\n t = transactions[0]\n error = repr_error(t)\n status = t.Status.lower()\n assert (not error) ^ (status == 'failed')\n record_exchange_result(db, e.id, status, error, p)\n else:\n # The exchange didn't happen\n if e.amount < 0:\n # Mark it as failed if it was a credit\n record_exchange_result(db, e.id, 'failed', 'interrupted', p)\n else:\n # Otherwise forget about it\n db.run(\"DELETE FROM exchanges WHERE id=%s\", (e.id,))\n\n transfers = db.all(\"SELECT * FROM transfers WHERE status = 'pre'\")\n for t in transfers:\n tipper = Participant.from_id(t.tipper)\n transactions = Transaction.all(user_id=tipper.mangopay_user_id)\n transactions = [x for x in transactions if x.Type == 'TRANSFER' and x.Tag == str(t.id)]\n assert len(transactions) < 2\n if transactions:\n record_transfer_result(db, t.id, transactions[0])\n else:\n # The transfer didn't happen, remove it\n db.run(\"\"\"\n UPDATE cash_bundles\n SET locked_for = NULL\n WHERE owner = %s\n AND locked_for = %s\n \"\"\", (t.tipper, t.id))\n db.run(\"DELETE FROM transfers WHERE id = %s\", (t.id,))\n\n check_db(db)\n","sub_path":"liberapay/billing/transactions.py","file_name":"transactions.py","file_ext":"py","file_size_in_byte":26964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"404992847","text":"import random as rnd\r\n\r\n\"\"\"ВЫВОД ПОЛЯ\"\"\"\r\ndef showPlayBoard(board):\r\n for i in range(3):\r\n print(board[i])\r\n\r\n\"\"\"ОРГАНИЗАЦИЯ ОЧЕРЕДИ ХОДА\"\"\"\r\ndef showTurn(turn, player1, player2):\r\n if turn == 0:\r\n print(\"Ход игрока: \", str(player1))\r\n else:\r\n print(\"Ход игрока: \", str(player2))\r\n print(\"Введите номер ячейки\")\r\n\r\n\"\"\"СОЗДАНИЕ МЕТКИ НА ПОЛЕ\"\"\"\r\ndef choice(turn, playerChoice, board):\r\n if turn == 0:\r\n for i in range(3):\r\n for j in range(3):\r\n if board[i][j] == int(playerChoice):\r\n board[i][j] = \"X\"\r\n else:\r\n for i in range(3):\r\n for j 
in range(3):\r\n if board[i][j] == int(playerChoice):\r\n board[i][j] = \"O\"\r\n\r\n\r\n\r\n\"\"\"ИНИЦИАЛИЗАЦИЯ\"\"\"\r\nprint(\"Введите имя игрока\")\r\nplayer1 = input()\r\nprint(\"Введите имя оппонента\")\r\nplayer2 = input()\r\nprint(player1 + \" VS \" + player2)\r\n\r\n\"\"\"СБРАСЫВАЕМ ПОЛЕ\"\"\"\r\ndef restartBoard():\r\n board = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\r\n return board\r\n\r\n\"\"\"Определение первого игрока\"\"\"\r\ndef firstPlayer():\r\n flipcoin = rnd.randrange(0,2)\r\n turn = flipcoin\r\n pl1 = player1\r\n pl2 = player2\r\n if(flipcoin == 0):\r\n print(str(player1) + \" играет за (X), \" + str(player2) + \" играет за (O)\")\r\n else:\r\n print(str(player1) + \" играет за (O), \" + str(player2) + \" играет за (X)\")\r\n pl1 = player2\r\n pl2 = player1\r\n turn = 0\r\n return turn, pl1, pl2\r\n\r\nwin1 = ['X','X','X']\r\nwin2 = ['O','O','O']\r\n\r\ndef WinCheck(board):\r\n \"\"\"ГОРИЗОНТАЛЬНЫЕ\"\"\"\r\n if(board[0] == win1 or board[0] == win2):\r\n return 1\r\n if (board[1] == win1 or board[1] == win2):\r\n return 1\r\n if (board[2] == win1 or board[2] == win2):\r\n return 1\r\n\r\n \"\"\"ВЕРТИКАЛЬНЫЕ\"\"\"\r\n Y1 = [board[0][0], board[1][0], board[2][0]]\r\n Y2 = [board[0][1], board[1][1], board[2][1]]\r\n Y3 = [board[0][2], board[1][2], board[2][2]]\r\n if (Y1 == win1 or Y1 == win2):\r\n return 1\r\n if (Y2 == win1 or Y2 == win2):\r\n return 1\r\n if (Y3 == win1 or Y3 == win2):\r\n return 1\r\n\r\n \"\"\"КРЕСТ\"\"\"\r\n X1 = [board[0][0], board[1][1], board[2][2]]\r\n X2 = [board[2][0], board[1][1], board[0][2]]\r\n if (X1 == win1 or X1 == win2):\r\n return 1\r\n if (X2 == win1 or X2 == win2):\r\n return 1\r\n\r\n\r\n\"\"\"НАЧАЛО ИГРЫ\"\"\"\r\ndef Game():\r\n board = [[1,2,3],[4,5,6],[7,8,9]]\r\n turnCount = 0\r\n turn, player1, player2 = firstPlayer()\r\n print(player1, player2)\r\n isEnd = False\r\n while(isEnd == False and turnCount<9):\r\n \"\"\"Переменная, отвечающая за проверку ячейки на игровом поле\"\"\"\r\n itsOk = False\r\n showPlayBoard(board)\r\n showTurn(turn, player1, player2)\r\n while True:\r\n \"\"\"Проверка на корректность ввода\"\"\"\r\n playerChoice = input()\r\n if (str(playerChoice).upper() == \"RESTART\"):\r\n break\r\n for i in range(3):\r\n for j in range(3):\r\n if str(board[i][j]) == playerChoice:\r\n itsOk = True\r\n if itsOk:\r\n break\r\n if(str(playerChoice).upper() == \"RESTART\"):\r\n Game()\r\n choice(turn, playerChoice, board)\r\n if turn == 1:\r\n turn = 0\r\n else:\r\n turn = 1\r\n turnCount = turnCount + 1\r\n if (WinCheck(board) == 1):\r\n isEnd = True\r\n if(turn == 1):\r\n print(\"Победил игрок: \" + player1)\r\n else: print(\"Победил игрок: \" + player2)\r\n if turnCount == 9 or isEnd == True:\r\n print(\"Введите restart, чтобы начать заново\")\r\n playerChoice = input()\r\n if (str(playerChoice).upper() == \"RESTART\"):\r\n Game()\r\n\r\n\r\nGame()\r\n#\r\n# print(board[1][1])\r\n# \"\"\"5\"\"\"\r\n#\r\n","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":4173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"92167839","text":"from setuptools import find_packages\nfrom distutils.core import setup, Command\nfrom distutils.command.build import build\nimport shutil\nimport subprocess\nimport os, sys\nimport os.path\n\n\n__HERE__ = os.path.abspath(os.path.dirname(__file__))\n\n__NAME__ = \"arc1docs\"\n__DESC__ = \"Documentation for ArC1\",\n__VERSION__ = \"2.0.0\"\n__MAINTAINER__ = \"Spyros Stathopoulos\"\n__EMAIL__ = 
\"devel@arc-instruments.co.uk\"\n__URL__ = \"http://www.arc-instruments.co.uk/products/arc-one/\"\n\nwith open(os.path.join(__HERE__, \"README.md\"), encoding='utf-8') as readme:\n __LONG_DESC__ = readme.read()\n\n\nrequirements = [\n \"importlib-resources>=1.1.0; python_version < '3.7'\",\n]\n\n\nclass BuildDocs(Command):\n\n description = \"Generate the manual\"\n user_options = []\n\n def initialize_options(self):\n self.cwd = None\n\n def finalize_options(self):\n self.cwd = os.getcwd()\n\n def compile_docs(self):\n if sys.platform == 'win32':\n pandoc = shutil.which('pandoc.exe')\n else:\n pandoc = shutil.which('pandoc')\n subprocess.run([pandoc, 'manual.txt', '--number-sections', \\\n '--template=template.html5', '--css=manual.css',\\\n '--pdf-engine=weasyprint', '-o', \\\n os.path.join('arc1docs','manual.pdf')])\n\n def run(self):\n self.compile_docs()\n\n\nclass Build(build):\n\n user_options = build.user_options + []\n\n def run(self):\n self.run_command(\"build_docs\")\n super().run()\n\n\ncmdclass = {}\ncmdclass['build_docs'] = BuildDocs\ncmdclass['build'] = Build\n\npackages = find_packages(include=[\"arc1docs\"])\n\nsetup(\n name = __NAME__,\n version = __VERSION__,\n description = \"Documentation for ArC1\",\n long_description = __LONG_DESC__,\n long_description_content_type = 'text/markdown',\n author = __MAINTAINER__,\n author_email = __EMAIL__,\n url = __URL__,\n project_urls = {\n \"Source Code\": \"https://github.com/arc-instruments/arc1-docs\"\n },\n license = 'GPL3',\n platforms = ['any'],\n classifiers = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering\",\n \"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Programming Language :: Python :: 3 :: Only\",\n ],\n packages = packages,\n python_requires = '>=3.3',\n install_requires = requirements,\n package_data = {\n 'arc1docs': ['manual.pdf']\n },\n cmdclass = cmdclass\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"446382205","text":"from ding.policy.base_policy import Policy\nfrom typing import Union, Optional, List, Any, Tuple\nimport os\nimport torch\nimport logging\nfrom functools import partial\nfrom tensorboardX import SummaryWriter\n\nfrom ding.envs import get_vec_env_setting, create_env_manager\nfrom ding.worker import BaseLearner, InteractionSerialEvaluator, BaseSerialCommander, create_buffer, \\\n create_serial_collector\nfrom ding.config import read_config, compile_config\nfrom ding.policy import create_policy, PolicyFactory\nfrom ding.utils import set_pkg_seed\n\n\ndef serial_pipeline_sqil(\n input_cfg: Union[str, Tuple[dict, dict]],\n expert_cfg: Union[str, Tuple[dict, dict]],\n seed: int = 0,\n env_setting: Optional[List[Any]] = None,\n model: Optional[torch.nn.Module] = None,\n expert_model: Optional[torch.nn.Module] = None,\n max_iterations: Optional[int] = int(1e10),\n) -> 'Policy': # noqa\n \"\"\"\n Overview:\n Serial pipeline sqil entry: we create this serial pipeline in order to\\\n implement SQIL in DI-engine. For now, we support the following envs\\\n Cartpole, Lunarlander, Pong, Spaceinvader, Qbert. The demonstration\\\n data come from the expert model. 
We use a well-trained model to \\\n generate demonstration data online\n Arguments:\n - input_cfg (:obj:`Union[str, Tuple[dict, dict]]`): Config in dict type. \\\n ``str`` type means config file path. \\\n ``Tuple[dict, dict]`` type means [user_config, create_cfg].\n - seed (:obj:`int`): Random seed.\n - env_setting (:obj:`Optional[List[Any]]`): A list with 3 elements: \\\n ``BaseEnv`` subclass, collector env config, and evaluator env config.\n - model (:obj:`Optional[torch.nn.Module]`): Instance of torch.nn.Module.\n - expert_model (:obj:`Optional[torch.nn.Module]`): Instance of torch.nn.Module.\\\n The default model is DQN(**cfg.policy.model)\n - max_iterations (:obj:`Optional[torch.nn.Module]`): Learner's max iteration. Pipeline will stop \\\n when reaching this iteration.\n Returns:\n - policy (:obj:`Policy`): Converged policy.\n \"\"\"\n if isinstance(input_cfg, str):\n cfg, create_cfg = read_config(input_cfg)\n expert_cfg, expert_create_cfg = read_config(expert_cfg)\n else:\n cfg, create_cfg = input_cfg\n expert_cfg, expert_create_cfg = expert_cfg\n create_cfg.policy.type = create_cfg.policy.type + '_command'\n expert_create_cfg.policy.type = expert_create_cfg.policy.type + '_command'\n env_fn = None if env_setting is None else env_setting[0]\n cfg = compile_config(cfg, seed=seed, env=env_fn, auto=True, create_cfg=create_cfg, save_cfg=True)\n expert_cfg = compile_config(\n expert_cfg, seed=seed, env=env_fn, auto=True, create_cfg=expert_create_cfg, save_cfg=True\n )\n # Create main components: env, policy\n if env_setting is None:\n env_fn, collector_env_cfg, evaluator_env_cfg = get_vec_env_setting(cfg.env)\n else:\n env_fn, collector_env_cfg, evaluator_env_cfg = env_setting\n collector_env = create_env_manager(cfg.env.manager, [partial(env_fn, cfg=c) for c in collector_env_cfg])\n expert_collector_env = create_env_manager(\n expert_cfg.env.manager, [partial(env_fn, cfg=c) for c in collector_env_cfg]\n )\n evaluator_env = create_env_manager(cfg.env.manager, [partial(env_fn, cfg=c) for c in evaluator_env_cfg])\n expert_collector_env.seed(cfg.seed)\n collector_env.seed(cfg.seed)\n evaluator_env.seed(cfg.seed, dynamic_seed=False)\n expert_policy = create_policy(expert_cfg.policy, model=expert_model, enable_field=['collect', 'command'])\n set_pkg_seed(cfg.seed, use_cuda=cfg.policy.cuda)\n policy = create_policy(cfg.policy, model=model, enable_field=['learn', 'collect', 'eval', 'command'])\n expert_policy.collect_mode.load_state_dict(\n torch.load(cfg.policy.collect.demonstration_info_path, map_location='cpu')\n )\n # Create worker components: learner, collector, evaluator, replay buffer, commander.\n tb_logger = SummaryWriter(os.path.join('./{}/log/'.format(cfg.exp_name), 'serial'))\n learner = BaseLearner(cfg.policy.learn.learner, policy.learn_mode, tb_logger, exp_name=cfg.exp_name)\n collector = create_serial_collector(\n cfg.policy.collect.collector,\n env=collector_env,\n policy=policy.collect_mode,\n tb_logger=tb_logger,\n exp_name=cfg.exp_name\n )\n expert_collector = create_serial_collector(\n expert_cfg.policy.collect.collector,\n env=expert_collector_env,\n policy=expert_policy.collect_mode,\n tb_logger=tb_logger,\n exp_name=expert_cfg.exp_name\n )\n evaluator = InteractionSerialEvaluator(\n cfg.policy.eval.evaluator, evaluator_env, policy.eval_mode, tb_logger, exp_name=cfg.exp_name\n )\n replay_buffer = create_buffer(cfg.policy.other.replay_buffer, tb_logger=tb_logger, exp_name=cfg.exp_name)\n expert_buffer = create_buffer(expert_cfg.policy.other.replay_buffer, 
tb_logger=tb_logger, exp_name=cfg.exp_name)\n commander = BaseSerialCommander(\n cfg.policy.other.commander, learner, collector, evaluator, replay_buffer, policy.command_mode\n )\n expert_commander = BaseSerialCommander(\n expert_cfg.policy.other.commander, learner, expert_collector, evaluator, replay_buffer,\n expert_policy.command_mode\n ) # we create this to avoid the issue of eps, this is an issue due to the sample collector part.\n expert_collect_kwargs = expert_commander.step()\n if 'eps' in expert_collect_kwargs:\n expert_collect_kwargs['eps'] = -1\n # ==========\n # Main loop\n # ==========\n # Learner's before_run hook.\n learner.call_hook('before_run')\n\n # Accumulate plenty of data at the beginning of training.\n if cfg.policy.get('random_collect_size', 0) > 0:\n action_space = collector_env.env_info().act_space\n random_policy = PolicyFactory.get_random_policy(policy.collect_mode, action_space=action_space)\n collector.reset_policy(random_policy)\n collect_kwargs = commander.step()\n new_data = collector.collect(n_sample=cfg.policy.random_collect_size, policy_kwargs=collect_kwargs)\n replay_buffer.push(new_data, cur_collector_envstep=0)\n collector.reset_policy(policy.collect_mode)\n for _ in range(max_iterations):\n collect_kwargs = commander.step()\n # Evaluate policy performance\n if evaluator.should_eval(learner.train_iter):\n stop, reward = evaluator.eval(learner.save_checkpoint, learner.train_iter, collector.envstep)\n if stop:\n break\n # Collect data by default config n_sample/n_episode\n new_data = collector.collect(train_iter=learner.train_iter, policy_kwargs=collect_kwargs)\n expert_data = expert_collector.collect(\n train_iter=learner.train_iter, policy_kwargs=expert_collect_kwargs\n ) # policy_kwargs={'eps': -1}\n for i in range(len(new_data)):\n device_1 = new_data[i]['obs'].device\n device_2 = expert_data[i]['obs'].device\n new_data[i]['reward'] = torch.zeros(cfg.policy.nstep).to(device_1)\n expert_data[i]['reward'] = torch.ones(cfg.policy.nstep).to(device_2)\n replay_buffer.push(new_data, cur_collector_envstep=collector.envstep)\n expert_buffer.push(expert_data, cur_collector_envstep=collector.envstep)\n # Learn policy from collected data\n for i in range(cfg.policy.learn.update_per_collect):\n # Learner will train ``update_per_collect`` times in one iteration.\n train_data = replay_buffer.sample((learner.policy.get_attribute('batch_size')) // 2, learner.train_iter)\n train_data_demonstration = expert_buffer.sample(\n (learner.policy.get_attribute('batch_size')) // 2, learner.train_iter\n )\n if train_data is None:\n # It is possible that replay buffer's data count is too few to train ``update_per_collect`` times\n logging.warning(\n \"Replay buffer's data can only train for {} steps. \".format(i) +\n \"You can modify data collect config, e.g. 
increasing n_sample, n_episode.\"\n                )\n                break\n            train_data = train_data + train_data_demonstration\n            learner.train(train_data, collector.envstep)\n        if learner.policy.get_attribute('priority'):\n            replay_buffer.update(learner.priority_info)\n\n    # Learner's after_run hook.\n    learner.call_hook('after_run')\n    return policy\n","sub_path":"ding/entry/serial_entry_sqil.py","file_name":"serial_entry_sqil.py","file_ext":"py","file_size_in_byte":8646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"164883609","text":"#!/usr/bin/python\nimport sys\nsys.path.append('/home/pi/RCCar-Library')\nfrom Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor\nimport RPi.GPIO as GPIO\n\nimport time\nimport atexit\n\nGPIO.setwarnings(False)\n# suppresses warning outputs we don't need\nGPIO.setmode(GPIO.BCM)\n# sets the GPIO board to the BCM mode (read up on BCM vs BOARD if interested but not necessary)\n\nt1 = 6\nt2 = 13\nt3 = 21\nt4 = 20\n# t stands for trigger, 1 and 2 correspond to the respective ultrasonic sensors\n# the trigger is a GPIO out from the RPi, it sends a short impulse to the ultrasonic sensor to initialize it\ne1 = 4\ne2 = 18\ne3 = 22\ne4 = 24\n# e stands for echo, 1 and 2 correspond to the respective ultrasonic sensors\n# the echo is a GPIO in to the RPi, the sensors send out square waves that we can use to calculate distance\n\nGPIO.setup(t1, GPIO.OUT)\nGPIO.setup(e1, GPIO.IN)\nGPIO.setup(t2, GPIO.OUT)\nGPIO.setup(e2, GPIO.IN)\nGPIO.setup(t3, GPIO.OUT)\nGPIO.setup(e3, GPIO.IN)\nGPIO.setup(t4, GPIO.OUT)\nGPIO.setup(e4, GPIO.IN)\n# this block of code sets up the pins as inputs or outputs\n\nGPIO.output(t1, False)\nGPIO.output(t2, False)\nGPIO.output(t3, False)\nGPIO.output(t4, False)\ntime.sleep(0.5)\n# required to sleep and wake the sensors\n\ndef distance(echo, trigger):\n    GPIO.output(trigger, True)\n    time.sleep(0.00001)\n    GPIO.output(trigger, False)\n    # initializes the sensors\n\n    start = 0\n    stop = 0\n    \n    while GPIO.input(echo) == 0:\n        start = time.clock()\n        # loops for when echo terminal receives no sonic input\n    \n    while GPIO.input(echo) == 1:\n        stop = time.clock()\n        # loops for when echo terminal receives a sonic input\n    \n    if stop != 0 and start != 0:\n        elapsed = stop - start # measures the time between the emission of a wave and a reception of a reflected wave\n        distance = elapsed * 33440 # multiplies this time by the speed of sound, approx. 33440 cm/s \n        distance = distance / 2 # halves the measured distance since the wave traveled double the actual distance\n        return distance\n\ndef averageDistance(echo, trigger):\n    d1 = 0\n    d2 = 0\n    d3 = 0\n    d1 = distance(echo, trigger)\n    time.sleep(0.01)\n    d2 = distance(echo, trigger)\n    time.sleep(0.01)\n    d3 = distance(echo, trigger)\n    averageDistance = (d1 + d2 + d3)/3\n    return averageDistance\n\n# create a default object, no changes to I2C address or frequency\nmh = Adafruit_MotorHAT(addr=0x60)\n\n# recommended for auto-disabling motors on shutdown!\ndef turnOffMotors():\n\tmh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)\n\tmh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)\n\tmh.getMotor(3).run(Adafruit_MotorHAT.RELEASE)\n\tmh.getMotor(4).run(Adafruit_MotorHAT.RELEASE)\n\natexit.register(turnOffMotors)\n\n################################# DC motor test!\nm1 = mh.getMotor(1)\nm2 = mh.getMotor(2)\nm3 = mh.getMotor(3)\nm4 = mh.getMotor(4)\n\n# set the speed to start, from 0 (off) to 255 (max 
speed)\nm1.setSpeed(150)\nm2.setSpeed(150)\nm3.setSpeed(150)\nm4.setSpeed(150)\n\nm1.run(Adafruit_MotorHAT.FORWARD);\nm2.run(Adafruit_MotorHAT.FORWARD);\nm3.run(Adafruit_MotorHAT.FORWARD);\nm4.run(Adafruit_MotorHAT.FORWARD);\n# turn on motor\nm1.run(Adafruit_MotorHAT.RELEASE);\nm2.run(Adafruit_MotorHAT.RELEASE);\nm3.run(Adafruit_MotorHAT.RELEASE);\nm4.run(Adafruit_MotorHAT.RELEASE);\n\nmotors = [m1, m2, m3, m4]\ndef leftDistance():\n return averageDistance(e1, t1)\n\ndef frontDistance(): \n return averageDistance(e2, t2)\n\ndef rightDistance():\n return averageDistance(e3, t3)\n\ndef backDistance():\n return averageDistance(e4, t4)\n\ndef forward():\n for motor in motors:\n motor.run(Adafruit_MotorHAT.FORWARD)\n motor.setSpeed(150)\n\ndef stop():\n for motor in motors:\n motor.run(Adafruit_MotorHAT.RELEASE)\n\ndef reverse():\n for motor in motors:\n motor.run(Adafruit_MotorHAT.BACKWARD)\n\ndef reverseRight():\n for motor in motors:\n motor.run(Adafruit_MotorHAT.BACKWARD)\n m1.setSpeed(255)\n m2.setSpeed(255)\n m3.setSpeed(100)\n m4.setSpeed(100)\n\ndef reverseLeft():\n for motor in motors:\n motor.run(Adafruit_MotorHAT.BACKWARD)\n m1.setSpeed(100)\n m2.setSpeed(100)\n m3.setSpeed(255)\n m4.setSpeed(255)\n\ndef turnRight():\n for motor in motors:\n motor.run(Adafruit_MotorHAT.FORWARD)\n m1.setSpeed(255)\n m2.setSpeed(255)\n m3.setSpeed(30)\n m4.setSpeed(30)\n\ndef turnLeft():\n for motor in motors:\n motor.run(Adafruit_MotorHAT.FORWARD)\n m1.setSpeed(30)\n m2.setSpeed(30)\n m3.setSpeed(255)\n m4.setSpeed(255)\n\nwhile(True):\n #print '%8s' % str(round(frontDistance(), 2)), '%8s' % str(round(leftDistance(), 2)), '%8s' % str(round(rightDistance(), 2)), '%8s' % str(round(backDistance()))\n if frontDistance() < 30.0 and leftDistance() > 30.0 and rightDistance() > 30.0:\n stop()\n time.sleep(0.1)\n if leftDistance() > rightDistance():\n reverseLeft()\n time.sleep(0.3)\n else:\n reverseRight()\n time.sleep(0.3)\n if frontDistance() < 10.0:\n reverse()\n elif leftDistance() > 30.0 and rightDistance() > 30.0:\n forward()\n elif leftDistance() < 7.5:\n reverseLeft()\n while backDistance() < 7.5:\n turnLeft()\n elif leftDistance() < 30.0 and rightDistance() > 30.0:\n turnRight()\n elif rightDistance() < 7.5:\n reverseRight()\n while backDistance() < 7.5:\n turnRight()\n elif rightDistance() < 30.0 and leftDistance() > 30.0:\n turnLeft()\n time.sleep(0.01)\n # delays the sensor a bit so Python doesn't screw up timing (it isn't that timely of a language)\n\nturnOffMotors()\n\nGPIO.cleanup() # required after any code involving setting up GPIO pins\n","sub_path":"examples/ummt2.py","file_name":"ummt2.py","file_ext":"py","file_size_in_byte":5570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"398824807","text":"#!/usr/bin/env python3\n\n#Required pip install tabulate\n\nimport requests\nimport json\nimport urllib3\nfrom pprint import pprint\nfrom tabulate import *\n\nrequests.packages.urllib3.disable_warnings()\n\ndef get_ticket():\n url = \"https://devnetsbx-netacad-apicem-3.cisco.com/api/v1/ticket\"\n headers = {\n 'Content-Type': 'application/json'\n }\n body_json = {\n \"password\": \"Xj3BDqbU\",\n \"username\": \"devnetuser\"\n }\n resp = requests.post(url,json.dumps(body_json),headers=headers,verify=False)\n statusCode = resp.status_code\n response_json = resp.json()\n serviceTicket = response_json['response']['serviceTicket']\n print(\"El ticket de servicio asignado es: \",serviceTicket)\n return serviceTicket\n\n\ndef print_hosts():\n url = 
\"https://devnetsbx-netacad-apicem-3.cisco.com/api/v1/host\"\n ticket = get_ticket()\n headers = {\n 'Content-Type': 'application/json',\n 'X-Auth-Token': ticket\n } \n resp = requests.get(url,headers=headers,verify=False)\n response_json = resp.json()\n hosts = []\n i = 0\n for host in response_json[\"response\"]:\n i += 1\n auxHost = [\n i,\n host[\"hostType\"],\n host[\"hostIp\"],\n host[\"hostMac\"]\n ] \n hosts.append(auxHost)\n \n header_print = [\n \"Number\",\n \"Type\",\n \"IP\",\n \"MAC\"\n ]\n\n print(tabulate(hosts, header_print))\n #pprint(response_json)\n\n\ndef print_networkDevices():\n url = \"https://devnetsbx-netacad-apicem-3.cisco.com/api/v1/network-device\"\n ticket = get_ticket()\n headers = {\n 'Content-Type': 'application/json',\n 'X-Auth-Token': ticket\n } \n resp = requests.get(url,headers=headers,verify=False)\n response_json = resp.json()\n netDevices = []\n i = 0\n \n for host in response_json[\"response\"]:\n i += 1\n auxDevs = [\n i,\n host[\"hostname\"],\n host[\"family\"],\n host[\"macAddress\"],\n host[\"softwareVersion\"],\n host[\"type\"]\n ] \n netDevices.append(auxDevs)\n \n header_print = [\n \"Hostname\",\n \"Family\",\n \"MAC\",\n \"Version\",\n \"Type\"\n ]\n print(tabulate(netDevices, header_print))\n #pprint(response_json)\n\n\ndef print_networkVlans():\n url = \"https://devnetsbx-netacad-apicem-3.cisco.com/api/v1/network-device\"\n ticket = get_ticket()\n headers = {\n 'Content-Type': 'application/json',\n 'X-Auth-Token': ticket\n } \n resp = requests.get(url,headers=headers,verify=False)\n response_json = resp.json()\n netIds = []\n vlanDevs = []\n i = 0\n for host in response_json[\"response\"]:\n netIds.append(host[\"id\"])\n print(\"Processing...\") \n for id in netIds:\n url = \"https://devnetsbx-netacad-apicem-3.cisco.com/api/v1/network-device/\" + id + \"/vlan\"\n resp = requests.get(url,headers=headers,verify=False)\n response_json = resp.json()\n if((resp.status_code == 200)):\n for device in response_json[\"response\"]:\n i += 1\n interfaceName = \"No Data\"\n ipAddress = \"No Data\"\n networkAddress = \"No Data\"\n numberOfIPs = \"No Data\"\n if \"interfaceName\" in device:\n interfaceName = device[\"interfaceName\"]\n if \"ipAddress\" in device:\n ipAddress = device[\"ipAddress\"]\n if \"networkAddress\" in device:\n networkAddress = device[\"networkAddress\"]\n if \"numberOfIPs\" in device:\n numberOfIPs = device[\"numberOfIPs\"]\n auxDevs = [\n i,\n interfaceName,\n ipAddress,\n networkAddress, \n numberOfIPs\n ] \n vlanDevs.append(auxDevs)\n \n header_print = [\n \"Interface\",\n \"IP\",\n \"Network IP\",\n \"Number of IPs\",\n ]\n print(tabulate(vlanDevs, header_print))\n\n\n\ndef print_networkLicense():\n url = \"https://devnetsbx-netacad-apicem-3.cisco.com/api/v1/network-device\"\n ticket = get_ticket()\n headers = {\n 'Content-Type': 'application/json',\n 'X-Auth-Token': ticket\n } \n resp = requests.get(url,headers=headers,verify=False)\n response_json = resp.json()\n netIds = []\n licenseDevs = []\n i = 0\n for host in response_json[\"response\"]:\n netIds.append(host[\"id\"])\n print(\"Processing...\") \n for id in netIds:\n url = \"https://devnetsbx-netacad-apicem-3.cisco.com/api/v1/network-device/\" + id\n resp = requests.get(url,headers=headers,verify=False)\n response_json = resp.json() \n if((resp.status_code == 200)):\n device = response_json[\"response\"]\n i += 1\n auxDevs = [\n i,\n device[\"family\"],\n device[\"hostname\"],\n device[\"id\"], \n device[\"type\"],\n device[\"upTime\"]\n ] \n 
licenseDevs.append(auxDevs)\n header_print = [\n \"Family\",\n \"Hostname\",\n \"ID\",\n \"Type\",\n \"UpTime\"\n ]\n print(tabulate(licenseDevs, header_print))\n","sub_path":"APIC-EM/apic_em_functions.py","file_name":"apic_em_functions.py","file_ext":"py","file_size_in_byte":5312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"57054429","text":"\"\"\"\n n2 n4\n | |\n \\ /\n \\ /\n ---=========---\n n1 length_x n3\n\n\"\"\"\n\n\nfrom gdslib import plot_sparameters\nfrom simphony.library import siepic\n\nif __name__ == \"__main__\":\n c = siepic.ebeam_dc_halfring_straight(\n gap=200e-9, radius=10e-6, width=500e-9, thickness=220e-9, couple_length=0.0\n )\n c = plot_sparameters(c)\n","sub_path":"gdslib/coupler_ring_siepic.py","file_name":"coupler_ring_siepic.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"41378867","text":"import pyshark\nimport pandas as pd\nimport numpy as np\n\n# Some constants\n\n# In[3]:\n\n# TOTAL_BITS = 400\nTOTAL_BITS = 10000\nAP_MAC = '58:6d:8f:d3:5e:70'\n\n# ## Loading the captured packets\n\n# In[4]:\n# file = '2015-12-09_17_12_60mw_all_nightmbps.pcapng' # Big automated\nfile = '2015-12-09_18_33_60mw_all_night2mbps.pcapng' # ALL NIGHT!\n\ncap = pyshark.FileCapture(file)\n\n\np_data = pd.DataFrame(columns=['Data','Data_rate','RSSI','Tx_mac','Time'])\n\ni = 0\nfor pkt in cap:\n if pkt.wlan.fcs_bad == '1':\n try:\n data = pkt.data.data.replace(':','')\n data_rate = np.float16(pkt.radiotap.datarate)\n rssi = int(pkt.radiotap.dbm_antsignal)\n tx_mac = pkt.wlan.ta\n time_rel = pkt.frame_info.time_relative\n \n p_data.loc[i] = [data,data_rate,rssi,tx_mac,time_rel]\n i +=1\n except:\n continue\n\n\n# Add a column stating if the AP sent the packet\np_data['AP_pkt'] = p_data['Tx_mac'].apply(lambda x: x == '58:6d:8f:d3:5e:70')\n# ==========\n\n\n# Add a column with Binary Data\ndef str2bin(number):\n decimal_number = int(number,16)\n # Do not return the '0b' at the beginning and fill with zeroes\n return bin(decimal_number)[2:].zfill(8)\n\ndef data_to_bin(data):\n return [str2bin(data[byte : byte+2]) for byte in range(0, len(data),2)]\n\ndef bytes_to_binarray(list_of_bytes):\n temp_str = ''\n for byte in list_of_bytes:\n temp_str = temp_str + byte\n return temp_str\n\np_data['Data_bin'] = p_data['Data'].apply(data_to_bin)\np_data['Data_bin'] = p_data['Data_bin'].apply(bytes_to_binarray)\n# ==========\n\n# Add a column stating if the data has the right length\n# p_data['Right_Length'] = p_data['Data'].apply(lambda x: len(x) == 100)\np_data['Right_Length'] = p_data['Data_bin'].apply(lambda x: len(x) == TOTAL_BITS)\n# ==========\n\n# Add column with number of bits flipped\np_data['Flipped_bits'] = p_data['Data_bin'].apply(lambda x: x.count('1'))\n# ==========\n\n# Save DF\np_data.to_msgpack('output.msg')\n# ==========\n\n","sub_path":"fcs_failed_packet_inspection.py","file_name":"fcs_failed_packet_inspection.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"76319904","text":"import rospy\r\nfrom beginner_tutorials.msg import vision_feedback\r\n\r\ndef talker():\r\n pub = rospy.Publisher('feedback', vision_feedback, queue_size=10)\r\n rospy.init_node('msg_talker', anonymous=True)\r\n rate = rospy.Rate(10) # 10hz\r\n while not rospy.is_shutdown():\r\n feedback_pose = vision_feedback()\r\n feedback_pose.x = 0.1\r\n 
feedback_pose.y = 0.2\r\n feedback_pose.theta = 0.3\r\n x_str = \"feedback x = %f\" % feedback_pose.x\r\n y_str = \"feedback y = %f\" % feedback_pose.y\r\n theta_str = \"feedback theta = %f\" % feedback_pose.theta\r\n rospy.loginfo(x_str)\r\n rospy.loginfo(y_str)\r\n rospy.loginfo(theta_str)\r\n pub.publish(feedback_pose)\r\n rate.sleep()\r\n\r\nif __name__ == '__main__':\r\n try:\r\n talker()\r\n except rospy.ROSInterruptException:\r\n pass","sub_path":"scripts/msg_talker.py","file_name":"msg_talker.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"517671377","text":"# SPDX-License-Identifier: MIT\n# Copyright (c) 2019-2021 The Pybricks Authors\n\nimport errno\nimport os\nimport platform\nimport sys\n\nfrom contextlib import nullcontext\nfrom importlib.resources import path\nfrom subprocess import DEVNULL, call, check_call\nfrom tempfile import TemporaryDirectory\nfrom typing import BinaryIO, ContextManager\n\nfrom usb.core import NoBackendError, USBError\n\nfrom . import _dfu_upload, _dfu_create, resources\nfrom .ble.lwp3.bytecodes import HubKind\n\nFIRMWARE_ADDRESS = 0x08008000\nFIRMWARE_SIZE = 1 * 1024 * 1024 - 32 * 1024 # 1MiB - 32KiB\nLEGO_VID = 0x0694\nSPIKE_PRIME_PID = 0x0008\nMINDSTORMS_INVENTOR_PID = 0x0011\n\n\nSPIKE_PRIME_DEVICE = f\"0x{LEGO_VID:04x}:0x{SPIKE_PRIME_PID:04x}\"\nMINDSTORMS_INVENTOR_DEVICE = f\"0x{LEGO_VID:04x}:0x{MINDSTORMS_INVENTOR_PID:04x}\"\n\n\ndef _get_dfu_util() -> ContextManager[os.PathLike]:\n \"\"\"Gets ``dfu-util`` command line tool path.\n\n Returns: Context manager containing the path. The path may no longer be\n valid after the context manager exits.\n \"\"\"\n # Use embedded .exe for Windows\n if platform.system() == \"Windows\":\n return path(resources, resources.DFU_UTIL_EXE)\n\n # otherwise use system provided dfu-util\n dfu_util = \"dfu-util\"\n\n try:\n check_call([dfu_util, \"--version\"], stdout=DEVNULL)\n except FileNotFoundError:\n print(\n \"No working DFU found.\",\n \"Please install libusb or ensure dfu-util is in PATH.\",\n file=sys.stderr,\n )\n exit(1)\n\n return nullcontext(dfu_util)\n\n\ndef backup_dfu(file: BinaryIO) -> None:\n \"\"\"Backs up device data via DFU.\n\n Args:\n file:\n file where firmware (MCU flash memory) will be saved\n \"\"\"\n try:\n # TODO: implement this using pydfu\n raise NoBackendError\n except NoBackendError:\n # if libusb was not found, try using dfu-util command line tool\n\n with _get_dfu_util() as dfu_util:\n\n file.close()\n\n # dfu-util won't overwrite existing files so we have to do that first\n os.remove(file.name)\n\n exit(\n call(\n [\n dfu_util,\n \"--device\",\n f\",{SPIKE_PRIME_DEVICE},{MINDSTORMS_INVENTOR_DEVICE}\",\n \"--alt\",\n \"0\",\n \"--dfuse-address\",\n f\"{FIRMWARE_ADDRESS}:{FIRMWARE_SIZE}\",\n \"--upload\",\n file.name,\n ]\n )\n )\n\n\ndef restore_dfu(file: BinaryIO) -> None:\n \"\"\"Restores flash memory from a file (raw data, not .dfu file).\n\n Args:\n file: the file that contains the firmware data\n \"\"\"\n file.seek(0, os.SEEK_END)\n size = file.tell()\n file.seek(0, os.SEEK_SET)\n\n if size < 512:\n raise ValueError(\"File is too small to be a valid firmware file\")\n\n try:\n # TODO: implement this using pydfu\n raise NoBackendError\n except NoBackendError:\n # if libusb was not found, try using dfu-util command line tool\n\n with _get_dfu_util() as dfu_util:\n\n file.close()\n\n exit(\n call(\n [\n dfu_util,\n \"--device\",\n 
f\",{SPIKE_PRIME_DEVICE},{MINDSTORMS_INVENTOR_DEVICE}\",\n \"--alt\",\n \"0\",\n \"--dfuse-address\",\n f\"{FIRMWARE_ADDRESS}\",\n \"--download\",\n file.name,\n ]\n )\n )\n\n\ndef flash_dfu(firmware_bin: bytes, metadata: dict) -> None:\n \"\"\"Flashes a firmware file using DFU.\"\"\"\n\n if metadata[\"device-id\"] != HubKind.TECHNIC_LARGE:\n print(\"Unknown hub type:\", metadata[\"device-id\"], file=sys.stderr)\n exit(1)\n\n with TemporaryDirectory() as out_dir:\n outfile = os.path.join(out_dir, \"firmware.dfu\")\n target = {\"address\": FIRMWARE_ADDRESS, \"data\": firmware_bin}\n\n try:\n # Determine correct product ID\n\n devices = _dfu_upload.get_dfu_devices(idVendor=LEGO_VID)\n if not devices:\n print(\n \"No DFU devices found.\",\n \"Make sure hub is in DFU mode and connected with USB.\",\n file=sys.stderr,\n )\n exit(1)\n\n product_id = devices[0].idProduct\n if product_id != SPIKE_PRIME_PID and product_id != MINDSTORMS_INVENTOR_PID:\n print(f\"Unknown USB product ID: {product_id:04X}\", file=sys.stderr)\n exit(1)\n\n # Create dfu file\n device = \"0x{0:04x}:0x{1:04x}\".format(LEGO_VID, product_id)\n _dfu_create.build(outfile, [[target]], device)\n\n # Init dfu tool\n _dfu_upload.__VID = LEGO_VID\n _dfu_upload.__PID = product_id\n _dfu_upload.init()\n elements = _dfu_upload.read_dfu_file(outfile)\n\n # Erase flash\n print(\"Erasing flash...\")\n _dfu_upload.mass_erase()\n\n # Upload dfu file\n print(\"Writing new firmware...\")\n _dfu_upload.write_elements(elements, True, _dfu_upload.cli_progress)\n _dfu_upload.exit_dfu()\n print(\"Done.\")\n except USBError as e:\n if e.errno != errno.EACCES or platform.system() != \"Linux\":\n # not expecting other errors\n raise\n\n print(\n \"Permission to access USB device denied. Did you install udev rules?\",\n file=sys.stderr,\n )\n print(\n \"Run `pybricksdev udev | sudo tee /etc/udev/rules.d/99-pybricksdev.rules` then try again.\",\n file=sys.stderr,\n )\n exit(1)\n except NoBackendError:\n # if libusb was not found, try using dfu-util command line tool\n\n with _get_dfu_util() as dfu_util:\n\n # Exact device product ID doesn't matter here since we are using\n # the --device command line option below.\n _dfu_create.build(outfile, [[target]], SPIKE_PRIME_DEVICE)\n\n exit(\n call(\n [\n dfu_util,\n \"--device\",\n f\",{SPIKE_PRIME_DEVICE},{MINDSTORMS_INVENTOR_DEVICE}\",\n \"--alt\",\n \"0\",\n \"--download\",\n outfile,\n ]\n )\n )\n","sub_path":"pybricksdev/dfu.py","file_name":"dfu.py","file_ext":"py","file_size_in_byte":6749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"104401881","text":"import Rhino\nimport rhinoscriptsyntax as rs\nimport rhino_helpers as rh\n\ndef edgeAngle(mesh, edgeIndex):\n faceIdxs = rh.getFacesForEdge(mesh, edgeIndex)\n\n if (len(faceIdxs)==2):\n faceNorm0 = mesh.FaceNormals.Item[faceIdxs[0]]\n faceNorm1 = mesh.FaceNormals.Item[faceIdxs[1]]\n return rs.VectorAngle(faceNorm0,faceNorm1) \n else:\n return None\n\ndef uniform(mesh, edgeIndex):\n return 1\n\nimport random as rand\n\ndef random(mesh, edgeIndex):\n return rand.random()\n\ndef planeAligned(mesh,edgeIdx):\n planeNormal = Rhino.Geometry.Vector3d(1,0,0)\n edgeVec = rh.getEdgeVector(mesh,edgeIdx)\n angle = rh.angleBetweenVecAndPlane(edgeVec,planeNormal)\n return 1.0/(angle+.000000001)","sub_path":"rhino_unwrapper/weight_functions.py","file_name":"weight_functions.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} 
+{"seq_id":"538895837","text":"# Copyright 2020 Alexis Lopez Zubieta\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\nimport configparser\nimport os\n\n\nclass DesktopFileParser:\n def __init__(self, file_path):\n self.file_path = file_path\n self.parser = configparser.RawConfigParser()\n self.parser.read(file_path)\n\n exec = self.parser[\"Desktop Entry\"][\"Exec\"].strip()\n self.exec_path, self.exec_args = self._split_exec_path_and_args(exec)\n\n # convert desktop file exec args to bash notation\n self.exec_args = self.exec_args.replace(\"%f\", \"$@\")\n self.exec_args = self.exec_args.replace(\"%F\", \"$@\")\n self.exec_args = self.exec_args.replace(\"%U\", \"$@\")\n self.exec_args = self.exec_args.replace(\"%u\", \"$@\")\n\n @staticmethod\n def _split_exec_path_and_args(exec):\n if exec[0] == \"'\" or exec[0] == '\"':\n end = exec.find(exec[0], 1)\n while end != -1 and exec[end - 1] == \"\\\\\":\n end = exec.find(exec[0], end + 1)\n if end == -1:\n end = len(exec)\n\n exec_path = exec[1:end].strip()\n exec_args = exec[end + 1 :].strip()\n\n return exec_path, exec_args\n else:\n end = exec.find(\" \")\n if end == -1:\n end = len(exec)\n\n exec_path = exec[:end].strip()\n exec_args = exec[end + 1 :].strip()\n\n return exec_path, exec_args\n\n def get_name(self):\n return self.parser[\"Desktop Entry\"][\"Name\"]\n\n def get_icon(self):\n return self.parser[\"Desktop Entry\"][\"Icon\"]\n\n def get_exec_path(self):\n return self.exec_path\n\n def get_exec_args(self):\n return self.exec_args\n\n def get_id(self):\n filename, file_extension = os.path.splitext(os.path.basename(self.file_path))\n return filename\n","sub_path":"appimagebuilder/generator/desktop_entry_parser.py","file_name":"desktop_entry_parser.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"229055545","text":"import argparse\nimport shutil\nimport sys\nfrom glob import glob\nfrom pathlib import Path\n\nfilename2zotkey = {\n \"proposal\": \"UFJLUPV7\",\n \"work_in_progress\": \"VQKH6WID\",\n \"midterm_slides\": \"IW6HY8IC\",\n \"thesis\": \"93P5LGQ9\",\n}\n\nzot_storage_path = Path('~/Zotero/storage').expanduser()\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('file', type=str, help='file to upload and display')\n flags = parser.parse_args()\n\n file_path = Path(flags.file)\n file_name = file_path.stem\n\n dest_dir = zot_storage_path / filename2zotkey[file_name]\n dest_filename = Path(glob(str(dest_dir / \"*\"))[0]).name\n print(f'copying {file_path} to {dest_dir / dest_filename}')\n shutil.copy(file_path, dest_dir / dest_filename)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"scripts_/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"387068685","text":"import os\nimport sys\nimport shutil\nimport fnmatch\nimport 
tempfile\nimport contextlib\nimport functools\nimport errno\n\n\nif sys.version_info.major == 2:\n FileNotFoundError = OSError\n\n\ndef ensure_path_exists(dir_path):\n \"\"\"\n Make sure that a path exists\n \"\"\"\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n return True\n return False\n\n\ndef ensure_file_exists(file_path):\n \"\"\"\n Make sure that a path exists\n \"\"\"\n if os.path.exists(file_path):\n return False\n base_dir = os.path.dirname(file_path)\n ensure_path_exists(base_dir)\n with open(file_path, 'w'):\n pass\n return True\n\n\ndef remove_file_if_exists(path):\n if os.path.isfile(path):\n os.remove(path)\n return True\n return False\n\n\ndef remove_dir_if_exists(path):\n if os.path.isdir(path):\n shutil.rmtree(path)\n return True\n return False\n\n\ndef mkdir(path):\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n\nDEFAULT_CONTRACTS_DIR = \"./contracts/\"\n\n\ndef get_contracts_dir(project_dir):\n contracts_dir = os.path.join(project_dir, DEFAULT_CONTRACTS_DIR)\n return os.path.abspath(contracts_dir)\n\n\nBUILD_DIR = \"./build/\"\n\n\ndef get_build_dir(project_dir):\n build_dir = os.path.join(project_dir, BUILD_DIR)\n ensure_path_exists(build_dir)\n return build_dir\n\n\nCOMPILED_CONTRACTS_FILENAME = \"contracts.json\"\n\n\ndef get_compiled_contracts_file_path(project_dir):\n build_dir = get_build_dir(project_dir)\n return os.path.join(build_dir, COMPILED_CONTRACTS_FILENAME)\n\n\nBLOCKCHAIN_DIR = \"./chains/\"\n\n\ndef get_blockchains_dir(project_dir):\n blockchains_dir = os.path.abspath(os.path.join(project_dir, BLOCKCHAIN_DIR))\n ensure_path_exists(blockchains_dir)\n return blockchains_dir\n\n\nMIGRATIONS_DIR = \"./migrations/\"\n\n\ndef get_migrations_dir(project_dir, lazy_create=True):\n migrations_dir = os.path.abspath(os.path.join(project_dir, MIGRATIONS_DIR))\n if lazy_create:\n init_file_path = os.path.join(migrations_dir, '__init__.py')\n ensure_path_exists(migrations_dir)\n ensure_file_exists(init_file_path)\n return migrations_dir\n\n\ndef is_executable_available(program):\n def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath = os.path.dirname(program)\n if fpath:\n if is_exe(program):\n return True\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return True\n\n return False\n\n\ndef recursive_find_files(base_dir, pattern):\n for dirpath, _, filenames in os.walk(base_dir):\n for filename in filenames:\n if fnmatch.fnmatch(filename, pattern):\n yield os.path.join(dirpath, filename)\n\n\n@contextlib.contextmanager\ndef tempdir():\n directory = tempfile.mkdtemp()\n\n try:\n yield directory\n finally:\n shutil.rmtree(directory)\n\n\ndef is_same_path(p1, p2):\n n_p1 = os.path.abspath(os.path.expanduser(p1))\n n_p2 = os.path.abspath(os.path.expanduser(p2))\n\n try:\n return os.path.samefile(n_p1, n_p2)\n except FileNotFoundError:\n return n_p1 == n_p2\n\n\ndef relpath(fn):\n @functools.wraps(fn)\n def wrapper(*args, **kwargs):\n path = fn(*args, **kwargs)\n return os.path.relpath(path)\n return wrapper\n","sub_path":"populus/utils/filesystem.py","file_name":"filesystem.py","file_ext":"py","file_size_in_byte":3538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"86586366","text":"# -*- coding: utf-8 -*-\r\n\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom 
torch_geometric.data import DataLoader\r\nfrom torch_geometric import utils\r\nimport torch.nn.functional as F\r\nfrom torch.utils.data import random_split\r\n\r\nimport argparse\r\nimport os\r\nimport time\r\nimport setproctitle\r\nfrom train import *\r\nfrom dataset_processing import CVPDataset\r\nfrom gcn_model import *\r\nimport logging\r\n\r\nimport sys\r\n\r\ndef init_logging_and_result(args):\r\n    global Log_dir_name\r\n    global filename\r\n    Log_dir_name = 'Log'\r\n    if not os.path.exists(Log_dir_name):\r\n        os.makedirs(Log_dir_name)\r\n    \r\n    filename = '{}-lr{}-wd{}-enh{}-gnh{}-nh{}-{}'.format(args.model, args.lr, args.weight_decay, args.enh, args.gnh, args.n_hidden, args.en)\r\n    if not os.path.exists(Log_dir_name + '/' + filename):\r\n        logging.basicConfig(filename=Log_dir_name + '/' + filename, level=logging.INFO)\r\n    else:\r\n        print(Log_dir_name + '/' + filename, 'already exists, removing ...')\r\n        os.remove(Log_dir_name + '/' + filename)\r\n        logging.basicConfig(filename=Log_dir_name + '/' + filename, level=logging.INFO)\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    # Experiment parameters\r\n    parser = argparse.ArgumentParser(description='xxx')\r\n    \r\n    parser.add_argument('-sd', '--seed', type=int, default=4, help='random seed')\r\n    parser.add_argument('-exp', '--exp', type=int, default=0, help='experiment name')\r\n    parser.add_argument('-lr', '--lr', type=float, default=0.01, help='learning rate')\r\n    parser.add_argument('-b', '--batch_size', type=int, default=32, help='batch size')\r\n    parser.add_argument('-wd', '--weight_decay', type=float, default=1e-3, help='weight decay')\r\n    parser.add_argument('-e', '--epochs', type=int, default=100, help='number of epochs')\r\n    parser.add_argument('-d', '--dropout_ratio', type=float, default=0.2, help='dropout rate')\r\n    parser.add_argument('-dvs', '--device', type=str, default='cuda', choices=['cuda','cuda:2','cuda:1','cuda:0','cpu'])\r\n    parser.add_argument('-cd', '--cuda_device', type=str, default='1', help='cuda device')\r\n    parser.add_argument('-m', '--model', type=str, default='*', help='model')\r\n    parser.add_argument('-nh', '--n_hidden', type=int, default=32, help='number of hidden nodes in each layer of GCN')\r\n    parser.add_argument('-p', '--patience', type=int, default=80, help='Patience')\r\n    parser.add_argument('-heads', '--heads', type=int, default=1, help='number of heads for GAT')\r\n    parser.add_argument('-gnh', '--gnh', type=int, default=32, help='number of gcn hidden layers.')\r\n    parser.add_argument('-en', '--en', type=str, default='test',help='add experiment name.')\r\n    parser.add_argument('-enh','--enh', type=int, default=32, help='embedding size.')\r\n    parser.add_argument('-lf','--loss_fn', type=str, default='mse', help='loss function.')\r\n    parser.add_argument('-ew','--edge_weight', type=str, default='weighted', help='weighted edge.')\r\n    parser.add_argument('-es','--early_stop', type=float, default=0.0, help='the point of early stop.')\r\n    args = parser.parse_args()\r\n\r\n    init_logging_and_result(args)\r\n    # set random seeds and related config\r\n    torch.manual_seed(args.seed)\r\n    if torch.cuda.is_available():\r\n        torch.cuda.manual_seed_all(args.seed)\r\n    np.random.seed(args.seed)\r\n    torch.backends.cudnn.deterministic = True # make every training run produce the same result\r\n\r\n    setproctitle.setproctitle('xxx') # set the process name\r\n\r\n    ############################### Load Data ###############################\r\n    print('------------------------- Loading data -------------------------')\r\n    # create train, val, test set\r\n\r\n    train_loader = CVPDataset(season='q1', directed 
= True , args=args)\r\n val_loader = CVPDataset(season='q2', directed = True, args=args)\r\n test_loader = CVPDataset(season='q3', directed = True, args=args)\r\n \r\n \r\n args.num_features = train_loader[0].num_features\r\n\r\n n_trainset = train_loader.num\r\n n_valset = val_loader.num\r\n n_testset = test_loader.num\r\n for arg in vars(args):\r\n print(arg, getattr(args, arg))\r\n\r\n \r\n print(train_loader[0])\r\n\r\n if args.model=='GCN_motif_gru':\r\n model = GCN_motif_gru(args).to(args.device) \r\n else:\r\n raise NotImplementedError(args.model)\r\n\r\n \r\n\r\n print(model)\r\n train_params = list(filter(lambda p: p.requires_grad, model.parameters()))\r\n print('Trainable Parameters:', np.sum([p.numel() for p in train_params]))\r\n logging.info('Trainable Parameters:{}'.format(np.sum([p.numel() for p in train_params])))\r\n\r\n\r\n train(model, train_loader, val_loader, test_loader, args)\r\n #torch.save(model.state_dict(),'./model/{}-lr{}-wd{}-enh{}-gnh{}-nh{}-{}'.format(args.model, args.lr, args.weight_decay, args.enh, args.gnh, args.n_hidden, args.en))\r\n \r\n mae_loss, rmse_loss, mape_loss, mspe_loss = test(model,val_loader,args)\r\n #print(\"--q2-- MAE:{} RMSE:{} MAPE:{} MSPE:{}\".format(mae_loss, rmse_loss, mape_loss, mspe_loss)) \r\n #logging.info(\"--q2-- MAE:{} RMSE:{} MAPE:{} MSPE:{}\".format(mae_loss, rmse_loss, mape_loss, mspe_loss))\r\n \r\n mae_loss, rmse_loss, mape_loss, mspe_loss = test(model,test_loader,args)\r\n #print(\"--q3-- MAE:{} RMSE:{} MAPE:{} MSPE:{}\".format(mae_loss, rmse_loss, mape_loss, mspe_loss)) \r\n #logging.info(\"--q3-- MAE:{} RMSE:{} MAPE:{} MSPE:{}\".format(mae_loss, rmse_loss, mape_loss, mspe_loss))","sub_path":"EPD/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"394094652","text":"# Script that splits data into folds. 
The last fold is testing data while the others are training data\n#\n# Usage: pickle_train_test_data.py pickled_data number_of_folds\n# Example pickle_...py \"C:\\Users\\crobe\\Google Drive\\DataMiningGroup\\Datasets\\restaurant_photos_with_labels.pkl\" 6\nimport cPickle\nimport os\nimport sys\n\nimport pandas as pd\nfrom sklearn.cross_validation import StratifiedKFold\n\nif len(sys.argv) > 2:\n # Read system arguments\n filename = sys.argv[1]\n folds = int(sys.argv[2])\n\n # Output will be named as the input filename but appending _train.pkl or _test.pkl at the end.\n train_fn = os.path.splitext(filename)[0] + '_train.pkl'\n test_fn = os.path.splitext(filename)[0] + '_test.pkl'\n\n # Read the pandas dataframe with the JSON data\n df = pd.read_pickle(filename)\n # Change the label column to categorical type, and store the categories so that we know what they mean later\n df['label'] = df['label'].astype('category')\n with open('..\\data\\categories.pkl', 'wb') as f:\n cPickle.dump(df['label'].cat.categories, f, protocol=cPickle.HIGHEST_PROTOCOL)\n # Change from string to int\n df['label'] = df['label'].cat.codes.values\n # Create folds using a Stratified approach to keep proportions\n skf = StratifiedKFold(df['label'], n_folds=folds)\n\n # Use only the first configuration (train/test) and store as pickled data frames.\n train_ix, test_ix = next(iter(skf))\n df.loc[train_ix, :].to_pickle(train_fn)\n df.loc[test_ix, :].to_pickle(test_fn)\n","sub_path":"batches/pickle_train_test_data.py","file_name":"pickle_train_test_data.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"228545773","text":"import sys\r\nimport pandas as pd\r\nimport numpy as np\r\nimport seaborn as sns\r\nimport argparse\r\nimport matplotlib.pyplot as plt\r\nimport SegmentAnalysis as SA\r\n# Sklearn does not automatically import submodules\r\nimport sklearn.ensemble as skle\r\nimport sklearn.model_selection as sklms\r\nimport sklearn.linear_model as skllm\r\nimport sklearn.discriminant_analysis as sklda\r\n\r\n# Set up cmd arguments for the user\r\nparser = argparse.ArgumentParser(description = 'Enter absolute path and background coordinates')\r\nparser.add_argument('filename', type = str, help = 'Features extracted from algorithm (one sheet)')\r\nargs = parser.parse_args()\r\n\r\n# Open excel file\r\ndt = pd.read_excel(args.filename)\r\n# Drop all attributes which are not integers or floats\r\ndt = pd.concat([dt.select_dtypes('int64'), dt.select_dtypes('float64')], axis = 1)\r\ndt = dt.drop(['Unnamed: 0', 'Num'], axis = 1)\r\n# Set up variables to use for later\r\nmain_five = ['Single_Loaded', 'Clear', 'Straight', 'Head_First', 'Whole_Animal']\r\nattributes = dt.drop(main_five, axis = 1)\r\n\r\n\r\n#####################################################################################\r\n# LASSO: Least Absolute Shrinkage and Selection Operator: does feature\r\n# selection for you for linear model (L1 Regression)\r\n#####################################################################################\r\n# Lasso from SegmentAnalysis.py\r\nprint('\\nLasso')\r\nlass1, grid1 = SA.my_lasso(dt, 'Single_Loaded')\r\nlass2, grid2 = SA.my_lasso(dt, 'Clear')\r\nlass3, grid3 = SA.my_lasso(dt, 'Straight')\r\nlass4, grid4 = SA.my_lasso(dt, 'Head_First')\r\nlass5, grid5 = SA.my_lasso(dt, 'Whole_Animal')\r\nlass_weights = pd.DataFrame([lass1.coef_, lass2.coef_, lass3.coef_, lass4.coef_, lass5.coef_])\r\nlass_weights.columns = 
attributes.columns\r\nlass_weights['Features'] = main_five\r\nlass_weights.to_excel('feature_selection/lasso_weights.xlsx')\r\n# plt.matshow(lass_weights.drop(['Features'], axis = 1))\r\n# plt.xticks(range(len(attributes.columns)), attributes.columns, rotation=90)\r\n# plt.yticks(range(len(main_five)), main_five)\r\n# plt.title('Lasso Weights for Each Classification', pad = 120)\r\n# plt.savefig('feature_selection/lasso_weights' + '.png')\r\n# plt.show()\r\n\r\na = lass_weights.drop(['Features'], axis = 1).T\r\na.columns = main_five\r\nfor feature in main_five:\r\n print(feature)\r\n print(a.abs().nlargest(5, feature)[feature])\r\n\r\n\r\n# L1 regression\r\nprint('\\nL1')\r\n# l1_1, coefs1 = SA.l1_reg(dt,'Single_Loaded')\r\n# l1_2, coefs2 = SA.l1_reg(dt, 'Clear')\r\n# l1_3, coefs3 = SA.l1_reg(dt, 'Straight')\r\n# l1_4, coefs4 = SA.l1_reg(dt, 'Head_First')\r\n# l1_5, coefs5 = SA.l1_reg(dt, 'Whole_Animal')\r\n\r\n# for coef, feature in zip([coefs1.T, coefs2.T, coefs3.T, coefs4.T, coefs5.T], main_five):\r\n# print(feature)\r\n# for i in coef.columns:\r\n# print('C = ' + str(i))\r\n# print(attributes.columns[coef[i].abs().nlargest(5).index])\r\n# plt.matshow(coef)\r\n# plt.title('change in coeffs by c for ' + feature)\r\n# plt.savefig('feature_selection/change_in_coeffs_by_c_for_' + feature + '.png')\r\n# plt.show()\r\n# print('\\n')\r\n\r\n\r\n#####################################################################################\r\n# LDA\r\n#####################################################################################\r\n# Use lda function from SegmentAnalysis.py\r\nprint('\\nLDA')\r\n# test_acc1, lda1 = SA.lda(attributes, dt['Single_Loaded'], 'Single_Loaded')\r\n# test_acc2, lda2 = SA.lda(attributes, dt['Clear'], 'Clear')\r\n# test_acc3, lda3 = SA.lda(attributes, dt['Straight'], 'Straight')\r\n# test_acc4, lda4 = SA.lda(attributes, dt['Head_First'], 'Head_First')\r\n# test_acc5, lda5 = SA.lda(attributes, dt['Whole_Animal'], 'Whole_Animal')\r\n# # Put all of the weight coefficients from lda into a single pandas data frame\r\n# lda_weights = pd.DataFrame([lda1.coef_[0], lda2.coef_[0], lda3.coef_[0], lda4.coef_[0], lda5.coef_[0]])\r\n# lda_weights.columns = attributes.columns\r\n# # Get the absolute values of the weights for later\r\n# abs_lda_weights = lda_weights.abs()\r\n# # Add a column designating which feature corresponds to which row of the data frame\r\n# lda_weights.index = main_five\r\n# # Plot weights\r\n# plt.matshow(lda_weights)\r\n# plt.yticks(range(len(main_five)), main_five)\r\n# plt.title('Coefficient values for each feature from LDA', pad = 65)\r\n# plt.xticks(range(len(attributes.columns)), attributes.columns, rotation=90)\r\n# plt.savefig('feature_selection/lda_weights_heatmap.png')\r\n# plt.show()\r\n# # Output the weights to an excel file\r\n# lda_weights.to_excel('feature_selection/lda_weights.xlsx')\r\n\r\n# # Identify largest weights\r\n# abs_lda_weights = abs_lda_weights.T\r\n# abs_lda_weights.columns = main_five\r\n# for column in main_five:\r\n# print(column + ': \\n')\r\n# print(abs_lda_weights.nlargest(5, column)[column])\r\n\r\n\r\n#####################################################################################\r\n# Forward/backward/stepwise selection: only keep the best/most accurate\r\n# variables (ML extend module)\r\n#####################################################################################\r\n# Step forward feature selection from SegmentAnalysis.py\r\nprint('\\nSFS')\r\n# sfs1, classifier1, data1 = SA.step_forward(attributes, 
dt['Single_Loaded'], 'Single_Loaded')\r\n# sfs2, classifier2, data2 = SA.step_forward(attributes, dt['Clear'], 'Clear')\r\n# sfs3, classifier3, data3 = SA.step_forward(attributes, dt['Straight'], 'Straight')\r\n# sfs4, classifier4, data4 = SA.step_forward(attributes, dt['Head_First'], 'Head_First')\r\n# sfs5, classifier5, data5 = SA.step_forward(attributes, dt['Whole_Animal'], 'Whole_Animal')\r\n# sfs_data = pd.concat([data1, data2, data3, data4, data5])\r\n# sfs_data.to_excel('feature_selection/sfs_data.xlsx')\r\n","sub_path":"Python/FeatureSelectionV4.py","file_name":"FeatureSelectionV4.py","file_ext":"py","file_size_in_byte":5686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"343790319","text":"import os\nimport requests\nimport zipfile\nimport stat\n\n\nURLS = [\n \"https://s3-us-west-1.amazonaws.com/udacity-drlnd/P3/Tennis/Tennis_Linux.zip\", \n ]\n\n\nif __name__ == '__main__':\n for url in URLS:\n file_name = url.split('/')[-1]\n if os.path.isfile(file_name):\n continue\n\n print('Downloading ' + url)\n response = requests.get(url)\n response.raise_for_status()\n\n with open(file_name, mode='bw') as f:\n f.write(response.content)\n print('Saved ' + file_name)\n\n with zipfile.ZipFile(file_name, 'r') as zip_ref:\n zip_ref.extractall('.')\n\n dir_name = file_name.split('.')[0]\n exec_file = os.path.join(dir_name, 'Tennis.x86_64')\n os.chmod(exec_file, os.stat(exec_file).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n","sub_path":"download_external_dependencies.py","file_name":"download_external_dependencies.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"67273221","text":"# This is a sample Python script.\n\n# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\nimport nltk\nimport csv\n# nltk.download('twitter_samples')\n# nltk.download('stopwords')\n# nltk.download('averaged_perceptron_tagger')\n# nltk.download('wordnet')\n# nltk.download('punkt')\nimport pandas as pd\nimport os\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom nltk.corpus import twitter_samples, stopwords\nfrom nltk.tag import pos_tag\nfrom nltk.tokenize import word_tokenize\nfrom nltk import FreqDist, classify, NaiveBayesClassifier\nfrom pandas import Series\nimport re, string, random\nfrom nltk.tokenize import TweetTokenizer\nimport collections\nfrom nltk.metrics.scores import precision\nfrom nltk.metrics.scores import recall\n\ntknzr = TweetTokenizer()\nfrom sklearn.naive_bayes import (\n BernoulliNB,\n ComplementNB,\n MultinomialNB,\n)\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\nfrom nltk.metrics import f_measure\n\ndef remove_noise(tweet_tokens, stop_words=()):\n cleaned_tokens = []\n\n for token, tag in pos_tag(tweet_tokens):\n token = re.sub('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+#]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '', token)\n token = re.sub(\"(@[A-Za-z0-9_]+)\", \"\", token)\n token = 
re.sub(\"Covid\", \"\", token)\n token = re.sub(\"COVID\", \"\", token)\n token = re.sub(\"lockdown\", \"\", token)\n token = re.sub(\"nan\", \"\", token)\n token = re.sub(\"️\", \"\", token)\n token = re.sub(\"get\", \"\", token)\n token = re.sub(\"australia\", \"\", token)\n token = re.sub(\"go\", \"\", token)\n token = re.sub(\"vaccine\", \"\", token)\n token = re.sub(\"pandemic\", \"\", token)\n token = re.sub(\"^\\s+\", \"\", token) # remove the front\n token = re.sub(\"\\s+\\Z\",\"\",token) #remove the back\n token = re.sub(\"19\", \"\", token)\n token = re.sub(\"Australia\", \"\", token)\n token = re.sub(\"AUSTRALIA\", \"\", token)\n token = re.sub(\"sydney\", \"\", token)\n\n\n if tag.startswith(\"NN\"):\n pos = 'n'\n elif tag.startswith('VB'):\n pos = 'v'\n else:\n pos = 'a'\n\n lemmatizer = WordNetLemmatizer()\n token = lemmatizer.lemmatize(token, pos)\n\n if len(token) > 0 and token not in string.punctuation and token.lower() not in stop_words and token != \"...\" and token != \"’\" and token != 'covid':\n cleaned_tokens.append(token.lower())\n return cleaned_tokens\n\n\ndef get_all_words(cleaned_tokens_list):\n for tokens in cleaned_tokens_list:\n for token in tokens:\n yield token\n\n\ndef get_tweets_for_model(cleaned_tokens_list):\n for tweet_tokens in cleaned_tokens_list:\n yield dict([token, True] for token in tweet_tokens)\n\n\nif __name__ == \"__main__\":\n # --------------------------------------------\n\n # Read from general dataset with positive/negative sentiments\n\n # file = open('dataset.csv')\n # csvreader = csv.reader(file)\n # df = pd.read_csv(file)\n # print(df.head())\n # df = df.sample(frac=1).reset_index(drop=True)\n # print(df.head())\n # df = df.iloc[:50000]\n # print(df.head())\n\n # ---------------------------------------------\n df = pd.read_csv('allMerged.csv', index_col=False)\n\n # Merge hastags with Tweet------------\n df['Tweet_text_merged'] = df.Tweet_text.astype(str).str.cat(df.hashtags.astype(str), sep=' ')\n # ----------------------\n # df = df[['Sentiment_Label', 'Tweet_text']]\n df = df[['Sentiment_Label', 'Tweet_text_merged']]\n print(\"1\")\n positive_tweets = df.loc[df['Sentiment_Label'] == 'positive']\n negative_tweets = df.loc[df['Sentiment_Label'] == 'negative']\n neutral_tweets = df.loc[df['Sentiment_Label'] == 'neutral']\n\n positive_tweets = positive_tweets[['Tweet_text_merged']]\n negative_tweets = negative_tweets[['Tweet_text_merged']]\n neutral_tweets = neutral_tweets[['Tweet_text_merged']]\n\n positive_tweet_tokens = []\n negative_tweet_tokens = []\n neutral_tweet_tokens = []\n\n for index, row in positive_tweets.iterrows():\n positive_tweet_tokens.append(tknzr.tokenize(row['Tweet_text_merged']))\n # positive_tweet_tokens.append(nltk.word_tokenize(row['Tweet_text']))\n\n for index, row in negative_tweets.iterrows():\n negative_tweet_tokens.append(tknzr.tokenize(row['Tweet_text_merged']))\n # negative_tweet_tokens.append(nltk.word_tokenize(row['Tweet_text']))\n\n for index, row in neutral_tweets.iterrows():\n neutral_tweet_tokens.append(tknzr.tokenize(row['Tweet_text_merged']))\n # neutral_tweet_tokens.append(nltk.word_tokenize(row['Tweet_text']))\n\n # ---------------------------------------------\n stop_words = stopwords.words('english')\n\n positive_cleaned_tokens_list = []\n negative_cleaned_tokens_list = []\n neutral_cleaned_tokens_list = []\n print(\"2\")\n for tokens in positive_tweet_tokens:\n positive_cleaned_tokens_list.append(remove_noise(tokens, stop_words))\n\n for tokens in negative_tweet_tokens:\n 
negative_cleaned_tokens_list.append(remove_noise(tokens, stop_words))\n\n    for tokens in neutral_tweet_tokens:\n        neutral_cleaned_tokens_list.append(remove_noise(tokens, stop_words))\n\n    print(\"Most common occurring words in positive tweets\")\n    all_pos_words = get_all_words(positive_cleaned_tokens_list)\n    freq_dist_pos = FreqDist(all_pos_words)\n    print(freq_dist_pos.most_common(10))\n\n    print(\"Most common occurring words in negative tweets\")\n    all_neg_words = get_all_words(negative_cleaned_tokens_list)\n    freq_dist_neg = FreqDist(all_neg_words)\n    print(freq_dist_neg.most_common(10))\n\n    print(\"Most common occurring words in neutral tweets\")\n    all_neu_words = get_all_words(neutral_cleaned_tokens_list)\n    freq_dist_neu = FreqDist(all_neu_words)\n    print(freq_dist_neu.most_common(10))\n\n    positive_tokens_for_model = get_tweets_for_model(positive_cleaned_tokens_list)\n    negative_tokens_for_model = get_tweets_for_model(negative_cleaned_tokens_list)\n    neutral_tokens_for_model = get_tweets_for_model(neutral_cleaned_tokens_list)\n\n    positive_dataset = [(tweet_dict, \"Positive\")\n                        for tweet_dict in positive_tokens_for_model]\n\n    negative_dataset = [(tweet_dict, \"Negative\")\n                        for tweet_dict in negative_tokens_for_model]\n\n    neutral_dataset = [(tweet_dict, \"Neutral\")\n                       for tweet_dict in neutral_tokens_for_model]\n\n    dataset = positive_dataset + negative_dataset + neutral_dataset\n\n    random.shuffle(dataset)\n    print(len(dataset))\n    train_data = dataset[:31000]\n    print(len(train_data))\n    test_data = dataset[31000:]\n    print(len(test_data))\n    # print(\"3\")\n    classifier = NaiveBayesClassifier.train(train_data)\n\n    print(\"Accuracy is:\", classify.accuracy(classifier, test_data))\n\n    print(classifier.show_most_informative_features(10))\n\n    # Precision and recall\n    refsets = collections.defaultdict(set)\n    testsets = collections.defaultdict(set)\n\n    for i, (feats, label) in enumerate(test_data):\n        refsets[label].add(i)\n        observed = classifier.classify(feats)\n        testsets[observed].add(i)\n    print(refsets)\n\n    print('Precision Positive:', precision(refsets['Positive'], testsets['Positive']))\n    print('Recall Positive:', recall(refsets['Positive'], testsets['Positive']))\n    print('F-measure Positive: ', f_measure(refsets['Positive'], testsets['Positive']))\n\n    print('Precision Negative:', precision(refsets['Negative'], testsets['Negative']))\n    print('Recall Negative:', recall(refsets['Negative'], testsets['Negative']))\n    print('F-measure Negative: ', f_measure(refsets['Negative'], testsets['Negative']))\n\n    print('Precision Neutral:', precision(refsets['Neutral'], testsets['Neutral']))\n    print('Recall Neutral:', recall(refsets['Neutral'], testsets['Neutral']))\n    print('F-measure Neutral: ', f_measure(refsets['Neutral'], testsets['Neutral']))\n\n    # All other classifiers\n    classifiers = {\n        \"SGDClassifier\": SGDClassifier(max_iter=1000),\n        # \"MultinomialNB\": MultinomialNB(),\n        \"LinearSVC\": LinearSVC(),\n        # \"BernoulliNB\": BernoulliNB(),\n        \"ComplementNB\": ComplementNB(),\n\n        # \"KNeighborsClassifier\": KNeighborsClassifier(),\n        # \"DecisionTreeClassifier\": DecisionTreeClassifier(),\n        \"RandomForestClassifier\": RandomForestClassifier(),\n        \"LogisticRegression\": LogisticRegression(max_iter=10000),\n        # \"MLPClassifier\": MLPClassifier(),\n        # \"AdaBoostClassifier\": AdaBoostClassifier(),\n    }\n\n\n\n    # train_count = 31000\n    # # print(\"4\")\n    # for name, sklearn_classifier in classifiers.items():\n    #     classifier = nltk.classify.SklearnClassifier(sklearn_classifier)\n    #\n    #     classifier.train(train_data)\n    #\n    # 
accuracy = nltk.classify.accuracy(classifier, test_data)\n #\n # print(F\"{accuracy:.2%} - {name}\")\n #\n # custom_tweet = \"This is bad and wrong\"\n # # print(\"5\")\n # custom_tokens = remove_noise(word_tokenize(custom_tweet))\n #\n # print(custom_tweet, classifier.classify(dict([token, True] for token in custom_tokens)))\n","sub_path":"covid19SentimentalAnalysis/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"583634739","text":"import numpy as np\nfrom unittest import TestCase\nfrom exatomic.va import GenInput as gi, gen_delta\nfrom exatomic.gaussian import Fchk\nfrom exatomic.base import resource\n\nclass TestGenInput(TestCase):\n def setUp(self):\n self.h2o2 = Fchk(resource('g16-h2o2-def2tzvp-freq.fchk'))\n\n def test_delta_small(self):\n delta_0 = np.array([[0.10591816, 0.],\n [0.08949634, 1.],\n [0.10522441, 2.],\n [0.10582653, 3.],\n [0.10712271, 4.],\n [0.10699982, 5.]])\n delta_1 = np.array([[0.05947087, 0.],\n [0.05947087, 1.],\n [0.05947087, 2.],\n [0.05947087, 3.],\n [0.05947087, 4.],\n [0.05947087, 5.]])\n delta_2 = np.array([[0.05671023, 0.],\n [0.05960412, 1.],\n [0.05672138, 2.],\n [0.05669342, 3.],\n [0.05189421, 4.],\n [0.05188801, 5.]])\n delta_3 = np.array([[0.01000000, 0.],\n [0.01000000, 1.],\n [0.01000000, 2.],\n [0.01000000, 3.],\n [0.01000000, 4.],\n [0.01000000, 5.]])\n self.h2o2.parse_frequency()\n test_0 = gen_delta(delta_type=0, freq=self.h2o2.frequency.copy())\n test_1 = gen_delta(delta_type=1, freq=self.h2o2.frequency.copy())\n test_2 = gen_delta(delta_type=2, freq=self.h2o2.frequency.copy())\n test_3 = gen_delta(delta_type=3, disp=0.01, freq=self.h2o2.frequency.copy())\n self.assertTrue(np.allclose(test_0.values, delta_0))\n self.assertTrue(np.allclose(test_1.values, delta_1))\n self.assertTrue(np.allclose(test_2.values, delta_2))\n self.assertTrue(np.allclose(test_3.values, delta_3))\n self.assertRaises(ValueError, gen_delta, delta_type=3, freq=self.h2o2.frequency.copy())\n\n def test_all_small(self):\n self.h2o2.parse_atom()\n self.h2o2.parse_frequency()\n self.h2o2.parse_frequency_ext()\n all_freq = gi(uni=self.h2o2, delta_type=2)\n self.assertEqual(all_freq.disp.shape[0], 52)\n self.assertTrue(np.allclose(np.concatenate([[0.], self.h2o2.frequency_ext['freq'].values]),\n all_freq.disp['modes'].drop_duplicates().values))\n\n def test_select_freq(self):\n self.h2o2.parse_atom()\n self.h2o2.parse_frequency()\n self.h2o2.parse_frequency_ext()\n freq_2_3_4 = gi(uni=self.h2o2, delta_type=2, fdx=[2,3,4])\n self.assertEqual(freq_2_3_4.disp.shape[0], 28)\n self.assertTrue(np.allclose(np.concatenate([[0.], self.h2o2.frequency_ext.loc[[1,2,3],'freq'].values]),\n freq_2_3_4.disp['modes'].drop_duplicates().values))\n freq_5 = gi(uni=self.h2o2, delta_type=2, fdx=[5])\n self.assertEqual(freq_5.disp.shape[0], 12)\n self.assertTrue(np.allclose(np.concatenate([[0.], [self.h2o2.frequency_ext.loc[4,'freq']]]),\n freq_5.disp['modes'].drop_duplicates().values))\n\n","sub_path":"exatomic/va/tests/test_inputs.py","file_name":"test_inputs.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"1088204","text":"#!/usr/bin/env python\n\nimport copy\nimport readline\nimport sys\nimport re\n\nclass SimpleCompleter(object):\n\n def __init__(self, options):\n self.options = sorted(options)\n return\n\n def complete(self, text, state):\n response = None\n if state 
== 0:\n # This is the first time for this text, so build a match list.\n if text:\n self.matches = [s for s in self.options if s and s.startswith(text)]\n else:\n self.matches = self.options[:]\n\n # Return the state'th item from the match list, if we have that many.\n try:\n response = self.matches[state]\n except IndexError:\n response = None\n return response\n\nclass PandemicInfections(object):\n\n def __init__(self, cities_file, state_filename='state.txt'):\n self.cities = []\n self.stack = []\n self.cards_drawn = []\n self.state_filename = state_filename\n self.level = 2\n self.setup(cities_file)\n\n def setup(self, cities_file):\n self.read_cities(cities_file)\n self.stack = [copy.copy(self.cities)]\n # Register the completer function and bind tab\n options = copy.copy(self.cities)\n options = sorted(set(options), key=options.index)\n options.append('READ')\n options.append('EPIDEMIC')\n readline.set_completer(SimpleCompleter(options).complete)\n readline.parse_and_bind('tab: complete')\n\n def set_level(self):\n question = 'Set infection level to? '\n impossible = 'Invalid input! Please enter a number.'\n line = input(question)\n while not re.match('^\\d+$', line):\n print(impossible)\n line = input(question)\n self.level = int(line)\n\n def read_cities(self, filename):\n # Read input file with city names.\n self.cities = []\n with open(filename, 'r') as f:\n for line in f:\n line = line.strip('\\n')\n if not line or line.startswith('#'):\n continue\n if '*' in line:\n n, city = line.split('*')\n self.cities += int(n) * [city]\n else:\n self.cities += [line]\n\n def draw_card(self, line):\n # Draw card from the top of the stack and add it to the discard pile.\n self.cards_drawn.append(line)\n assert(self.stack[-1])\n self.stack[-1].remove(line)\n if not self.stack[-1]:\n self.stack.pop()\n\n def epidemic(self):\n question = 'Which city was drawn from the bottom in the Epidemic? 
'\n impossible = 'This is impossible!'\n # Draw card from front of the stack (\"the bottom\")\n line = input(question)\n assert(self.stack)\n front_pile = self.stack[0]\n while not line in front_pile:\n print(impossible)\n line = input(question)\n self.cards_drawn.append(line)\n # Remove card from front pile and remove it from the stack if it is now empty.\n front_pile.remove(line)\n if not front_pile:\n del self.stack[0]\n # Push discard pile on stack and reset it.\n self.stack.append(sorted(self.cards_drawn))\n self.cards_drawn = []\n\n def print_state(self, f=sys.stdout):\n # Print the draw deck with sections\n i = 0\n print('', file=f)\n print('############################', file=f)\n print('### The Deck ###', file=f)\n for x in self.stack:\n for city in sorted(set(x), key=lambda v: x.count(v), reverse=True):\n print('%d * %s' % (x.count(city), city), file=f)\n i += 1\n if i != len(self.stack):\n print('----------------------------', file=f)\n print('############################', file=f)\n # Print the discard pile\n print('', file=f)\n print('############################', file=f)\n print('### Discard ###', file=f)\n for city in sorted(set(self.cards_drawn), key=lambda v: self.cards_drawn.count(v), reverse=True):\n print('%d * %s' % (self.cards_drawn.count(city), city), file=f)\n print('############################', file=f)\n\n def write_state(self):\n # Write the current state to disk\n with open(self.state_filename, 'a') as f:\n self.print_state(f=f)\n\n def read_state(self):\n # Read the current state from disk\n self.stack = []\n self.cards_drawn = []\n phase = ''\n with open(self.state_filename, 'r') as f:\n for line in f:\n line = line.strip('\\n')\n if 'The Deck' in line:\n self.stack = [[]]\n self.cards_drawn = []\n phase = 'deck'\n elif 'Discard' in line:\n phase = 'discard'\n if phase == 'deck' and line.startswith('-----'):\n self.stack.append([])\n if not re.search('^\\d+ \\* \\w+$', line):\n continue\n occurences, _, city = line.split(' ')\n for k in range(int(occurences)):\n if phase == 'deck':\n self.stack[-1].append(city)\n elif phase == 'discard':\n self.cards_drawn.append(city)\n else:\n assert(False)\n\n def calculate_probability(self, city, M, N, stack=None):\n if stack is None:\n stack = copy.deepcopy(self.stack)\n N_cards = sum([len(x) for x in stack])\n N = min(N, N_cards)\n\n # Stop conditions\n if M == 0:\n return 1.0\n if M > N:\n return 0.0\n\n assert(M >= 1)\n assert(N >= 1)\n assert(stack)\n\n pile = stack.pop()\n count = pile.count(city)\n total = len(pile)\n assert(total > 0)\n p_city = count / total\n\n # If there was only one card to draw: This is the leaf probability\n if N == 1:\n return p_city\n\n # Prepare two new piles, one with the city removed and one with some other city removed (if any)\n pile1 = copy.copy(pile)\n if city in pile1:\n pile1.remove(city)\n pile2 = copy.copy(pile)\n for x in pile2:\n if x != city:\n pile2.remove(x)\n break\n\n # Add the two new piles to two stacks\n stack1 = copy.copy(stack)\n stack2 = copy.copy(stack)\n if pile1:\n stack1.append(pile1)\n if pile2:\n stack2.append(pile2)\n\n # Add the two branch probabilities\n p1 = (p_city * self.calculate_probability(city, M-1, N-1, stack=stack1)) if p_city > 0.0 else 0.0\n p2 = ((1 - p_city) * self.calculate_probability(city, M, N-1, stack=stack2)) if p_city < 1.0 else 0.0\n return p1 + p2\n\n def print_probabilities(self, f=sys.stdout):\n print('', file=f)\n\n header = '%-15s' % 'Name'\n for i in range(1, self.level + 1):\n header += ' %6s' % (\"N>=%d\" % i)\n print(header, 
file=f)\n print(len(header)*'-', file=f)\n\n probabilities = dict()\n for x in set(self.cities):\n probabilities[x] = []\n for M in range(1, self.level + 1):\n probabilities[x].append(self.calculate_probability(x, M, self.level))\n\n for x, p in sorted(probabilities.items(), key=lambda x: x[1][0], reverse=True):\n line = '%-15s ' % x\n for px in p:\n line += \"%5.1f%% \" % (100.0 * px)\n print(line, file=f)\n\n def write_probabilities(self):\n # Write the current state to disk\n with open(self.state_filename, 'a') as f:\n self.print_probabilities(f=f)\n\n def run(self):\n # The main input loop\n question = 'Please enter the name of the city which was drawn or \"EPIDEMIC/READ\": '\n impossible = 'This is impossible!'\n while True:\n # Get new input\n print()\n line = input(question)\n while line not in ['EPIDEMIC', 'READ', 'LEVEL'] and not line in self.stack[-1]:\n print(impossible)\n line = input(question)\n # Process\n if line == 'LEVEL':\n self.set_level()\n elif line == 'READ':\n self.read_state()\n elif line == 'EPIDEMIC':\n self.epidemic()\n else:\n self.draw_card(line)\n # Print current state and probabilities, write state to disk\n self.print_state()\n self.print_probabilities()\n self.write_state()\n self.write_probabilities()\n\n# Start the input loop\ncities_file = sys.argv[1]\noutput_file = sys.argv[2] if len(sys.argv) > 2 else 'state.txt'\np = PandemicInfections(cities_file=cities_file, state_filename=output_file)\np.run()\n","sub_path":"pandemic.py","file_name":"pandemic.py","file_ext":"py","file_size_in_byte":7865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"402968055","text":"r\"\"\"\nThis module implements differential operators on polar grids \n\n.. autosummary::\n :nosignatures:\n\n make_laplace\n make_gradient\n make_divergence\n make_vector_gradient\n make_tensor_divergence\n make_operator\n \n \n.. codeauthor:: David Zwicker \n\n\n.. The following contains text parts that are used multiple times below:\n\n.. |Description_polar| replace:: \n This function assumes polar symmetry of the grid, so that fields only\n depend on the radial coordinate `r`. The radial discretization is defined as\n :math:`r_i = r_\\mathrm{min} + (i + \\frac12) \\Delta r` for\n :math:`i=0, \\ldots, N_r-1`, where :math:`r_\\mathrm{min}` is the radius of\n the inner boundary, which is zero by default. Note that the radius of the \n outer boundary is given by\n :math:`r_\\mathrm{max} = r_\\mathrm{min} + N_r \\Delta r`.\n\"\"\"\n\nfrom typing import Callable\n\nfrom .. 
import PolarGrid\nfrom ..boundaries import Boundaries\nfrom ...tools.numba import jit_allocate_out\n\n\n\ndef make_laplace(bcs: Boundaries) -> Callable:\n \"\"\" make a discretized laplace operator for a polar grid\n \n |Description_polar|\n\n Args:\n bcs (:class:`~pde.grids.boundaries.axes.Boundaries`):\n |Arg_boundary_conditions|\n \n Returns:\n A function that can be applied to an array of values\n \"\"\"\n assert isinstance(bcs.grid, PolarGrid)\n bcs.check_value_rank(0)\n \n # calculate preliminary quantities\n dim_r = bcs.grid.shape[0]\n dr = bcs.grid.discretization[0]\n rs = bcs.grid.axes_coords[0]\n r_min, _ = bcs.grid.axes_bounds[0]\n dr_2 = 1 / dr**2\n\n # prepare boundary values\n value_lower_bc = bcs[0].low.get_virtual_point_evaluator()\n value_upper_bc = bcs[0].high.get_virtual_point_evaluator() \n \n @jit_allocate_out(out_shape=(dim_r,))\n def laplace(arr, out=None):\n \"\"\" apply laplace operator to array `arr` \"\"\"\n i = 0\n if r_min == 0:\n out[i] = 2 * (arr[i + 1] - arr[i]) * dr_2\n else:\n arr_r_l = value_lower_bc(arr, (i,))\n out[i] = ((arr[i + 1] - 2 * arr[i] + arr_r_l) * dr_2 +\n (arr[i + 1] - arr_r_l) / (2 * rs[i] * dr))\n \n for i in range(1, dim_r - 1): # iterate inner radial points\n out[i] = ((arr[i + 1] - 2 * arr[i] + arr[i - 1]) * dr_2 +\n (arr[i + 1] - arr[i - 1]) / (2 * rs[i] * dr))\n \n # express boundary condition at outer side\n i = dim_r - 1\n arr_r_h = value_upper_bc(arr, (i,))\n out[i] = ((arr_r_h - 2 * arr[i] + arr[i - 1]) * dr_2 +\n (arr_r_h - arr[i - 1]) / (2 * rs[i] * dr))\n return out \n \n return laplace # type: ignore\n\n\n\ndef make_gradient(bcs: Boundaries) -> Callable:\n \"\"\" make a discretized gradient operator for a polar grid\n \n |Description_polar|\n\n Args:\n bcs (:class:`~pde.grids.boundaries.axes.Boundaries`):\n |Arg_boundary_conditions|\n \n Returns:\n A function that can be applied to an array of values\n \"\"\"\n assert isinstance(bcs.grid, PolarGrid)\n bcs.check_value_rank(0)\n\n # calculate preliminary quantities\n dim_r = bcs.grid.shape[0]\n r_min, _ = bcs.grid.axes_bounds[0]\n dr = bcs.grid.discretization[0]\n scale_r = 1 / (2 * dr)\n \n # prepare boundary values\n boundary = bcs[0]\n value_lower_bc = boundary.low.get_virtual_point_evaluator()\n value_upper_bc = boundary.high.get_virtual_point_evaluator()\n \n @jit_allocate_out(out_shape=(2, dim_r))\n def gradient(arr, out=None):\n \"\"\" apply gradient operator to array `arr` \"\"\"\n # no-flux at the origin \n i = 0\n if r_min == 0:\n out[0, i] = (arr[1] - arr[0]) * scale_r\n else:\n arr_r_l = value_lower_bc(arr, (i,))\n out[0, i] = (arr[1] - arr_r_l) * scale_r \n out[1, i] = 0 # no angular dependence by definition\n \n for i in range(1, dim_r - 1): # iterate inner radial points\n out[0, i] = (arr[i + 1] - arr[i - 1]) * scale_r\n out[1, i] = 0 # no angular dependence by definition\n\n i = dim_r - 1\n arr_r_h = value_upper_bc(arr, (i,))\n out[0, i] = (arr_r_h - arr[i - 1]) * scale_r\n out[1, i] = 0 # no angular dependence by definition\n \n return out\n \n return gradient # type: ignore\n\n\n\ndef make_divergence(bcs: Boundaries) -> Callable:\n \"\"\" make a discretized divergence operator for a polar grid\n \n |Description_polar|\n\n Args:\n bcs (:class:`~pde.grids.boundaries.axes.Boundaries`):\n |Arg_boundary_conditions|\n \n Returns:\n A function that can be applied to an array of values\n \"\"\"\n assert isinstance(bcs.grid, PolarGrid)\n bcs.check_value_rank(0)\n\n # calculate preliminary quantities\n dim_r = bcs.grid.shape[0]\n dr = bcs.grid.discretization[0]\n rs = 
bcs.grid.axes_coords[0]\n r_min, _ = bcs.grid.axes_bounds[0]\n scale_r = 1 / (2 * dr)\n \n # prepare boundary values\n boundary = bcs[0]\n value_lower_bc = boundary.low.get_virtual_point_evaluator()\n value_upper_bc = boundary.high.get_virtual_point_evaluator()\n\n if r_min == 0:\n @jit_allocate_out(out_shape=(dim_r,))\n def divergence(arr, out=None):\n \"\"\" apply divergence operator to array `arr` \"\"\" \n # inner radial boundary condition\n i = 0\n out[i] = (arr[0, 1] + 3 * arr[0, 0]) * scale_r\n \n for i in range(1, dim_r - 1): # iterate radial points\n out[i] = ((arr[0, i + 1] - arr[0, i - 1]) * scale_r + \n (arr[0, i] / ((i + 0.5) * dr)))\n \n # outer radial boundary condition\n i = dim_r - 1\n arr_r_h = value_upper_bc(arr[0], (i,))\n out[i] = ((arr_r_h - arr[0, i - 1]) * scale_r + \n (arr[0, i] / ((i + 0.5) * dr)))\n \n return out\n \n else: # r_min > 0\n @jit_allocate_out(out_shape=(dim_r,))\n def divergence(arr, out=None):\n \"\"\" apply divergence operator to array `arr` \"\"\" \n # inner radial boundary condition\n i = 0\n arr_r_l = value_lower_bc(arr[0], (i,))\n out[i] = (arr[0, i + 1] - arr_r_l) * scale_r + arr[0, i] / rs[i]\n \n for i in range(1, dim_r - 1): # iterate radial points\n out[i] = ((arr[0, i + 1] - arr[0, i - 1]) * scale_r + \n arr[0, i] / rs[i])\n \n # outer radial boundary condition\n i = dim_r - 1\n arr_r_h = value_upper_bc(arr[0], (i,))\n out[i] = (arr_r_h - arr[0, i - 1]) * scale_r + arr[0, i] / rs[i]\n \n return out\n \n return divergence # type: ignore\n\n \n \ndef make_vector_gradient(bcs: Boundaries) -> Callable:\n \"\"\" make a discretized vector gradient operator for a polar grid\n \n |Description_polar|\n\n Args:\n bcs (:class:`~pde.grids.boundaries.axes.Boundaries`):\n |Arg_boundary_conditions|\n \n Returns:\n A function that can be applied to an array of values\n \"\"\"\n assert isinstance(bcs.grid, PolarGrid)\n bcs.check_value_rank(1)\n\n gradient_r = make_gradient(bcs.extract_component(0))\n gradient_phi = make_gradient(bcs.extract_component(1))\n \n @jit_allocate_out(out_shape=(2, 2) + bcs.grid.shape)\n def vector_gradient(arr, out=None):\n \"\"\" apply gradient operator to array `arr` \"\"\"\n gradient_r(arr[0], out=out[:, 0])\n gradient_phi(arr[1], out=out[:, 1])\n return out \n \n return vector_gradient # type: ignore\n\n\n\ndef make_tensor_divergence(bcs: Boundaries) -> Callable:\n \"\"\" make a discretized tensor divergence operator for a polar grid\n \n |Description_polar|\n\n Args:\n bcs (:class:`~pde.grids.boundaries.axes.Boundaries`):\n |Arg_boundary_conditions|\n \n Returns:\n A function that can be applied to an array of values\n \"\"\"\n assert isinstance(bcs.grid, PolarGrid)\n bcs.check_value_rank(1)\n\n divergence_r = make_divergence(bcs.extract_component(0))\n divergence_phi = make_divergence(bcs.extract_component(1))\n \n @jit_allocate_out(out_shape=(2,) + bcs.grid.shape)\n def tensor_divergence(arr, out=None):\n \"\"\" apply gradient operator to array `arr` \"\"\"\n divergence_r(arr[0], out=out[0])\n divergence_phi(arr[1], out=out[1])\n return out\n \n return tensor_divergence # type: ignore\n\n\n\ndef make_operator(op: str, bcs: Boundaries) -> Callable:\n \"\"\" make a discretized operator for a polar grid\n \n |Description_polar|\n\n Args:\n op (str): Identifier for the operator. 
Some examples are 'laplace',\n 'gradient', or 'divergence'.\n bcs (:class:`~pde.grids.boundaries.axes.Boundaries`):\n |Arg_boundary_conditions|\n \n Returns:\n A function that takes the discretized data as an input and returns\n the data to which the operator `op` has been applied. This function\n optionally supports a second argument, which provides allocated\n memory for the output.\n \"\"\"\n if op == 'laplace' or op == 'laplacian':\n return make_laplace(bcs)\n elif op == 'gradient':\n return make_gradient(bcs)\n elif op == 'divergence':\n return make_divergence(bcs)\n elif op == 'vector_gradient':\n return make_vector_gradient(bcs)\n elif op == 'tensor_divergence':\n return make_tensor_divergence(bcs)\n else:\n raise NotImplementedError(f'Operator `{op}` is not defined for '\n 'polar grids')\n \n \n\n \n","sub_path":"pde/grids/operators/polar.py","file_name":"polar.py","file_ext":"py","file_size_in_byte":9817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"603838580","text":"#!/usr/bin/env python3\n\nfrom click.testing import CliRunner\nimport ev_group46\nimport urllib3\nimport json\nimport os\nfrom os.path import expanduser\nurllib3.disable_warnings()\n\nhome = expanduser(\"~\")\t#https://stackoverflow.com/questions/4028904/how-to-get-the-home-directory-in-python\npath_of_token = \"%s/softeng20bAPI.token\" % home\n\ndef test_healthcheck():\n\trunner = CliRunner()\n\tresult = runner.invoke(ev_group46.healthcheck)\n\tassert result.exit_code == 0\n\tassert result.output == 'we are connected with the database\\n'\n\ndef test_login():\n\trunner = CliRunner()\n\tresult = runner.invoke(ev_group46.Login, ['--username', 'admin', '--passw', 'petrol4ever'])\n\tassert result.exit_code == 0\n\t#print(result.output)\n\tassert result.output == 'login was successful\\n'\n\n\ndef test_sessions_upd():\n\trunner = CliRunner()\n\ttoken_file = open(path_of_token, 'r')\n\ttok = json.load(token_file)\n\tapikey = tok['token']\n\tresult = runner.invoke(ev_group46.Admin, ['--sessionsupd', '--source', 'demo.csv', '--apikey', apikey])\n\tassert result.exit_code == 0\n\tassert 'sessionsupd : done\\n' in result.output\n\n\ndef test_SessionsPerPoint():\n\trunner = CliRunner()\n\ttoken_file = open(path_of_token, 'r')\n\ttok = json.load(token_file)\n\tapikey = tok['token']\n\tresult = runner.invoke(ev_group46.SessionsPerPoint, ['--point', '5f6978bb00355e4c01059bc7_5096', '--datefrom', '20190901', '--dateto', '20190902', '--apikey', apikey])\n\tprint(result.output)\n\tassert result.exit_code == 0\n\tassert '0.746999979019165' in result.output\n\ndef test_SessionsPerStation():\n\trunner = CliRunner()\n\ttoken_file = open(path_of_token, 'r')\n\ttok = json.load(token_file)\n\tapikey = tok['token']\n\tresult = runner.invoke(ev_group46.SessionsPerStation, ['--station', '5f6978bb00355e4c01059bc7', '--datefrom', '20190901', '--dateto', '20190902', '--apikey', apikey])\n\t#print(result.output)\n\tassert result.exit_code == 0\n\tassert '0.746999979019165' in result.output\n\ndef test_SessionsPerProvider():\n\trunner = CliRunner()\n\ttoken_file = open(path_of_token, 'r')\n\ttok = json.load(token_file)\n\tapikey = tok['token']\n\tresult = runner.invoke(ev_group46.SessionsPerProvider, ['--provider', 'ESB Ecars', '--datefrom', '20190901', '--dateto', '20190902', '--apikey', apikey])\n\t#print(result.output)\n\tassert result.exit_code == 0\n\tassert '0.746999979019165' in result.output\n\ndef test_SessionsPerEV():\n\trunner = CliRunner()\n\ttoken_file = open(path_of_token, 
'r')\n\ttok = json.load(token_file)\n\tapikey = tok['token']\n\tresult = runner.invoke(ev_group46.SessionsPerEV, ['--ev', '45b68c71-cd11-4bd7-a03f-fdaae259635d', '--datefrom', '20190901', '--dateto', '20190902', '--apikey', apikey])\n\tprint(result.output)\n\tassert result.exit_code == 0\n\tassert '0.746999979019165' in result.output\n\ndef test_logout():\n\trunner = CliRunner()\n\ttoken_file = open(path_of_token, 'r')\n\ttok = json.load(token_file)\n\tapikey = tok['token']\n\tresult = runner.invoke(ev_group46.logout, ['--apikey', apikey])\n\tassert result.exit_code == 0\n\tassert result.output == 'logout was successful\\n'\n","sub_path":"cli-client/functional_tests/functional_tests7.py","file_name":"functional_tests7.py","file_ext":"py","file_size_in_byte":2985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"414232822","text":"#######################################################################################################\n### Contents ###\n\n# Dynamical Functions and Class - The main simulation code\n# Supporting Function - All commands used by simulation not from a standard python package are defined here.\n#######################################################################################################\n\n\n#######################################################################################################\n### Dynamical Functions and Classes ###\n#######################################################################################################\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport ipywidgets as widgets\nimport itertools as it\nimport scipy.linalg\nfrom scipy.interpolate import InterpolatedUnivariateSpline\n\nabs_tol = 1e-4\nrnd = np.random.RandomState(seed)\n\ndef analyze_diffusion(start=1, anomalous=False):\n x0 = part.pos_hist[0]\n x = part.pos_hist[start:]\n t = part.t_hist[start:]\n\n displace = (x - x0).T\n delta = (displace[np.newaxis,:] * displace[:,np.newaxis])\n delta = np.mean(delta, axis=2) \n if anomalous == False:\n #print('not anomalous')\n D_matrix = delta / (2 *t)\n else:\n #print('anomalous')\n D_matrix = delta / (2 * t * np.log(t))\n\n trace = np.einsum('dds->s', D_matrix)\n D_const = trace / part.dim\n \n fig, ax = plt.subplots(figsize=[8,8])\n for d in range(dim):\n for e in range(d+1):\n ax.plot(t, D_matrix[d,e]) \n ax.plot(t, D_const, '.')\n plt.show()\n \n l = np.round(t.shape[0]/4).astype(int)\n D = np.mean(D_const[-l:])\n #D = D_const[-1]\n print(D)\n def norm_pdf(x,mu=0,var=1):\n return np.exp(-(x-mu)**2/(2*var)) / np.sqrt(2*np.pi*var)\n\n def interactive_plot(num_frames=-1):\n max_frames = x.shape[0]-1\n if (num_frames == -1) or (num_frames > max_frames):\n num_frames = max_frames\n s= t[:,np.newaxis,np.newaxis]\n if anomalous == False:\n #print('not anomalous')\n data = x / np.sqrt(s)\n else:\n #print('anomalous')\n data = x / np.sqrt(s * np.log(s))\n def update(s):\n fig, ax = plt.subplots()\n for d in range(part.dim):\n q = np.histogram(data[s,:,d], density=True) \n h = q[1]\n dh = np.diff(h) / 2\n h = h[:-1] + dh \n hnew = np.linspace(h.min(),h.max(),300)\n v = InterpolatedUnivariateSpline(h,q[0])(hnew)\n if anomalous == False:\n #print('not anomalous')\n n = norm_pdf(hnew, mu=0, var=2*D)\n else:\n #print('anomalous')\n n = norm_pdf(hnew, mu=0, var=2*D)\n ax.plot(hnew,v)\n ax.plot(hnew,n)\n\n l = widgets.Layout(width='150px')\n step_text = widgets.BoundedIntText(min=2, max=num_frames, value=0, 
layout=l)\n step_slider = widgets.IntSlider(min=2, max=num_frames, value=0, readout=False, continuous_update=False, layout=l)\n widgets.jslink((step_text, 'value'), (step_slider, 'value'))\n\n play_button = widgets.Play(min=2, max=num_frames, step=1, interval=50, layout=l)\n widgets.jslink((step_text, 'value'), (play_button, 'value'))\n\n img = widgets.interactive_output(update, {'s':step_text})\n display(widgets.HBox([widgets.VBox([step_text, step_slider, play_button]), img]))\n interactive_plot()\n\n\n \n \nclass CollisionLaw:\n @staticmethod\n def resolve_collision(wall, part, p):\n raise Exception('You should implement the method resolve_collision() in a subclass.')\n\nclass IgnoreLaw(CollisionLaw):\n @staticmethod\n def resolve_collision(wall, part, p):\n pass\n\nclass SpecularLaw(CollisionLaw):\n @staticmethod\n def resolve_collision(wall, part, p):\n nu = wall.normal(part.pos[p])\n part.vel[p] -= 2 * part.vel[p].dot(nu) * nu\n\nclass TerminateLaw(CollisionLaw):\n @staticmethod\n def resolve_collision(wall, part, p):\n raise Exception('particle {} hit termination wall {}'.format(p, wall.idx))\n\nclass WrapLaw(CollisionLaw):\n def __init__(self, wrap_dim, wrap_wall):\n self.wrap_dim = wrap_dim\n self.wrap_wall = wrap_wall\n\n def resolve_collision(self, wall, part, p):\n d = self.wrap_dim # which dim will have sign flip\n s = np.sign(part.pos[p, d]).astype(int) # is it at + or -\n part.cell_offset[p, d] += s # tracks cell position for each particle\n part.pos[p, d] *= -1 # flips sign of dime d\n part.wp_mask[wall.idx, p] = False\n part.wp_mask[self.wrap_wall, p] = True\n\n# Particle-wall no-slip law in any dimension from private correspondence with Cox and Feres.\n#See last pages of: https://github.com/drscook/unb_billiards/blob/master/references/no%20slip%20collisions/feres_N_dim_no_slip_law_2017.pdf\n# Uses functions like Lambda_nu defined at the end of this file\nclass NoSlipLaw(CollisionLaw):\n @staticmethod\n def resolve_collision(wall, part, p):\n nu = wall.normal(part.pos[p])\n m = part.mass[p]\n g = part.gamma[p]\n r = part.radius[p]\n d = (2*m*g**2)/(1+g**2)\n \n U_in = part.spin[p]\n v_in = part.vel[p]\n U_out = U_in - (d/(m*g**2) * Lambda_nu(U_in, nu)) + (d/(m*r*g**2)) * E_nu(v_in, nu)\n v_out = (r*d/m) * Gamma_nu(U_in, nu) + v_in - 2 * Pi_nu(v_in, nu) - (d/m) * Pi(v_in,nu)\n\n part.spin[p] = U_out\n part.vel[p] = v_out\n\n\nclass Wall():\n # default values that apply to all geometries\n def __init__(self, dim=2, gap_pad=0.0, collision_law=SpecularLaw):\n self.dim = dim\n self.gap_pad = gap_pad\n self.collision_law = collision_law\n\n def resolve_collision(self, part, p):\n self.collision_law.resolve_collision(self, part, p)\n\n @staticmethod\n def normal(pos):\n raise Exception('You should implement the method normal() in a subclass.')\n\n @staticmethod\n def get_mesh():\n raise Exception('You should implement the method get_mesh() in a subclass.')\n\n @staticmethod\n def get_wp_col_time(self, mask=None):\n raise Exception('You should implement the method get_wp_col_time() in a subclass.')\n\n \nclass FlatWall(Wall):\n def __init__(self, base_point, normal, tangents, dim=2, gap_pad=0.0, collision_law=SpecularLaw):\n super().__init__(dim=dim, gap_pad=gap_pad, collision_law=collision_law)\n \n \n self.base_point = np.asarray(base_point)\n self.normal_static = np.asarray(normal)\n self.tangents = np.asarray(tangents)\n self.wp_gap_min = gap_pad\n self.get_mesh()\n\n def get_mesh(self):\n self.mesh = flat_mesh(self.tangents) + self.base_point\n\n def normal(self, pos):\n # 
normal does not depend on collision point\n return self.normal_static\n\n def get_wp_col_time(self, mask=None):\n nu = self.normal_static\n dx = part.pos - self.base_point\n c = dx.dot(nu) - self.wp_gap_min\n b = part.vel.dot(nu)\n t = solve_linear(b, c, mask)\n return t\n\n # computes wp spacing\n def get_wp_gap(self):\n dx = part.pos - self.base_point\n self.wp_gap = dx.dot(self.normal_static) - self.wp_gap_min\n return self.wp_gap\n\n\nclass SphereWall(Wall):\n def __init__(self, base_point, radius, dim=2, gap_pad=0.0, collision_law=SpecularLaw):\n super().__init__(dim=dim, gap_pad=gap_pad, collision_law=collision_law)\n self.base_point = np.asarray(base_point)\n self.radius = radius\n self.wp_gap_min = self.radius + self.gap_pad\n self.get_mesh()\n\n def get_mesh(self):\n self.mesh = sphere_mesh(self.dim, self.radius) + self.base_point\n\n def normal(self, pos): # normal depends on collision point\n dx = pos - self.base_point\n return make_unit(dx) # see below for make_unit\n\n def get_wp_col_time(self, mask=None):\n dx = part.pos - self.base_point\n dv = part.vel\n a = (dv*dv).sum(axis=-1)\n b = 2*(dv*dx).sum(axis=-1)\n c = (dx*dx).sum(axis=-1) - self.wp_gap_min**2\n t_small, t_big = solve_quadratic(a, b, c, mask)\n t = np.fmin(t_small, t_big)\n return t\n\n def get_wp_gap(self):\n dx = part.pos - self.base_point\n self.wp_gap = np.linalg.norm(dx, axis=-1) - self.wp_gap_min\n return self.wp_gap \n \nclass Particles():\n def __init__(self, **kwargs):\n params = {'max_steps':50, 'dim':2, 'num':1, 'radius':[1.0], 'mass':[1.0], 'pp_collision_law':'pp_specular', 'gamma':'uniform'}\n params.update(kwargs)\n \n if(params['gamma'] == 'uniform'):\n params['gamma'] = np.sqrt(2/(2+params['dim']))\n elif(params['gamma'] == 'shell'):\n params['gamma'] = np.sqrt(2/params['dim'])\n elif(params['gamma'] == 'point'):\n params['gamma'] = 0\n\n # Each parameter list must be num_particles long. 
If not, this will extend by filling with the last entry\n constants = ['radius', 'mass', 'gamma']\n for const in constants:\n c = listify(params[const]) #listify defined at bottom of this file\n for p in range(len(c), params['num']):\n c.append(c[-1])\n params[const] = np.asarray(c).astype(float)\n \n for key, val in params.items():\n if isinstance(val, list):\n val = np.asarray(val) # converts lists to arrays\n setattr(self, key, val)\n self.mom_inert = self.mass * (self.gamma * self.radius)**2\n self.get_mesh()\n \n self.pp_gap_min = cross_subtract(self.radius, -self.radius)\n np.fill_diagonal(self.pp_gap_min, -1) # no gap between a particle and itself\n \n self.wp_dt = np.zeros([len(wall), self.num], dtype='float')\n self.wp_mask = self.wp_dt.copy().astype(bool)\n\n if self.pp_collision_law == 'pp_ignore':\n self.pp_dt = np.array([np.inf])\n else:\n self.pp_dt = np.zeros([self.num, self.num], dtype='float')\n self.pp_mask = self.pp_dt.copy().astype(bool)\n \n self.t = 0.0\n self.cell_offset = np.zeros([self.num, self.dim], dtype=int) # tracks which cell the particle is in\n self.col = {}\n self.t_hist = []\n self.col_hist = []\n self.pos_hist = []\n self.vel_hist = []\n self.spin_hist = []\n \n # Color particles (helpful for the future when we have many particles)\n cm = plt.cm.gist_rainbow\n idx = np.linspace(0, cm.N-1 , self.num).round().astype(int)\n self.clr = [cm(i) for i in idx]\n \n def get_mesh(self):\n self.mesh = []\n for p in range(self.num):\n R = self.radius[p]\n M = sphere_mesh(dim=self.dim, radius=R)\n if self.dim == 2:\n M = np.vstack([M, [-R,0]]) # draw equator\n self.mesh.append(M)\n self.mesh = np.asarray(self.mesh)\n\n def record_state(self):\n self.t_hist.append(self.t)\n self.pos_hist.append(self.pos_to_global())\n self.vel_hist.append(self.vel.copy())\n self.spin_hist.append(self.spin.copy())\n # we compute orient later in smoother\n #self.cell_offset_hist.append(self.cell_offset.copy())\n self.col_hist.append(self.col.copy())\n\n def get_pp_gap(self):\n dx = cross_subtract(self.pos_to_global()) #cross_subtract defined below\n self.pp_gap = np.linalg.norm(dx, axis=-1) - self.pp_gap_min\n return self.pp_gap \n\n def check_gap(self, p=Ellipsis):\n # if p is specified, checks gap for particles in list p. 
Else, checks all.\n self.wp_gap = np.array([w.get_wp_gap() for w in wall])\n wp_check = self.wp_gap > -abs_tol\n wp_check = wp_check[:,p]\n if self.pp_collision_law == 'pp_ignore':\n pp_check = [True]\n else:\n self.get_pp_gap()\n pp_check = self.pp_gap > -abs_tol\n pp_check = pp_check[:,p]\n return np.all(wp_check) and np.all(pp_check)\n\n def check_angular(self, p=Ellipsis):\n orient_det = np.abs(np.linalg.det(self.orient[p]))-1\n orient_det_check = np.abs(orient_det) < abs_tol\n S = self.spin[p]\n spin_skew = np.abs(S + np.swapaxes(S, -2, -1))\n spin_skew = spin_skew.sum(axis=-1).sum(axis=-1)\n spin_skew_check = np.abs(spin_skew) < abs_tol\n return np.all(orient_det_check) and np.all(spin_skew_check)\n \n # Computes time to next collision with for each p-p pair via (x1+v1*t-x2-v1*t) dot (x1+v1*t-x2-v1*t) = (r1+r2)^2\n # Gives quadatric in t\n def get_pp_col_time(self, mask=None):\n dx = cross_subtract(self.pos_to_global())\n dv = cross_subtract(self.vel)\n a = (dv*dv).sum(axis=-1)\n b = 2*(dv*dx).sum(axis=-1)\n c = (dx*dx).sum(axis=-1) - self.pp_gap_min**2\n t_small, t_big = solve_quadratic(a, b, c, mask=self.pp_mask)\n t = np.fmin(t_small, t_big)\n return t\n\n def pp_specular_law(self, p1, p2):\n m1, m2 = self.mass[p1], self.mass[p2]\n M = m1 + m2\n nu = make_unit(self.pos[p2] - self.pos[p1])\n dv = self.vel[p2] - self.vel[p1]\n w = dv.dot(nu) * nu\n self.vel[p1] += 2 * (m2/M) * w\n self.vel[p2] -= 2 * (m1/M) * w \n\n def pp_no_slip_law(self, p1, p2):\n m1 = part.mass[p1]\n m2 = part.mass[p2]\n M = m1 + m2\n g1 = part.gamma[p1]\n g2 = part.gamma[p2]\n r1 = part.radius[p1]\n r2 = part.radius[p2] \n\n d = 2/((1/m1)*(1+1/g1**2) + (1/m2)*(1+1/g2**2))\n dx = part.pos[p2] - part.pos[p1] \n nu = make_unit(dx)\n U1_in = part.spin[p1]\n U2_in = part.spin[p2]\n v1_in = part.vel[p1]\n v2_in = part.vel[p2]\n\n U1_out = (U1_in-d/(m1*g1**2) * Lambda_nu(U1_in, nu)) \\\n + (-d/(m1*r1*g1**2)) * E_nu(v1_in, nu) \\\n + (-r2/r1)*(d/(m1*g1**2)) * Lambda_nu(U2_in, nu) \\\n + d/(m1*r1*g1**2) * E_nu(v2_in, nu)\n\n v1_out = (-r1*d/m1) * Gamma_nu(U1_in, nu) \\\n + (v1_in - 2*m2/M * Pi_nu(v1_in, nu) - (d/m1) * Pi(v1_in, nu)) \\\n + (-r2*d/m1) * Gamma_nu(U2_in, nu) \\\n + (2*m2/M) * Pi_nu(v2_in, nu) + (d/m1) * Pi(v2_in, nu)\n\n U2_out = (-r1/r2)*(d/(m2*g2**2)) * Lambda_nu(U1_in, nu) \\\n + (-d/(m2*r2*g2**2)) * E_nu(v1_in, nu) \\\n + (U2_in - (d/(m2*g2**2)) * Lambda_nu(U2_in, nu)) \\\n + (d/(m2*r2*g2**2)) * E_nu(v2_in, nu)\n\n v2_out = (r1*d/m2) * Gamma_nu(U1_in, nu) \\\n + (2*m1/M) * Pi_nu(v1_in, nu) + (d/m2) * Pi(v1_in, nu) \\\n + (r2*d/m2) * Gamma_nu(U2_in, nu) \\\n + v2_in - (2*m1/M) * Pi_nu(v2_in, nu) - (d/m2) * Pi(v2_in,nu)\n part.spin[p1] = U1_out\n part.spin[p2] = U2_out\n part.vel[p1] = v1_out\n part.vel[p2] = v2_out \n \n def resolve_collision(self, p1, p2):\n if self.pp_collision_law == 'pp_specular':\n self.pp_specular_law(p1, p2)\n elif self.pp_collision_law == 'pp_no_slip':\n self.pp_no_slip_law(p1, p2)\n elif self.pp_collision_law == 'pp_ignore':\n raise Exception('Should not detect pp collisions')\n else:\n raise Exception('Unknown pp collision law {}'.format(self.collision_law))\n\n def get_KE(self):\n lin_KE = part.mass * (part.vel**2).sum(axis=-1)\n ang_KE = part.mom_inert * (np.triu(part.spin,1)**2).sum(axis=-1).sum(axis=-1)\n KE = lin_KE + ang_KE\n return KE / 2\n \n def pos_to_global(self):\n # self.pos is local to current cell. 
This return the global position by adding cell offset.\n return self.pos + self.cell_offset * self.cell_size * 2\n\ndef check():\n N = part.num\n D = part.dim\n if np.any([w.dim != D for w in wall]):\n raise Exception('Not all wall and part dimensions agree')\n if (part.pos.shape != (N,D)) or (part.vel.shape != (N,D)):\n raise Exception('Some dynamical variables do not have correct shape')\n if np.any((part.gamma < 0) | (part.gamma > np.sqrt(2/part.dim))):\n raise Exception('illegal mass distribution parameter {}'.format(gamma))\n if part.check_gap() == False:\n raise Exception('A particle escaped')\n if part.check_angular() == False:\n raise Exception('A particle has invalid orintation or spin matrix')\n if np.abs(part.get_KE().sum() - part.KE_init) > abs_tol:\n raise Exception('Total kinetic energy was not conserved')\n\ndef next_state(wall, part):\n for (i,w) in enumerate(wall):\n part.wp_dt[i] = w.get_wp_col_time(part.wp_mask[i])\n if part.pp_collision_law != 'pp_ignore':\n part.pp_dt = part.get_pp_col_time(part.pp_mask)\n part.dt = np.min([np.min(part.pp_dt), np.min(part.wp_dt)])\n if np.isinf(part.dt):\n raise Exception(\"No future collisions detected\")\n\n part.t += part.dt\n part.pos += part.vel * part.dt\n # We choose not to update orient during simulation because it does not affect the dynamics\n # and would slow us down. We compute it later in smoother if needed.\n\n part.wp_mask = (part.wp_dt - part.dt) < 1e-8\n part.pp_mask = (part.pp_dt - part.dt) < 1e-8\n \n wp_counts = np.sum(part.wp_mask,axis=0)\n pp_counts = np.sum(part.pp_mask,axis=0)\n total_counts = wp_counts + pp_counts\n if np.any(total_counts > 1):\n raise Exception('Complex event - would re-randomize position of particles involved if implemented. Until that is complete, simulation simply terminates.')\n else:\n part.col = []\n for (w, p) in zip(*np.nonzero(part.wp_mask)):\n part.col.append({'w':w, 'p':p})\n wall[w].resolve_collision(part, p)\n for (p1, p2) in zip(*np.nonzero(part.pp_mask)):\n if p1 < p2:\n part.col.append({'p':p1, 'q':p2})\n part.resolve_collision(p1, p2)\n \n\ndef clean_up(part):\n part.t_hist = np.asarray(part.t_hist)\n #part.cell_offset_hist = np.asarray(part.cell_offset_hist)\n part.pos_hist = np.asarray(part.pos_hist)\n part.vel_hist = np.asarray(part.vel_hist)\n part.spin_hist = np.asarray(part.spin_hist)\n print('Done!! 
Steps = {}, Time = {:4f}'.format(len(part.t_hist)-1, part.t_hist[-1]))\n \n\n####################################################################################################### \n#######################################################################################################\n### Support Functions ###\n#######################################################################################################\n#######################################################################################################\n\n\n#######################################################################################################\n### Graphics Functions ###\n#######################################################################################################\n\ndef draw_background(pos):\n M = [[(np.min(pos[:,:,d])/(2*part.cell_size[d])).round()\n ,(np.max(pos[:,:,d])/(2*part.cell_size[d])).round()\n ] for d in range(part.dim)]\n cell_range = [2 * part.cell_size[d] * np.arange(M[d][0], M[d][1]+1) for d in range(part.dim)]\n translates = it.product(*cell_range)\n ax = plt.gca()\n if part.dim == 2:\n for trans in translates:\n for w in wall:\n ax.plot(*(w.mesh + trans).T, color='black')\n\ndef draw_state(num_frames=-1):\n max_frames = part.re_pos.shape[0]-1\n if (num_frames == -1) or (num_frames > max_frames):\n num_frames = max_frames\n \n pos = part.re_pos[:num_frames+1]\n orient = part.re_orient[:num_frames+1]\n fig, ax = plt.subplots()\n draw_background(pos)\n for p in range(part.num):\n ax.plot(pos[:,p,0], pos[:,p,1], color=part.clr[p])\n ax.plot(*(part.mesh[p].dot(orient[-1,p].T) + pos[-1,p]).T, color=part.clr[p])\n ax.set_aspect('equal')\n plt.show()\n\ndef interactive_plot(num_frames=-1):\n max_frames = part.re_pos.shape[0]-1\n if (num_frames == -1) or (num_frames > max_frames):\n num_frames = max_frames\n\n pos = part.re_pos[:num_frames+1]\n orient = part.re_orient[:num_frames+1]\n dpos = np.diff(pos, axis=0) # position change \n def update(s):\n fig, ax = plt.subplots(figsize=[8,8]);\n ax.set_aspect('equal')\n plt.title('s={} t={:.2f}'.format(s,part.re_t[s]))\n draw_background(pos[:s+1])\n for p in range(part.num):\n ax.plot(pos[:s+1,p,0], pos[:s+1,p,1], color=part.clr[p])\n ax.plot(*(part.mesh[p].dot(orient[s,p].T) + pos[s,p]).T, color=part.clr[p])\n plt.show()\n\n l = widgets.Layout(width='150px')\n step_text = widgets.BoundedIntText(min=0, max=num_frames, value=0, layout=l)\n step_slider = widgets.IntSlider(min=0, max=num_frames, value=0, readout=False, continuous_update=False, layout=l)\n widgets.jslink((step_text, 'value'), (step_slider, 'value'))\n\n play_button = widgets.Play(min=0, max=num_frames, interval=50, layout=l)\n widgets.jslink((step_text, 'value'), (play_button, 'value'))\n\n img = widgets.interactive_output(update, {'s':step_text})\n display(widgets.HBox([widgets.VBox([step_text, step_slider, play_button]), img]))\n\ndef smoother(part, min_frames=None, orient=True):\n t, x, v, s = part.t_hist, part.pos_hist, part.vel_hist, part.spin_hist\n dts = np.diff(t)\n if (min_frames is None):\n ddts = dts\n num_frames = np.ones_like(dts).astype(int)\n else:\n short_step = dts < abs_tol\n nominal_frame_length = np.percentile(dts[~short_step], 25) / min_frames\n num_frames = np.round(dts / nominal_frame_length).astype(int) # Divide each step into pieces of length as close to nominal_frame_length as possible\n num_frames[num_frames<1] = 1\n ddts = dts / num_frames # Compute frame length within each step\n\n # Now interpolate. 
re_x denotes the interpolated version of x\n re_t, re_x, re_v, re_s = [t[0]], [x[0]], [v[0]], [s[0]]\n re_o = [part.orient]\n for (i, ddt) in enumerate(ddts):\n re_t[-1] = t[i]\n re_x[-1] = x[i]\n re_v[-1] = v[i]\n re_s[-1] = s[i]\n dx = re_v[-1] * ddt\n if orient == True:\n do = [scipy.linalg.expm(ddt * U) for U in re_s[-1]] # incremental rotatation during each frame\n\n for f in range(num_frames[i]):\n re_t.append(re_t[-1] + ddt)\n re_x.append(re_x[-1] + dx)\n re_v.append(re_v[-1])\n re_s.append(re_s[-1])\n if orient == True:\n #B = [A.dot(Z) for (A,Z) in zip(re_o[-1], do)] # rotates each particle the right amount\n B = np.einsum('pde,pef->pdf', re_o[-1], do)\n re_o.append(np.array(B))\n else:\n re_o.append(re_o[-1])\n\n part.re_t = np.asarray(re_t)\n part.re_pos = np.asarray(re_x)\n part.re_vel = np.asarray(re_v)\n part.re_orient = np.asarray(re_o)\n part.re_spin = np.asarray(re_s) \n \ndef flat_mesh(tangents):\n pts = 100\n N, D = tangents.shape\n grid = [np.linspace(-1, 1, pts) for n in range(N)]\n grid = np.meshgrid(*grid)\n grid = np.asarray(grid)\n mesh = grid.T.dot(tangents)\n return mesh\n\ndef sphere_mesh(dim, radius):\n pts = 100\n grid = [np.linspace(0, np.pi, pts) for d in range(dim-1)]\n grid[-1] *= 2\n grid = np.meshgrid(*grid) \n mesh = []\n for d in range(dim):\n w = radius * np.ones_like(grid[0])\n for j in range(d):\n w *= np.sin(grid[j])\n if d < dim-1:\n w *= np.cos(grid[d])\n mesh.append(w)\n return np.asarray(mesh).T \n \n \n#######################################################################################################\n### No-Slip Collision Functions ###\n#######################################################################################################\ndef spin_matrix_from_vector(v):\n # Converts spin vector to spin matrix\n # https://en.wikipedia.org/wiki/Rotation_matrix#Exponential_map\n \n l = len(v)\n # l = d(d-1) -> d**2 - d - 2l = 0\n d = (1 + np.sqrt(1 + 8*l)) / 2\n if d % 1 != 0:\n raise Exception('vector {} of length {} converts to dim = {:.2f}. 
Not integer.'.format(v,l,d))\n d = int(d)\n M = np.zeros([d,d])\n idx = np.triu_indices_from(M,1)\n s = (-1)**(np.arange(len(v))+1)\n w = v * s\n w = w[::-1]\n M[idx] = w\n M = make_symmetric(M, skew=True)\n return M\n\ndef spin_vector_from_matrix(M):\n idx = np.triu_indices_from(M,1)\n w = M[idx]\n s = (-1)**(np.arange(len(w))+1)\n w = w[::-1] \n v = w * s\n return v\n \ndef wedge(a,b):\n return np.outer(b,a)-np.outer(a,b)\n\ndef Pi_nu(v, nu):\n return v.dot(nu) * nu\n\ndef Pi(v, nu):\n w = Pi_nu(v ,nu)\n return v - w\n\ndef Lambda_nu(U, nu):\n return wedge(nu, U.dot(nu))\n\ndef E_nu(v, nu):\n return wedge(nu, v)\n\ndef Gamma_nu(U, nu):\n return U.dot(nu)\n \n#######################################################################################################\n### Random Functions ###\n#######################################################################################################\n\ndef random_uniform_sphere(num=1, dim=2, radius=1.0):\n pos = rnd.normal(size=[num, dim])\n pos = make_unit(pos, axis=1)\n return abs(radius) * pos\n\n\ndef random_uniform_ball(num=1, dim=2, radius=1.0):\n pos = random_uniform_sphere(num, dim, radius)\n r = rnd.uniform(size=[num, 1])\n return r**(1/dim) * pos\n \n#######################################################################################################\n### Basic Math Functions ###\n#######################################################################################################\ndef cross_subtract(u, v=None):\n if v is None:\n v=u.copy()\n w = u[np.newaxis,:] - v[:,np.newaxis]\n return w\n\n\ndef make_symmetric(A, skew=False):\n \"\"\"\n Returns symmetric or skew-symmatric matrix by copying upper triangular onto lower.\n \"\"\"\n A = np.asarray(A)\n U = np.triu(A,1)\n if skew == True:\n return U - U.T\n else:\n return np.triu(A,0) + U.T \n \ndef make_unit(A, axis=-1):\n # Normalizes along given axis. This means that Thus, np.sum(A**2, axis) gives a matrix of all 1's.\n #In other words, if you pick values for all indices except axis and sum the squares, you get 1. 
\n A = np.asarray(A, dtype=float)\n M = np.linalg.norm(A, axis=axis, keepdims=True)\n return A / M\n\ndef solve_linear(b, c, mask=None):\n t = np.full(part.num, np.inf) # default in np.inf \n idx = np.abs(b) >= abs_tol # prevents divide by zero\n t[idx] = -1 * c[idx] / b[idx]\n if mask is not None:\n t[mask] = np.inf\n t[t<0] = np.inf #np.inf for negative times\n return t\n\ndef solve_quadratic(a, b, c, mask=None):\n small = np.full_like(a, np.inf) \n d = b**2 - 4*a*c #discriminant\n lin = (abs(a) < abs_tol) & (abs(b) >= abs_tol) #linear \n quad = (abs(a) >= abs_tol) & (d >= abs_tol) #quadratic\n \n small[lin] = -1 * c[lin] / b[lin]\n big = small.copy()\n \n d[quad] = np.sqrt(d[quad])\n small[quad] = (-b[quad] - d[quad]) / (2*a[quad])\n big[quad] = (-b[quad] + d[quad]) / (2*a[quad])\n swap = (b >= abs_tol) # We want the solutions ordered (small, big), so we swap where needed\n small[swap], big[swap] = big[swap], small[swap]\n if mask is not None:\n small[mask] = np.inf\n big[mask] = np.inf\n small[small<0] = np.inf\n big[big<0] = np.inf\n return small, big\n\n\n\ndef listify(X):\n \"\"\"\n Convert X to list if it's not already\n \"\"\"\n if (X is None) or (X is np.nan):\n return []\n elif isinstance(X,str):\n return [X]\n else:\n try:\n return list(X)\n except:\n return [X]\n","sub_path":"code/working/billiard_defs_5a.py","file_name":"billiard_defs_5a.py","file_ext":"py","file_size_in_byte":28078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"425494704","text":"from keras.applications.mobilenet import MobileNet\nfrom keras.applications.mobilenet_v2 import MobileNetV2\n# import efficientnet.keras as efn\nfrom keras.applications.resnet50 import ResNet50\nfrom keras.applications.vgg16 import VGG16\nfrom keras.models import Sequential\nfrom keras.layers import Activation\nfrom keras.layers import Input, Dropout\nfrom keras.layers import Dense, GlobalAveragePooling2D\nfrom keras.models import Model\nfrom keras.optimizers import SGD\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator, array_to_img\n\n\nclass Generator_train(object): # rule1\n def __init__(self):\n\n train_dir = \"./train_data\"\n datagen = ImageDataGenerator(zoom_range=[\n 0.95, 1.0], width_shift_range=0.05, height_shift_range=0.05, brightness_range=[0.95, 1.05], rescale=1/255.)\n\n train_gen = datagen.flow_from_directory(\n train_dir,\n target_size=(50, 50),\n batch_size=32,\n class_mode='categorical',\n shuffle=True,\n subset=\"training\"\n )\n\n self.gene = train_gen\n\n def __iter__(self):\n return self\n\n def __next__(self):\n X, Y = self.gene.next()\n return [X, Y], Y\n\n\nclass Generator_val(object): # rule1\n def __init__(self):\n\n val_dir = \"./val_data\"\n datagen = ImageDataGenerator(zoom_range=[\n 0.95, 1.0], width_shift_range=0.05, height_shift_range=0.05, brightness_range=[0.95, 1.05], rescale=1/255.)\n\n val_gen = datagen.flow_from_directory(\n val_dir,\n target_size=(50, 50),\n batch_size=32,\n class_mode='categorical',\n shuffle=True,\n subset=\"training\"\n )\n\n self.gene = val_gen\n\n def __iter__(self):\n return self\n\n def __next__(self):\n X, Y = self.gene.next()\n return [X, Y], Y\n\n\ndef main():\n train_generator = Generator_train()\n val_generator = Generator_val()\n # model = model()\n n_categories = 2\n base_model = VGG16(input_shape=(50, 50, 3),\n weights=\"imagenet\",\n include_top=False)\n\n x = base_model.output\n yinput = 
Input(shape=(n_categories,))\n hidden = GlobalAveragePooling2D()(x)\n x = Dropout(0.5)(hidden)\n x = Dense(1024, activation='relu')(x)\n x = Dense(512, activation='relu')(x)\n x = Dense(256, activation='relu')(x)\n x = Dense(2, activation='relu')(x)\n prediction = Activation('softmax', name=\"act_softmax\")(x)\n learn_model = Model(inputs=[base_model.input, yinput], outputs=prediction)\n\n for layer in base_model.layers[:15]:\n layer.trainable = False\n\n learn_model.compile(optimizer=Adam(lr=0.0001),\n loss='categorical_crossentropy', metrics=['accuracy'])\n learn_model.summary()\n\n learn_model.fit_generator(train_generator, steps_per_epoch=10, epochs=100, verbose=1, validation_data=val_generator,\n validation_steps=10, class_weight=None, max_queue_size=10, workers=1, use_multiprocessing=False, shuffle=True, initial_epoch=0)\n learn_model.save('train_weights.h5')\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"train_model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"86519221","text":"from server.database_config import db, db_session\nfrom sqlalchemy import Column, Integer, VARCHAR, JSON, String\nimport json\n\n\nclass Game(db):\n __tablename__ = 'games'\n query = db_session.query_property()\n\n gameId = Column(Integer, primary_key=True, autoincrement=True)\n userId = Column(Integer)\n data = Column(String)\n first = Column(Integer)\n second = Column(Integer)\n third = Column(Integer)\n\n\n def __init__(self, userId):\n self.userId = userId\n self.data = self.to_string({'called':[]})\n\n def last_numbers(self):\n data = self.to_json()\n return_values={}\n try:\n return_values['number'] = data['called'].__getitem__(0)\n except IndexError:\n return_values['number'] = \"\"\n\n try:\n return_values['last_number'] = data['called'].__getitem__(1)\n except IndexError:\n return_values['last_number'] = \"\"\n\n return return_values\n\n\n def to_string(self, data):\n if data is None:\n return '{}'\n else:\n return json.dumps(data)\n\n def to_json(self):\n if self.data is None:\n return {}\n else:\n return json.loads(self.data)\n","sub_path":"server/models/Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"653534208","text":"from selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import *\nfrom selenium.webdriver import ActionChains\nimport time\nimport os\n\n\nclass CustomDriver:\n def __init__(self, webdriver):\n self.webdriver = webdriver\n self.webdriver.implicitly_wait(20)\n\n def _get_selector_type(self, selector_type):\n selector_type = selector_type.lower()\n\n if selector_type == 'xpath':\n return By.XPATH\n elif selector_type == 'css':\n return By.CSS_SELECTOR\n elif selector_type == 'class_name':\n return By.CLASS_NAME\n elif selector_type == 'id':\n return By.ID\n elif selector_type == 'tag':\n return By.TAG_NAME\n elif selector_type == 'name':\n return By.NAME\n elif selector_type == 'link':\n return By.LINK_TEXT\n elif selector_type == 'partial_link':\n return By.PARTIAL_LINK_TEXT\n else:\n raise Exception('selector type is incorrect!')\n\n def click(self, selector, selector_type='xpath'):\n st = self._get_selector_type(selector_type)\n element = self.webdriver.find_element(st, 
selector)\n element.location_once_scrolled_into_view\n element.click()\n time.sleep(2)\n\n def click_with_wait(self, selector=None, selector_type='xpath', element=None):\n if not element:\n st = self._get_selector_type(selector_type)\n wait = WebDriverWait(self.webdriver,\n timeout=30,\n poll_frequency=1,\n )\n element = wait.until(EC.element_to_be_clickable((st, selector)))\n\n element.location_once_scrolled_into_view\n element.click()\n time.sleep(1)\n\n def input_text(self, text, selector=None, selector_type='xpath', element=None):\n if not element:\n st = self._get_selector_type(selector_type)\n wait = WebDriverWait(self.webdriver,\n timeout=30,\n poll_frequency=1,\n )\n element = wait.until(EC.visibility_of_element_located((st, selector)))\n element.location_once_scrolled_into_view\n element.clear()\n element.send_keys(text)\n time.sleep(0.5)\n\n def screenshot_of_element(self, file_name, selector, selector_type='xpath'):\n st = self._get_selector_type(selector_type)\n path = os.getcwd()+f'/files/{file_name}.png'\n\n wait = WebDriverWait(self.webdriver,\n timeout=10,\n poll_frequency=1,\n ignored_exceptions=[\n NoSuchElementException,\n ElementNotVisibleException,\n ElementNotSelectableException\n ])\n # element = self.webdriver.find_element(st, selector)\n element = wait.until(EC.visibility_of_element_located((st, selector)))\n time.sleep(0.5)\n element.screenshot(path)\n\n def visibility_of_element(self, selector, selector_type='xpath'):\n st = self._get_selector_type(selector_type)\n\n wait = WebDriverWait(self.webdriver,\n timeout=10,\n poll_frequency=1,\n )\n wait.until(EC.visibility_of_element_located((st, selector)))\n\n def invisibility_of_element(self, selector, selector_type = 'xpath'):\n st = self._get_selector_type(selector_type)\n\n wait = WebDriverWait(self.webdriver,\n timeout=10,\n poll_frequency=1,\n )\n wait.until(EC.invisibility_of_element_located((st, selector)))\n\n def show_element(self, selector=None, selector_type='xpath', element=None):\n if element:\n element = element\n else:\n st = self._get_selector_type(selector_type)\n element = self.webdriver.find_element(st, selector)\n element.location_once_scrolled_into_view\n\n def click_element_in_list(self, item_name, selector, selector_type = 'xpath'):\n st = self._get_selector_type(selector_type)\n els = self.get_elements(selector, selector_type)\n\n for el in els:\n el.location_once_scrolled_into_view\n if el.text == item_name.upper():\n el.click()\n break\n\n def get_elements(self, selector, selector_type=None, element=None):\n st = self._get_selector_type(selector_type)\n if element:\n elements = element.find_elements(st, selector)\n else:\n elements = self.webdriver.find_elements(st, selector)\n return elements\n\n def get_element(self, selector, selector_type='xpath', element=None):\n st = self._get_selector_type(selector_type)\n if element:\n element = element.find_element(st, selector)\n else:\n element = self.webdriver.find_element(st, selector)\n return element\n\n def get_atribute(self, selector=None, selector_type='xpath', element=None, attribute='innerHTML'):\n if not element:\n st = self._get_selector_type(selector_type)\n wait = WebDriverWait(self.webdriver,\n timeout=10,\n poll_frequency=1,\n ignored_exceptions=[\n NoSuchElementException,\n ElementNotVisibleException,\n ElementNotSelectableException\n ])\n\n element = wait.until(EC.visibility_of_element_located((st, selector)))\n\n return element.get_attribute(attribute)\n\n def check_el_text(self, text, selector, selector_type='xpath'):\n st = 
self._get_selector_type(selector_type)\n        wait = WebDriverWait(self.webdriver,\n                             timeout=10,\n                             poll_frequency=1,\n                             )\n\n        element = wait.until(EC.text_to_be_present_in_element((st, selector), text))\n        return element\n\n    def click_by_coordinates(self, x, y):\n        if 'get' in dir(self.webdriver):\n            action = ActionChains(self.webdriver)\n            action.move_by_offset(int(x), int(y)).click().perform()\n        else:\n            raise Exception('object is element of page')\n\n    def drug_and_drop(self, selector, selector_type='xpath', x=0,y=0):\n        if 'get' in dir(self.webdriver):\n            st = self._get_selector_type(selector_type)\n            element = self.webdriver.find_element(st, selector)\n            action = ActionChains(self.webdriver)\n            action.drag_and_drop_by_offset(element, x, y).perform()\n        else:\n            raise Exception('object is element of page')\n\n    def move_to_element(self, selector, selector_type='xpath', x=0, y=0):\n        if 'get' in dir(self.webdriver):\n            st = self._get_selector_type(selector_type)\n            element = self.webdriver.find_element(st, selector)\n            action = ActionChains(self.webdriver)\n            action.move_to_element(element).perform()\n        else:\n            raise Exception('object is element of page')\n\n    def get_url(self):\n        if 'get' in dir(self.webdriver):\n            time.sleep(0.3)\n            return self.webdriver.current_url\n        else:\n            raise Exception('object is element of page')\n\n    def use_url(self, url):\n        if 'get' in dir(self.webdriver):\n            self.webdriver.get(url)\n        else:\n            raise Exception('object is element of page')\n\n    def quit(self):\n        if 'get' in dir(self.webdriver):\n            self.webdriver.quit()\n        else:\n            raise Exception('object is element of page')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"base/custom_driver.py","file_name":"custom_driver.py","file_ext":"py","file_size_in_byte":8119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}\n+{"seq_id":"400820749","text":"#1) Fill a matrix of size MxN and print it to the screen\nimport random\nm = int(input(\"Input M:\"))\nn = int(input(\"Input N:\"))\nnumbers = [[0] * n for i in range(m)]\n\nfor i in range(m):\n    for j in range(n):\n        numbers[i][j] = random.randint(1,30)\n\nfor i in range(m):\n    for j in range(n):\n        print(\"%4d\"%numbers[i][j],end=\" \")\n    print()\n#2) Find the minimum element of the 3rd row and the maximum element of the 2nd column and print them to the screen\nprint()\nthird_row = []\nfor j in range(n):\n    third_row.append(numbers[2][j])\nprint(\"Min element of third row is: %d\"%min(third_row))\nsecond_colomn = []\nfor i in range(m):\n    second_colomn.append(numbers[i][1])\nprint(\"Max element of 2nd column is: %d\"%max(second_colomn))\n#3) Swap the 2nd and 4th columns of the matrix. 
Print the result to the screen\nprint()\nprint(\"Swapped 2nd and 4th columns: \")\ntemp = 0\nfor i in range(m):\n    temp = numbers[i][1]\n    numbers[i][1] = numbers[i][3]\n    numbers[i][3] = temp\nprint()\nfor i in range(m):\n    for j in range(n):\n        print(\"%4d\"%numbers[i][j],end=\" \")\n    print()\n    #4) Print the elements of the main diagonal of the matrix and their sum\nelements_of_diagonal = []\n\nprint()\ni = 0\nfor j in range(n):\n    elements_of_diagonal.append(numbers[i][j])\n    i+=1\nprint(\"Elements of main diagonal: \")\nfor item in elements_of_diagonal:\n    print(\"%d, \" %item, end =\"\")\nsum = 0\nfor item in elements_of_diagonal:\n    sum+=item\nprint(\"Their sum = %d\"%sum)","sub_path":"Task_4.py","file_name":"Task_4.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}\n+{"seq_id":"58531793","text":"from collections import OrderedDict\nimport json\nimport os\n\n\ndef file_header(sample_data, config, out_path, preds, parse_preds=True):\n    lab = config[\"dcc_lab\"]\n    experiment = sample_data[\"experiment\"]\n    replicate = sample_data[\"replicate_num\"]\n    alias = f\"{lab}:{experiment}${replicate}${os.path.basename(out_path)}\"\n    if parse_preds:\n        pred_ids = [f\"{lab}:{experiment}${replicate}${os.path.basename(p)}\" for p in preds]\n    else:\n        pred_ids = preds\n    h = OrderedDict({\n        \"lab\": lab,\n        \"award\": config[\"dcc_award\"],\n        \"dataset\": experiment,\n        \"aliases\": [alias],\n        \"submitted_file_name\": os.path.abspath(out_path),\n        \"derived_from\": pred_ids\n    })\n    return h\n\ndef fastq_metadata(sample_data, pair, other):\n    d = OrderedDict({\n        \"file_format\": \"fastq\",\n        \"run_type\": \"paired-ended\",\n        \"output_type\": \"reads\",\n        \"platform\": sample_data[\"platform\"],\n        \"read_length\": sample_data[\"read_length\"],\n        \"replicate\": sample_data[\"replicate_id\"],\n        \"paired_end\": pair,\n    })\n    if pair == \"2\":\n        d[\"paired_with\"] = other\n    return d\n\ndef bam_metadata(sample_data):\n    d = OrderedDict({\n        \"file_format\": \"bam\",\n        \"assembly\": \"GRCh38\",\n        \"mapped_run_type\": \"paired-ended\",\n        \"output_type\": \"alignments\",\n        \"mapped_read_length\": sample_data[\"read_length\"],\n    })\n    return d\n\ndef fragments_metadata(sample_data):\n    d = OrderedDict({\n        \"file_format\": \"tar\",\n        \"assembly\": \"GRCh38\",\n        \"output_type\": \"fragments\",\n    })\n    return d\n\ndef write_json(data, out_path):\n    with open(out_path, \"w\") as f:\n        json.dump(data, f, indent=4)\n\ntry:\n    out_group = snakemake.params['output_group']\n    sample_data = snakemake.params['sample_data']\n    config = snakemake.config\n\n    if out_group == \"fastqs\":\n        r1 = snakemake.input['r1']\n        r2 = snakemake.input['r2']\n        out1 = snakemake.output['r1']\n        out2 = snakemake.output['r2']\n\n        preds = list(sample_data[\"accessions\"].values())\n        \n        h1 = file_header(sample_data, config, r1, preds, parse_preds=False)\n        h2 = file_header(sample_data, config, r2, preds, parse_preds=False)\n\n        d1 = fastq_metadata(sample_data, \"1\", h2[\"aliases\"][0])\n        d2 = fastq_metadata(sample_data, \"2\", h1[\"aliases\"][0])\n        \n        s1 = h1 | d1\n        s2 = h2 | d2\n\n        write_json(s1, out1)\n        write_json(s2, out2)\n\n    elif out_group == \"mapping\":\n        bam = snakemake.input['bam']\n        r1_pred = snakemake.input['fq_R1']\n        r2_pred = snakemake.input['fq_R2']\n        out, = snakemake.output\n\n        h = file_header(sample_data, config, bam, [r1_pred, r2_pred])\n        d = bam_metadata(sample_data)\n        s = h | d\n\n        write_json(s, out)\n\n    elif out_group == \"filtering\":\n        bam = snakemake.input['bam']\n        r1_pred = 
snakemake.input['fq_R1']\n        r2_pred = snakemake.input['fq_R2']\n        out, = snakemake.output\n\n        h = file_header(sample_data, config, bam, [r1_pred, r2_pred])\n        d = bam_metadata(sample_data)\n        s = h | d\n\n        write_json(s, out)\n\n    elif out_group == \"fragments\":\n        fragments = snakemake.input['fragments']\n        pred = snakemake.input['bam']\n        out, = snakemake.output\n\n        h = file_header(sample_data, config, fragments, [pred])\n        d = fragments_metadata(sample_data)\n        s = h | d\n\n        write_json(s, out)\n\n    elif out_group == \"archr\":\n        pass\n\n\nexcept NameError:\n    pass","sub_path":"workflow/scripts/write_file_metadata.py","file_name":"write_file_metadata.py","file_ext":"py","file_size_in_byte":3533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}\n+{"seq_id":"74468101","text":"# The king can move 1 square in any direction. Given 2 coordinates, return YES if the king can get to the second square, else NO \n\ndef king(nums):\n    first_post = nums[:2]\n    second_post = nums[2:]\n    if abs(first_post[0]-second_post[0]) < 2 and abs(first_post[1]- second_post[1]) <2:\n        return \"YES\"\n    return \"NO\"\n    \n\nprint(king([4,4,5,5])) # YES\n\n\n# 5,4 --> (5,3), (5,5), (6,4), (6,3), (4,5), (4,3), (5,3), (5,5)","sub_path":"CTI/replit_python_practice/if_else/king.py","file_name":"king.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}\n+{"seq_id":"213434635","text":"'''\nPython version 3.8.10\nPython module versions used:\npandas==1.3.4\nnumpy==1.20.2\nmatplotlib==3.4.1\ntensorflow==2.8.0\nkeras==2.8.0\nscikit-learn==0.22.2\ntorch==1.11.0\ntorchbnn==1.2\nscipy==1.4.1\nadversarial-robustness-toolbox==1.10.0\n'''\n\n# import the required modules\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchbnn as bnn\nimport matplotlib.pyplot as plt\nimport array\nimport tensorflow as tf\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport art\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras.utils import to_categorical\nfrom scipy.special import softmax as sci_softmax\nfrom art.attacks.evasion import BoundaryAttack\nfrom art.estimators.classification import BlackBoxClassifierNeuralNetwork\n\n# declare helper variables and constants\ndf_name = '../sign_dump_BNN.csv'\nnumber_of_features_1 = 144\nmax_type = 22\nnumber_of_neurans_1 = 30\nmodel_path = '../BNN_models/BNN_model_'\nid_user='id_user'\nusers = [1,8,9,10,11,14,15,17,18,19,20,22]\n\n# load the dataset\ndf = pd.read_csv(df_name)\ndf = df.drop(df.columns[0], axis=1) # drop the auxiliary unnamed column\n\n# get the prediction result in softmax-encoded form\ndef get_predict(test_df):\n    # list of the recognized users\n    softmax = nn.Softmax(dim=1)\n\n    result = np.zeros((test_df.shape[0], 23))\n    \n    # to average out the random factors inherent to a BNN, recognition is repeated 50 times\n    for repeat in range(50):\n        # check the data against each of the users\n        for i in users: \n            data = test_df\n            data_tensor=torch.from_numpy(data).float()\n            model_name = model_path+str(i)+'.h5'\n            model = torch.load(model_name)\n            outputs = model(data_tensor)\n            prob_array = softmax(outputs).detach().numpy()\n\n            # write the result into the output array\n            for itr in range(test_df.shape[0]):\n                result[itr][i] += prob_array[itr][1]\n\n    # convert the result to softmax-encoded form\n    result = sci_softmax(result, axis=1)\n    return result\n\n# compute the loss value and the prediction accuracy 
предсказания относительно верных ответов\ndef evaluate(prediction, answers): \n cross_entropy_loss = nn.CrossEntropyLoss()\n \n correct = (np.argmax(prediction, axis=1) == np.argmax(answers, axis=1)).sum()\n accuracy = correct / prediction.shape[0]\n \n pred_tensor = torch.from_numpy(prediction).float()\n ans_tensor = torch.from_numpy(answers).float()\n loss = cross_entropy_loss(pred_tensor, ans_tensor)\n loss = loss.item()\n\n return loss, accuracy\n\n#подготовка данных для атаки\nlabels = df.columns\ntemp = pd.DataFrame(columns = labels)\n #для каждого из распознаваемых пользователей выбирается по 3 подписи\nfor i in users :\n temp = pd.concat([temp,df[df['id_user'].isin([i])].head(3)],ignore_index=True,)\nprint(temp)\nX_for_attack = temp[temp['id_user'].isin(users)]\nY_for_attack = X_for_attack['id_user'].to_numpy()\nY_for_attack = to_categorical(Y_for_attack, 23)\nX_for_attack = X_for_attack.drop(columns = 'id_user')\nX_for_attack = X_for_attack.to_numpy()\nX_for_attack = X_for_attack.astype('float32')\n\n#проведение атаки для обученных датасетов\nfor iteration in range(30):\n print('iteration №', iteration + 1)\n \n model_path ='../BNN_models/BNN_model_'+str(iteration)+'/'\n classifier = BlackBoxClassifierNeuralNetwork(predict_fn = get_predict,\n nb_classes = 23, \n input_shape=(144,))\n \n attack = BoundaryAttack(classifier, max_iter = 1) \n x_test_adv = attack.generate(x=X_for_attack, y=Y_for_attack)\n \n Y_test_adv = get_predict(x_test_adv)\n print(evaluate(Y_test_adv, Y_for_attack))","sub_path":"AdvAttacks/BNN/Boundary_BNN.py","file_name":"Boundary_BNN.py","file_ext":"py","file_size_in_byte":4352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"581183791","text":"# Name - Harshith Latchupatula\n# Date - September 8, 2020\n# Language - Python 3.8\n# Contest Completed - Yes\n\ndef shiftySum():\n num = int(input('Enter the number: '))\n shifts = int(input('Enter the number: '))\n shiftSum = num\n for x in range(shifts):\n singleNum = str(num)\n ending = ''\n for y in range(x+1):\n ending = ending + '0'\n singleNum = singleNum + ending\n shiftSum = shiftSum + int(singleNum)\n print(shiftSum)\n\n\nshiftySum()","sub_path":"Junior_Level/J_2017/J2_2017.py","file_name":"J2_2017.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"300392583","text":"\"\"\" Input parameters to calculate the intrinsic valuation of a business\"\"\"\n\nimport pandas as pd\nimport sys\nsys.path.append('..')\n\n\ndef get_inp_params(tick,finances,fin_others,mkt_data):\n \"\"\" Returns the dictionary of input parameters\"\"\"\n\n # Get the fundamental data\n b = finances.get_sheet(tick,\"balance_sheet\")\n i = finances.get_sheet(tick,\"income_sheet\")\n c = finances.get_sheet(tick,\"cashflow_sheet\")\n o = fin_others.get_sheet(tick)\n mk = mkt_data.get_stock_data(tick)\n\n inp_params = {}\n inp_params['inc_country'] = 'USA'\n inp_params['industry'] = 'default'\n inp_params['revenue'] = i.loc['revt'].iloc[-1] # latest revenue\n inp_params['ebit'] = i.loc['pi'].iloc[-1]\n inp_params['interest_expense'] = i.loc['xint'].iloc[-1]\n inp_params['bk_val_equity'] = b.loc['seq'].iloc[-1]\n inp_params['bk_val_debt'] = b.loc['dlc'].iloc[-1] + b.loc['dltt'].iloc[-1]\n inp_params['R_D_expenses'] = False\n inp_params['lease_commit'] = False\n inp_params['cash_eq'] = b.loc['che'].iloc[-1]\n inp_params['outstanding_shares'] = mk.loc['csho'].iloc[-1]\n inp_params['curr_stock_price'] 
= mk.loc['prcc_c'].iloc[-1]\n inp_params['eff_tax_r'] = (1.0*i.loc['txt'].iloc[-1])/i.loc['pi'].iloc[-1]\n inp_params['marg_tax_r'] = .28\n\n # Drivers\n # Compounded annual growth rate over the next 5 years, growth driver\n inp_params['rev_growth_f'] = .25\n # Target pre-tax operating margin or EBIT wrt revenue, profitability driver\n inp_params['target_ebit'] = .25\n # reinvestment, efficiency driver\n inp_params['sales_to_capital'] = 5\n\n # Market parameters\n # Risk-free rate (rf_rate), US 10 year note\n inp_params['rf_rate'] = 0.02405\n # Initial cost of capital\n inp_params['cost_of_capital'] = 0.08\n\n # Other params\n # Employee stock options (ESO) outstanding\n inp_params['ESO'] = True\n inp_params['number_of_ESO'] = 4.4\n inp_params['avg_strike_price'] = 6.05\n inp_params['avg_maturity'] = 2.6\n inp_params['std_stock'] = .3\n\n # Assumptions\n\n # 1. In stable growth, cost of capital = risk_free_rate + 4.5%\n cc_default = inp_params['rf_rate'] + 0.045\n inp_params['stable_cc_default'] = [False,0.085]\n # To use other cc value, use False and cc after year 10\n\n # 2. After year 10, return on capital = cost of capital after year 10.\n # Competitive advantage fades over time\n roc_base = inp_params['cost_of_capital']\n inp_params['roc_default'] = [False,14]\n # To use other, set the values to False and expected ROC after year 10\n\n # 3. Chance of failure in the foreseeable future\n inp_params['failure_prob'] = 0.0\n inp_params['proceeds'] = inp_params['bk_val_equity']\n inp_params['val_of_proceeds'] = 0.5 # % of value of proceeds if the firm fails\n\n # 4. Effective tax rate normalizes to marginal tax rate\n inp_params['tax_normalization'] = False\n\n # 5. Growth assumptions\n # After year 10, growth = risk free rate\n # Ensures that unreasonable growth rates are not used\n stable_avg_growth_rate = inp_params['rf_rate']\n inp_params['stable_growth_rate_default'] = [False,0.03]\n # To use other values, use False and growth rate after year 10\n\n # 6. Cash in other countries\n inp_params['foreign_cash'] = 0.0\n inp_params['avg_tx_rate_foreign'] = 0.15\n\n return inp_params\n","sub_path":"valuation/input_params.py","file_name":"input_params.py","file_ext":"py","file_size_in_byte":3329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"504818115","text":"from flask import request\nfrom urllib.parse import urlparse\nfrom datetime import datetime, timedelta\nfrom functools import reduce\nfrom tempfile import NamedTemporaryFile\nfrom binascii import hexlify\nfrom collections import OrderedDict\nimport os\nimport operator\nimport json\nimport re\nimport sys\n\nfrom .api import local_api\nfrom .util import Signature, verify_hex_digest\nfrom .errors import PermissionsError, NoSuchAppError, \\\n NoSuchAppsError, NoSuchPermissionError\n\nADMIN_APP_URL='admin.intrustd.com'\nINSTALL_APP_PERMISSION='install-apps'\nADMIN_NUCLEAR_PERMISSION='nuclear'\nLOGIN_PERMISSION='login'\nSITE_PERMISSION='site'\nGUEST_PERMISSION='guest'\nDAEMON_PERMISSION='daemon'\nINET_PERM_RE=re.compile('internet/(tcp|udp)/[0-9]+(/(ingoing|outgoing|both))?')\n\nINTRUSTD_DELEGATED_TOKENS_DIR='/intrustd/delegated'\n # At most allow seven days for this to be approved. 
TODO allow customization\nMAX_DELEGATION_TIME=timedelta(days=7)\n\nTRANSFER_SUFFIX='/transfer'\nTRANSFER_ONCE_SUFFIX='/transfer_once'\n\ndef _has_admin_permission(p, container_info):\n base_perm = p.base_permission\n if base_perm.permission in ( INSTALL_APP_PERMISSION,\n ADMIN_NUCLEAR_PERMISSION,\n LOGIN_PERMISSION,\n SITE_PERMISSION,\n GUEST_PERMISSION,\n DAEMON_PERMISSION ):\n return True\n else:\n return INET_PERM_RE.fullmatch(base_perm.permission) is not None\n\ndef get_builtin_perm(perm_name):\n if perm_name == ADMIN_NUCLEAR_PERMISSION:\n return { 'needs_site': True,\n 'needs_persona': True,\n 'needs_app': False,\n 'needs_login': True,\n 'max_ttl': 10 * 60 } # TODO make this configurable\n elif perm_name == SITE_PERMISSION:\n return { 'needs_site': True,\n 'needs_persona': False,\n 'needs_app': False,\n 'needs_login': False,\n 'max_ttl': 24 * 60 * 60 } # TODO make this configurable\n elif perm_name == LOGIN_PERMISSION:\n return { 'needs_site': False,\n 'needs_persona': True,\n 'needs_app': False,\n 'needs_login': False,\n 'max_ttl': 24 * 60 * 60 }\n elif perm_name == GUEST_PERMISSION:\n return { 'needs_site': False,\n 'needs_persona': True,\n 'needs_app': False,\n 'needs_login': False,\n 'max_ttl': None }\n elif perm_name == DAEMON_PERMISSION or \\\n INET_PERM_RE.fullmatch(perm_name) is not None:\n return { 'needs_site': False,\n 'needs_persona': True,\n 'needs_app': True,\n 'needs_login': False,\n 'max_ttl': None }\n else:\n return None\n\ndef find_perm(perms, perm_name):\n for i, p in enumerate(perms):\n if 'name' in p and p['name'] == perm_name:\n return p, i\n elif 'regex' in p and re.fullmatch(p['regex'], perm_name):\n return p, i\n return None, None\n\nclass ApplicationUrl(object):\n def __init__(self, app_domain, app_name):\n self.domain = app_domain\n self.name = app_name\n\n\nclass PermSecurity(object):\n '''Permissions can only be assigned to tokens that meet certain criteria.\n\n For example, intrustd+perm://admin.intrustd.com/nuclear (the highest\n privilege) cannot be given to a guest or a non-site token.\n\n This class describes what kind of token is required for this permission\n '''\n\n __slots__ = ( 'description',\n 'needs_site',\n 'needs_app',\n 'needs_persona',\n 'needs_login',\n 'max_ttl',\n 'dynamic',\n 'index',\n 'superuser_only', )\n\n def __init__(self,\n needs_site = False,\n needs_persona = False,\n needs_login = False,\n needs_app = False,\n max_ttl = None,\n dynamic = False,\n index = None,\n description = None,\n superuser_only = False):\n '''Initialize this permission security object.\n\n :param bool needs_site Whether or not this permission needs to be specific to a site\n :param bool needs_persona Whether or not this permission needs to be specific to a persona\n :param bool needs_login Whether or not this permission can only be applied to a token that has a contemporaneous login\n '''\n\n self.needs_site = needs_site\n self.needs_persona = needs_persona\n self.needs_login = needs_login\n self.needs_app = needs_app\n self.max_ttl = max_ttl\n self.dynamic = dynamic\n self.index = index\n self.description = description\n self.superuser_only = superuser_only\n\n def __str__(self):\n return repr(self)\n\n def __repr__(self):\n return 'PermSecurity(needs_site={self.needs_site}, needs_persona={self.needs_persona}, needs_app={self.needs_app}, needs_login={self.needs_login}, superuser_only={self.superuser_only}, max_ttl={self.max_ttl}, dynamic={self.dynamic}, index={self.index}, description={self.description})'.format(self=self)\n\n def __or__(self, other):\n max_ttl = self.max_ttl\n if max_ttl is None:\n max_ttl = other.max_ttl\n elif other.max_ttl is not None:\n max_ttl = min(self.max_ttl, other.max_ttl)\n return PermSecurity(needs_site=self.needs_site or other.needs_site,\n needs_persona=self.needs_persona or other.needs_persona,\n needs_login=self.needs_login or other.needs_login,\n needs_app=self.needs_app or other.needs_app,\n max_ttl=max_ttl,\n dynamic=self.dynamic or other.dynamic,\n superuser_only=self.superuser_only or other.superuser_only)\n\n @staticmethod\n def from_json(d, index=None):\n return PermSecurity(needs_site=d.get('needs_site'),\n needs_persona=d.get('needs_persona'),\n needs_login=d.get('needs_login'),\n needs_app=d.get('needs_app', False),\n max_ttl=d.get('max_ttl'),\n dynamic=d.get('dynamic', False),\n description=d.get('description', None),\n index=index,\n superuser_only=d.get('superuser', False))\n\nclass Permission(object):\n def __init__(self, url_or_perm, app_url=None, relative_to=None):\n if app_url is None:\n try:\n res = urlparse(url_or_perm)\n except ValueError:\n raise TypeError(\"%s is not a valid URL\" % url_or_perm)\n\n if res.scheme != 'intrustd+perm':\n if relative_to is None:\n raise TypeError(\"Expected intrustd+perm as permissions URL scheme\")\n else:\n self.app = relative_to\n else:\n self.app = res.hostname\n\n if len(res.path) == 0 or res.path[0] != '/':\n raise ValueError(\"Invalid path name in permission\")\n\n path = os.path.normpath(res.path)\n components = path.split('/')\n if path.startswith('//'):\n components = components[1:]\n\n if len(components) < 2:\n raise ValueError(\"Need at least one component in permission path\")\n\n self.permission = '/'.join(components[1:])\n else:\n self.app = app_url\n self.permission = url_or_perm\n\n def __repr__(self):\n return 'Permission({})'.format(self.canonical)\n\n def __str__(self):\n return self.canonical\n\n def __hash__(self):\n return hash(self.canonical)\n\n def __eq__(self, a):\n return isinstance(a, Permission) and \\\n self.app == a.app and \\\n self.permission == a.permission\n\n @property\n def transferred(self):\n if self.permission.endswith(TRANSFER_SUFFIX):\n transferred = Permission(self.permission[:-len(TRANSFER_SUFFIX)], app_url=self.app)\n transferred_once = Permission(transferred.permission + TRANSFER_ONCE_SUFFIX, app_url=self.app)\n return set([transferred, transferred_once, self])\n elif self.permission.endswith(TRANSFER_ONCE_SUFFIX):\n transferred = Permission(self.permission[:-len(TRANSFER_ONCE_SUFFIX)], app_url=self.app)\n return set([transferred])\n else:\n return set()\n\n @property\n def base_permission(self):\n if self.permission.endswith(TRANSFER_SUFFIX):\n return Permission(self.permission[:-len(TRANSFER_SUFFIX)], app_url=self.app).base_permission\n elif self.permission.endswith(TRANSFER_ONCE_SUFFIX):\n return Permission(self.permission[:-len(TRANSFER_ONCE_SUFFIX)], app_url=self.app).base_permission\n else:\n return self\n\n @property\n def is_base(self):\n return not self.permission.endswith(TRANSFER_SUFFIX) and \\\n not self.permission.endswith(TRANSFER_ONCE_SUFFIX)\n\n @property\n def canonical(self):\n return 'intrustd+perm://{}/{}'.format(self.app, self.permission)\n\n @property\n def application(self):\n return self.app\n\n def __hash__(self):\n return hash(self.canonical)\n\n def __eq__(self, other):\n if isinstance(other, Permission):\n return self.canonical == other.canonical\n else:\n return False\n\n def perm_security(self, api=None, persona_id=None):\n if hasattr(self, '_perm_security'):\n return 
self._perm_security\n else:\n self._perm_security = self.lookup_perm_security(api, persona_id)\n return self._perm_security\n\n def lookup_perm_security(self, api=None, persona_id=None):\n '''Permission information is stored at /intrustd/perms.json\n\n Example:\n [ { name: \"name\", needs_site: true/false, needs_persona: true/false },\n { regex: \"regex\", dynamic: true/false } ]\n '''\n\n if not self.is_base:\n return self.base_permission.lookup_perm_security(api=api, persona_id=persona_id)\n\n # Find the application closure directory\n app_info = api.get_application_info(self.application)\n if app_info is None:\n raise NoSuchAppError(self.application)\n\n manifest = app_info['manifest']\n closure = manifest.nix_closure\n\n # Open the permissions file\n try:\n with open(os.path.join(closure, \"permissions.json\")) as perms:\n perms_info = json.load(perms)\n perm, i = find_perm(perms_info, self.permission)\n except FileNotFoundError:\n perm = None\n\n if perm is None and self.application == ADMIN_APP_URL:\n perm = get_builtin_perm(self.permission)\n\n if perm is None:\n raise NoSuchPermissionError(self.canonical)\n\n if perm.get('dynamic', False):\n cmd = \"/app/perms --lookup /{permission} {persona_flag} --application {application}\".format(\n persona_flag = (\"--persona {}\".format(persona_id) if persona_id is not None else \"\"),\n permission=self.permission, application=self.application)\n\n proc = api.run_in_app(self.application, cmd, persona=persona_id, wait=True,\n stdout=api.PIPE, stdin=None, stderr=sys.stdout)\n\n stdout, stderr = proc.communicate()\n\n if proc.returncode == 0:\n return PermSecurity.from_json(json.loads(stdout), i)\n else:\n raise NoSuchPermissionError(self.permission)\n else:\n return PermSecurity.from_json(perm, i)\n\nclass TokenSet(object):\n def __init__(self, api, token_names):\n self.tokens = []\n for token_name in set(token_names):\n token = api.open_token(token_name)\n if token is not None:\n self.tokens.append(Token.from_dict(token))\n\n def check_permission(self, perm):\n return any(token.check_permission(perm) for token in self.tokens)\n\n @property\n def all_permissions(self):\n if hasattr(self, '_all_permissions'):\n return self._all_permissions\n else:\n perms = set()\n for token in self.tokens:\n perms |= token.permissions\n self._all_permissions = list(perms)\n return self._all_permissions\n\n def __iter__(self):\n return iter(self.tokens)\n\nclass TokenRequest(object):\n def __init__(self, permissions, ttl=None, site=None):\n self.permissions = permissions\n self.site = site\n if ttl is None:\n self.expiry = None\n else:\n self.expiry = datetime.now() + timedelta(seconds=ttl)\n\n @property\n def is_transfer(self):\n return self.site is not None\n\n def tokenize(self, api, persona_id=None, site_id=None, app_url=None):\n securities = []\n missing_apps = set()\n\n for p in self.permissions:\n try:\n securities.append(p.perm_security(api, persona_id))\n except NoSuchAppError as e:\n missing_apps.add(e.app)\n\n if len(missing_apps) > 0:\n raise NoSuchAppsError(missing_apps)\n\n if any(security is None for security in securities):\n return None\n\n required_security = reduce(operator.or_, securities,\n PermSecurity())\n\n site_needed = None\n if required_security.needs_site and site_id is None and self.site is None:\n raise PermissionsError.site_required()\n elif required_security.needs_site:\n site_needed = site_id\n if self.site is not None:\n site_needed = self.site\n\n persona_needed = None\n if required_security.needs_persona and persona_id is 
None:\n raise PermissionsError.persona_required()\n elif required_security.needs_persona:\n persona_needed = persona_id\n\n app_needed = None\n if required_security.needs_app and app_url is None:\n raise PermissionsError.app_instance_required()\n elif required_security.needs_app:\n app_needed = app_url\n\n required_expiry = None\n if required_security.max_ttl is not None:\n required_expiry = datetime.now() + timedelta(seconds=required_security.max_ttl)\n\n new_expiry = self.expiry\n if new_expiry is None or (required_expiry is not None and required_expiry < new_expiry):\n new_expiry = required_expiry\n\n return Token(persona_id=persona_needed, site_id=site_needed,\n app_url=app_needed, login_required=required_security.needs_login,\n expires=new_expiry,\n permissions=self.permissions)\n\nclass VerificationResult(object):\n __slots__ = ('accepted', 'denied')\n def __init__(self, accepted=None, denied=None):\n if accepted is None:\n accepted = []\n\n if denied is None:\n denied = []\n\n self.accepted = accepted\n self.denied = denied\n\n @property\n def all_accepted(self):\n return len(self.denied) == 0\n\nclass Token(object):\n def __init__(self, persona_id=None, site_id=None, app_url=None, login_required=False,\n permissions=None, expires=None):\n if permissions is None:\n permissions = []\n\n self.persona = persona_id\n self.site = site_id\n self.app_url = app_url\n self.login_required = bool(login_required)\n self.expires = expires\n\n self.delegated = False\n\n self.permissions = set(self._make_permission(p) for p in permissions)\n\n def grouped_permissions(self):\n ret = {}\n for p in self.permissions:\n if p.app in ret:\n ret[p.app].append(p)\n else:\n ret[p.app] = [p]\n return ret\n\n @staticmethod\n def _make_permission(p):\n if isinstance(p, Permission):\n return p\n elif isinstance(p, str):\n return Permission(p)\n else:\n raise TypeError(\"Permission should be 'Permission' object or string\")\n\n def check_permission(self, p):\n return self._make_permission(p) in self.permissions\n\n def _verify_dynamic_permissions(self, api, app, persona_id, cur_set, needed):\n persona_flag = \"\"\n\n if persona_id is not None:\n persona_flag = \"--persona {}\".format(persona_id)\n\n needed_arg = \" \".join((\"/\" + x.permission) for x in needed)\n\n cmd = \"/app/perms --check {persona_flag} --application {app} {needed_arg}\".format(**locals())\n\n proc = api.run_in_app(app, cmd, persona=persona_id, wait=True,\n stdout=api.PIPE, stdin=api.PIPE, stderr=sys.stdout)\n\n stdout, _ = proc.communicate(\"\\n\".join(str(p) for p in cur_set))\n\n if proc.returncode == 0:\n result = json.loads(stdout)\n\n accepted = set()\n denied = set()\n\n for a in result.get('accepted', []):\n p = Permission(a, relative_to=app)\n if p in needed:\n accepted.add(p)\n else:\n print(\"Got permission that was not requested: {} (permission={}, app={})\".format(a, p.permission, p.app))\n\n for a in result.get('denied', []):\n p = Permission(a, relative_to=app)\n if p in needed:\n denied.add(p)\n else:\n print(\"Got permission that was not requested: {} (permission={}, app={})\".format(a, p.permission, p.app))\n\n needed_set = needed\n missing_set = needed_set - accepted - denied\n\n denied |= missing_set\n\n return VerificationResult(accepted=accepted, denied=denied)\n else:\n return VerificationResult(accepted=set(), denied=set(needed))\n\n def _verify_transfer(self, transferrable_perms, app, perms, api, persona_id=None):\n '''Verify that we have the rights to transfer permissions\n\n We have the right to transfer 
permissions if we have the\n perm/transfer_once or perm/transfer permissions. However,\n these permissions may be implied by the presence of others.\n\n In order to determine this, we take all the permissions we\n have that are transferrable and collect them. Then, we look up\n the information for this permission in the app's\n permission.json. If the permission is marked as dynamic, then\n we ask the application whether or not the current set of\n transferrable permissions allows us to transfer this\n one. Otherwise, we assume the permission is transferrable.\n\n '''\n denied = set()\n accepted = set()\n\n for p in perms:\n if self._make_permission(p) in transferrable_perms:\n accepted.add(p)\n else:\n denied.add(p)\n\n # If any denied perm is dynamic, ask if this transfer is possible\n denied_perms_security = reduce(operator.or_, (p.perm_security(api, persona_id) for p in denied), PermSecurity())\n if denied_perms_security.dynamic:\n res = self._verify_dynamic_permissions(api, app, persona_id, transferrable_perms, denied)\n accepted |= res.accepted\n denied = res.denied\n\n return VerificationResult(accepted=accepted,\n denied=denied)\n\n def verify_permissions(self, api, container_info, is_transfer=False):\n accepted = []\n denied = []\n\n persona_id = container_info.get('persona_id')\n persona = api.get_persona_info(persona_id)\n\n tokens = TokenSet(api, container_info.get('tokens', []))\n # Collect all transferrable permissions from tokens\n transferrable = reduce(operator.or_, (self._make_permission(p).transferred for p in tokens.all_permissions), DEFAULT_TRANSFERRABLE)\n\n if container_info.get('logged_in', False) or \\\n tokens.check_permission(Permission(ADMIN_NUCLEAR_PERMISSION, app_url=ADMIN_APP_URL)):\n for p in self.permissions:\n if p.app == ADMIN_APP_URL:\n if _has_admin_permission(p, container_info):\n accepted.append(p)\n else:\n denied.append(p)\n else:\n if p.perm_security(api, persona_id).superuser_only and not persona['superuser']:\n denied.append(p)\n else:\n accepted.append(p)\n else:\n for app, perms in self.grouped_permissions().items():\n res = self._verify_transfer(transferrable, app, perms, api, persona_id=persona_id)\n accepted.extend(res.accepted)\n denied.extend(res.denied)\n\n return VerificationResult(accepted=accepted, denied=denied)\n\n def to_dict(self):\n app_set = set(p.application for p in self.permissions)\n ret = { 'permissions': [p.canonical for p in self.permissions],\n 'applications': list(app_set),\n 'login_required': self.login_required }\n if self.persona is not None:\n ret['persona'] = self.persona\n if self.app_url is not None:\n ret['app_url'] = self.app_url\n if self.site is not None:\n ret['site'] = self.site\n\n if self.expires is not None:\n ret['expiration'] = self.expires.isoformat()\n\n return ret\n\n @staticmethod\n def from_dict(d):\n kwargs = {}\n kwargs['permissions'] = [Permission(p) for p in d.get('permissions', [])]\n if 'persona' in d:\n kwargs['persona_id'] = d['persona']\n if 'site' in d:\n kwargs['site_id'] = d['site']\n if 'expiration' in d:\n kwargs['expires'] = datetime.strptime(d['expiration'], \"%Y-%m-%dT%H:%M:%S.%f\")\n if 'app_url' in d:\n kwargs['app_url'] = d['app_url']\n kwargs['login_required'] = d.get('login_required', False)\n return Token(**kwargs)\n\n def _mint_secret(self):\n MIN_SECRET_LENGTH = 128\n return hexlify(os.urandom(MIN_SECRET_LENGTH)).decode('ascii')\n\n @staticmethod\n def from_delegated(delegated):\n if not verify_hex_digest(delegated):\n print('from_delegated: invalid hex digest', 
delegated)\n return None\n\n try:\n with open(os.path.join(INTRUSTD_DELEGATED_TOKENS_DIR, delegated), 'rt') as f:\n return Token.from_dict(json.load(f))\n except IOError:\n return None\n\n def delegate(self):\n if not self.delegated:\n self.delegated = True\n latest_expires = datetime.now() + MAX_DELEGATION_TIME\n if self.expires is not None:\n self.expires = min(self.expires, latest_expires)\n else:\n self.expires = latest_expires\n\n os.makedirs(INTRUSTD_DELEGATED_TOKENS_DIR, exist_ok=True)\n return self._save(INTRUSTD_DELEGATED_TOKENS_DIR)\n\n def save(self, api):\n return self._save(api.tokens_dir)\n\n def _save(self, directory):\n '''Saves this permission by writing it to a temporary file while\n calculating the sha256sum.\n\n Then, signs the sha256sum with our signing key and base64\n encodes the result.\n\n This becomes the token identifier.\n '''\n with NamedTemporaryFile(mode='wb', dir=directory) as fl:\n\n json_data = self.to_dict()\n json_data['secret'] = self._mint_secret()\n\n data = json.dumps(json_data, ensure_ascii=True)\n fl.write(data.encode('ascii'))\n\n sfl = Signature(data)\n token = sfl.hex_digest\n\n token_filename = os.path.join(directory, sfl.hex_digest)\n\n try:\n os.link(fl.name, token_filename)\n except FileExistsError:\n pass\n\n return token\n\n def describe(self, api, persona_id):\n r = TokenDescription(self.permissions)\n\n grouped = self.grouped_permissions()\n for app, perms in grouped.items():\n app_info = api.get_application_info(app)\n section = r.get_section(app_info['manifest'])\n\n if app == ADMIN_APP_URL:\n entries = _describe_admin_perms(perms)\n else:\n\n # Ask for a dynamic description\n cmd = \"/app/perms --describe {persona_flag} --application {application}\".format(\n persona_flag = '' if persona_id is None else \"--persona {}\".format(persona_id),\n application = app)\n\n proc = api.run_in_app(app, cmd, persona=persona_id, wait=True,\n stdout=api.PIPE, stdin=api.PIPE, stderr=sys.stdout)\n\n stdout, _ = proc.communicate(\"\\n\".join(p.canonical for p in perms))\n\n if proc.returncode == 0:\n entries = json.loads(stdout)\n\n else:\n raise ValueError(\"Could not describe permissions for {}: process exited with {}\".format(app, proc.returncode))\n\n for e in entries:\n section.add_entry(e)\n\n return r\n\nclass TokenDescriptionEntry(object):\n __slots__ = ( 'short',\n 'long',\n 'image', )\n\n def __init__(self, short_or_desc, long=None, image=None):\n if isinstance(short_or_desc, str):\n self.short = short_or_desc\n self.long = long\n self.image = image\n\n elif isinstance(short_or_desc, dict):\n self.short = short_or_desc.get('short')\n self.long = short_or_desc.get('long', '')\n self.image = short_or_desc.get('image')\n\n else:\n raise TypeError(\"Expected either short description or JSON-dict\")\n\n if self.short is None:\n raise TypeError(\"No short description given for permission\")\n\n def to_json(self):\n r = { 'short': self.short }\n\n if self.long is not None:\n r['long'] = self.long\n\n if self.image is not None:\n r['image'] = self.image\n\n return r\n\nclass TokenDescriptionSection(object):\n '''Description of permissions associated with a particular application\n '''\n def __init__(self, mf, api=None, persona_id=None):\n self.app_manifest = mf\n self.entries = []\n self._api = api\n self._persona_id = persona_id\n self._all_permissions = None\n self._permissions = set()\n self._dynamic_permissions = {}\n\n @property\n def all_permissions(self):\n if self._all_permissions is None:\n with 
open(os.path.join(self.app_manifest.nix_closure, \"permissions.json\")) as perms:\n self._all_permissions = json.load(perms)\n return self._all_permissions\n\n def add_entry(self, short_or_desc):\n entry = TokenDescriptionEntry(short_or_desc)\n self.entries.append(entry)\n return entry\n\n def to_json(self):\n return { 'domain': self.app_manifest.domain,\n 'name': self.app_manifest.name,\n 'run-as-admin': self.app_manifest.run_as_admin,\n 'singleton': self.app_manifest.singleton,\n 'version': self.app_manifest.version,\n 'icon': self.app_manifest.icon,\n\n 'entries': [ e.to_json() for e in self.entries ] }\n\nclass TokenDescription(object):\n '''Description of a set of permissions\n '''\n def __init__(self, perms):\n self.apps = OrderedDict()\n self.perms = perms\n\n def get_section(self, app_manifest):\n if app_manifest.domain not in self.apps:\n self.apps[app_manifest.domain] = TokenDescriptionSection(app_manifest)\n\n return self.apps[app_manifest.domain]\n\n def to_json(self):\n return { 'sections': [section.to_json() for section in self.apps.values()],\n 'perms': [p.canonical for p in self.perms] }\n\ndef has_install_permission(perms):\n return Permission(INSTALL_APP_PERMISSION, ADMIN_APP_URL) in perms or \\\n Permission(ADMIN_NUCLEAR_PERMISSION, ADMIN_APP_URL) in perms\n\ndef _describe_admin_perms(ps):\n ps = set(p.permission for p in ps)\n r = []\n\n if ADMIN_NUCLEAR_PERMISSION in ps:\n return [ { 'short': 'Administer this user' } ]\n\n if INSTALL_APP_PERMISSION in ps:\n r.append({ 'short': 'Install applications for this user' })\n\n if LOGIN_PERMISSION in ps:\n r.append({ 'short': 'Login as this user' })\n\n if GUEST_PERMISSION in ps:\n r.append({ 'short': 'Invite others to view this user\\'s data' })\n\n return r\n\ndef can_request_perms_for(requestor=None, for_ip=None, api=None):\n if requestor is None or for_ip is None:\n raise TypeError(\"Expected 'requestor' and 'for_ip' arguments\")\n\n if api is None:\n with local_api() as api:\n return can_request_perms_for(requestor=requestor, for_ip=for_ip, api=api)\n else:\n ip_info = None\n if for_ip != request.remote_addr:\n info = api.get_container_info(request.remote_addr)\n if info is None:\n return False\n\n if info['type'] == 'app_instance':\n ip_info = api.get_container_info(for_ip)\n if ip_info is None:\n return False\n\n if ip_info.get('logged_in', False):\n return True\n\n ip_tokens = TokenSet(api, ip_info.get('tokens', []))\n if all(p.app != info['app_url'] for p in ip_tokens.all_permissions):\n return False\n\n elif info['type'] == 'persona':\n tokens = TokenSet(api, info.get('tokens', []))\n # Find permission nuclear\n if not tokens.check_permission(Permission(ADMIN_NUCLEAR_PERMISSION, ADMIN_APP_URL)):\n return False\n\n else:\n return False\n\n return True\n else:\n return True\n\nDEFAULT_TRANSFERRABLE = \\\n Permission(SITE_PERMISSION + TRANSFER_SUFFIX, ADMIN_APP_URL).transferred\n","sub_path":"intrustd/admin/permission.py","file_name":"permission.py","file_ext":"py","file_size_in_byte":30507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"100660096","text":"from flask import Flask, request, redirect, render_template, session, flash\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\napp.config['DEBUG'] = True\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://build-a-blog:beproductive@localhost:3306/build-a-blog'\napp.config['SQLALCHEMY_ECHO'] = True\ndb = SQLAlchemy(app)\napp.secret_key = 'learningblog101'\n\nclass Blog(db.Model):\n\n id = 
db.Column(db.Integer, primary_key=True)\n post_title = db.Column(db.String(120))\n post_comment = db.Column(db.Text)\n\n def __init__(self, post_title, post_comment):\n self.post_title = post_title\n self.post_comment = post_comment\n\n@app.route('/', methods=['POST','GET'])\ndef redirect_to_blog():\n return redirect('/blog')\n\n@app.route('/blog', methods=['POST', 'GET'])\ndef index():\n post_id = request.args.get(\"id\")\n if post_id:\n blog = Blog.query.get(post_id)\n return render_template('single_post.html', blog=blog)\n blogs = Blog.query.all()\n return render_template('main_blog.html', blogs=blogs)\n\n@app.route('/add_blog', methods=['POST', 'GET'])\ndef add_blog():\n title = \"\"\n comment = \"\"\n if request.method == \"POST\":\n title = request.form['new_blog']\n comment = request.form['blog_entry']\n if title and comment:\n post = Blog(title, comment)\n db.session.add(post)\n db.session.commit()\n post_link = \"/blog?id=\" + str(post.id)\n return redirect(post_link)\n return render_template('add_blog.html', new_blog=title, blog_entry=comment) \n\nif __name__ == '__main__':\n app.run()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"323761998","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Tests for ReconstructionGeometry.\"\"\"\n\n\nimport unittest\nimport numpy as np\nimport astra\nimport tomosipo as ts\n\n\nclass TestReconstructionGeometry(unittest.TestCase):\n \"\"\"Tests for ReconstructionGeometry.\"\"\"\n\n def setUp(self):\n \"\"\"Set up test fixtures, if any.\"\"\"\n pass\n\n def tearDown(self):\n \"\"\"Tear down test fixtures, if any.\"\"\"\n pass\n\n def test_init(self):\n \"\"\"Test ReconstructionGeometry init.\"\"\"\n\n pd = ts.Data(ts.cone())\n vd = ts.Data(ts.VolumeGeometry())\n\n r = ts.ReconstructionGeometry(pd, vd)\n\n r = ts.ReconstructionGeometry(\n pd, vd, detector_supersampling=2, voxel_supersampling=2\n )\n\n def test_forward_backward(self):\n pd = ts.Data(ts.cone().reshape(10))\n vd = ts.Data(ts.VolumeGeometry().reshape(10))\n\n rs = [\n ts.ReconstructionGeometry(pd, vd),\n ts.ReconstructionGeometry(\n pd, vd, detector_supersampling=2, voxel_supersampling=2\n ),\n ts.ReconstructionGeometry(\n pd, vd, detector_supersampling=1, voxel_supersampling=2\n ),\n ts.ReconstructionGeometry(\n pd, vd, detector_supersampling=2, voxel_supersampling=1\n ),\n ]\n\n for r in rs:\n r.forward()\n r.backward()\n\n def test_fdk(self):\n interactive = False\n p = ts.Data(ts.cone(angles=100).reshape(100))\n v = ts.Data(ts.volume_from_projection_geometry(p.geometry).reshape(100))\n\n # Fill the projection data with random noise:\n proj = p.get()\n proj[:] = np.random.normal(size=proj.shape)\n proj[:] = abs(proj)\n\n r = ts.ReconstructionGeometry(p, v)\n\n if interactive:\n ts.display_data(p)\n\n ts.fdk(r)\n\n if interactive:\n ts.display_data(v)\n","sub_path":"tests/test_ReconstructionGeometry.py","file_name":"test_ReconstructionGeometry.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"133682268","text":"try:\n from django.conf import settings\n\n settings.configure(\n DEBUG=True,\n USE_TZ=True,\n DATABASES={\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": \"{{ cookiecutter.app_name }}.test.db\"\n }\n },\n ROOT_URLCONF=\"{{ cookiecutter.app_name }}.urls\",\n INSTALLED_APPS=[\n 
\"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sites\",\n \"{{ cookiecutter.app_name }}\",\n ],\n SITE_ID=1,\n NOSE_ARGS=['-s'],\n )\n\n try:\n import django\n setup = django.setup\n except AttributeError:\n pass\n else:\n setup()\n\nexcept ImportError:\n raise ImportError(\n \"To fix this error, run: pip install -r requirements-test.txt\"\n )\n","sub_path":"{{cookiecutter.repo_name}}/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"274803033","text":"import os\nimport uuid\nimport mimetypes\nimport urllib2\n\n\ndef stringifyImage(url, directory):\n tmpdir = directory\n mmtp = mimetypes.guess_type(url, strict=True)\n if not mmtp[0]:\n return False\n\n ext = mimetypes.guess_extension(mmtp[0])\n temp_file_name = tmpdir + '/' + str(uuid.uuid4()) + ext\n f = open(temp_file_name, 'wb')\n f.write(urllib2.urlopen(url).read())\n f.close()\n img = open(temp_file_name, \"rb\").read().encode(\"base64\").replace(\"\\n\", \"\")\n os.remove(temp_file_name)\n return 'data:' + mmtp[0] + ';base64,' + img","sub_path":"main/image_utility.py","file_name":"image_utility.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"428851095","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render\nfrom django.shortcuts import HttpResponse, render, redirect\nfrom models import userInfo\n\nfrom userForm import myform, ajaxform\n\n# Create your views here.\ndef users(request):\n user_list = userInfo.objects.all()\n return render(request, 'users.html', locals())\n\n\n'''\n auto_id:\n \n 生成的HTML元素中是否带有 id 属性\n \n 值:\n True(带id属性) | False(不带id属性) | 'id_%s'(带id属性,id格式是:id_元素的name名)\n\n label_suffix:\n\n 也可以在创建Field 的时候设置,如:\n fields.CharField(label='密码', label_suffix='? 
')\n 这样就会覆盖在 f = myform(auto_id=False, label_suffix=':') 时所设置 label_suffix\n'''\n\ndef adduserpage(request):\n\n f = myform(auto_id='id_%s', label_suffix=':')\n\n return render(request, 'adduser.html', locals())\n\ndef add_user(request):\n\n '''\n 使用普通方式添加 userinfo, 没有做输入验证\n :param request:\n :return:\n\n\n uname = request.POST.get('uname')\n upwd = request.POST.get('upwd')\n age = request.POST.get('age')\n email = request.POST.get('email')\n userInfo.objects.create(uname=uname, upwd=upwd, age=age, email=email)\n\n return redirect('users.html')\n\n '''\n '''\n 使用 form 做输入验证\n\n 上传文件时需要给 form 对象传 files=request.FILES 参数\n \n '''\n\n f = myform(request.POST, auto_id=False, files=request.FILES)\n\n if f.is_valid():\n userInfo.objects.create(**f.cleaned_data)\n return redirect('users.html')\n else:\n return render(request, 'adduser.html', {'f': f})\n\n\nimport json\nfrom django.forms.utils import ErrorDict\n\ndef adduser_ajax(request):\n\n ret = {'status':False, 'message':None}\n\n if request.method == 'GET':\n f = ajaxform()\n return render(request, 'adduser_ajax.html', {'f': f})\n else:\n f = ajaxform(request.POST)\n if f.is_valid():\n ret['status'] = True\n else:\n\n print(type(f.errors))\n\n '''\n 打印发现f.errors 是一个django.forms.utils.ErrorDict 类型, \n 导入 from django.forms.utils import ErrorDict,进入 ErrorDict 的源码发现,\n ErrorDict 继承自 Dict, 而且还有 as_data(), as_json(), as_ul(), as_text() 这几个可以转换数据格式\n 的方法\n \n '''\n # print(f.errors.as_ul())\n # print(f.errors.as_data())\n # print(f.errors.as_json())\n # print(f.errors.as_text())\n\n\n\n ret['message'] = f.errors\n\n ret = json.dumps(ret)\n print(ret)\n return HttpResponse(ret)\n\n\n","sub_path":"Exercise/Django/django_lesson9/app01/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"172037299","text":"#!/usr/bin/env python\n\nfrom plumbum import local, cli, FG\nfrom plumbum.cmd import sudo\n\nnewboard = local['./newboard.py']\nclear_tables = local['./clear_tables.pl']\nclass timedboard(cli.Application):\n \n total_time = cli.SwitchAttr([\"--time\",\"-t\"], float, default=10000)\n ntimes = cli.SwitchAttr([\"--num\",\"-n\"], int, default=1)\n \n def main(self):\n for i in range(self.ntimes):\n sudo[newboard]['-t', self.total_time] & FG\n clear_tables()\n \nif __name__ == '__main__':\n timedboard.run()\n \n","sub_path":"timedboard.py","file_name":"timedboard.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"194537284","text":"# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n# pyre-unsafe\n\nfrom __future__ import annotations\n\nimport enum\nimport logging\nfrom collections import namedtuple\nfrom datetime import datetime\nfrom decimal import Decimal\nfrom itertools import islice\nfrom typing import Any, Dict, List, Optional, Set, Tuple, Type\n\nfrom graphene_sqlalchemy.converter import (\n convert_column_to_int_or_id,\n convert_column_to_string,\n convert_sqlalchemy_type,\n)\nfrom munch import Munch\nfrom sqlalchemy import (\n Boolean,\n Column,\n DateTime,\n Enum,\n Float,\n Index,\n Integer,\n MetaData,\n String,\n Table,\n and_,\n exc,\n func,\n inspect,\n or_,\n types,\n)\nfrom sqlalchemy.dialects import mysql, sqlite\nfrom sqlalchemy.dialects.mysql import BIGINT, INTEGER\nfrom sqlalchemy.exc import NoSuchTableError\nfrom sqlalchemy.ext.associationproxy import association_proxy\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import Session, relationship\n\nfrom .db import DB\nfrom .db_support import (\n DBID,\n BIGDBIDType,\n DBIDType,\n MutableRecordMixin,\n PrepareMixin,\n PrimaryKeyBase,\n PrimaryKeyGeneratorBase,\n RecordMixin,\n)\nfrom .decorators import classproperty\nfrom .errors import AIException\nfrom .iterutil import split_every\n\n\nlog: logging.Logger = logging.getLogger(\"sapp\")\n\n\nBase = declarative_base()\nINNODB_MAX_INDEX_LENGTH = 767\nHANDLE_LENGTH = 255\nMESSAGE_LENGTH = 4096\nSHARED_TEXT_LENGTH = 4096\n\n\"\"\"Models used to represent DB entries\n\nAn Issue is a particular problem found. It can exist across multiple commits. A\nRun is a single run of Zoncolan over a specific commit. It may find new Issues,\nor existing Issues. 
Each run is tied to Issues through IssueInstances.\nIssueInstances have per run information, like source location, while Issues have\nattributes like the status of an issue.\n\"\"\"\n\n\nclass SourceLocation(object):\n \"\"\"The location in a source file that an error occurred in\n\n If end_column is defined then we have a range, otherwise it defaults to\n begin_column and we have a single point.\n \"\"\"\n\n def __init__(self, line_no, begin_column, end_column=None) -> None:\n self.line_no = line_no\n self.begin_column = begin_column\n self.end_column = end_column or self.begin_column\n\n def __eq__(self, other):\n return (\n self.line_no == other.line_no\n and self.begin_column == other.begin_column\n and self.end_column == other.end_column\n )\n\n def __str__(self):\n return SourceLocation.to_string(self)\n\n @staticmethod\n def from_string(location_string) -> SourceLocation:\n location_points = location_string.split(\"|\")\n assert len(location_points) == 3, \"Invalid location string %s\" % location_string\n return SourceLocation(*location_points)\n\n @staticmethod\n def to_string(location) -> str:\n return \"|\".join(\n map(str, [location.line_no, location.begin_column, location.end_column])\n )\n\n\nclass CaseSensitiveStringType(types.TypeDecorator):\n impl = types.String\n\n def load_dialect_impl(self, dialect):\n if dialect.name == \"mysql\":\n return dialect.type_descriptor(\n mysql.VARCHAR(length=255, collation=\"latin1_general_cs\")\n )\n elif dialect.name == \"sqlite\":\n return dialect.type_descriptor(\n sqlite.VARCHAR(length=255, collation=\"binary\")\n )\n else:\n raise AIException(\"%s not supported\" % dialect.name)\n\n\nclass SourceLocationType(types.TypeDecorator):\n \"\"\"Defines a new type of SQLAlchemy to store source locations.\n\n In python land we use SourceLocation, but when stored in the databae we just\n split the fields with |\n \"\"\"\n\n impl = types.String\n\n def __init__(self) -> None:\n super(SourceLocationType, self).__init__(length=255)\n\n def process_bind_param(self, value, dialect):\n \"\"\"\n SQLAlchemy uses this to convert a SourceLocation object into a string.\n \"\"\"\n if value is None:\n return None\n return SourceLocation.to_string(value)\n\n def process_result_value(self, value, dialect) -> Optional[SourceLocation]:\n \"\"\"\n SQLAlchemy uses this to convert a string into a SourceLocation object.\n We separate the fields by a |\n \"\"\"\n if value is None:\n return None\n\n p = value.split(\"|\")\n\n if len(p) == 0:\n return None\n return SourceLocation(*map(int, p))\n\n\nclass SourceLocationsType(types.TypeDecorator):\n \"\"\"Defines a type to store multiple source locations in a single string\"\"\"\n\n impl = types.String\n\n def __init__(self) -> None:\n super(SourceLocationsType, self).__init__(length=4096)\n\n def process_bind_param(self, value, dialect) -> Optional[str]:\n if value is None:\n return None\n return \",\".join([SourceLocation.to_string(l) for l in value])\n\n def process_result_value(self, value, dialect):\n if value is None or value == \"\":\n return []\n assert isinstance(value, str), \"Invalid SourceLocationsType %s\" % str(value)\n locations = value.split(\",\")\n return [SourceLocation.from_string(location) for location in locations]\n\n\n# See Issue.merge for information about replace_assocs\n\n\nclass IssueDBID(DBID):\n __slots__ = [\"replace_assocs\"]\n\n def __init__(self, id=None) -> None:\n super().__init__(id)\n self.replace_assocs = False\n\n\nclass IssueDBIDType(DBIDType):\n def process_result_value(self, 
value, dialect) -> IssueDBID:\n return IssueDBID(value)\n\n\nclass IssueBIGDBIDType(BIGDBIDType):\n def process_result_value(self, value, dialect) -> IssueDBID:\n return IssueDBID(value)\n\n\nclass IssueInstanceTraceFrameAssoc(Base, PrepareMixin, RecordMixin): # noqa\n\n __tablename__ = \"issue_instance_trace_frame_assoc\"\n\n issue_instance_id = Column(\n \"issue_instance_id\", BIGDBIDType, primary_key=True, nullable=False\n )\n\n trace_frame_id = Column(\n \"trace_frame_id\", BIGDBIDType, primary_key=True, nullable=False, index=True\n )\n\n issue_instance = relationship(\n \"IssueInstance\",\n primaryjoin=(\n \"IssueInstanceTraceFrameAssoc.issue_instance_id == \"\n \"foreign(IssueInstance.id)\"\n ),\n uselist=False,\n )\n\n trace_frame = relationship(\n \"TraceFrame\",\n primaryjoin=(\n \"IssueInstanceTraceFrameAssoc.trace_frame_id == \" \"foreign(TraceFrame.id)\"\n ),\n uselist=False,\n )\n\n @classmethod\n def merge(cls, session, items):\n return cls._merge_assocs(\n session, items, cls.issue_instance_id, cls.trace_frame_id\n )\n\n\nclass SharedTextKind(enum.Enum):\n # Do NOT reorder the enums. Depending on the type of database, existing\n # DBs may have these enums represented internally as ints based on the\n # order shown here, and changing it here messes up existing data. This\n # also means that new enums should be added AT THE END of the list.\n feature = enum.auto()\n message = enum.auto()\n source = enum.auto()\n sink = enum.auto()\n callable = enum.auto()\n filename = enum.auto()\n source_detail = enum.auto()\n sink_detail = enum.auto()\n\n @classproperty\n def FEATURE(cls):\n return cls.feature\n\n @classproperty\n def MESSAGE(cls):\n return cls.message\n\n @classproperty\n def SOURCE(cls):\n return cls.source\n\n @classproperty\n def SINK(cls):\n return cls.sink\n\n @classproperty\n def CALLABLE(cls):\n return cls.callable\n\n @classproperty\n def FILENAME(cls):\n return cls.filename\n\n @classproperty\n def SOURCE_DETAIL(cls):\n return cls.source_detail\n\n @classproperty\n def SINK_DETAIL(cls):\n return cls.sink_detail\n\n\nclass SharedText(Base, PrepareMixin, RecordMixin): # noqa\n \"\"\"Any string-ish type that can be shared as a property of some other\n object. (e.g. features, sources, sinks). 
The table name 'messages' is due\n to legacy reasons.\"\"\"\n\n __tablename__ = \"messages\"\n\n __table_args__ = (Index(\"ix_messages_handle\", \"contents\", \"kind\"),)\n\n # pyre-fixme[8]: Attribute has type `DBID`; used as `Column[typing.Any]`.\n id: DBID = Column(BIGDBIDType, primary_key=True)\n\n # pyre-fixme[8]: Attribute has type `str`; used as `Column[str]`.\n contents: str = Column(\n String(length=SHARED_TEXT_LENGTH), nullable=False, index=True\n )\n\n # pyre-fixme[8]: Attribute has type `SharedTextKind`; used as `Column[str]`.\n kind: SharedTextKind = Column(\n Enum(SharedTextKind), server_default=\"feature\", nullable=False, index=True\n )\n\n issue_instances = association_proxy(\"shared_text_issue_instance\", \"issue_instance\")\n\n shared_text_issue_instance = relationship(\n \"IssueInstanceSharedTextAssoc\",\n primaryjoin=(\n \"SharedText.id == foreign(IssueInstanceSharedTextAssoc.shared_text_id)\"\n ),\n )\n\n trace_frames = association_proxy(\"shared_text_trace_frame\", \"trace_frames\")\n\n shared_text_trace_frame = relationship(\n \"TraceFrameLeafAssoc\",\n primaryjoin=(\"SharedText.id == foreign(TraceFrameLeafAssoc.leaf_id)\"),\n )\n\n @classmethod\n def merge(cls, session, items):\n return cls._merge_by_keys(\n session,\n items,\n lambda item: \"%s:%s\" % (item.contents, item.kind),\n cls.contents,\n cls.kind,\n )\n\n\nclass IssueInstanceSharedTextAssoc(Base, PrepareMixin, RecordMixin): # noqa\n \"\"\"Assoc table between issue instances and its properties that are\n representable by a string. The DB table name and column names are due to\n legacy reasons and warrant some explanation:\n - 'Features' used to be the only shared text of the assoc, now, the assoc\n also accounts for 'Sources' and 'Sinks' and possibly more.\n - 'messages' table used to be only for 'messages', now, it contains\n features, sources and sinks and possibly more.\n - It is expensive to rename the DB tables, so renaming only happened in\n the model. This is why it looks like we have 3 different terms for the\n same thing: 'messages', 'shared_text', 'features'.\n\n When in doubt, trust the property and method names used in the model and\n refer to the relationship joins for how objects relate to each other.\n \"\"\"\n\n __tablename__ = \"issue_instance_feature_assoc\"\n\n issue_instance_id = Column(\n \"issue_instance_id\", BIGDBIDType, primary_key=True, nullable=False\n )\n\n shared_text_id = Column(\"feature_id\", BIGDBIDType, primary_key=True, nullable=False)\n\n issue_instance = relationship(\n \"IssueInstance\",\n primaryjoin=(\n \"IssueInstanceSharedTextAssoc.issue_instance_id ==\"\n \"foreign(IssueInstance.id)\"\n ),\n uselist=False,\n )\n\n shared_text = relationship(\n \"SharedText\",\n primaryjoin=(\n \"IssueInstanceSharedTextAssoc.shared_text_id == \" \"foreign(SharedText.id)\"\n ),\n uselist=False,\n )\n\n @classmethod\n def merge(cls, session, items):\n return cls._merge_assocs(\n session, items, cls.issue_instance_id, cls.shared_text_id\n )\n\n\nclass TraceKind(enum.Enum):\n # Do NOT reorder the enums. Depending on the type of database, existing\n # DBs may have these enums represented internally as ints based on the\n # order shown here, and changing it here messes up existing data. 
This\n # also means that new enums should be added AT THE END of the list.\n precondition = enum.auto()\n postcondition = enum.auto()\n\n @classproperty\n def PRECONDITION(cls):\n return cls.precondition\n\n @classproperty\n def POSTCONDITION(cls):\n return cls.postcondition\n\n\nclass IssueInstance(Base, PrepareMixin, MutableRecordMixin): # noqa\n \"\"\"A particularly instance of an issue found in a run\"\"\"\n\n __tablename__ = \"issue_instances\"\n\n # pyre-fixme[8]: Attribute has type `DBID`; used as `Column[typing.Any]`.\n id: DBID = Column(BIGDBIDType, primary_key=True)\n\n location = Column(\n SourceLocationType,\n nullable=False,\n doc=\"Location (possibly a range) of the issue\",\n )\n\n filename_id = Column(BIGDBIDType, nullable=False, server_default=\"0\", default=0)\n\n filename = relationship(\n \"SharedText\",\n primaryjoin=\"foreign(SharedText.id) == IssueInstance.filename_id\",\n uselist=False,\n )\n\n callable_id = Column(BIGDBIDType, nullable=False, server_default=\"0\", default=0)\n\n callable = relationship(\n \"SharedText\",\n primaryjoin=\"foreign(SharedText.id) == IssueInstance.callable_id\",\n uselist=False,\n )\n\n is_new_issue: Column[Optional[bool]] = Column(\n Boolean,\n index=True,\n default=False,\n doc=\"True if the issue did not exist before this instance\",\n )\n\n run_id = Column(BIGDBIDType, nullable=False, index=True)\n\n issue_id = Column(BIGDBIDType, nullable=False, index=True)\n\n issue = relationship(\n \"Issue\",\n primaryjoin=\"foreign(Issue.id) == IssueInstance.issue_id\",\n uselist=False,\n )\n\n fix_info_id = Column(BIGDBIDType, nullable=True)\n\n fix_info = relationship(\n \"IssueInstanceFixInfo\",\n primaryjoin=(\n \"foreign(IssueInstanceFixInfo.id) == \" \"IssueInstance.fix_info_id\"\n ),\n uselist=False,\n )\n\n message_id = Column(BIGDBIDType, nullable=True)\n\n message = relationship(\n \"SharedText\",\n primaryjoin=\"foreign(SharedText.id) == IssueInstance.message_id\",\n uselist=False,\n )\n\n trace_frames = association_proxy(\"issue_instance_trace_frame\", \"trace_frame\")\n\n issue_instance_trace_frame = relationship(\n \"IssueInstanceTraceFrameAssoc\",\n primaryjoin=(\n \"IssueInstance.id == \"\n \"foreign(IssueInstanceTraceFrameAssoc.issue_instance_id)\"\n ),\n )\n\n shared_texts = association_proxy(\"issue_instance_shared_text\", \"shared_text\")\n\n issue_instance_shared_text = relationship(\n \"IssueInstanceSharedTextAssoc\",\n primaryjoin=(\n \"IssueInstance.id == \"\n \"foreign(IssueInstanceSharedTextAssoc.issue_instance_id)\"\n ),\n )\n\n min_trace_length_to_sources: Column[Optional[int]] = Column(\n Integer, nullable=True, doc=\"The minimum trace length to sources\"\n )\n\n min_trace_length_to_sinks: Column[Optional[int]] = Column(\n Integer, nullable=True, doc=\"The minimum trace length to sinks\"\n )\n\n rank: Column[Optional[int]] = Column(\n Integer,\n server_default=\"0\",\n doc=\"The higher the rank, the higher the priority for this issue\",\n )\n\n callable_count: Column[Optional[int]] = Column(\n Integer,\n server_default=\"0\",\n doc=\"Number of issues in this callable for this run\",\n )\n\n min_trace_length_to_entrypoints: Column[Optional[int]] = Column(\n Integer, nullable=True, doc=\"The minimum trace length to entrypoints\"\n )\n\n def get_shared_texts_by_kind(self, kind: SharedTextKind):\n return [text for text in self.shared_texts if text.kind == kind]\n\n def get_trace_frames_by_kind(self, kind: TraceKind):\n return [frame for frame in self.trace_frames if frame.kind == kind]\n\n @classmethod\n def merge(cls, 
session, items):\n for i in items:\n # If the issue is new, then the instance has to be new. But note\n # that we still may need RunDiffer, because issues that disappeared\n # for a while and then came back are also marked new.\n i.is_new_issue = i.issue_id.is_new\n yield i\n\n\nclass IssueStatus(enum.Enum):\n \"\"\"Issues are born uncategorized. Humans can\n set it to FALSE_POSITIVE or VALID_BUG upon review.\"\"\"\n\n # Do NOT reorder the enums. Depending on the type of database, existing\n # DBs may have these enums represented internally as ints based on the\n # order shown here, and changing it here messes up existing data. This\n # also means that new enums should be added AT THE END of the list.\n \"\"\"An issue that hasn't been marked as a bug or FP\"\"\"\n uncategorized = enum.auto()\n\n \"\"\"Not a security bug, but a bad practice. Still needs fixing.\"\"\"\n bad_practice = enum.auto()\n\n \"\"\"False positive from analysis\"\"\"\n false_positive = enum.auto()\n\n \"\"\"Reviewed and seen to be a valid bug that needs fixing\"\"\"\n valid_bug = enum.auto()\n\n \"\"\"I don't care about this particular issue,\n but still want to see issues of this kind.\"\"\"\n do_not_care = enum.auto()\n\n @classproperty\n def UNCATEGORIZED(cls):\n return cls.uncategorized\n\n @classproperty\n def BAD_PRACTICE(cls):\n return cls.bad_practice\n\n @classproperty\n def FALSE_POSITIVE(cls):\n return cls.false_positive\n\n @classproperty\n def VALID_BUG(cls):\n return cls.valid_bug\n\n @classproperty\n def DO_NOT_CARE(cls):\n return cls.do_not_care\n\n\nclass Issue(Base, PrepareMixin, MutableRecordMixin): # noqa\n \"\"\"An issue coming from the static analysis.\n\n An issue can persist across multiple runs, even if it moves around in the\n code.\n \"\"\"\n\n __tablename__ = \"issues\"\n\n # pyre-fixme[8]: Attribute has type `IssueDBID`; used as `Column[typing.Any]`.\n id: IssueDBID = Column(IssueBIGDBIDType, primary_key=True, nullable=False)\n\n handle: Column[str] = Column(\n String(length=HANDLE_LENGTH),\n nullable=False,\n unique=True,\n doc=\"This handle should uniquely identify an issue across runs on \"\n + \"different code revisions\",\n )\n\n code: Column[int] = Column(\n Integer, doc=\"Code identifiying the issue type\", nullable=False, index=True\n )\n\n instances = relationship(\n \"IssueInstance\", primaryjoin=\"Issue.id == foreign(IssueInstance.issue_id)\"\n )\n\n first_seen: Column[datetime] = Column(\n DateTime,\n doc=\"time of the first run that found this issue\",\n nullable=False,\n index=True,\n )\n\n status: Column[str] = Column(\n Enum(IssueStatus),\n doc=\"Shows the issue status from the latest run\",\n server_default=\"uncategorized\",\n nullable=False,\n index=True,\n )\n\n task_number: Column[Optional[int]] = Column(\n Integer, doc=\"Task number (not fbid) that is tracking this issue\"\n )\n\n triage_history_fbid: Column[Optional[int]] = Column(\n BIGINT(unsigned=True),\n nullable=True,\n doc=\"FBID for EntZoncolanIssueTriageHistory\",\n )\n\n feedback_fbid: Column[Optional[int]] = Column(\n BIGINT(unsigned=True), nullable=True, doc=\"FBID for EntZoncolanFeedback\"\n )\n\n json: Column[Optional[str]] = Column(\n types.TEXT, doc=\"Raw JSON of original issue\", nullable=True\n )\n\n @classmethod\n def _take(cls, n, iterable):\n \"Return first n items of the iterable as a list\"\n return list(islice(iterable, n))\n\n @classmethod\n def merge(cls, session, issues):\n return cls._merge_by_key(session, issues, cls.handle)\n\n\nclass RunStatus(enum.Enum):\n # Do NOT reorder the enums. 
Depending on the type of database, existing\n # DBs may have these enums represented internally as ints based on the\n # order shown here, and changing it here messes up existing data. This\n # also means that new enums should be added AT THE END of the list.\n finished = enum.auto()\n incomplete = enum.auto()\n skipped = enum.auto()\n failed = enum.auto()\n\n @classproperty\n def FINISHED(cls):\n return cls.finished\n\n @classproperty\n def INCOMPLETE(cls):\n return cls.incomplete\n\n @classproperty\n def SKIPPED(cls):\n return cls.skipped\n\n @classproperty\n def FAILED(cls):\n return cls.failed\n\n\nCURRENT_DB_VERSION = 1\n\n\nclass Run(Base): # noqa\n \"\"\"A particular run of the static analyzer.\n\n Each time output is parsed from the static analyzer we generate a new run. A\n run has multiple IssueInstances.\"\"\"\n\n __tablename__ = \"runs\"\n\n id = Column(BIGDBIDType, primary_key=True)\n\n job_id: Column[Optional[str]] = Column(String(length=255), index=True)\n\n date: Column[datetime] = Column(\n DateTime, doc=\"The date/time the analysis was run\", nullable=False\n )\n\n commit_hash: Column[Optional[str]] = Column(\n String(length=255),\n doc=\"The commit hash of the codebase\",\n nullable=True,\n index=True,\n )\n\n revision_id: Column[Optional[int]] = Column(\n Integer, doc=\"Differential revision (DXXXXXX)\", nullable=True, index=True\n )\n\n differential_id: Column[Optional[int]] = Column(\n Integer,\n doc=\"Differential diff (instance of revision)\",\n nullable=True,\n index=True,\n )\n\n hh_version: Column[Optional[str]] = Column(\n String(length=255), doc=\"The output of hh_server --version\"\n )\n\n branch: Column[Optional[str]] = Column(\n String(length=255),\n doc=\"Branch the commit is based on\",\n nullable=True,\n index=True,\n )\n\n issue_instances = relationship(\n \"IssueInstance\",\n primaryjoin=\"Run.id == foreign(IssueInstance.run_id)\",\n backref=\"run\",\n )\n\n status: Column[str] = Column(\n Enum(RunStatus), server_default=\"finished\", nullable=False, index=True\n )\n\n status_description: Column[Optional[str]] = Column(\n String(length=255), doc=\"The reason why a run didn't finish\", nullable=True\n )\n\n kind: Column[Optional[str]] = Column(\n String(length=255),\n doc=(\n \"Specify different kinds of runs, e.g. MASTER vs. TEST., GKFORXXX, etc. 
\"\n \"in the same DB\"\n ),\n nullable=True,\n index=True,\n )\n\n repository: Column[Optional[str]] = Column(\n String(length=255),\n doc=(\"The repository that static analysis was run on.\"),\n nullable=True,\n )\n\n db_version: Column[int] = Column(\n Integer,\n doc=\"Tracks under which DB version this was written (for migrations)\",\n nullable=False,\n default=CURRENT_DB_VERSION,\n server_default=\"0\",\n )\n\n def get_summary(self, **kwargs) -> RunSummary:\n session = Session.object_session(self)\n\n return RunSummary(\n commit_hash=self.commit_hash,\n differential_id=self.differential_id,\n id=self.id.resolved(),\n job_id=self.job_id,\n num_new_issues=self._get_num_new_issue_instances(session),\n num_total_issues=self._get_num_total_issues(session),\n alarm_counts=self._get_alarm_counts(session),\n )\n\n def new_issue_instances(self):\n session = Session.object_session(self)\n return (\n session.query(IssueInstance)\n .filter(IssueInstance.run_id == self.id)\n .filter(IssueInstance.is_new_issue.is_(True))\n .all()\n )\n\n def _get_num_new_issue_instances(self, session):\n return (\n session.query(IssueInstance)\n .filter(IssueInstance.run_id == self.id)\n .filter(IssueInstance.is_new_issue.is_(True))\n .count()\n )\n\n def _get_num_total_issues(self, session):\n return (\n session.query(IssueInstance).filter(IssueInstance.run_id == self.id).count()\n )\n\n def _get_alarm_counts(self, session):\n return dict(\n session.query(Issue.code, func.count(Issue.code))\n .filter(IssueInstance.run_id == self.id)\n .outerjoin(IssueInstance.issue)\n .group_by(Issue.code)\n .all()\n )\n\n\nclass MetaRun(Base): # noqa\n \"\"\"An identifier that represents multiple runs which should be grouped semantically.\n\n Meta-runs and runs have a many-to-many relationship, and the purpose of a meta-run\n is to allow querying & displaying results for all related runs without having to\n browse each of them separately.\"\"\"\n\n __tablename__ = \"metaruns\"\n\n id = Column(BIGDBIDType, primary_key=True, autoincrement=False)\n\n # This is the moral equivalent of job_id, but named in a more intuitive manner.\n # Allows determining the latest meta run for each custom run separately.\n custom_run_name: Column[Optional[str]] = Column(String(length=255), nullable=True)\n\n date: Column[datetime] = Column(\n DateTime, doc=\"The date/time the meta-run was generated\", nullable=False\n )\n\n # We want to be able to filter meta-runs by completion. Towards that end, we plan on\n # using the information of number of total runs vs. the number of runs written in\n # the database.\n expected_run_count: Column[Optional[int]] = Column(Integer, nullable=True)\n\n kind: Column[Optional[str]] = Column(\n String(length=255),\n doc=(\n \"Specify different kinds of runs, e.g. MASTER vs. TEST., GKFORXXX, etc. 
\"\n \"in the same DB\"\n ),\n nullable=True,\n index=True,\n )\n\n db_version: Column[int] = Column(\n Integer,\n doc=\"Tracks under which DB version this was written (for migrations)\",\n nullable=False,\n default=CURRENT_DB_VERSION,\n )\n\n\nclass RunSummary:\n def __init__(\n self,\n commit_hash,\n differential_id,\n id,\n job_id,\n num_new_issues,\n num_total_issues,\n num_missing_preconditions: int = -1,\n num_missing_postconditions: int = -1,\n alarm_counts=None,\n ) -> None:\n self.commit_hash = commit_hash\n self.differential_id = differential_id\n self.id = id\n self.job_id = job_id\n self.num_new_issues = num_new_issues\n self.num_total_issues = num_total_issues\n self.num_missing_preconditions = num_missing_preconditions\n self.num_missing_postconditions = num_missing_postconditions\n self.alarm_counts = alarm_counts or {}\n\n def todict(self) -> Dict[str, Any]:\n return self.__dict__\n\n @classmethod\n def fromdict(cls, d):\n return cls(**d)\n\n\nclass MetaRunToRunAssoc(Base, PrepareMixin, RecordMixin): # noqa\n \"\"\"The responsibility of filling out the meta-run to run assoc is on the child jobs\n of a larger run.\n \"\"\"\n\n __tablename__ = \"metarun_run_assoc\"\n\n meta_run_id = Column(BIGDBIDType, nullable=False, primary_key=True)\n run_id = Column(BIGDBIDType, nullable=False, primary_key=True)\n meta_run = relationship(\n \"MetaRun\",\n primaryjoin=(\"MetaRunToRunAssoc.meta_run_id == foreign(MetaRun.id)\"),\n uselist=False,\n )\n run = relationship(\n \"Run\",\n primaryjoin=(\"MetaRunToRunAssoc.run_id == foreign(Run.id)\"),\n uselist=False,\n )\n\n @classmethod\n def merge(cls, session, items):\n return cls._merge_assocs(session, items, cls.meta_run_id, cls.run_id)\n\n\nclass TraceFrameLeafAssoc(Base, PrepareMixin, RecordMixin): # noqa\n\n __tablename__ = \"trace_frame_message_assoc\"\n\n trace_frame_id = Column(BIGDBIDType, nullable=False, primary_key=True)\n\n leaf_id = Column(\"message_id\", BIGDBIDType, nullable=False, primary_key=True)\n\n # The minimum trace length unfortunately can be off and actually lead to\n # loops. 
This is a known problem and any code generating traces should\n # additionally have cycle detection.\n trace_length: Column[Optional[int]] = Column(\n Integer, doc=\"minimum trace length to the given leaf\", nullable=True\n )\n\n trace_frame = relationship(\n \"TraceFrame\",\n primaryjoin=(\"TraceFrameLeafAssoc.trace_frame_id == \" \"foreign(TraceFrame.id)\"),\n uselist=False,\n )\n\n leaves = relationship(\n \"SharedText\",\n primaryjoin=\"TraceFrameLeafAssoc.leaf_id == foreign(SharedText.id)\",\n uselist=False,\n )\n\n @classmethod\n def merge(cls, session, items):\n return cls._merge_assocs(session, items, cls.trace_frame_id, cls.leaf_id)\n\n\nclass IssueInstanceFixInfo(Base, PrepareMixin, RecordMixin): # noqa\n __tablename__ = \"issue_instance_fix_info\"\n\n # pyre-fixme[8]: Attribute has type `DBID`; used as `Column[typing.Any]`.\n id: DBID = Column(BIGDBIDType, nullable=False, primary_key=True)\n\n fix_info: Column[str] = Column(\n String(length=INNODB_MAX_INDEX_LENGTH), nullable=False\n )\n\n issue_instance = relationship(\n \"IssueInstance\",\n primaryjoin=(\n \"foreign(IssueInstance.fix_info_id) == \" \"IssueInstanceFixInfo.id\"\n ),\n uselist=False,\n )\n\n\nclass TraceFrame(Base, PrepareMixin, RecordMixin): # noqa\n\n __tablename__ = \"trace_frames\"\n\n __table_args__ = (\n Index(\"ix_traceframe_run_caller_port\", \"run_id\", \"caller_id\", \"caller_port\"),\n Index(\"ix_traceframe_run_callee_port\", \"run_id\", \"callee_id\", \"callee_port\"),\n )\n\n # pyre-fixme[8]: Attribute has type `DBID`; used as `Column[typing.Any]`.\n id: DBID = Column(BIGDBIDType, nullable=False, primary_key=True)\n\n kind: Column[str] = Column(Enum(TraceKind), nullable=False, index=False)\n\n caller_id = Column(BIGDBIDType, nullable=False, server_default=\"0\", default=0)\n\n caller = relationship(\n \"SharedText\",\n primaryjoin=\"foreign(SharedText.id) == TraceFrame.caller_id\",\n uselist=False,\n )\n\n # pyre-fixme[8]: Attribute has type `str`; used as `Column[str]`.\n caller_port: str = Column(\n String(length=INNODB_MAX_INDEX_LENGTH),\n nullable=False,\n server_default=\"\",\n doc=\"The caller port of this call edge\",\n )\n\n callee_id = Column(BIGDBIDType, nullable=False, server_default=\"0\", default=0)\n\n callee = relationship(\n \"SharedText\",\n primaryjoin=\"foreign(SharedText.id) == TraceFrame.callee_id\",\n uselist=False,\n )\n\n callee_location = Column(\n SourceLocationType,\n nullable=False,\n doc=\"The location of the callee in the source code (line|start|end)\",\n )\n\n # pyre-fixme[8]: Attribute has type `str`; used as `Column[str]`.\n callee_port: str = Column(\n String(length=INNODB_MAX_INDEX_LENGTH),\n nullable=False,\n server_default=\"\",\n doc=\"The callee port of this call edge'\",\n )\n\n filename_id = Column(BIGDBIDType, nullable=False, server_default=\"0\", default=0)\n\n run_id = Column(\"run_id\", BIGDBIDType, nullable=False, index=False)\n\n type_interval_lower: Column[Optional[int]] = Column(\n Integer, nullable=True, doc=\"Class interval lower-bound (inclusive)\"\n )\n\n type_interval_upper: Column[Optional[int]] = Column(\n Integer, nullable=True, doc=\"Class interval upper-bound (inclusive)\"\n )\n\n migrated_id = Column(\n BIGDBIDType,\n nullable=True,\n doc=(\n \"ID of the corresponding pre/postcondition. Temporary column used \"\n \"for migrating existing pre/postconditions into trace frames. \"\n \"Will be removed once migration is completed. 
Use None if not \"\n \"in data migration mode.\"\n ),\n )\n\n preserves_type_context: Column[bool] = Column(\n Boolean,\n default=False,\n server_default=\"0\",\n nullable=False,\n doc=\"Whether the call preserves the calling type context\",\n )\n\n titos = Column(\n SourceLocationsType,\n doc=\"Locations of TITOs aka abductions for the trace frame\",\n nullable=False,\n server_default=\"\",\n )\n\n annotations = relationship(\n \"TraceFrameAnnotation\",\n primaryjoin=(\n \"TraceFrame.id == \" \"foreign(TraceFrameAnnotation.trace_frame_id)\"\n ),\n uselist=True,\n )\n\n leaves = association_proxy(\"leaf_assoc\", \"leaves\")\n\n leaf_assoc = relationship(\n \"TraceFrameLeafAssoc\",\n primaryjoin=(\"TraceFrame.id == \" \"foreign(TraceFrameLeafAssoc.trace_frame_id)\"),\n uselist=True,\n )\n\n issue_instances = association_proxy(\"trace_frame_issue_instance\", \"issue_instance\")\n\n trace_frame_issue_instance = relationship(\n \"IssueInstanceTraceFrameAssoc\",\n primaryjoin=(\n \"TraceFrame.id == \" \"foreign(IssueInstanceTraceFrameAssoc.trace_frame_id)\"\n ),\n )\n\n\n# Extra bits of information we can show on a TraceFrame.\n# This may be a message description, or it may be the start of another series\n# of traces leading to some other leaf. TraceFrameAnnotationTraceFrameAssoc\n# contains the first hop towards that leaf..\nclass TraceFrameAnnotation(Base, PrepareMixin, RecordMixin): # noqa\n\n __tablename__ = \"trace_frame_annotations\"\n\n # pyre-fixme[8]: Attribute has type `DBID`; used as `Column[typing.Any]`.\n id: DBID = Column(BIGDBIDType, nullable=False, primary_key=True)\n\n location = Column(\n SourceLocationType, nullable=False, doc=\"The location for the message\"\n )\n\n kind: Column[Optional[str]] = Column(String(length=255), nullable=True, index=True)\n\n # pyre-fixme[8]: Attribute has type `str`; used as `Column[str]`.\n message: str = Column(\n String(length=4096),\n doc=\"Message describing info about the trace\",\n nullable=False,\n )\n\n leaf_id = Column(BIGDBIDType, nullable=True)\n leaf = relationship(\n \"SharedText\",\n primaryjoin=\"foreign(SharedText.id) == TraceFrameAnnotation.leaf_id\",\n uselist=False,\n )\n\n # pyre-fixme[8]: Attribute has type `Optional[str]`; used as `Column[str]`.\n link: Optional[str] = Column(\n String(length=4096),\n doc=\"An optional URL linking the message to more info (Quandary)\",\n nullable=True,\n )\n\n # pyre-fixme[8]: Attribute has type `Optional[str]`; used as `Column[str]`.\n trace_key: Optional[str] = Column(\n String(length=INNODB_MAX_INDEX_LENGTH),\n nullable=True,\n doc=\"Link to possible pre/post traces (caller_condition).\",\n )\n\n # pyre-fixme[8]: Attribute has type `DBID`; used as `Column[typing.Any]`.\n trace_frame_id: DBID = Column(BIGDBIDType, nullable=False, index=True)\n trace_frame = relationship(\n \"TraceFrame\",\n primaryjoin=(\n \"TraceFrame.id == \" \"foreign(TraceFrameAnnotation.trace_frame_id)\"\n ),\n uselist=True,\n )\n\n child_trace_frames = association_proxy(\n \"trace_frame_annotation_trace_frame\", \"trace_frame\"\n )\n trace_frame_annotation_trace_frame = relationship(\n \"TraceFrameAnnotationTraceFrameAssoc\",\n primaryjoin=(\n \"TraceFrameAnnotation.id == \"\n \"foreign(TraceFrameAnnotationTraceFrameAssoc.trace_frame_annotation_id)\"\n ),\n )\n\n\n# A TraceFrameAnnotation may indicate more traces branching out from a trace\n# frame towards a different leaf/trace kind. In that case, this assoc describes\n# the first hop trace frame from the annotation. 
It is similar to\n# IssueInstanceTraceFrameAssoc, which indicates the first hop trace frame from\n# the issue instance.\nclass TraceFrameAnnotationTraceFrameAssoc(Base, PrepareMixin, RecordMixin): # noqa\n\n __tablename__ = \"trace_frame_annotation_trace_frame_assoc\"\n\n trace_frame_annotation_id = Column(\n \"trace_frame_annotation_id\", BIGDBIDType, primary_key=True, nullable=False\n )\n\n trace_frame_id = Column(\n \"trace_frame_id\", BIGDBIDType, primary_key=True, nullable=False, index=True\n )\n\n trace_frame_annotation = relationship(\n \"TraceFrameAnnotation\",\n primaryjoin=(\n \"TraceFrameAnnotationTraceFrameAssoc.trace_frame_annotation_id == \"\n \"foreign(TraceFrameAnnotation.id)\"\n ),\n uselist=False,\n )\n\n trace_frame = relationship(\n \"TraceFrame\",\n primaryjoin=(\n \"TraceFrameAnnotationTraceFrameAssoc.trace_frame_id == \"\n \"foreign(TraceFrame.id)\"\n ),\n uselist=False,\n )\n\n @classmethod\n def merge(cls, session, items):\n return cls._merge_assocs(\n session, items, cls.trace_frame_annotation_id, cls.trace_frame_id\n )\n\n\nclass WarningMessage(Base): # noqa\n __tablename__ = \"warning_messages\"\n\n code: Column[int] = Column(Integer, autoincrement=False, primary_key=True)\n\n message: Column[str] = Column(String(length=4096), nullable=False)\n\n\nclass WarningCodeCategory(enum.Enum):\n # Do NOT reorder the enums. Depending on the type of database, existing\n # DBs may have these enums represented internally as ints based on the\n # order shown here, and changing it here messes up existing data. This\n # also means that new enums should be added AT THE END of the list.\n bug = enum.auto()\n code_smell = enum.auto()\n\n @classproperty\n def BUG(cls):\n return cls.bug\n\n @classproperty\n def CODE_SMELL(cls):\n return cls.code_smell\n\n\nclass WarningCodeProperties(Base): # noqa\n \"\"\"Contains properties describing each warning code\"\"\"\n\n __tablename__ = \"warning_code_properties\"\n\n code: Column[int] = Column(\n Integer,\n autoincrement=False,\n nullable=False,\n primary_key=True,\n doc=\"Code identifying the issue type\",\n )\n\n category: Column[Optional[str]] = Column(\n Enum(WarningCodeCategory),\n nullable=True,\n index=False,\n # pyre-fixme[6]: Expected `str` for 4th param but got `Tuple[str]`.\n doc=(\n \"The category of problems that issues with this warning code \"\n \"can result in \",\n ),\n )\n\n new_issue_rate: Column[Optional[Decimal]] = Column(\n Float,\n nullable=True,\n index=False,\n doc=\"Average number of new issues per day (computed column)\",\n )\n\n bug_count: Column[Optional[int]] = Column(\n Integer,\n nullable=True,\n index=False,\n doc=\"Number of issues in this category (computed column)\",\n )\n\n avg_trace_len: Column[Optional[Decimal]] = Column(\n Float, nullable=True, index=False, doc=\"Deprecated. 
See avg_fwd/bwd_trace_len\"\n )\n\n avg_fwd_trace_len: Column[Optional[Decimal]] = Column(\n Float,\n nullable=True,\n index=False,\n # pyre-fixme[6]: Expected `str` for 4th param but got `Tuple[str]`.\n doc=(\n \"Average (min) length of forward traces for the given warning code \"\n \"(computed column)\",\n ),\n )\n\n avg_bwd_trace_len: Column[Optional[Decimal]] = Column(\n Float,\n nullable=True,\n index=False,\n # pyre-fixme[6]: Expected `str` for 4th param but got `Tuple[str]`.\n doc=(\n \"Average (min) length of backward traces for the given warning \"\n \"code (computed column)\",\n ),\n )\n\n snr: Column[Optional[Decimal]] = Column(\n Float,\n nullable=True,\n index=False,\n doc=(\n \"Signal to noise ratio based on triaged issues (computed column). \"\n \"Ratio of (valid + bad practice) to (false positive + don't care)\"\n ),\n )\n\n is_snr_significant: Column[Optional[bool]] = Column(\n Boolean,\n nullable=True,\n index=False,\n doc=(\n \"True if we are confident about the snr (computed column). \"\n \"Depends on percentage of triaged issues and number of issues.\"\n ),\n )\n\n discoverable: Column[Optional[bool]] = Column(\n Boolean,\n nullable=True,\n index=False,\n doc=\"True if an attacker can discover the issue\",\n )\n\n health_score: Column[Optional[Decimal]] = Column(\n Float,\n nullable=True,\n index=False,\n doc=(\n \"Scoring for the health of the warning code, between 0 and 1, \"\n \"based on the values in the other columns (computed column)\"\n ),\n )\n\n notes: Column[Optional[str]] = Column(\n String(length=4096),\n nullable=True,\n index=False,\n doc=\"Free form field for note-taking\",\n )\n\n\nclass PrimaryKey(Base, PrimaryKeyBase):\n pass\n\n\nclass PrimaryKeyGenerator(PrimaryKeyGeneratorBase):\n\n PRIMARY_KEY: Type = PrimaryKey\n\n QUERY_CLASSES: Set[Type] = {\n Issue,\n IssueInstance,\n IssueInstanceFixInfo,\n SharedText,\n Run,\n TraceFrame,\n TraceFrameAnnotation,\n }\n\n\ndef create(db: DB) -> None:\n try:\n Base.metadata.create_all(db.engine)\n except NoSuchTableError:\n pass\n\n\nconvert_sqlalchemy_type.register(SourceLocationType)(convert_column_to_string)\nconvert_sqlalchemy_type.register(BIGDBIDType)(convert_column_to_int_or_id)\n","sub_path":"tools/sapp/sapp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":40512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"589992250","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api, exceptions\n\nclass Poll(models.Model):\n \"\"\"Defines polls by events\"\"\"\n\n _name = 'poll.poll'\n\n name = fields.Char(\n string='Name',\n required=True\n )\n event_id = fields.Many2one(\n string='Event',\n comodel_name='poll.event',\n required=True\n )\n start_date = fields.Date(\n string='Start Date',\n readonly=True\n )\n finish_date = fields.Date(\n string='Finish Date',\n readonly=True\n )\n state = fields.Selection(\n string='State',\n default='new',\n selection=[\n ('new', 'New'),\n ('doing', 'Doing'),\n ('done', 'Done'),\n ('canceled', 'Canceled')\n ]\n )\n\n def state_doing(self):\n \"\"\"Set poll as doing \"\"\"\n self.check_doing_new()\n self.check_doing_unique()\n self.set_doing()\n self.set_doing_start_date()\n\n def set_doing(self):\n self.state = self.event_id.state = 'doing'\n\n def set_doing_start_date(self):\n self.start_date = self.event_id.start_date = fields.Datetime.now()\n\n def check_doing_new(self):\n if self.state not in ['new']:\n raise exceptions.ValidationError(\n 'Only new polls can be marked as 
doing'\n )\n\n def check_doing_unique(self):\n if self.event_id.poll_ids.filtered(lambda x: x.state in ['doing']):\n raise exceptions.ValidationError(\n 'It cannot be more than one poll doing per event'\n )\n\n def state_done(self):\n self.check_done_doing()\n self.set_done()\n self.set_done_finish_date()\n\n\n def check_done_doing(self):\n if self.state not in ['doing']:\n raise exceptions.ValidationError(\n 'Only doing polls can be marked as done'\n )\n\n def set_done(self):\n self.state = 'done'\n\n def set_done_finish_date(self):\n self.finish_date = fields.Datetime.now()\n\n def set_relatives_done(self):\n if not self.event_id.poll_ids.filtered(lambda x: x.state in ['new']):\n self.event_id.set_done()\n self.event_id.set_finish_date()\n","sub_path":"poll/models/poll_poll.py","file_name":"poll_poll.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"183155512","text":"torgo_path = \"/project_bdda3/bdda/sjhu/Data/torgo/\"\n\nfile_path = \"/project_bdda3/bdda/sjhu/Projects/AASR-TORGO/all_speakers/\"\noptions = [\"sentence\", \"word\"]\n\nfor op in options:\n op_path = file_path + op\n wav_path = op_path + \"/wav.scp\"\n with open(wav_path,\"r\") as f:\n wavs = f.readlines()\n \n #wavs = [\"F01-Session1-arrayMic-0008-trainSet-S62\"] \n \n for i in range(len(wavs)):\n wav = wavs[i]\n \n spk = wav.split(\"-\")[0]\n sess = wav.split(\"-\")[1]\n \n d_path = torgo_path + spk + \"/\" + sess + \"/wav.mapped/\" + wav.rstrip(\"\\n\") + \".wav\"\n \n wavs[i] = wavs[i].rstrip(\"\\n\") + \" \" + d_path + \"\\n\"\n \n #print(wavs[i])\n with open(wav_path,\"w\") as f:\n f.writelines(wavs)\n","sub_path":"all_speakers/scripts/make_scp.py","file_name":"make_scp.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"555561128","text":"#!/usr/bin/python\n#coding:utf8\n\nimport csv\n\n'''\ncsvモジュールで 区切り文字を変更するサンプル\nここでは, TSV(タブ区切り) を読み書きしている\n'''\n\nwith open('input.tsv','rb') as r:\n reader=csv.reader(r,delimiter='\\t') # TSVファイルを読み込む\n lines = [line for line in reader]\n\nwith open('output.tsv','wb') as w:\n writer=csv.writer(w,delimiter='\\t',lineterminator='\\n') # TSVファイルを書き込む\n for line in lines:\n writer.writerow(line)\n\n\n","sub_path":"library/stdlib/csv/change_delimiter.py","file_name":"change_delimiter.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"373717986","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('stocks', '0005_auto_20151112_0158'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='trade',\n name='shares',\n field=models.DecimalField(max_digits=50, decimal_places=10, validators=[django.core.validators.MinValueValidator(0.0)]),\n ),\n ]\n","sub_path":"stocks/migrations/0006_auto_20151112_0212.py","file_name":"0006_auto_20151112_0212.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"618094431","text":"from kivy.uix.image import Image\n\nfrom mrdriller.utils.utilities import Utilities\n\n\nclass Sprite(Image):\n\n def __init__(self, **kwargs):\n super(Sprite, self).__init__(allow_stretch=True, **kwargs)\n 
self.texture.mag_filter = 'nearest' # OpenGL\n w, h = self.texture_size\n self.size = (scale * w, scale * h)\n\n\nscale = Utilities.get_scale()\n","sub_path":"mrdriller/utils/sprite.py","file_name":"sprite.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"360016203","text":"\"\"\"ResNet in PyTorch.\nFor Pre-activation ResNet, see 'preact_resnet.py'.\nReference:\n[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Deep Residual Learning for Image Recognition. arXiv:1512.03385\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import init\n\nfrom utils.builder import get_builder\nfrom args import args\n\n# Binary activation function with gradient estimator\nimport torch\nclass F_BinAct(torch.autograd.Function):\n @staticmethod\n def forward(ctx, inp):\n # Save input for backward\n ctx.save_for_backward(inp)\n # Unscaled sign function\n return torch.sign(inp)\n\n @staticmethod\n def backward(ctx, grad_out):\n # Get input from saved ctx\n inp, = ctx.saved_tensors\n # Clone grad_out\n grad_input = grad_out.clone()\n # Gradient approximation from quadratic spline\n inp = torch.clamp(inp, min=-1.0, max=1.0)\n inp = 2*(1 - torch.abs(inp))\n # Return gradient\n return grad_input * inp\n\nclass BiRealAct(nn.Module):\n def __init__(self):\n super(BiRealAct, self).__init__()\n\n def forward(self, input):\n return F_BinAct.apply(input)\n\n\n\nclass BasicBlock_BinAct(nn.Module):\n expansion = 1\n\n def __init__(self, builder, in_planes, planes, stride=1):\n super(BasicBlock_BinAct, self).__init__()\n self.conv1 = builder.conv3x3(in_planes, planes, stride=stride)\n self.bn1 = builder.batchnorm(planes)\n self.conv2 = builder.conv3x3(planes, planes, stride=1)\n self.bn2 = builder.batchnorm(planes)\n self.relu = (lambda: BiRealAct())()\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(\n builder.conv1x1(in_planes, self.expansion * planes, stride=stride),\n builder.batchnorm(self.expansion * planes),\n )\n\n def forward(self, x):\n out = self.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = self.relu(out)\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, builder, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = builder.conv1x1(in_planes, planes)\n self.bn1 = builder.batchnorm(planes)\n self.conv2 = builder.conv3x3(planes, planes, stride=stride)\n self.bn2 = builder.batchnorm(planes)\n self.conv3 = builder.conv1x1(planes, self.expansion * planes)\n self.bn3 = builder.batchnorm(self.expansion * planes)\n self.relu = (lambda: BiRealAct())()\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(\n builder.conv1x1(in_planes, self.expansion * planes, stride=stride),\n builder.batchnorm(self.expansion * planes),\n )\n\n def forward(self, x):\n out = self.relu(self.bn1(self.conv1(x)))\n out = self.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = self.relu(out)\n\n return out\n\n\nclass ResNet_BinAct(nn.Module):\n def __init__(self, builder, block, num_blocks):\n super(ResNet_BinAct, self).__init__()\n self.in_planes = 64\n self.builder = builder\n\n self.conv1 = builder.conv3x3(3, 64, stride=1, first_layer=True)\n self.bn1 = builder.batchnorm(64)\n self.layer1 = 
self._make_layer(block, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.avgpool = nn.AdaptiveAvgPool2d(1)\n self.relu = (lambda: BiRealAct())()\n\n if args.last_layer_dense:\n self.fc = nn.Conv2d(512 * block.expansion, 10, 1)\n else:\n self.fc = builder.conv1x1(512 * block.expansion, 10)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.builder, self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = F.avg_pool2d(out, 4)\n out = self.fc(out)\n return out.flatten(1)\n\n\nclass Bottleneck2(nn.Module):\n expansion = 1\n\n def __init__(self, builder, in_planes, planes, cardinality, stride=1, base_width=64, widen_factor=1):\n super(Bottleneck2, self).__init__()\n width_ratio = planes / (widen_factor * 64.)\n D = cardinality * int(base_width * width_ratio)\n self.conv1 = builder.conv1x1(in_planes, D)\n self.bn1 = builder.batchnorm(D)\n self.conv2 = builder.group_conv3x3(D, D, groups=cardinality, stride=stride)\n self.bn2 = builder.batchnorm(D)\n self.conv3 = builder.conv1x1(D, self.expansion * planes)\n self.bn3 = builder.batchnorm(self.expansion * planes)\n self.relu = nn.ReLU()\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(\n builder.conv1x1(in_planes, self.expansion * planes, stride=stride),\n builder.batchnorm(self.expansion * planes),\n )\n\n def forward(self, x):\n out = self.relu(self.bn1(self.conv1(x)))\n out = self.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = self.relu(out)\n\n return out\n\n\nclass ResNeXtBottleneck(nn.Module):\n \"\"\"\n RexNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)\n \"\"\"\n def __init__(self, in_channels, out_channels, stride, cardinality, widen_factor):\n \"\"\" Constructor\n Args:\n in_channels: input channel dimensionality\n out_channels: output channel dimensionality\n stride: conv stride. 
Replaces pooling layer.\n cardinality: num of convolution groups.\n widen_factor: factor to reduce the input dimensionality before convolution.\n \"\"\"\n super(ResNeXtBottleneck, self).__init__()\n D = cardinality * out_channels // widen_factor\n self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn_reduce = nn.BatchNorm2d(D)\n self.conv_conv = nn.Conv2d(D, D, kernel_size=3, stride=stride, padding=1, groups=cardinality, bias=False)\n self.bn = nn.BatchNorm2d(D)\n self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn_expand = nn.BatchNorm2d(out_channels)\n\n self.shortcut = nn.Sequential()\n if in_channels != out_channels:\n self.shortcut.add_module('shortcut_conv', nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride,\n padding=0, bias=False))\n self.shortcut.add_module('shortcut_bn', nn.BatchNorm2d(out_channels))\n\n def forward(self, x):\n bottleneck = self.conv_reduce.forward(x)\n bottleneck = F.relu(self.bn_reduce.forward(bottleneck), inplace=True)\n bottleneck = self.conv_conv.forward(bottleneck)\n bottleneck = F.relu(self.bn.forward(bottleneck), inplace=True)\n bottleneck = self.conv_expand.forward(bottleneck)\n bottleneck = self.bn_expand.forward(bottleneck)\n residual = self.shortcut.forward(x)\n return F.relu(residual + bottleneck, inplace=True)\n\n\nclass WideResNeXt_BinAct(nn.Module):\n def __init__(self, builder, block, num_blocks, cardinality, base_width=64, widen_factor=1):\n super(WideResNeXt_BinAct, self).__init__()\n self.in_planes = 64\n self.builder = builder\n self.base_width = base_width\n self.widen_factor = widen_factor\n\n self.conv1 = builder.conv3x3(3, 64, stride=1, first_layer=True)\n self.bn1 = builder.batchnorm(64)\n self.layer1 = self._make_layer(block, 64, num_blocks[0], cardinality[0], stride=1)\n self.layer2 = self._make_layer(block, 64*(widen_factor+1), num_blocks[1], cardinality[1], stride=2)\n self.layer3 = self._make_layer(block, 128*(widen_factor+1), num_blocks[2], cardinality[2], stride=2)\n self.layer4 = self._make_layer(block, 256*(widen_factor+1), num_blocks[3], cardinality[3], stride=2)\n self.avgpool = nn.AdaptiveAvgPool2d(1)\n self.relu = nn.ReLU()\n\n if args.last_layer_dense:\n self.fc = nn.Conv2d(256*(widen_factor+1) * block.expansion, 10, 1)\n else:\n self.fc = builder.conv1x1(256*(widen_factor+1) * block.expansion, 10)\n\n def _make_layer(self, block, planes, num_blocks, cardinality, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n #layers = []\n layers = nn.ModuleList()\n for stride in strides:\n layers.append(block(self.builder, self.in_planes, planes, cardinality, stride, self.base_width, self.widen_factor))\n self.in_planes = planes * block.expansion\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = F.avg_pool2d(out, 4)\n out = self.fc(out)\n return out.flatten(1)\n\n\nclass CifarResNeXt(nn.Module):\n \"\"\"\n ResNext optimized for the Cifar dataset, as specified in\n https://arxiv.org/pdf/1611.05431.pdf\n \"\"\"\n def __init__(self, cardinality=8, depth=29, widen_factor=4, num_classes=10):\n \"\"\" Constructor\n Args:\n cardinality: number of convolution groups.\n depth: number of layers.\n num_classes: number of classes\n widen_factor: factor to adjust the channel dimensionality\n \"\"\"\n super(CifarResNeXt, self).__init__()\n self.cardinality = 
cardinality\n self.depth = depth\n self.block_depth = (self.depth - 2) // 9\n self.widen_factor = widen_factor\n self.num_classes = num_classes\n self.output_size = 64\n self.stages = [64, 64 * self.widen_factor, 128 * self.widen_factor, 256 * self.widen_factor]\n\n self.conv1 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)\n self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)\n self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)\n self.avgpool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Linear(self.stages[3], num_classes)\n\n for key in self.state_dict():\n if key.split('.')[-1] == 'weight':\n if 'conv' in key:\n init.kaiming_normal(self.state_dict()[key], mode='fan_out')\n if 'bn' in key:\n self.state_dict()[key][...] = 1\n elif key.split('.')[-1] == 'bias':\n self.state_dict()[key][...] = 0\n init.kaiming_normal(self.fc.weight)\n\n def block(self, name, in_channels, out_channels, pool_stride=2):\n \"\"\" Stack n bottleneck modules where n is inferred from the depth of the network.\n Args:\n name: string name of the current block.\n in_channels: number of input channels\n out_channels: number of output channels\n pool_stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.\n Returns: a Module consisting of n sequential bottlenecks.\n \"\"\"\n block = nn.Sequential()\n for bottleneck in range(self.block_depth):\n name_ = '%s_bottleneck_%d' % (name, bottleneck)\n if bottleneck == 0:\n block.add_module(name_, ResNeXtBottleneck(in_channels, out_channels, pool_stride, self.cardinality,\n self.widen_factor))\n else:\n block.add_module(name_,\n ResNeXtBottleneck(out_channels, out_channels, 1, self.cardinality, self.widen_factor))\n return block\n\n def forward(self, x):\n x = self.conv1.forward(x)\n x = F.relu(self.bn1.forward(x), inplace=True)\n x = self.stage_1.forward(x)\n x = self.stage_2.forward(x)\n x = self.stage_3.forward(x)\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\nclass WideResNet_BinAct(nn.Module):\n def __init__(self, builder, block, num_blocks, widen_factor=1):\n super(WideResNet_BinAct, self).__init__()\n self.in_planes = 64\n self.builder = builder\n\n self.conv1 = builder.conv3x3(3, 64, stride=1, first_layer=True)\n self.bn1 = builder.batchnorm(64)\n self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 64*(widen_factor+1), num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 128*(widen_factor+1), num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 256*(widen_factor+1), num_blocks[3], stride=2)\n self.avgpool = nn.AdaptiveAvgPool2d(1)\n self.relu = (lambda: BiRealAct())()\n\n if args.last_layer_dense:\n self.fc = nn.Conv2d(256*(widen_factor+1) * block.expansion, 10, 1)\n else:\n self.fc = builder.conv1x1(256*(widen_factor+1) * block.expansion, 10)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.builder, self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = F.avg_pool2d(out, 4)\n out = self.fc(out)\n return out.flatten(1)\n\n\nclass 
SmallResNet_BinAct(nn.Module):\n def __init__(self, builder, block, num_blocks):\n super(SmallResNet_BinAct, self).__init__()\n self.in_planes = 16\n self.builder = builder\n\n self.conv1 = builder.conv3x3(3, 16, stride=1, first_layer=True)\n self.bn1 = builder.batchnorm(16)\n self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)\n self.avgpool = nn.AdaptiveAvgPool2d(1)\n self.relu = (lambda: BiRealAct())()\n\n if args.last_layer_dense:\n self.fc = nn.Conv2d(64 * block.expansion, 10, 1)\n else:\n self.fc = builder.conv1x1(64 * block.expansion, 10)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.builder, self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = F.avg_pool2d(out, 4)\n out = self.fc(out)\n return out.flatten(1)\n\n\ndef resnext29_8x64d_c10():\n model = CifarResNeXt(cardinality=8, depth=29, widen_factor=4, num_classes=10)\n return model\n\ndef cResNet18_BinAct():\n return ResNet_BinAct(get_builder(), BasicBlock_BinAct, [2, 2, 2, 2])\n\ndef cWideResNet18_2_BinAct():\n return WideResNet_BinAct(get_builder(), BasicBlock_BinAct, [2, 2, 2, 2], widen_factor=2)\n\ndef cWideResNet18_3_BinAct():\n return WideResNet_BinAct(get_builder(), BasicBlock_BinAct, [2, 2, 2, 2], widen_factor=3)\n\ndef cResNet34_BinAct():\n return ResNet_BinAct(get_builder(), BasicBlock_BinAct, [3, 4, 6, 3])\n\n#ResNeXt\ndef cWideResNeXt18_2_BinAct():\n return WideResNeXt_BinAct(get_builder(), Bottleneck2, [1, 2, 6, 2], [4,8,8,16], widen_factor=2)\n\ndef cWideResNeXt18_2_BinAct_small():\n return WideResNeXt_BinAct(get_builder(), Bottleneck2, [1, 2, 6, 2], [4,4,8,8], widen_factor=2)\n\n\n#def cResNet50():\n# return ResNet(get_builder(), Bottleneck, [3, 4, 6, 3])\n#\n#\n#def cResNet101():\n# return ResNet(get_builder(), Bottleneck, [3, 4, 23, 3])\n#\n#\n#def cResNet152():\n# return ResNet(get_builder(), Bottleneck, [3, 8, 36, 3])\n\n\ndef cResNet20_BinAct():\n return SmallResNet_BinAct(get_builder(), BasicBlock_BinAct, [3, 3, 3])\n\ndef cResNet32_BinAct():\n return SmallResNet_BinAct(get_builder(), BasicBlock_BinAct, [5, 5, 5])\n\ndef cResNet44_BinAct():\n return SmallResNet_BinAct(get_builder(), BasicBlock_BinAct, [7, 7, 7])\n\ndef cResNet56_BinAct():\n return SmallResNet_BinAct(get_builder(), BasicBlock_BinAct, [9, 9, 9])\n\ndef cResNet110_BinAct():\n return SmallResNet_BinAct(get_builder(), BasicBlock_BinAct, [18, 18, 18])\n","sub_path":"models/resnet_cifar_BinAct.py","file_name":"resnet_cifar_BinAct.py","file_ext":"py","file_size_in_byte":17547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"639331292","text":"# Copyright (C) 2019 Google Inc.\n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 \n\n\"\"\"Tests for Risk model.\"\"\"\n\nimport datetime\n\nimport ddt\n\nfrom ggrc import db\nfrom ggrc.models import all_models\nfrom integration.ggrc import TestCase, Api\nfrom integration.ggrc import api_helper\nfrom integration.ggrc.generator import ObjectGenerator\nfrom integration.ggrc.models import factories\nfrom integration.ggrc.query_helper import WithQueryApi\n\n\nclass 
TestRiskGGRC(TestCase):\n \"\"\"Tests for risk model for GGRC users.\"\"\"\n\n def setUp(self):\n \"\"\"setUp, nothing else to add.\"\"\"\n super(TestRiskGGRC, self).setUp()\n self.api = api_helper.Api()\n\n def test_create(self):\n \"\"\"Test risk create with internal user.\"\"\"\n response = self.api.post(all_models.Risk, {\"title\": \"new-title\"})\n self.assert403(response)\n\n risk_count = all_models.Risk.query.filter(\n all_models.Risk.title == \"new-title\").count()\n self.assertEqual(0, risk_count)\n\n def test_update(self):\n \"\"\"Test risk update with internal user.\"\"\"\n risk = factories.RiskFactory()\n old_title = risk.title\n\n response = self.api.put(risk, {\"title\": \"new-title\"})\n self.assert403(response)\n\n risk = all_models.Risk.query.get(risk.id)\n self.assertEqual(old_title, risk.title)\n\n def test_delete(self):\n \"\"\"Test risk delete with internal user.\"\"\"\n risk = factories.RiskFactory()\n\n response = self.api.delete(risk)\n self.assert403(response)\n\n risk = all_models.Risk.query.get(risk.id)\n self.assertIsNotNone(risk.title)\n\n\n@ddt.ddt\nclass TestRiskGGRCQ(TestCase):\n \"\"\"Tests for risk model for GGRCQ users.\"\"\"\n\n def setUp(self):\n \"\"\"setUp, nothing else to add.\"\"\"\n super(TestRiskGGRCQ, self).setUp()\n self.api = api_helper.Api()\n self.api.login_as_external()\n\n @staticmethod\n def generate_risk_body():\n \"\"\"Generate JSON body for Risk.\"\"\"\n body = {\n \"id\": 10,\n \"title\": \"External risk\",\n \"risk_type\": \"External risk\",\n \"created_at\": datetime.datetime(2019, 1, 1, 12, 30),\n \"updated_at\": datetime.datetime(2019, 1, 2, 13, 30),\n \"external_id\": 10,\n \"external_slug\": \"external_slug\",\n \"review_status\": all_models.Review.STATES.UNREVIEWED,\n \"review_status_display_name\": \"some status\",\n }\n\n return body\n\n @staticmethod\n def generate_comment_body():\n \"\"\"Generate JSON body for Risk comment.\"\"\"\n body = {\n \"external_id\": 1,\n \"external_slug\": factories.random_str(),\n \"description\": \"External comment\",\n \"context\": None,\n }\n\n return body\n\n def assert_instance(self, expected, risk):\n \"\"\"Compare expected response body with actual.\"\"\"\n risk_values = {}\n expected_values = {}\n\n for field, value in expected.items():\n expected_values[field] = value\n risk_values[field] = getattr(risk, field, None)\n\n self.assertEqual(expected_values, risk_values)\n\n def test_create(self):\n \"\"\"Test risk create with external user.\"\"\"\n risk_body = self.generate_risk_body()\n\n response = self.api.post(all_models.Risk, {\n \"risk\": risk_body\n })\n\n self.assertEqual(201, response.status_code)\n\n risk = all_models.Risk.query.get(risk_body[\"id\"])\n self.assert_instance(risk_body, risk)\n\n # pylint: disable=invalid-name\n def test_create_without_review_status(self):\n \"\"\"Check risk creation without review_status\"\"\"\n risk_body = self.generate_risk_body()\n del risk_body['review_status']\n\n response = self.api.post(all_models.Risk, risk_body)\n self.assert400(response)\n\n # pylint: disable=invalid-name\n def test_create_with_empty_review_status(self):\n \"\"\"Check risk creation with empty review_status\"\"\"\n risk_body = self.generate_risk_body()\n risk_body['review_status'] = None\n\n response = self.api.post(all_models.Risk, risk_body)\n self.assert400(response)\n\n # pylint: disable=invalid-name\n def test_create_without_review_status_display_name(self):\n \"\"\"Check risk creation without review_status_display_name\"\"\"\n risk_body = self.generate_risk_body()\n 
del risk_body['review_status_display_name']\n\n response = self.api.post(all_models.Risk, risk_body)\n self.assert400(response)\n\n # pylint: disable=invalid-name\n def test_create_with_empty_review_status_display_name(self):\n \"\"\"Check risk creation with empty review_status_display_name\"\"\"\n risk_body = self.generate_risk_body()\n risk_body['review_status_display_name'] = None\n\n response = self.api.post(all_models.Risk, risk_body)\n self.assert400(response)\n\n def test_update(self):\n \"\"\"Test risk update with external user.\"\"\"\n with factories.single_commit():\n risk_id = factories.RiskFactory().id\n\n new_values = {\n \"title\": \"New risk\",\n \"created_at\": datetime.datetime(2019, 1, 3, 14, 30),\n \"updated_at\": datetime.datetime(2019, 1, 4, 14, 30),\n \"review_status\": all_models.Review.STATES.UNREVIEWED,\n \"review_status_display_name\": \"some status\",\n }\n\n risk = all_models.Risk.query.get(risk_id)\n response = self.api.put(risk, new_values)\n\n self.assertEqual(200, response.status_code)\n\n risk = all_models.Risk.query.get(risk_id)\n self.assert_instance(new_values, risk)\n\n # pylint: disable=invalid-name\n def test_update_review_status_to_null(self):\n \"\"\"Test review_status is not set to None\"\"\"\n risk = factories.RiskFactory()\n response = self.api.put(risk, {\"review_status\": None})\n self.assert400(response)\n self.assertEqual(response.json[\"message\"],\n \"Review status for the object is not specified\")\n\n risk = db.session.query(all_models.Risk).get(risk.id)\n self.assertIsNotNone(risk.external_id)\n\n # pylint: disable=invalid-name\n def test_update_review_status(self):\n \"\"\"Test review_status is updated\"\"\"\n risk = factories.RiskFactory()\n new_value = all_models.Review.STATES.REVIEWED\n self.api.put(risk, {\"review_status\": new_value,\n \"review_status_display_name\": \"some status\"})\n\n risk = db.session.query(all_models.Risk).get(risk.id)\n self.assertEquals(risk.review_status, new_value)\n\n # pylint: disable=invalid-name\n def test_update_review_status_display_name_to_null(self):\n \"\"\"Test review_status_display_name is not set to None\"\"\"\n risk = factories.RiskFactory()\n response = self.api.put(risk, {\"review_status_display_name\": None})\n self.assert400(response)\n self.assertEqual(response.json[\"message\"],\n \"Review status display for the object is not specified\")\n\n risk = db.session.query(all_models.Risk).get(risk.id)\n self.assertIsNotNone(risk.external_id)\n\n # pylint: disable=invalid-name\n def test_update_review_status_display_name(self):\n \"\"\"Test review_status_display_name is updated\"\"\"\n risk = factories.RiskFactory()\n new_value = \"test123\"\n self.api.put(risk, {\"review_status_display_name\": new_value,\n \"review_status\": all_models.Review.STATES.UNREVIEWED})\n\n risk = db.session.query(all_models.Risk).get(risk.id)\n self.assertEquals(risk.review_status_display_name, new_value)\n\n def test_create_comments(self):\n \"\"\"Test external comments creation for risk.\"\"\"\n risk_body = self.generate_risk_body()\n response = self.api.post(all_models.Risk, {\n \"risk\": risk_body,\n })\n self.assertEqual(response.status_code, 201)\n\n comment_body = self.generate_comment_body()\n response = self.api.post(all_models.ExternalComment, {\n \"external_comment\": comment_body,\n })\n\n self.assertEqual(response.status_code, 201)\n comment = db.session.query(all_models.ExternalComment.description).one()\n self.assertEqual(comment, (comment_body[\"description\"],))\n\n risk_id = 
db.session.query(all_models.Risk.id).one()[0]\n comment_id = db.session.query(all_models.ExternalComment.id).one()[0]\n\n response = self.api.post(all_models.Relationship, {\n \"relationship\": {\n \"source\": {\"id\": risk_id, \"type\": \"Risk\"},\n \"destination\": {\"id\": comment_id, \"type\": \"ExternalComment\"},\n \"context\": None,\n \"is_external\": True\n },\n })\n self.assertEqual(response.status_code, 201)\n rels = all_models.Relationship.query.filter_by(\n source_type=\"Risk\",\n source_id=risk_id,\n destination_type=\"ExternalComment\",\n destination_id=comment_id\n )\n self.assertEqual(rels.count(), 1)\n\n def test_get_risk_external_comment(self):\n \"\"\"Test query endpoint for risk ExternalComments.\"\"\"\n with factories.single_commit():\n risk = factories.RiskFactory()\n comment = factories.ExternalCommentFactory(description=\"comment\")\n factories.RelationshipFactory(source=risk, destination=comment)\n\n request_data = [{\n \"filters\": {\n \"expression\": {\n \"object_name\": \"Risk\",\n \"op\": {\n \"name\": \"relevant\"\n },\n \"ids\": [risk.id]\n },\n },\n \"object_name\":\"ExternalComment\",\n \"order_by\": [{\"name\": \"created_at\", \"desc\": \"true\"}]\n }]\n response = self.api.send_request(\n self.api.client.post,\n data=request_data,\n api_link=\"/query\"\n )\n self.assert200(response)\n response_data = response.json[0][\"ExternalComment\"]\n self.assertEqual(response_data[\"count\"], 1)\n self.assertEqual(response_data[\"values\"][0][\"description\"], \"comment\")\n\n @ddt.data(\n (\"Due Date\", \"due_date\"),\n (\"Last Owner Reviewed Date\", \"last_submitted_at\"),\n (\"Last Compliance Reviewed Date\", \"last_verified_at\"),\n )\n @ddt.unpack\n def test_search_risk_by_dates(self, field, attr):\n \"\"\"Test query endpoint for risk by dates.\"\"\"\n current_date = datetime.date.today()\n\n with factories.single_commit():\n factories.RiskFactory(**{attr: current_date})\n\n request_data = [{\n \"filters\": {\n \"expression\": {\n \"left\": {\"left\": field,\n \"op\": {\"name\": \"~\"},\n \"right\": current_date.strftime(\"%Y-%m-%d\")},\n \"op\": {\"name\": \"AND\"},\n \"right\": {\"left\": \"Status\",\n \"op\": {\"name\": \"IN\"},\n \"right\": [\"Active\", \"Draft\", \"Deprecated\"]}\n }\n },\n \"object_name\": \"Risk\",\n \"order_by\": [{\"name\": \"updated_at\", \"desc\": \"true\"}]\n }]\n response = self.api.send_request(\n self.api.client.post,\n data=request_data,\n api_link=\"/query\"\n )\n self.assert200(response)\n response_data = response.json[0][\"Risk\"]\n self.assertEqual(response_data[\"count\"], 1)\n self.assertEqual(response_data[\"values\"][0][attr],\n current_date.strftime(\"%Y-%m-%d\"))\n\n @ddt.data(\n (\"Created By\", \"created_by\"),\n (\"Last Owner Reviewed By\", \"last_submitted_by\"),\n (\"Last Compliance Reviewed By\", \"last_verified_by\"),\n )\n @ddt.unpack\n def test_search_risk_by_users(self, field, attr):\n \"\"\"Test query endpoint for risk by users.\"\"\"\n\n with factories.single_commit():\n person = factories.PersonFactory()\n factories.RiskFactory(**{attr: person})\n\n request_data = [{\n \"filters\": {\n \"expression\": {\n \"left\": {\"left\": field,\n \"op\": {\"name\": \"~\"},\n \"right\": person.email},\n \"op\": {\"name\": \"AND\"},\n \"right\": {\"left\": \"Status\",\n \"op\": {\"name\": \"IN\"},\n \"right\": [\"Active\", \"Draft\", \"Deprecated\"]}\n }\n },\n \"object_name\": \"Risk\",\n \"order_by\": [{\"name\": \"updated_at\", \"desc\": \"true\"}]\n }]\n response = self.api.send_request(\n self.api.client.post,\n 
data=request_data,\n        api_link=\"/query\"\n    )\n    self.assert200(response)\n    response_data = response.json[0][\"Risk\"]\n    self.assertEqual(response_data[\"count\"], 1)\n    self.assertEqual(response_data[\"values\"][0][attr]['email'],\n                     person.email)\n\n\nclass TestRiskQueryApi(WithQueryApi, TestCase):\n  \"\"\"Tests for query Api.\"\"\"\n\n  # pylint: disable=invalid-name\n  def setUp(self):\n    super(TestRiskQueryApi, self).setUp()\n    self.client.get(\"/login\")\n    self.api = Api()\n\n  def test_review_status_search(self):\n    \"\"\"Review status search.\n\n    The query should take data from review_status_display_name field\n    \"\"\"\n    risk_id = factories.RiskFactory(\n        review_status_display_name=\"Review Needed\"\n    ).id\n\n    risk_by_review_status = self.simple_query(\n        \"Risk\",\n        expression=[\"Review Status\", \"=\", \"Review Needed\"]\n    )\n    self.assertEquals(1, len(risk_by_review_status))\n    self.assertEquals(risk_id, risk_by_review_status[0][\"id\"])\n\n\nclass TestRiskSnapshotting(TestCase):\n  \"\"\"Risk snapshot tests\"\"\"\n\n  def setUp(self):\n    \"\"\"setUp, nothing else to add.\"\"\"\n    super(TestRiskSnapshotting, self).setUp()\n    self.api = api_helper.Api()\n    self.api.login_as_external()\n    self.objgen = ObjectGenerator()\n\n  def test_update_risk_snapshot(self):\n    \"\"\"Update risk snapshot to the latest version\"\"\"\n    with factories.single_commit():\n      program = factories.ProgramFactory(title=\"P1\")\n      risk = factories.RiskFactory(title=\"R1\")\n      risk_id = risk.id\n      factories.RelationshipFactory(source=program, destination=risk)\n\n    # Risk snapshot created for audit during mapping audit to program\n    self.objgen.generate_object(all_models.Audit, {\n        \"title\": \"A1\",\n        \"program\": {\"id\": program.id},\n        \"status\": \"Planned\",\n        \"snapshots\": {\n            \"operation\": \"create\",\n        }\n    })\n\n    # Update risk to get outdated snapshot (new risk revision)\n    risk = all_models.Risk.query.get(risk_id)\n    self.api.put(risk, {\n        \"title\": \"New risk title\"\n    })\n\n    audit = all_models.Audit.query.filter_by(title=\"A1\").one()\n    snapshot = all_models.Snapshot.query.first()\n    self.assertEquals(audit, snapshot.parent)\n\n    # Update snapshot to the latest revision\n    res = self.api.put(snapshot, {\n        \"update_revision\": \"latest\"\n    })\n\n    self.assert200(res)\n    self.assertTrue(res.json[\"snapshot\"][\"is_latest_revision\"])\n","sub_path":"test/integration/ggrc/models/test_risk.py","file_name":"test_risk.py","file_ext":"py","file_size_in_byte":14198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"348365208","text":"#@num 017\n#@link https://leetcode.com/problems/letter-combinations-of-a-phone-number/\n#@title Letter Combinations of a Phone Number\n\nfrom itertools import product\n\nclass Solution(object):\n\n\n    def letterCombinations(self, digits):\n        \"\"\"\n        :type digits: str\n        :rtype: List[str]\n        \"\"\"\n        if len(digits) == 0:\n            return []\n        map = {'1' : '*',\n               '2' : 'abc',\n               '3' : 'def',\n               '4' : 'ghi',\n               '5' : 'jkl',\n               '6' : 'mno',\n               '7' : 'pqrs',\n               '8' : 'tuv',\n               '9' : 'wxyz',\n               '0' : '*'}\n        #print digits\n        digit_list = [map[c] for c in digits]\n        #print digit_list\n        return [''.join(tup) for tup in product(*digit_list)]\n","sub_path":"leetcode/017_comb_phone_num.py","file_name":"017_comb_phone_num.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"634289209","text":"import os\nimport time\nimport load_data\nimport torch\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport numpy as np\nfrom models.LSTM import LSTMClassifier\nfrom models.CNN import CNN\nimport cca_core\nfrom copy import deepcopy\n\nTEXT, vocab_size, word_embeddings, train_iter, valid_iter, test_iter = load_data.load_dataset()\n\ndef clip_gradient(model, clip_value):\n    params = list(filter(lambda p: p.grad is not None, model.parameters()))\n    for p in params:\n        p.grad.data.clamp_(-clip_value, clip_value)\n\nuse_cuda = torch.cuda.is_available()\ntorch.manual_seed(1)\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\npositive_dictionary = []\nwith open('positive.txt','r') as f:\n    for line in f:\n        positive_dictionary.append(line.strip().lower())\nnegative_dictionary = []\nwith open('negative.txt','r') as f:\n    for line in f:\n        negative_dictionary.append(line.strip().lower())\n\ndef train_model(model, optim, train_iter, epoch):\n    total_epoch_loss = 0\n    total_epoch_acc = 0\n    model.to(device)\n    model.train() \n    steps = 0 \n    for idx, batch in enumerate(train_iter):\n        text = batch.text[0]\n        target = batch.label\n        #target = torch.autograd.Variable(target).long() \n        text = text.to(device)\n        target = target.to(device)\n        if (text.size()[0] != 32):# One of the batch returned by BucketIterator has length different than 32.\n            continue\n        optim.zero_grad()\n        prediction, _ = model(text)\n        loss = loss_fn(prediction, target)\n        num_corrects = (torch.max(prediction, 1)[1].view(target.size()).data == target.data).float().sum()\n        acc = 100.0 * num_corrects/len(batch)\n        loss.backward()\n        clip_gradient(model, 1e-1)\n        optim.step()\n        steps += 1\n        \n        if steps % 10000 == 0:\n            print(\"Epoch: \", epoch+1)\n            print(\"Idx: \", idx+1)\n            print(\"Training Loss: \", loss.item())\n            print(\"Training Accuracy: \", acc.item())\n        \n        total_epoch_loss += loss.item()\n        total_epoch_acc += acc.item()\n        \n    return total_epoch_loss/len(train_iter), total_epoch_acc/len(train_iter)\n\ndef eval_model(model, val_iter):\n    total_epoch_loss = 0\n    total_epoch_acc = 0\n    model.to(device)\n    model.eval()\n    with torch.no_grad():\n        for idx, batch in enumerate(val_iter):\n            text = batch.text[0]\n            if (text.size()[0] != 32):\n                continue\n            target = batch.label\n            #target = torch.autograd.Variable(target).long()\n            text = text.to(device)\n            target = target.to(device) \n            prediction, all_out = model(text)\n            loss = loss_fn(prediction, target)\n            num_corrects = (torch.max(prediction, 1)[1].view(target.size()).data == target.data).sum()\n            acc = 100.0 * num_corrects/len(batch)\n            total_epoch_loss += loss.item()\n            total_epoch_acc += acc.item()\n\n    return total_epoch_loss/len(val_iter), total_epoch_acc/len(val_iter)\n\ndef svcca_two_models(model1, model2, val_iter):\n    model1.eval()\n    model2.eval()\n    with torch.no_grad():\n        for idx, batch in enumerate(val_iter):\n            text = batch.text[0]\n            target = batch.label\n            if idx == 0:\n                all_text = text\n                all_target = target\n            else:\n                all_text = torch.cat([all_text,text], 0)\n                all_target = torch.cat([all_target,target], 0)\n        all_text = all_text.to(device)\n        all_target = all_target.to(device) \n        prediction, model1_allout = model1(all_text)\n        prediction, model2_allout = model2(all_text)\n        results = cca_core.get_cca_similarity(model2_allout.t().to(\"cpu\").numpy(), model1_allout.t().to(\"cpu\").numpy(), verbose=True)\n        print(np.mean(results['cca_coef1']))\n\ndef svcca_no_sentiment(model1, val_iter):\n    model1.eval() \n    with torch.no_grad():\n        for idx, batch in enumerate(val_iter):\n            text = batch.text[0]\n            target = batch.label\n            if idx == 0:\n                all_text = text\n                all_target = target\n            else:\n                all_text = torch.cat([all_text,text], 0)\n                all_target = torch.cat([all_target,target], 0)\n        all_text = all_text.to(device)\n        all_target = all_target.to(device) \n        prediction, model1_allout = model1(all_text) \n\n        all_text_no_sentiment = deepcopy(all_text)\n        for example_ind, ex in enumerate(all_text):\n            for word_ind, word in enumerate(ex):\n                if (TEXT.vocab.itos[word] in positive_dictionary):\n                    all_text_no_sentiment[example_ind][word_ind] = 1\n                if (TEXT.vocab.itos[word] in negative_dictionary):\n                    all_text_no_sentiment[example_ind][word_ind] = 1\n\n        prediction, model1_allout_no_sentiment = model1(all_text_no_sentiment) \n        # print(model1_allout.shape)\n        # print(model1_allout_no_sentiment.shape)\n        results = cca_core.get_cca_similarity(model1_allout.t().to(\"cpu\").numpy(), model1_allout_no_sentiment.t().to(\"cpu\").numpy(), verbose=True) \n        print(np.mean(results['cca_coef1']))\n\nlearning_rate = 2e-5\nbatch_size = 32\noutput_size = 5\nhidden_size = 256\nembedding_length = 300\n\n# CNN hyperparameters\n# in_channels = 1\n# out_channels = 200\n# kernel_heights = (3,4,5)\n# stride = 1\n# padding = 0\n# keep_probab = 0.3\n\n# model = CNN(batch_size, output_size, in_channels, out_channels, kernel_heights, stride, padding, keep_probab, vocab_size, embedding_length, word_embeddings) \nmodel = LSTMClassifier(batch_size, output_size, hidden_size, vocab_size, embedding_length, word_embeddings)\n\n\nloss_fn = F.cross_entropy\n# optim = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr = 0.001, momentum=0.9)\noptim = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()))\n\nepochs = 30\nfor epoch in range(epochs):\n    val_loss, val_acc = eval_model(model, valid_iter)\n    print(\"Val loss\", val_loss)\n    print(\"Val acc\", val_acc)\n    train_loss, train_acc = train_model(model, optim, train_iter, epoch)\n    #print(f'Epoch: {epoch+1:02}, Train Loss: {train_loss:.3f}, Train Acc: {train_acc:.2f}%, Val. Loss: {val_loss:3f}, Val. Acc: {val_acc:.2f}%')\n\nsvcca_no_sentiment(model, valid_iter) \n#test_loss, test_acc = eval_model(model, test_iter)\n#print(f'Test Loss: {test_loss:.3f}, Test Acc: {test_acc:.2f}%')\n\n# ''' Let us now predict the sentiment on a single sentence just for the testing purpose. '''\n# test_sen1 = \"This is one of the best creation of Nolan. I can say, it's his magnum opus. Loved the soundtrack and especially those creative dialogues.\"\n# test_sen2 = \"Ohh, such a ridiculous movie. Not gonna recommend it to anyone. Complete waste of time and money.\"\n\n# test_sen1 = TEXT.preprocess(test_sen1)\n# test_sen1 = [[TEXT.vocab.stoi[x] for x in test_sen1]]\n\n# test_sen2 = TEXT.preprocess(test_sen2)\n# test_sen2 = [[TEXT.vocab.stoi[x] for x in test_sen2]]\n\n# test_sen = np.asarray(test_sen1)\n# test_sen = torch.LongTensor(test_sen)\n# test_tensor = Variable(test_sen, volatile=True)\n# test_tensor = test_tensor.cuda()\n# model.eval()\n# output = model(test_tensor, 1)\n# out = F.softmax(output, 1)\n# if (torch.argmax(out[0]) == 1):\n#     print (\"Sentiment: Positive\")\n# else:\n#     print (\"Sentiment: Negative\")\n\n","sub_path":"Text-Classification-Pytorch/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"504705679","text":"from currency_converter import CurrencyConverter\nimport pandas as pd\nimport numpy as np\nimport os, sys, inspect\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(currentdir)\nsys.path.insert(0,parentdir) \n\nfrom db_controller import Db_Controller\nfrom indicators import atr\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\npd.options.mode.chained_assignment = None\nnp.seterr(divide='ignore', invalid='ignore')\n\nif __name__=='__main__':\n    from general_functions import pip_value_cal, leverage_cal\nelse:\n    from risk_management.general_functions import pip_value_cal, leverage_cal\n\n\nrisk_management_name='Equity and ATR based risk management'\n\nrisk_management_description=\"\"\"\n\n\n\nEquity and ATR based risk management calculates stop loss, limit and position size based on current value of ATR indicator and\nthe specified ATR multiply\n\nThe conditions are as follows:\n\nStop loss is calculated by subtracting or adding (depending on position type 'buy or sell') current ATR multiplied by the specified multiply\nto current price. 
Using ATR enables dynamic risk management.\n\nLimit is calculated by subtracting or adding (depending on position type 'buy or sell') currenct ATR multiplied specified multiply\nto current price.\n\nPosition size is calculated based on this formula:\n\nPosition size = ((equity x risk per trade) / calculated stop loss value based on pip)/ pip value per standard lot\nLot Number of unit\nStandard\t100,000\nMini\t 10,000\nMicro\t 1,000\nNano\t 100\n\n\n\n\n\n\"\"\"\n\ninputs_name_dict={\n 'ATR period':['atr_period', 200],\n 'Stop loss ATR multiply':['stop_loss_atr_multiply', 3],\n 'Limit ATR multiply':['limit_atr_multiply', 30],\n 'Risk percent':['risk_percent', 1]\n }\n\n\nclass equity_atr_based_risk_management:\n def __init__(self, account_currency, account_id, symbol, timeframe, atr_period, stop_loss_atr_multiply, limit_atr_multiply, risk_percent):\n self.account_currency=account_currency\n self.account_id=account_id\n self.symbol=symbol\n self.timeframe=timeframe\n self.atr_period=int(atr_period)\n self.stop_loss_atr_multiply=stop_loss_atr_multiply\n self.limit_atr_multiply=limit_atr_multiply\n self.risk_percent=risk_percent\n self.db=Db_Controller()\n\n\n def get_account_info(self):\n fxcm_info=self.db.query_table('Fxcm_Info', ('*',), fields=(\"accountId\",), values=(self.account_id,))\n return fxcm_info\n\n def stop_loss_limit(self, price, last_atr, position_type):\n try:\n '''\n stop loss is placed stop_loss_atr_multiply time of atr\n '''\n\n if position_type=='buy':\n stop_loss_pip=last_atr*self.stop_loss_atr_multiply\n stop_loss=price-stop_loss_pip\n limit_pip=last_atr*self.limit_atr_multiply\n limit=price+limit_pip\n else:\n stop_loss_pip=last_atr*self.stop_loss_atr_multiply\n stop_loss=price+stop_loss_pip\n limit_pip=last_atr*self.limit_atr_multiply\n limit=price-limit_pip\n if self.symbol[3:]=='JPY':\n stop_loss_pip=stop_loss_pip*100\n limit_pip=limit_pip*100\n else:\n stop_loss_pip=stop_loss_pip*10000\n limit_pip=limit_pip*10000\n\n\n return stop_loss, limit, stop_loss_pip, limit_pip\n except Exception as e:\n print(e, 'stop_loss_limit')\n\n def position_size_stop_loss(self, position_type):\n try:\n ''' Lot Number of unit\n Standard\t100,000\n Mini\t 10,000\n Micro\t 1,000\n Nano\t 100\n Position size = ((account value x risk per trade) / pips risked)/ pip value per standard lot\n Margin Requirement = Current Price × Units Traded × Margin\n '''\n print(11)\n data=self.db.query_price_data(self.symbol, self.timeframe, self.atr_period*2)\n data['atr']=atr(list(data.bidclose), self.atr_period)\n last_atr=data.atr.iloc[-1]\n price=data.bidclose.iloc[-1]\n print(22)\n fxcm_info=self.get_account_info()[0]\n print(33)\n equity=fxcm_info[4]\n print(equity)\n print(44)\n stop_loss, limit, stop_loss_pip, limit_pip=self.stop_loss_limit(price, last_atr, position_type)\n print(5)\n leverage=leverage_cal(self.symbol, equity)\n standard_lot_pip_value=pip_value_cal(self.symbol, self.account_currency, price, 100000)\n position_size=int(((((equity*self.risk_percent/100)/stop_loss_pip)/standard_lot_pip_value)*100)*1000)\n required_margin=int(price*position_size/leverage)\n c = CurrencyConverter()\n required_margin=int(c.convert(required_margin, self.symbol[:3], self.account_currency))\n if self.symbol[3:]=='JPY':\n required_margin=required_margin/100\n\n\n print(666666)\n print(position_size/1000, required_margin, stop_loss, limit, stop_loss_pip, limit_pip)\n print(666666)\n return position_size/1000, required_margin, stop_loss, limit, stop_loss_pip, limit_pip\n except Exception as e:\n print(e, 
'position_size_stop_loss')\n\n def backtest(self, position_type, data, equity):\n try:\n ''' Lot Number of unit\n Standard\t100,000\n Mini\t 10,000\n Micro\t 1,000\n Nano\t 100\n Position size = ((account value x risk per trade) / pips risked)/ pip value per standard lot\n Margin Requirement = Current Price × Units Traded × Margin\n '''\n data['atr']=atr(list(data.bidclose), self.atr_period)\n last_atr=data.atr.iloc[-1]\n price=data.bidclose.iloc[-1]\n \n stop_loss, limit, stop_loss_pip, limit_pip=self.stop_loss_limit(price, last_atr, position_type)\n leverage=leverage_cal(self.symbol, equity)\n standard_lot_pip_value=pip_value_cal(self.symbol, self.account_currency, price, 100000)\n position_size=int(((((equity*self.risk_percent/100)/stop_loss_pip)/standard_lot_pip_value)*100)*1000)\n required_margin=int(price*position_size/leverage)\n c = CurrencyConverter()\n required_margin=int(c.convert(required_margin, self.symbol[:3], self.account_currency))\n pip_value=pip_value_cal(self.symbol, self.account_currency, price, position_size)\n if self.symbol[3:]=='JPY':\n required_margin=required_margin/100\n\n return position_size, required_margin, stop_loss, limit, stop_loss_pip, limit_pip, pip_value\n except Exception as e:\n print(e, 'backtest')\n\n\nif __name__==\"__main__\":\n #rk=equity_atr_based_risk_management('EURUSD', 'm5', 200, 3, 10, 2)\n #print(rk.position_size_stop_loss('buy'))\n pass\n","sub_path":"risk_management/equity_atr_based_risk_management.py","file_name":"equity_atr_based_risk_management.py","file_ext":"py","file_size_in_byte":7128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"585891292","text":"from django.shortcuts import get_object_or_404, render, redirect\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.db.models import Q\nimport time, datetime\nimport csv, io\nimport re\n\nfrom directorio.models import Profesor, PAC, Valoracion, Organizacion\nfrom directorio.forms import ProfesorForm, ModProfForm, ProfCsvForm, ProgFilterForm, OrgFilterForm\n\n###-------------------Gestión de profesores----------------------------###\n# Lista de profesores\n@login_required\ndef profesores(request):\n\tfiltr = request.GET.get('filtr')\n\tif filtr is None:\n\t\tfiltr = 0\n\telse:\n\t\tfiltr = re.sub(\"[^0-9]\", \"\", filtr)\t\t# Eliminar todo carácter no dígito\n\t\tif filtr == '':\t\t\t\t\t\t\t# Por si queda vacío tras el re.sub\n\t\t\tfiltr = 0\n\n\tif request.method == 'POST':\n\t\tprof_filtr_form = OrgFilterForm(filtr, request.POST)\n\t\tfiltr = request.POST['organizaciones']\n\telse:\t# GET\n\t\tprof_filtr_form = OrgFilterForm(filtr)\n\n\tif int(filtr) < 1:\n\t\tlista_profesores = Profesor.objects.filter(archivado=False).order_by('apellidos')\n\telse:\n\t\tlista_profesores = Profesor.objects.filter(organizacion__id = filtr).filter(archivado=False).order_by('apellidos')\n\t\n\tpaginator = Paginator(lista_profesores, 10)\n\tpage = request.GET.get('page')\n\ttry:\n\t\tprofesores = paginator.page(page)\n\texcept PageNotAnInteger:\t\t# If page is not an integer, deliver first page.\n\t\tprofesores = paginator.page(1)\n\texcept EmptyPage:\t\t\t\t# If page is out of range (e.g. 
9999), deliver last page of results.\n\t\tprofesores = paginator.page(paginator.num_pages)\n\t\n\tcontext = {\n\t\t'profesores': profesores,\n\t\t'prof_filtr_form': prof_filtr_form,\n\t\t'filtr': filtr\n\t}\n\n\treturn render(request, 'directorio/profesores.html', context)\n\n\n# Lista de profesores archivados\n@login_required\ndef profesores_archivados(request):\n\tlista_profesores = Profesor.objects.filter(archivado=True).order_by('apellidos')\n\tpaginator = Paginator(lista_profesores, 10)\n\n\tpage = request.GET.get('page')\n\ttry:\n\t\tprofesores = paginator.page(page)\n\texcept PageNotAnInteger:\t\t# If page is not an integer, deliver first page.\n\t\tprofesores = paginator.page(1)\n\texcept EmptyPage:\t\t\t\t# If page is out of range (e.g. 9999), deliver last page of results.\n\t\tprofesores = paginator.page(paginator.num_pages)\n\t\n\tcontext = {\n\t\t'profesores': profesores,\n\t}\n\n\treturn render(request, 'directorio/profesores_archivados.html', context)\n\n\n# Informacion de un profesor\n@login_required\ndef profesor(request, id_profesor):\n\tprofesor = get_object_or_404(Profesor, pk=id_profesor)\n\tpac_convs = PAC.objects.filter(profesores = id_profesor).distinct('convocatorias__anio', 'convocatorias__mes').order_by('-convocatorias__anio', '-convocatorias__mes')\n\tpac_asigs = PAC.objects.filter(profesores = id_profesor).order_by('asignaturas__nombre')\n\t\n\tcontext = {\n\t\t'profesor': profesor,\n\t\t'pac_convs': pac_convs,\n\t\t'pac_asigs': pac_asigs\n\t}\n\n\treturn render(request, 'directorio/profesor.html', context)\n\n\t\n\n# Página con formulario para crear nuevo profesor\n@login_required\n@permission_required('auth.es_gestor', login_url=\"directorio:index\")\ndef nuevo_profesor(request):\n \n if request.method == 'GET':\n pform = ProfesorForm()\n\n return render(request, 'directorio/nuevo_prof.html', { 'pform': pform })\n \n else: # POST\n pform = ProfesorForm(request.POST, request.FILES)\n\n if pform.is_valid():\n nuevo_profesor = pform.save()\n\n return redirect('directorio:profesor', nuevo_profesor.id)\n \n else: # Formulario no válido\n return render(request, 'directorio/nuevo_prof.html', { 'pform': pform })\n\n\t\n# Modificar un profesor\n@login_required\n@permission_required('auth.es_gestor', login_url=\"directorio:index\")\ndef mod_profesor(request, id_profesor):\n\tprofesor = get_object_or_404(Profesor, pk=id_profesor)\n\t\n\tif request.method == 'GET':\n\t\tmodform = ModProfForm(profesor)\n\t\tcontext = { \n\t\t\t'profesor': profesor,\n\t\t\t'modform': modform\n\t\t}\n\n\t\treturn render(request, 'directorio/mod_prof.html', context)\n\telse:\t# POST\n\t\tmodform = ModProfForm(profesor, request.POST, request.FILES, instance = profesor)\t# Primer objeto para inicializar, el ultimo para actualizarlo (no crear)\n\t\t\n\t\tif modform.is_valid():\n\t\t\tmod_prof = modform.save()\n\n\t\t\treturn redirect('directorio:profesor', profesor.id)\n\t\t\t\n\t\telse:\t# Formulario no válido\n\t\t\tcontext = { \n\t\t\t\t'profesor': profesor,\n\t\t\t\t'modform': modform\n\t\t\t}\n\t\t\t\n\t\t\treturn render(request, 'directorio/mod_prof.html', context)\n\t\t\t\n\t\t\t\n# Crear profesores masivamente con archivo .csv\n@login_required\n@permission_required('auth.es_gestor', login_url=\"directorio:index\")\ndef profesor_csv(request):\n\n\tif request.method == 'POST':\n\t\tcsvform = ProfCsvForm(request.POST, request.FILES)\n\t\t\n\t\tif csvform.is_valid():\n\t\t\tf = io.StringIO(request.FILES['csv_file'].read().decode('latin-1'))\n\t\t\treader = csv.reader(f, 
delimiter=';')\n\t\t\tnext(reader, None) \t# Saltar primera fila (cabeceras)\n\n\t\t\tfor row in reader:\n\t\t\t\tif row[0] != \"\":\t\t# Si la fila tiene nombre\n\t\t\t\t\tnombre = apellidos = email = user = contr = ''\n\n\t\t\t\t\t# Nombre (en la columna número 0)\n\t\t\t\t\tnom_y_ape = row[0].split(' ')\t\t\t\t\t# Separar por palabras\n\t\t\t\t\t\n\t\t\t\t\tif len(nom_y_ape) == 1:\t\t\t\t\t\t\t\t# Una palabra: Nombre\n\t\t\t\t\t\tnombre = nom_y_ape[0]\n\t\t\t\t\telif len(nom_y_ape) == 2:\t\t\t\t\t\t\t# Dos palabras: Nombre y apellido\n\t\t\t\t\t\tnombre = nom_y_ape[0]\n\t\t\t\t\t\tapellidos = nom_y_ape[1]\n\t\t\t\t\tif len(nom_y_ape) == 3:\t\t\t\t\t\t\t\t# Tres palabras: Nombre y 2 apellidos\n\t\t\t\t\t\tnombre = nom_y_ape[0]\n\t\t\t\t\t\tapellidos = nom_y_ape[1] + \" \" + nom_y_ape[2]\n\t\t\t\t\tif len(nom_y_ape) == 4:\t\t\t\t\t\t\t\t# Cuatro palabras: 2 nombres y 2 apellidos\n\t\t\t\t\t\tnombre = nom_y_ape[0] + \" \" + nom_y_ape[1]\n\t\t\t\t\t\tapellidos = nom_y_ape[2] + \" \" + nom_y_ape[3]\n\t\t\t\t\tif len(nom_y_ape) > 4:\t\t\t\t\t\t\t\t# Más de cuatro: 2 y el resto apellidos\n\t\t\t\t\t\tnombre = nom_y_ape[0] + \" \" + nom_y_ape[1]\n\t\t\t\t\t\tapellidos = re.sub(\"(\\w+) (\\w+) \", \"\", row[0], count=1)\t\t# Eliminar dos primeras palabras\n\t\t\t\t\t\n\t\t\t\t\t# Email (columna 12)\n\t\t\t\t\tif row[12] != '':\n\t\t\t\t\t\temail = row[12].split('/')[0]\t\t# Coger solo el primero\n\t\t\t\t\t\temail = re.sub(r\"\\s+\", \"\", email, flags=re.UNICODE)\n\t\t\t\t\telse:\t\t\t\t\t# Hay campos email vacíos -> email de ángel\n\t\t\t\t\t\temail = 'academico.aa@iep.edu.es'\n\t\t\t\t\t\n\t\t\t\t\t# Telefono (columna 14)\n\t\t\t\t\ttelf = row[14]\n\t\t\t\t\t\n\t\t\t\t\t# Nombre de usuario Telecor / Moodle (columna 17)\n\t\t\t\t\tif row[17] != '':\n\t\t\t\t\t\tu = row[17].split('/')\n\t\t\t\t\t\t\n\t\t\t\t\t\tif len(u) > 1:\n\t\t\t\t\t\t\tuser = u[1]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tuser = row[17]\n\t\t\t\t\t\n\t\t\t\t\t\tuser = re.sub(r\"\\s+\", \"\", user, flags=re.UNICODE)\n\t\t\t\t\t\t\n\t\t\t\t\t\t# pass Telecor / Moodle (columna 18)\n\t\t\t\t\t\tc = row[18].split('/')\n\t\t\t\t\t\t\n\t\t\t\t\t\tif len(c) > 1:\n\t\t\t\t\t\t\tcontr = c[1]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcontr = row[18]\n\t\t\t\t\t\n\t\t\t\t\t\tcontr = re.sub(r\"\\s+\", \"\", contr, flags=re.UNICODE)\n\t\t\t\t\t\n\t\t\t\t\t# Observaciones (columna 19)\n\t\t\t\t\tobs = row[19]\n\t\t\t\t\t\n\t\t\t\t\tif (nombre != '' and apellidos != '' and email != ''):\n\t\t\t\t\t\to = request.POST['organizaciones']\n\t\t\t\t\t\torg = next(iter(Organizacion.objects.filter(id = o)), None)\t\t# get_or_none\n\t\t\t\t\t\n\t\t\t\t\t\tupdated_values = {'telefono': telf, 'usuario': user, 'passw': contr, 'comentarios': obs, 'organizacion': org}\n\t\t\t\t\t\tProfesor.objects.update_or_create(nombre=nombre, apellidos=apellidos, email=email, defaults=updated_values)\n\n\t\t\treturn redirect('directorio:profesores')\n\t\t\n\t\telse:\t# Formulario no válido\n\t\t\tcontext = { 'csvform': csvform }\n\t\t\t\n\t\t\treturn render(request, 'directorio/prof_csv.html', context)\n\telse:\t# GET\n\t\tcsvform = ProfCsvForm()\n\t\tcontext = { 'csvform': csvform }\n\n\t\treturn render(request, 'directorio/prof_csv.html', context)\n\n\n# Valoracion del profesor\n@login_required\n@permission_required('auth.es_gestor', login_url=\"directorio:index\")\ndef valoracion_prof(request, id_profesor):\n\tprofesor = get_object_or_404(Profesor, pk=id_profesor)\n\tpacs_vals = []\n\tmedias_arit = {}\n\txdata = []\n\tasigs = []\n\targs = Q()\t\t\t\t# Argumentos para el queryset 
(para filtrar sin saber el numero de secciones)\n\tsecciones = [\"TUTOR\", \"CONOCIMIENTODELTUTOR\"]\t\t\t# Unicas secciones posibles en esta vista\n\tfiltro = \"- Todos -\" \n\t\n\tif request.method == 'POST':\n\t\tfiltr_form = ProgFilterForm(filtro, request.POST)\n\t\tfiltro = request.POST['programa']\n\telse:\t# GET\n\t\tfiltr_form = ProgFilterForm(filtro)\n\t\n\tif filtro == \"- Todos -\" or filtro == '':\n\t\tpacs = PAC.objects.filter(profesores = id_profesor).order_by('convocatorias__anio', 'convocatorias__mes', 'asignaturas__nombre')\n\telse:\n\t\tpacs = PAC.objects.filter(profesores = id_profesor).filter(programas__nombre = filtro).order_by('convocatorias__anio', 'convocatorias__mes', 'asignaturas__nombre')\n\t\n\n\tfor secc in secciones:\n\t\targs = args | Q(seccion = secc)\n\t\n\tfor seccion in secciones:\t\t# Inicializar medias\n\t\tmedias_arit[seccion] = 0\n\t\n\tfor pac in pacs:\n\t\tval = Valoracion.objects.filter(pac = pac.id).filter(*(args,)).order_by(\"seccion\")\n\t\t\n\t\tif val:\n\t\t\tpacs_vals.append([pac, val])\n\t\t\td = int(time.mktime(datetime.date(pac.convocatorias.anio, pac.convocatorias.mes+1, 1).timetuple())) * 1000\n\t\t\txdata.append(d)\n\t\t\t\n\t\t\tif pac.asignaturas.nombre not in asigs:\n\t\t\t\tasigs.append(pac.asignaturas.nombre)\n\t\t\t\n\t\t\tfor v in val:\n\t\t\t\tmedias_arit[v.seccion] += v.media\t\t\t\t\t# Sumar todos los valores para las medias\n\t\n\tif pacs_vals:\n\t\tfor key, value in medias_arit.items():\t\t\t\t\t\t\t\t# Terminar de calcular medias y formatearlas\n\t\t\tmedias_arit[key] = value / len(pacs_vals)\n\t\t\tmedias_arit[key] = float(format(medias_arit[key], '.2f'))\n\t\n\textra_serie = {}\n\tchartdata = {\n\t\t'x': xdata,\n\t}\n\tcharttype = \"lineChart\"\n\tchartcontainer = 'linechart_container'\n\ty_number = 1\n\t\n\tfor asig in asigs:\n\t\tfor secc in secciones:\n\t\t\tydata = []\n\t\t\tvals = Valoracion.objects.filter(seccion = secc)\n\t\t\t\n\t\t\tfor val in vals:\n\t\t\t\tif val.pac.asignaturas.nombre == asig and int(val.pac.profesores.id) == int(id_profesor):\n\t\t\t\t\tydata.append(val.media)\n\t\t\t\t\t\n\t\t\tname_key = 'name' + str(y_number)\n\t\t\tname_value = asig + \" - \" + secc\n\t\t\ty_key = 'y' + str(y_number)\n\t\t\t\n\t\t\tnew_data = {name_key : name_value, y_key : ydata}\n\t\t\tchartdata.update(new_data)\n\t\t\ty_number += 1\n\t\n\tfor index, seccion in enumerate(secciones):\t\t\t\t\t\t# Mejorar legibilidad de ciertas secciones\n\t\tif seccion == 'CONOCIMIENTODELTUTOR':\n\t\t\tsecciones[index] = 'CONOCIMIENTO DEL TUTOR'\n\t\n\tcontext = {\n\t\t'profesor': profesor,\n\t\t'filtr_form': filtr_form,\n\t\t'pacs_vals': pacs_vals,\n\t\t'secciones': secciones,\n\t\t'medias_arit': sorted(medias_arit.items()),\n\t\t'charttype': charttype,\n\t\t'chartdata': chartdata,\n\t\t'chartcontainer': chartcontainer,\n\t\t'extra': {\n\t\t\t'x_is_date': True,\n\t\t\t'x_axis_format': '%b %Y',\n\t\t\t'tag_script_js': True,\n\t\t\t'jquery_on_ready': True,\n\t\t}\n\t}\n\t\n\treturn render(request, 'directorio/valoracion_prof.html', context)\n\n","sub_path":"directorio/views/vistas_profesores.py","file_name":"vistas_profesores.py","file_ext":"py","file_size_in_byte":10656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"642274806","text":"from wagtail.admin.panels import (\n FieldPanel,\n InlinePanel,\n MultiFieldPanel,\n ObjectList,\n TabbedInterface,\n)\nfrom wagtailmedia.widgets import AdminMediaChooser\n\n\nclass BookingsInlinePanel(InlinePanel):\n class 
BoundPanel(InlinePanel.BoundPanel):\n template_name = 'wagtailadmin/export_academy/panels/bookings_inline_panel.html'\n\n\nclass ExportAcademyPagePanels:\n content_panels = [\n FieldPanel('title'),\n MultiFieldPanel(\n heading='Hero',\n classname='collapsible',\n children=[\n FieldPanel('hero_image'),\n FieldPanel('hero_text'),\n FieldPanel('hero_cta'),\n FieldPanel('hero_text_below_cta_logged_out'),\n ],\n ),\n MultiFieldPanel(\n heading='Logged in variations',\n children=[\n FieldPanel('hero_cta_logged_in'),\n ],\n ),\n MultiFieldPanel(\n heading='Temporary Banner',\n classname='collapsible',\n children=[\n FieldPanel('banner_label'),\n FieldPanel('banner_content'),\n ],\n ),\n FieldPanel('intro_text'),\n MultiFieldPanel(\n heading='Steps',\n classname='collapsible',\n children=[\n FieldPanel('steps_heading'),\n FieldPanel('steps'),\n ],\n ),\n MultiFieldPanel(\n heading='Main Content',\n classname='collapsible',\n children=[\n FieldPanel('panel_description'),\n FieldPanel('panels'),\n ],\n ),\n FieldPanel(\n 'next_cta',\n ),\n ]\n\n settings_panels = [\n FieldPanel('slug'),\n ]\n\n\nclass EventPanel:\n event_panel = [\n MultiFieldPanel(\n heading='Details',\n children=[\n FieldPanel('name'),\n FieldPanel('description'),\n FieldPanel('link'),\n FieldPanel('format'),\n FieldPanel('types', heading='Types'),\n FieldPanel('location', heading='Event Location'),\n ],\n ),\n MultiFieldPanel(\n heading='Date',\n children=[\n FieldPanel('start_date'),\n FieldPanel('end_date'),\n ],\n ),\n MultiFieldPanel(\n heading='Event Complete Actions',\n children=[\n FieldPanel('document'),\n FieldPanel('video_recording', widget=AdminMediaChooser),\n FieldPanel('completed'),\n ],\n ),\n FieldPanel('live'),\n FieldPanel('closed', heading='closed for bookings'),\n ]\n\n attendance_panel = [BookingsInlinePanel('bookings', panels=[FieldPanel('status')], label='Bookings')]\n\n edit_handler = TabbedInterface(\n [\n ObjectList(event_panel, heading='Event'),\n ObjectList(attendance_panel, heading='Attendance'),\n ]\n )\n","sub_path":"export_academy/cms_panels.py","file_name":"cms_panels.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"634710458","text":"# -*- coding: utf-8 -*-\n# -*- coding: utf-8 -*-\nimport json\n\nimport allure\nimport pytest\nimport yaml\nfrom hamcrest import *\n\nfrom test_work_weixin_tag_ao.tag import Tag\n\n\n@allure.feature(\"通讯录管理-标签管理\")\nclass TestWeiXin:\n def setup_class(self):\n # 实例化department类,供测试用例调用\n # 传入通讯录的secret\n conf = yaml.safe_load(\n open(r\"D:\\PycharmProjects\\HogWartsXly3\\test_work_weixin_tag_ao\\conf.yml\"))\n secret = conf[\"tag\"]\n corpid = conf[\"corpid\"]\n self.tag = Tag(secret=secret, corpid=corpid)\n\n # @pytest.mark.parametrize(\"tagname,tagid\",\n # [\n # # (\"行政\", \"7\"),\n # # (\"销售\", \"6\"),\n # # (\"研发\", \"5\")\n # (\"323232323232323232323232323232323\", \"12\")\n # ]\n # )\n @allure.story(\"创建标签测试用例\")\n @allure.title(\"创建标签\")\n @pytest.mark.tag\n @pytest.mark.run(order=1)\n def test_create_tag_fixture(self, create_tag):\n # print(reate_tag[0],create_tag[1])\n r = self.tag.create_tag(create_tag[0], create_tag[1])\n assert r.status_code == 200\n print(r.json())\n if len(create_tag[0]) > 32:\n # assert r[\"errcode\"] == 40058\n assert self.tag.jsonpath_req(r.json(), \"$..errcode\")[0] == 40058\n print(f\"当前标签名称长度为{len(create_tag[0])},超出32个字符要求\")\n else:\n assert self.tag.jsonpath_req(r.json(), \"$..errmsg\")[0] == \"created\"\n\n # 
@pytest.mark.parametrize(\"tagid,tagname\",\n # [(\"7\", \"【行政】\")\n #\n # ]\n # )\n @allure.story(\"更新标签名字\")\n @allure.title(\"更新标签名字\")\n @pytest.mark.run(order=2)\n def test_updata_tag(self, update_tag):\n r = self.tag.update_tag(update_tag[0], update_tag[1])\n assert r[\"errcode\"] == 0\n\n # @pytest.mark.parametrize(\"tagid\",\n # [\"7\"]\n # )\n @allure.story(\"删除标签\")\n @allure.title(\"删除标签\")\n @pytest.mark.run(order=5)\n def test_delete_tag(self, delete_tag):\n r = self.tag.delete_tag(delete_tag)\n print(r)\n assert_that('errmsg' == 'deleted', equal_to('errmsg' == 'deleted'))\n assert self.tag.jsonpath_req(r, \"$..errmsg\")[0] == \"deleted\"\n\n # @pytest.mark.parametrize(\"tagid\",\n # [(\"7\")]\n # )\n @allure.story(\"获取标签成员\")\n @allure.title(\"获取标签成员\")\n @pytest.mark.run(order=6)\n def test_get_tag_user(self, get_tag_user):\n r = self.tag.get_tag_user(get_tag_user)\n print(f\"\\n获取标签id为{get_tag_user}的成员: \", self.tag.jsonpath_req(r, \"$..name\"))\n\n # @pytest.mark.parametrize(\"tagid,userlist,partylist\",\n # [(\"7\", ['app-user1', 'app-user2'], [])\n # # [(\"8\", [], ['2'])\n # ])\n @allure.story(\"增加标签成员\")\n @allure.title(\"增加标签成员\")\n @pytest.mark.run(order=3)\n def test_create_tag_user(self, create_tag_user):\n r = self.tag.create_tag_user(create_tag_user[0], create_tag_user[1], create_tag_user[2])\n print(r)\n assert self.tag.jsonpath_req(r, \"$..errcode\")[0] == 0\n\n # @pytest.mark.parametrize(\"tagid,userlist,partylist\",\n # [(\"7\", ['app-user1', 'app-user2'], [])\n # # [(\"8\", [], ['2'])\n # ])\n @allure.story(\"删除标签成员\")\n @allure.title(\"删除标签成员\")\n @pytest.mark.run(order=4)\n def test_delete_tag_user(self, delete_tag_user):\n r = self.tag.delete_tag_user(delete_tag_user[0], delete_tag_user[1], delete_tag_user[2])\n print(r)\n assert self.tag.jsonpath_req(r, \"$..errmsg\")[0] == \"deleted\"\n\n @allure.story(\"获取标签列表\")\n @allure.title(\"获取标签列表\")\n @allure.description(\"查看最终的标签列表\")\n @allure.severity(\"trivial\")\n @pytest.mark.run(order=7)\n def test_get_tag_list(self):\n with allure.step(\"1: 打印最终的标签列表\"):\n r = self.tag.get_tag_list()\n print(\"获取标签列表:\", self.tag.jsonpath_req(r, '$..tagname'), \"\\n\")\n with allure.step(\"2: 检验结果\"):\n allure.attach('标签列表打印成功', '期望结果')\n allure.attach('标签列表打印成功', '实际结果')\n","sub_path":"test_work_weixin_tag_ao/test_tag.py","file_name":"test_tag.py","file_ext":"py","file_size_in_byte":4696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"288336951","text":"import pandas as pd\nimport plotly.graph_objects as go\n\n# Get the data\ndata_address = 'https://covid.ourworldindata.org/data/total_cases.csv'\n\n# plot the data\ndf = pd.read_csv(data_address)\ndf = df.set_index('date')\n\nfig = go.Figure()\n\nfor country in list(df):\n fig.add_trace(go.Scatter(\n x=df.index,\n y=df[country],\n name=country,\n ))\n\n# Make the plot look fancy. 
\nfig.update_layout(title='COVID-19 Cases',\n xaxis_title='Date',\n yaxis_title='Cases')\n\n \nfig.show()\n","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"168317462","text":"# myapp/urls.py\nfrom django.urls import path, include\nfrom .views import *\n\nurlpatterns = [\n\tpath('', Home, name = 'home-page'),\n\tpath('lastest/', Lastest, name = 'lastest-page'),\n\tpath('about/', About, name = 'about-page'),\n\tpath('contact/', Contact, name = 'contact-page'),\n\tpath('scrapy/', Scrapy, name = 'scrapy-page'),\n\tpath('addproduct/', AddProduct, name = 'addproduct-page'),\n\tpath('allproducts/', Product, name = 'allproducts-page'),\n\tpath('register/', Register, name = 'register-page'),\n\tpath('addtocart/', AddtoCart, name = 'addtocart-page'),\n\tpath('mycart/', MyCart, name = 'mycart-page'),\n\tpath('mycart/edit/', MyCartEdit, name = 'mycartedit-page'),\n\tpath('checkout/', Checkout, name = 'checkout-page'),\n\tpath('orderlist/', OrderListPage, name = 'orderlist-page'),\n\tpath('allorderlist/', AllOrderListPage, name = 'allorderlist-page'),\n\tpath('uploadslip/', UploadSlip, name = 'uploadslip-page'),\n\tpath('updatestatus///', UpdatePaid, name = 'updatestatus'),\n\tpath('updatetracking/', UpdateTracking, name = 'updatetracking'),\n]\n","sub_path":"firstweb/myapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"34733315","text":"import random\n\n# Game set up\nprint(\"Welcome to the guessing game!\")\nnumber_of_guesses = 3 # User has three guesses before loosing the game\nuser_won = False\n\n# Computer guesses a random number between 1 and 10\ncorrect_answer = random.randint(1, 10)\n\nwhile number_of_guesses > 0:\n # User guesses the number\n user_guess = input(\"Guess the number: \")\n user_guess = int(user_guess)\n\n # Computer tells user whether guess was too high or too low\n if user_guess == correct_answer:\n print(\"You are correct\")\n user_won = True\n break\n elif user_guess > correct_answer:\n print(\"Sorry, you guessed too high!\")\n elif user_guess < correct_answer:\n print(\"Sorry, you guessed too low!\")\n\n number_of_guesses -= 1\n\nprint()\nif user_won == True:\n print(\"You win!\")\nelse:\n print(\"You loose!\")\n print(\"The number was: \" + str(correct_answer))\n\ninput()","sub_path":"Python 3/guessing_game.py","file_name":"guessing_game.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"369967316","text":"\nfrom __future__ import division, print_function\nimport codecs\nimport argparse\nfrom itertools import groupby\n\ndef get_error_distribution(error_file):\n with codecs.open(error_file, encoding='utf8') as tsv:\n # remove newlines\n row_data = [ l.rstrip().split('\\t') for l in tsv ]\n class_data = [ {'predicted': l[0], 'actual': l[1], 'word': l[2], 'class': l[3] } for l in row_data ]\n\n class_data = sorted(class_data, key=lambda x: x['class'])\n for key, group in groupby(class_data, lambda x: x['class']):\n group_instances = list(group)\n print('ERROR TYPE: {}\\tTOTAL INSTANCES: {}'.format(key, str(len(group_instances))))\n accuracy = sum([1 for i in group_instances if i['predicted'] == i['actual']]) / len(group_instances)\n print(\"group accuracy: {}\".format(str(accuracy)))\n # for i in group_instances:\n 
# print(i)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input', type=str, help='input file -- sentences tagged with errors')\n args = parser.parse_args()\n get_error_distribution(args.input)","sub_path":"marmot/evaluation/check_error_types.py","file_name":"check_error_types.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"576287493","text":"# -*- coding: utf-8 -*-\n#\n#################\n# This script takes as input the NOAA GML SF6 data in .txt format,\n# stripped of the explanatory text,\n# and returns a csv file with the processed time series.\n# Data is provided in ppt (parts per trillion).\n#\n# Last updated: Feb 2021\n# Author: Ignacio Lopez-Gomez\n# \n#################\nimport pandas as pd\n\nraw_data_ = pd.read_csv('../source/monthly_global_sf6_data_clean.txt', delim_whitespace=True)\nproc_data_ = raw_data_.rename(columns = {'decimal' : 'date (decimal)',\n 'average':'monthly mean',\n 'average_unc': 'mon. mean 1-sigma unc.',\n 'trend':'season-filtered fit',\n 'trend_unc': 'fit 1-sigma unc.'})\nproc_data_['date (decimal)'] = round(proc_data_['date (decimal)'], 3)\n\ndata_tidy = proc_data_.melt(id_vars=proc_data_.columns[:3], \n var_name=\"Reported value\", \n value_name=\"Concentration (ppt)\")\n\n# Fix absent value formatting\ndata_tidy[\"Concentration (ppt)\"] = data_tidy[\"Concentration (ppt)\"].astype(float)\ndata_tidy[\"Concentration (ppt)\"] = data_tidy[\"Concentration (ppt)\"].where(data_tidy[\"Concentration (ppt)\"] > 0)\n\n# # Save to file, stripped of index\ndata_tidy.to_csv(r'../processed/monthly_global_sf6_data_processed.csv', index = False)","sub_path":"data/atmosphere_biogeochemistry/NOAA_GML_SF6_trend/code/get_sf6_trend.py","file_name":"get_sf6_trend.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"45548497","text":"# Written by: Travis Owens\n# Date: 2018-10-21\n# Discrption: Add's twitch user to DB along with Channel ID entry\n# Duplicate Checking\n# Deletes user if all discord channels have been deleted\n\nimport config\nimport pymysql.cursors\nfrom time import time, sleep\nimport requests\n\n# Async calls this\ndef addUser(message, channel):\n\n # parse message content\n message = message.split(' ')\n discordChannelID = channel\n\n # If user did not pass a twitch user name, display command usage\n if(len(message) == 1):\n return 'Usage: \\n \\n k!twitch add [twitch user name] [optional: Retention Interval (Hours)] \\n Retention Time: Number of hours before sending notification message again.'\n\n # Twitch user name should be at index 2\n twitchUserName = message[2] # Twitch user names do not have spaces\n\n # If user wants to enable retention notification\n if(len(message) == 4):\n retention = 'True'\n if(int(message[3] >= 1)): #If user enters less than 1 hour, set to 1 hour\n retentionInterval = message[3]\n else:\n retentionInterval = 1\n else: #Disable retention if no hour is given\n retention = 'False'\n retentionInterval = 0\n\n\n if(message[1] == 'add'): #Add channel (Discord)\n twitchLiveOutput = twitchLive().main(twitchUserName, 'add', channel, retention, retentionInterval)\n if(message[1] == 'del'): #Del Channel (Discord)\n twitchLiveOutput = twitchLive().main(twitchUserName, 'del', channel, retention, retentionInterval)\n\n return str(twitchLiveOutput)\n\n\nclass twitchLive(object):\n def 
__init__(self):\n return\n\n def connection(self):\n # create DB connection\n connection = pymysql.connect(host=config.DB_HOST,\n user=config.DB_USER,\n password=config.DB_PASS,\n db=config.DB_TABLE,\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n return connection\n\n\n def main(self, twitchUserName, addChannel, discordChannelID, retention, retentionInterval):\n\n # Twitch\n self.twitchUserName = twitchUserName\n try:\n self.twitchChannelID = self.getTwitchID()\n except Exception as e:\n # If channel name is not found in the Helix API\n print(e)\n return 'Error: Twitch channel not found.'\n\n\n # Discord\n try:\n self.discordChannelID = str(discordChannelID)\n self.retention = retention\n self.retentionInterval = retentionInterval\n\n #SQL\n self.addUser_MySQL() #If user is not found, adds user\n self.twitchDataID = self.getTwitchDataID_MySQL() #Retrieve twitch data ID from MySQL\n self.findChannel_byID_MySQL() #Retrieves a list of discord channels by twitchDataID\n except Exception as e:\n print(e)\n\n try:\n if(addChannel == 'add'): #If true, add channel, else del channel\n output = self.addDiscordChannel_MySQL() #If channel is not found, adds channel\n if(addChannel == 'del'):\n output = self.delDiscordChannel_MySQL() #If it is the last channel for that user, it will delete the twitch ID\n except Exception as e:\n output = \"Unknown Error\"\n print(e)\n return output\n\n # Using Twitch Helix API\n # Return Twitch Channel ID\n def getTwitchID(self):\n self.helix_URL = 'https://api.twitch.tv/helix/users?login=' + self.twitchUserName\n self.headers = {\n 'client-id': config.twitchAPIToken,\n }\n self.helix_data = requests.get(self.helix_URL, headers=self.headers).json()\n # Return just the channel ID \n return self.helix_data['data'][0]['id']\n\n\n def addUser_MySQL(self):\n # Search for user\n if(self.findUser_MySQL()):\n return\n else:\n #IF user is NOT found\n try:\n conn = self.connection()\n with conn.cursor() as cursor:\n sql = \"INSERT INTO `twitchuserdata` (`twitchChannelID`,`twitchUserName`, `liveStatus`) VALUES (%s, %s, %s)\"\n cursor.execute(sql, (self.twitchChannelID, self.twitchUserName, 'offline'))\n conn.commit()\n finally:\n conn.close()\n return\n\n def delUser_MySQL(self):\n # Delete twitch user from DB\n # Called when last discord channel is deleted\n try:\n conn = self.connection()\n with conn.cursor() as cursor:\n sql = \"DELETE FROM `twitchuserdata` WHERE `twitchDataID`=%s\"\n cursor.execute(sql, (self.twitchDataID))\n\n conn.commit()\n finally:\n conn.close()\n\n return\n\n # Check if twitch user is already in DB\n def findUser_MySQL(self):\n # Connect to the database\n try:\n conn = self.connection()\n with conn.cursor() as cursor:\n sql = \"SELECT `twitchDataID` FROM `twitchuserdata` WHERE `twitchChannelID`=%s\"\n cursor.execute(sql, (self.twitchChannelID))\n\n conn.commit()\n finally:\n conn.close()\n\n # If user is not found return false, else return true\n if(cursor.fetchone() == None):\n return False\n else:\n return True\n\n\n def getTwitchDataID_MySQL(self):\n # Connect to the database\n try:\n conn = self.connection()\n with conn.cursor() as cursor:\n sql = \"SELECT `twitchDataID` FROM `twitchuserdata` WHERE `twitchChannelID`=%s\"\n cursor.execute(sql, (self.twitchChannelID))\n\n conn.commit()\n finally:\n conn.close()\n\n data = cursor.fetchone()\n\n return data['twitchDataID']\n\n\n def addDiscordChannel_MySQL(self):\n # check if channel already exist for twitch user\n if self.discordChannelID in self.channelList: 
#(self.channelList.contains(self.discordChannelID)):\n # Found twitchID and channelID\n return \"User already added to this channel!\"\n else:\n #SQL add channel\n conn = self.connection()\n try:\n with conn.cursor() as cursor:\n sql = \"INSERT INTO `discordchannels` (`twitchDataID`, `discordChannelID`, `retentionNotification`, `retentionInterval`, `retentionTimeCheck`) VALUES (%s, %s, %s, %s, %s)\"\n cursor.execute(sql, (self.twitchDataID, self.discordChannelID, self.retention, self.retentionInterval, time()))\n conn.commit()\n finally:\n conn.close()\n\n return \"Notification successfully added!\"\n\n # twitch data ID - returns a list of channels by twitch data ID\n def findChannel_byID_MySQL(self):\n # Connect to the database\n print(\"Starting Channel List\")\n conn = self.connection()\n try:\n with conn.cursor() as cursor:\n sql = \"SELECT `discordChannelID` FROM `discordchannels` WHERE `twitchDataID`=%s\"\n cursor.execute(sql, (self.twitchDataID))\n\n conn.commit()\n except Exception as e:\n print(e)\n finally:\n conn.close()\n\n row = cursor.fetchone()\n self.channelList = []\n\n while(row != None):\n self.channelList.append(row['discordChannelID'])\n row = cursor.fetchone()\n\n return\n\n def delDiscordChannel_MySQL(self):\n # remove discord channel and user if no discord channels are left\n # drop discord channel\n try:\n conn = self.connection()\n with conn.cursor() as cursor:\n sql = \"DELETE FROM `discordchannels` WHERE `discordChannelID`=%s AND `twitchDataID`=%s\"\n cursor.execute(sql, (self.discordChannelID, self.twitchDataID))\n\n conn.commit()\n finally:\n conn.close()\n\n self.findChannel_byID_MySQL()\n\n if(len(self.channelList) == 0):\n # Drop twitch user\n self.delUser_MySQL()\n\n\n return \"User successfully deleted!\"\n","sub_path":"addUser/addTwitchUser.py","file_name":"addTwitchUser.py","file_ext":"py","file_size_in_byte":8316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"7184254","text":"import re\n\nfrom django.test import RequestFactory\nfrom tri_declarative import (\n dispatch,\n Namespace,\n)\nfrom tri_struct import Struct\n\nfrom iommi import (\n Table,\n middleware,\n)\nfrom iommi.base import items\nfrom iommi.traversable import (\n Traversable,\n)\n\n\ndef reindent(s, before=\" \", after=\" \"):\n\n def reindent_line(line):\n m = re.match(r'^((' + re.escape(before) + r')*)(.*)', line)\n return after * (len(m.group(1)) // len(before)) + m.group(3)\n\n return \"\\n\".join(reindent_line(line) for line in s.splitlines())\n\n\ndef remove_csrf(html_code):\n csrf_regex = r']+csrfmiddlewaretoken[^>]+>'\n return re.sub(csrf_regex, '', html_code)\n\n\n@dispatch(\n table__call_target=Table,\n)\ndef verify_table_html(*, expected_html, query=None, find=None, table, **kwargs):\n \"\"\"\n Verify that the table renders to the expected markup, modulo formatting\n \"\"\"\n from bs4 import BeautifulSoup\n if find is None:\n find = dict(class_='table')\n if not expected_html.strip():\n expected_html = \"\" # pragma: no cover\n\n if isinstance(table, Namespace):\n table = table()\n\n table: Table\n\n request = RequestFactory().get(\"/\", query)\n if not table._is_bound:\n table = table.bind(request=request)\n\n from django.contrib.auth.models import AnonymousUser\n request.user = AnonymousUser()\n actual_html = remove_csrf(table.__html__(**kwargs))\n\n expected_soup = BeautifulSoup(expected_html, 'html.parser')\n prettified_expected = reindent(expected_soup.find(**find).prettify()).strip()\n actual_soup = 
BeautifulSoup(actual_html, 'html.parser')\n hit = actual_soup.find(**find)\n if not hit: # pragma: no cover\n print(actual_html)\n assert False, f\"Couldn't find selector {find} in actual output\"\n assert hit, actual_soup\n prettified_actual = reindent(hit.prettify()).strip()\n\n if prettified_actual != prettified_expected: # pragma: no cover\n print(actual_html)\n assert prettified_actual == prettified_expected\n\n\ndef request_with_middleware(response, request):\n def get_response(request):\n del request\n return response\n\n m = middleware(get_response)\n return m(request=request)\n\n\ndef no_auth_middleware_req(method, **data):\n return getattr(RequestFactory(HTTP_REFERER='/'), method.lower())('/', data=data)\n\n\ndef req(method, **data):\n request = no_auth_middleware_req(method, **data)\n request.user = Struct(is_staff=False, is_authenticated=False)\n return request\n\n\ndef user_req(method, **data):\n request = req(method, **data)\n request.user = Struct(is_staff=False, is_authenticated=True)\n return request\n\n\ndef staff_req(method, **data):\n request = req(method, **data)\n request.user = Struct(is_staff=True, is_authenticated=True)\n return request\n\n\ndef get_attrs(x, attrs):\n return {a: x.attrs.get(a) for a in attrs}\n\n\nclass StubTraversable(Traversable):\n def __init__(self, *, _name, members=None):\n super(StubTraversable, self).__init__(_name=_name)\n self._declared_members = members or {}\n\n def on_bind(self):\n self._bound_members = Struct({k: v.bind(parent=self) for k, v in items(self._declared_members)})\n\n\ndef prettify(content):\n from bs4 import BeautifulSoup\n return reindent(BeautifulSoup(content, 'html.parser').prettify().strip())\n","sub_path":"tests/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"20838796","text":"import csv\nfrom django.core.management import BaseCommand\nfrom hurumap.models import Geography\n\n__author__ = 'kenneth'\n\n\nclass Command(BaseCommand):\n g = Geography.objects.filter(geo_level__in=['district', 'country', 'region'])\n help = 'Update location size in square km - From file at db/csv/square-kilometre.csv'\n\n def handle(self, *args, **options):\n with open('db/csv/square-kilometre.csv', 'rb') as csvfile:\n sr = csv.reader(csvfile, delimiter=',', quotechar='\"')\n for row in sr:\n try:\n ax = self.g.get(geo_code=row[0])\n ax.square_kms = int(row[3].replace(',', ''))\n ax.save()\n except Exception as e:\n continue","sub_path":"hurumap_ug/management/commands/update_sqkm.py","file_name":"update_sqkm.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"345449726","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\n\ndef single_mk2_movie_parse(movie_html, date):\n \"\"\"This method parses a HTML element that was found through BeautifulSoup\n and will return a ``dict`` containing the informations of the movie and\n its diffusion hours for a specific cinema.\n\n :param movie_html: A ``ResultSet`` from BeautifulSoup.\n :param date: A ``ResultSet`` from BeautifulSoup.\n\n :return: A dict containing informations about a movie.\n \"\"\"\n\n hours = []\n for li in movie_html.findAll(\"li\"):\n hour_dirt = re.sub(r'(\\s|\\n)*', '', li.text).strip()\n if not hour_dirt:\n continue\n hour_split = re.match(r'(\\d{2}h\\d{2})(VO|VF)', hour_dirt)\n hours.append({\n 'hour': hour_split.group(1),\n 'language': 
hour_split.group(2)\n })\n\n return {\n 'title': movie_html.find(\"a\", \"fiche-film-title\").text,\n 'duration': movie_html.find(\"div\", \"fiche-film-duration\").text,\n 'screening_hours': hours\n }\n\ndef get_mk2_cinema_schedule(cinema_url):\n cinema_page = requests.get(cinema_url)\n\n if cinema_page.status_code != 200:\n raise Exception(\"MK2 schedule scrapping failed for {}: {}\".format(\n cinema_url, cinema_page.status_code))\n\n bs = BeautifulSoup(cinema_page.text, 'html.parser')\n # All movies that are currently in this cinema are contained inside divs\n # with the `l-session-table` css class.\n # The only thing to do in order to fetch the available movies for a\n # specific date is to look for them inside a `table` which is one of the\n # two children of the element (the other one being the header with the date)\n schedulesTables = bs.findAll(\"div\", \"l-session-table\")\n\n movies_dict = {}\n\n for schedule in schedulesTables:\n # Disabling recursive search here since we know that we need the\n # immediate children\n header = schedule.find(\"div\", recursive=False)\n date = header.find(\"td\", \"schedule-day\")\n movies = schedule.find(\"table\", recursive=False)\n for movie in movies.findAll(\"tr\"):\n dict_movie = single_mk2_movie_parse(movie, date)\n if date:\n movies_dict[date.text] = dict_movie\n \n return movies_dict\n\n","sub_path":"src/movies/importer/mk2.py","file_name":"mk2.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"249542454","text":"# Copyright 2020 Huy Le Nguyen (@usimarit)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport math\nimport numpy as np\nimport random\n\nfrom ..featurizers.speech_featurizers import read_raw_audio\n\n\ndef get_white_noise(signal: np.ndarray, snr: float = 10):\n if snr < 0:\n return None\n RMS_s = math.sqrt(np.mean(signal ** 2))\n # RMS values of noise\n RMS_n = math.sqrt(RMS_s ** 2 / (pow(10, snr / 20)))\n # Additive white gausian noise. 
Thereore mean=0\n # Because sample length is large (typically > 40000)\n # we can use the population formula for standard daviation.\n # because mean=0 STD=RMS\n STD_n = RMS_n\n noise = np.random.normal(0, STD_n, signal.shape[0])\n return noise\n\n\ndef get_noise_from_sound(signal: np.ndarray, noise: np.ndarray, snr: float = 10):\n if len(noise) < len(signal) or snr < 0:\n return None\n\n idx = random.choice(range(0, len(noise) - len(signal))) # randomly crop noise wav\n noise = noise[idx:idx + len(signal)]\n\n RMS_s = math.sqrt(np.mean(signal ** 2))\n # required RMS of noise\n RMS_n = math.sqrt(RMS_s ** 2 / (pow(10, snr / 20)))\n\n # current RMS of noise\n RMS_n_current = math.sqrt(np.mean(noise ** 2))\n noise = noise * (RMS_n / (RMS_n_current + 1e-20))\n\n return noise\n\n\ndef add_noise(signal: np.ndarray, noises: list, snr_list: list, max_noises: int, sample_rate=16000):\n num_noises = random.randint(1, max_noises)\n if len(noises) < num_noises:\n num_noises = len(noises)\n random.shuffle(noises)\n selected_noises = random.choices(noises, k=num_noises)\n for noise_type in selected_noises:\n snr = random.choice(snr_list)\n if noise_type == \"white_noise\":\n noise = get_white_noise(signal, snr)\n if noise is not None:\n signal = np.add(signal, noise)\n else:\n noise = read_raw_audio(noise_type, sample_rate=sample_rate)\n noise = get_noise_from_sound(signal, noise, snr)\n if noise is not None:\n signal = np.add(signal, noise)\n return signal\n\n\ndef add_white_noise(signal: np.ndarray, snr_list: list, sample_rate=16000):\n snr = random.choice(snr_list)\n noise = get_white_noise(signal, snr)\n if noise is not None:\n signal = np.add(signal, noise)\n return signal\n\n\ndef add_realworld_noise(signal: np.ndarray, noises: list, snr_list: list, max_noises: int, sample_rate=16000):\n num_noises = random.randint(1, max_noises)\n if len(noises) < num_noises:\n num_noises = len(noises)\n random.shuffle(noises)\n selected_noises = random.choices(noises, k=num_noises)\n for noise_type in selected_noises:\n snr = random.choice(snr_list)\n noise = read_raw_audio(noise_type, sample_rate=sample_rate)\n noise = get_noise_from_sound(signal, noise, snr)\n if noise is not None:\n signal = np.add(signal, noise)\n return signal\n","sub_path":"tiramisu_asr/augmentations/noise_augment.py","file_name":"noise_augment.py","file_ext":"py","file_size_in_byte":3403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"466608517","text":"# Copyright 2018 NCHC\nimport os\nos.environ['LANG'] = \"en_US.utf8\"\nfrom twcc.session import session_start\n\n__version__ = '0.0.1'\n\n\n#\n# Get our data path to be added to botocore's search path\n#\n_TWCC_data_path_ = []\nif not 'TWCC_DATA_PATH' in os.environ:\n _TWCC_data_path_.append(\n os.path.join(os.environ['HOME'], '.twcc_data')\n )\n os.environ['TWCC_DATA_PATH'] = os.pathsep.join(_TWCC_data_path_)\n\n\nSCALAR_TYPES = set([\n 'string', 'float', 'integer', 'long', 'boolean', 'double',\n 'blob', 'timestamp'\n])\nCOMPLEX_TYPES = set(['structure', 'map', 'list'])\n\n__all__ = [\"clidriver\", \"util\", \"services\"]\n\nos.environ['_STAGE_'] = \"production\"\n\n_TWCC_SESSION_ = session_start()\n","sub_path":"src/twcc/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"153269330","text":"from os import getcwd, makedirs, system\r\nimport nibabel as nib\r\nimport numpy as np\r\nimport glob, shutil, 
pydicom\r\nimport matplotlib.pyplot as plt\r\nfrom pydicom.data import get_testdata_files\r\nimport matplotlib\r\nfrom os.path import exists, join\r\n\r\nmatplotlib.use('Agg')\r\n\r\n########################################################\r\n\r\ndef average(imagesDir):\r\n initImage = nib.load(imagesDir[0])\r\n initImageData = initImage.get_fdata()\r\n imagesDir.remove(imagesDir[0])\r\n\r\n # promedio general dada la cantidad de imagenes \r\n for index, imageDir in enumerate(imagesDir):\r\n nextImage = nib.load(imageDir).get_fdata()\r\n initImageData = ((initImageData[:,:,:] * (index + 1) ) + nextImage[:,:,:]) / (index + 2)\r\n nib.save(nib.Nifti1Image(initImageData, initImage.affine, initImage.header), join(getcwd(),'averagepro_rigid.nii'))\r\n\r\n#######################################################\r\n\r\ndef rigidFlirt(path, files):\r\n\r\n if not exists(join(path, \"1.originals\")):\r\n makedirs(join(path, \"1.originals\"))\r\n\r\n if not exists(join(path, \"2.flirt_mat\")):\r\n makedirs(join(path, \"2.flirt_mat\"))\r\n\r\n for f in files:\r\n if (\"converted_PIL.nii\" in f):\r\n #shutil.move(join(path, f), join(path, \"1.originals\"))\r\n command = \"/usr/local/fsl/bin/flirt -in {} -ref {} -out {} -omat {}.mat -bins 256 -cost corratio -searchrx -90 90 -searchry -90 90 -searchrz -90 90 -dof 6 -interp trilinear\"\r\n system(command.format(join(path, \"1.originals\", f),\r\n join(path, \"refimage.nii\"),\r\n join(path, f.split(\".\")[0]+\"_rigid_pil.nii\"),\r\n join(path, f.split(\".\")[0])))\r\n #shutil.move(\r\n # join(path, f.split(\".\")[0]+\".mat\"), join(path, \"2.flirt_mat\"))\r\n\r\n\r\n\r\nACTUAL_DIRECTORY = getcwd()\r\nimagesDir = glob.glob(join(ACTUAL_DIRECTORY, \"**\", \"*converted_PIL.nii\"), recursive=True)\r\n\r\n#Rigid Flirt\r\nrigidFlirt(ACTUAL_DIRECTORY, imagesDir)\r\nrigidFlirtImagesDir = glob.glob(join(ACTUAL_DIRECTORY, \"**\", \"*rigid_pil.nii.gz\"), recursive=True)\r\naverage(rigidFlirtImagesDir)\r\n","sub_path":"testScripts/averageProRigidFSL.py","file_name":"averageProRigidFSL.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"184363885","text":"import string\n\nwith open('input.txt') as file:\n questionnaire_data = file.read().splitlines()\n\n\ndef find_groups(data: list) -> list:\n groups = []\n group = []\n for line in data:\n if line == '':\n groups.append(group)\n group = []\n else:\n group.append(line)\n if len(group) > 0:\n groups.append(group)\n return groups\n\n\ndef find_answered(group: list) -> int:\n count = len(group)\n all_answers = ''\n result = 0\n for persons_answers in group:\n all_answers += persons_answers\n for char in string.ascii_lowercase:\n if all_answers.count(char) == count:\n result += 1\n return result\n\n\ngroups = find_groups(questionnaire_data)\n\nresult = 0\n\nfor group in groups:\n result += find_answered(group)\n\nprint(result)\n\n","sub_path":"day06/day6-2.py","file_name":"day6-2.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"141448920","text":"# ----------------------------------------------------------------------------------------------------------------------\n# COMS 6998_008 Fall 2020: Computation and the Brain\n# Final Project\n# Group Members: Kartik Balasubramaniam (kb3127), Brett Karopczyc (bjk2161), Vincent Lin (vcl2122), Basile Van Hoorick (bv2279)\n# Author(s): Brett Karopczyc\n# 
----------------------------------------------------------------------------------------------------------------------\n# Imports\nimport torch\nfrom .TablePlasticityRule import TablePlasticityRule\n\n# ----------------------------------------------------------------------------------------------------------------------\n\nclass TableRule_PostCount(TablePlasticityRule):\n \"\"\"\n This class implements the following table-based plasticity rule:\n\n The rule used to update a synapse between hidden-layer nodes i and j is a table of size 2 * (cap + 1):\n * \n\n The rule used to update a synapse between hidden-layer node i and output label j is a table of size 2 * (cap + 1):\n * \n\n As a result, we require that all layers this rule applies to have the same cap (for the presynaptic layer).\n \"\"\"\n\n def __init__(self):\n super().__init__()\n # Define attributes used by this class\n self.presynaptic_cap = None # Will be assigned in initialize()\n\n def initialize(self, layers=None):\n # Determine the common cap of all presynaptic layers\n if self.isOutputRule:\n self.presynaptic_cap = self.ff_net.cap[-1]\n else:\n caps = {self.ff_net.cap[lay-1] for lay in layers}\n assert len(caps) == 1, \"Caps of presynaptic layers were inconsistent\"\n self.presynaptic_cap = caps.pop()\n\n # Call up to our super's initialize()\n super().initialize(layers)\n\n def rule_shape(self):\n return 2, self.presynaptic_cap+1 # Postsyn fired/Is Label node?, Count of incoming nodes that fired\n\n def hidden_layer_rule_index_arrays(self, h):\n \"\"\"\n Return index arrays for each dimension of the plasticity rule for the weight matrix between hidden layers h and h-1\n \"\"\"\n # Get details of the presynaptic and postsynaptic layers, their connectivity, and their latest firing patterns\n net = self.ff_net\n presyn_width = net.w[h-1]\n presyn_acts = net.hidden_layer_activations[h-1]\n postsyn_acts = net.hidden_layer_activations[h]\n connectivity = net.hidden_layers[h]\n\n # Rule dimension 0: 1 if the postsynaptic neuron fired, 0 otherwise\n dim0_idx = postsyn_acts.view(-1, 1).repeat(1, presyn_width).long() # Repeat as cols for each presynaptic neuron\n\n # Rule dimension 1: count of incoming neurons that fired per postsynaptic neuron\n incoming_firings = presyn_acts * connectivity # Broadcasts across rows of connectivity, yielding activity for each postsynaptic neuron\n incoming_firings = incoming_firings.sum(dim=1, keepdim=True) # Count the number of incoming nodes that fired\n dim1_idx = incoming_firings.repeat(1, presyn_width).long() # Repeat as cols for each presynaptic neuron\n\n # Return the index arrays\n return dim0_idx, dim1_idx\n\n def output_rule_index_arrays(self, prediction, label):\n \"\"\"\n Return index arrays for each dimension of the plasticity rule for the weight matrix between the last hidden layer and the output layer\n \"\"\"\n # Get details of the presynaptic (last hidden) and postsynaptic (output) layers, and the latest firing pattern of the last hidden layer\n net = self.ff_net\n presyn_width = net.w[-1]\n postsyn_width = net.m\n presyn_acts = net.hidden_layer_activations[-1]\n connectivity = net.output_layer\n\n # Rule dimension 0: 1 if the postsynaptic node is the label, 0 otherwise\n dim0_idx = torch.zeros(postsyn_width, presyn_width, dtype=torch.long)\n dim0_idx[label] = 1\n\n # Rule dimension 1: count of incoming neurons that fired per postsynaptic neuron\n incoming_firings = presyn_acts * connectivity # Broadcasts across rows of output_layer\n incoming_firings = incoming_firings.sum(dim=1, 
keepdim=True) # Count the number of incoming nodes that fired per label\n dim1_idx = incoming_firings.repeat(1, presyn_width).long() # Repeat as cols for each presynaptic neuron\n\n # Return the index arrays\n return dim0_idx, dim1_idx\n\n# ----------------------------------------------------------------------------------------------------------------------\n","sub_path":"BrainNet/FFLocalPlasticityRules/TableRule_PostCount.py","file_name":"TableRule_PostCount.py","file_ext":"py","file_size_in_byte":4708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"197098112","text":"\"\"\"\n***************************************************************************************************\nThe script examples provided by Cisco for your use are provided for\nreference only as a customer courtesay.\n\nThey are intended to facilitate development of your own scripts and software\nthat interoperate with Cisco switches and software.\n\nAlthough Cisco has made efforts to create script examples that will be effective\nas aids to script or software development,\n\nCisco assumes no liability for or support obligations related to the use of the script examples or\nany results obtained using or referring to the script examples.\n\n***************************************************************************************************\n\n\"\"\"\n#!/usr/bin/env python\n#\n# NexusAccess_m .py,pyc\n# Author: Robert Stellman (rostellm)\n# Date : 27 Sep 2012 - New file from NexusAccess.py\n#\n# Date : 01 Oct 2012 - Incorporating sockets for a remote client\n#\n\n\n\nimport socket\nfrom Nexus_Object import *\n\nimport os\nfrom datetime import datetime\nimport time\nimport shutil\nfrom shutil import *\nimport curses, curses.panel\n\n\nHOSTS=(\"172.25.187.155\",\"172.25.187.50\",\"172.25.187.156\")\nHOST = '172.25.187.155' # The remote host\nPORT = 50007 # The same port as used by the server\n\n\nNexus1 = Nexus_switch(HOST,PORT)\n \ndef stringNexusCLI (sbuffer='',host=HOSTS[0]):\n \n try: buffer = repr(Nexus1.s_socket(sbuffer,host,PORT))\n except: return(\"Socket off-line\\n ....\\n\") \n \n return (buffer)\n\ndef dsp_output_str(bufferText):\n \"\"\" Displays output \\n\"\"\"\n \n length = len (bufferText)\n lenx = length - 1\n \n if (length <= 1): return()\n \n pad = curses.newpad(100,100) # lines, columns\n ymax, xmax = pad.getmaxyx()\n if (ymax >= 30): ymax = 30\n if (xmax >= 86): xmax = 86\n \n y=4; x=0 \n \n # 'Write a loop with lenx and 2500 as the increment until n*2500 < lenx\n\n\n n = 0\n while ( n*2500 < length):\n mu = 2500*(n+1)\n ml = 2500*n\n \n pad.clear()\n try: pad.addstr(y,x, bufferText[ml:mu])\n except curses.error: pass \n \n try: pad.refresh (0,0,1,1,ymax-2, xmax-2) # pad.refresh (y,x,ymin,xmin,ymax,xmax) \n except curses.error: pass\n\n n = n + 1\n if ( n*2500 < length ): \n\n title_string = \" NexusAccess-0.12m \"\n screen.addstr(1, 1, title_string,curses.A_REVERSE)\n screen.addstr (1, 59 ,host, curses.A_REVERSE)\n smenu1 = get_menu(mline) # Menu \n screen.addstr (3, 1, \"Press Enter for more\", curses.A_REVERSE)\n screen.refresh()\n screen.move (1,76)\n xinput = screen.getch() \n if xinput == ord('q'): \n screen.addstr (3, 1, \" \");break\n\n \n # .... 
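The index arrays produced by `TableRule_PostCount` above are shaped like the weight matrix they update, so the 2 x (cap + 1) rule table can be applied in one advanced-indexing lookup. A minimal sketch of that gather step, using a hypothetical `rule` tensor (the real table lives in the `TablePlasticityRule` base class, which is not shown here):

```python
import torch

# Hypothetical rule table: dim 0 = did the postsynaptic unit fire (0/1),
# dim 1 = count of presynaptic units that fired (0..cap).
cap = 3
rule = torch.randn(2, cap + 1)

# Index arrays shaped (postsyn_width, presyn_width), as returned by
# hidden_layer_rule_index_arrays() above.
dim0_idx = torch.tensor([[1, 1], [0, 0]])
dim1_idx = torch.tensor([[2, 2], [1, 1]])

# Advanced indexing gathers one rule entry per synapse, so the whole
# weight update is a single vectorized step.
delta_w = rule[dim0_idx, dim1_idx]
print(delta_w.shape)  # torch.Size([2, 2])
```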
End of loop ........\n \n return()\n\ndef get_mgmt0_ip(mline=24,host=HOSTS[0]):\n \n try: inf1 = stringNexusCLI(\"show int mgmt0 brief | grep mgmt0\", host)\n except: return ('xx.xx.xx.xx')\n \n o = inf1\n o = Nexus1.stringNexusFormat (o)\n \n o = o.replace (\"mgmt0\",\"\")\n o = o.replace (\"--\",\"\")\n o = o.replace (\"up\",\"\")\n inf = o.lstrip()\n inf1= inf.split(\" \")[0] \n buffer_mgmt0 = \"[\"+inf1+\"] \"\n \n return(buffer_mgmt0) \n \ndef get_script(nexusLogFile = \"/bootflash/logs/buffer-nexus.logx\"):\n \"\"\" Runs the script file on the Nexus Chassis \\n\"\"\"\n \n bufferText=\"show host\"\n \n try: buffer1 = open(nexusLogFile,'r')\n except: return(\"\")\n \n bufferText=buffer1.read()\n buffer1.close()\n \n return(bufferText)\n\ndef get_menu(mline=3):\n \n COL2 = 17\n\n hcolor = curses.color_pair(1)\n hmenu1 = curses.A_BOLD\n \n screen.addstr(mline, 1 , \" Script \",hcolor)\n screen.addstr(mline, 2 , \"S\", hmenu1)\n screen.addstr(mline, 9 , \"0\", hmenu1)\n screen.addstr(mline, 11 , \"1\", hmenu1)\n screen.addstr(mline, 13 , \"2\", hmenu1)\n screen.addstr(mline, 15 , \"m\", hmenu1)\n\n screen.addstr(mline, COL2, \" Queues \",hcolor)\n screen.addstr(mline, COL2+1, \"Q\", hmenu1)\n \n COL2 = COL2+10\n screen.addstr(mline, COL2, \" Log file \",hcolor)\n screen.addstr(mline, COL2+1, \"L\", hmenu1)\n \n COL2 = COL2+10\n screen.addstr(mline, COL2, \" Inter. \",hcolor)\n screen.addstr(mline, COL2+1, \"I\" ,hmenu1)\n \n COL2 = COL2+10\n screen.addstr(mline, COL2, \" Buffer \",hcolor)\n screen.addstr(mline, COL2+1, \"B\", hmenu1)\n\n COL2 = COL2+10\n screen.addstr(mline, COL2, \" Routing \",hcolor)\n screen.addstr(mline, COL2+1, \"R\", hmenu1)\n\n COL2 = COL2+10\n screen.addstr(mline, COL2, \" EXIT \",hcolor)\n screen.addstr(mline, COL2+2, \"X\", hmenu1)\n screen.addstr(mline, COL2+6, \" \",hcolor)\n\n smenu1 = \"qlibrx\"\n screen.refresh()\n \n return (smenu1)\n\ndef get_cli_data(cli_string=\"\", host ='0', skip=0):\n\n data = \"\"\n data = stringNexusCLI(cli_string,host)\n data = Nexus1.stringNexusFormat(data,skip) \n dsp_output_str(data)\n \n return(data)\n \ndef get_mcli_data( cli_string=\"\", host ='0'): \n\n data = \"\"\n data = stringNexusCLI(cli_string,host)\n data = Nexus1.stringNexusFormat(data,0)\n data = data.replace ('\"','\\n')\n data = data.replace (\"\\n \",\"\")\n \n # dsp_output_str(data)\n \n return(data)\n\ndef get_menu_data (xinput=' ', host = HOSTS[0]):\n \n screen.move(2,16) \n bufferText = \"\" \n if xinput == ord(smenu1[0]): get_cli_data ('show platform software qd info global\\n', host)\n \n if xinput == ord(smenu1[1]): # Get Monitor Status\n bufferText = get_script(\"/root/scripts/srun.txt\") \n bufferText = Nexus1.stringNexusFormat (bufferText) \n dsp_output_str(bufferText)\n \n if xinput == ord(smenu1[2]): get_cli_data ('show int brief\\n',host)\n \n if xinput == ord(smenu1[3]): get_cli_data ('BMdata',host,1) \n \n if xinput == ord(smenu1[4]): get_cli_data ('sh ip route vrf management\\n',host,0)\n \n if xinput == ord('s'): # Run Script\n bufferText = get_script('/root/scripts/script.txt') \n bufferm = get_cli_data (bufferText,host,0)\n Nexus1.s_write (\"/root/scripts/srun.txt\", bufferm)\n \n if xinput == ord('0'): host = HOSTS[0] \n if xinput == ord('1'): host = HOSTS[1] \n if xinput == ord('2'): host = HOSTS[2]\n \n if xinput == ord('m'): # Run multi-script\n bufferText = get_script('/root/scripts/mscript.txt') \n \n i = 0; bufferm =\"\"; n = 3\n while (i < n):\n buffer1 = HOSTS[i] + '\\n '+ get_mcli_data (bufferText,HOSTS[i]) \n bufferm = bufferm + 
buffer1\n i = i + 1\n \n dsp_output_str(bufferm)\n Nexus1.s_write (\"/root/scripts/mrun.txt\", bufferm)\n \n return(host)\n\n# Main\n# .. Create Object\n# .. call getNexusData \n#\n\nos.environ['TERM']='xterm-color'\nscreen = curses.initscr()\n\nif (curses.has_colors()):\n curses.start_color()\n curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLUE)\n hcolor = curses.color_pair(1)\nelse:\n hcolor = curses.A_NORMAL\n \n\n\nxinput = 0\nscreen.clear()\nhost = HOSTS[0]\n#buffer_mgmt0 = get_mgmt0_ip(2,host) # Management IP address\n\n\nwhile xinput != ord('x'):\n \n # screen.clear()\n # screen.border(0)\n \n COL1 = 8\n COL2 = 40\n mline = 2\n\n #buffer_mgmt0 = get_mgmt0_ip(2, host) # Management IP address\n\n title_string = \" NexusAccess-0.12m \"\n screen.addstr(1, 1, title_string,curses.A_REVERSE)\n screen.addstr (1, 59 ,host, curses.A_REVERSE)\n smenu1 = get_menu(mline) # Menu \n\n xinput = screen.getch() \n \n host = get_menu_data (xinput, host) \n\ncurses.endwin()\n","sub_path":"NexusAccessx.py","file_name":"NexusAccessx.py","file_ext":"py","file_size_in_byte":8972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"551887008","text":"# Integers, Floats, Strings, Boolean, None\n# More information refer to Udacity CS101\n\n# integers and floats\nnumber = 3\npi = 3.1415926\n\n# cast types\nint(number) == 3\nfloat(pi) == 3.1415926\n\n# Strings\n\"hello\".capitalize() == \"Hello\"\n\"hello\".replace(\"e\", \"a\") == \"hallo\"\n\"hello\".isalpha() == True\n\"123\".isdigit() == True\n\"some, csv, values\".split(\",\") == [\"some\", \"csv\", \"values\"]\n\n# String Format Function\nname = \"PythonBo\"\nmachine = \"HAL\"\nprint(\"Nice to meet you {0}. I am {1}\".format(name, machine))\n\n\n# None is identical to Null in Python\n# could be used as a placeholder value\n\nalens_found = None\n\n\n';'.join(['height', 'width', 'Max'])\n\n'unforgetable'.partition(\"forget\")\n\npos = (65.2, 23.1, 82.2)\n\"Galactic position x={pos[0]} y={pos[1]} z={pos[2]}\".format(pos = pos)\n\nimport math\n\"Mathconstants: pi={m.pi: .3f}, e={m.e: .3f}\".format(m = math)\n","sub_path":"pluralsight-python-getting-started/l3_00_integers_floats_strings_boolean_none.py","file_name":"l3_00_integers_floats_strings_boolean_none.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"590703169","text":"import numpy as np\nfrom torch import nn as nn\n\nfrom agent.model.map_transformations import affine_2d\nfrom agent.model.map_transformations import pose\nfrom agent.model.map_transformations import util\n\nPROFILE = False\nCONCURRENT = False\n\n\nclass MapAffine(nn.Module):\n def __init__(self, source_map_size, dest_map_size, world_size_px, world_size_m):\n super(MapAffine, self).__init__()\n self._is_cuda = False\n self._cuda_device = None\n self._source_map_size_px = source_map_size\n self._dest_map_size_px = dest_map_size\n self._world_in_map_size_px = world_size_px\n self._world_size_m = world_size_m\n\n self._affine_2d = affine_2d.Affine2D()\n\n pos = np.asarray([self._source_map_size_px / 2, self._source_map_size_px / 2])\n rot = np.asarray([0])\n self._canonical_pose_src = pose.Pose(pos, rot)\n\n pos = np.asarray([self._dest_map_size_px / 2, self._dest_map_size_px / 2])\n rot = np.asarray([0])\n self._canonical_pose_dst = pose.Pose(pos, rot)\n\n def cuda(self, device=None):\n nn.Module.cuda(self, device)\n self._is_cuda = True\n self._cuda_device = device\n 
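One line in the types walkthrough above does not hold as written: `"some, csv, values".split(",")` keeps the space after each comma, so the comparison against `["some", "csv", "values"]` is `False`. A quick demonstration, with the usual strip-after-split fix:

```python
raw = "some, csv, values"

# Splitting on the bare comma keeps the leading spaces.
print(raw.split(","))  # ['some', ' csv', ' values']
print(raw.split(",") == ["some", "csv", "values"])  # False

# Stripping each piece gives the intended result.
parts = [piece.strip() for piece in raw.split(",")]
print(parts == ["some", "csv", "values"])  # True
```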
self._affine_2d.cuda(device)\n return self\n\n def get_affine_matrices(self, map_poses, cam_poses, batch_size):\n if map_poses is not None:\n map_poses = map_poses.numpy()\n map_poses_img = util.poses_m_to_px(map_poses,\n self._source_map_size_px,\n [self._world_in_map_size_px, self._world_in_map_size_px],\n self._world_size_m,\n batch_dim=True)\n else:\n map_poses_img = self._canonical_pose_src.repeat_np(batch_size)\n\n if cam_poses is not None:\n cam_poses = cam_poses.numpy()\n cam_poses_img = util.poses_m_to_px(cam_poses,\n self._dest_map_size_px,\n [self._world_in_map_size_px, self._world_in_map_size_px],\n self._world_size_m,\n batch_dim=True)\n else:\n cam_poses_img = self._canonical_pose_dst.repeat_np(batch_size)\n\n # Get the affine transformation matrix to transform the map to the new camera pose\n affines = self.get_old_to_new_pose_matrices(map_poses_img, cam_poses_img)\n\n return affines\n\n def get_old_to_new_pose_matrices(self, old_pose, new_pose):\n old_t_inv = util.poses_2d_to_matrix(old_pose, self._source_map_size_px, inverse=True)\n new_t = util.poses_2d_to_matrix(new_pose, self._dest_map_size_px, inverse=False)\n matrix = np.matmul(new_t, old_t_inv)\n mat_t = util.np_to_tensor(matrix, insert_batch_dim=False, cuda=False)\n return mat_t\n\n def forward(self, maps, current_poses, new_poses):\n batch_size = maps.size(0)\n affine_matrices_cpu = self.get_affine_matrices(current_poses, new_poses, batch_size)\n\n # Apply the affine transformation on the map\n # The affine matrices should be on CPU (if not, they'll be copied to CPU anyway!)\n maps_out = self._affine_2d(maps, affine_matrices_cpu, out_size=[self._dest_map_size_px, self._dest_map_size_px])\n\n return maps_out\n","sub_path":"agent/model/map_transformations/map_affine.py","file_name":"map_affine.py","file_ext":"py","file_size_in_byte":3422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"349566922","text":"from django.conf.urls import url, include\r\nfrom . 
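`get_old_to_new_pose_matrices` above composes two transforms: undo the old pose, then apply the new one (`new_t @ inv(old_t)`). A standalone sketch of that composition with plain 3x3 homogeneous matrices; the `pose_matrix` helper and the example poses are illustrative, not the module's `Pose` type:

```python
import numpy as np

def pose_matrix(x, y, theta):
    """3x3 homogeneous transform for a 2D pose (rotation + translation)."""
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s, x],
                     [s,  c, y],
                     [0., 0., 1.]])

old_t = pose_matrix(2.0, 1.0, 0.0)
new_t = pose_matrix(4.0, 3.0, np.pi / 2)

# Map coordinates expressed in the old pose's frame into the new pose's
# frame: invert the old transform, then apply the new one.
old_to_new = new_t @ np.linalg.inv(old_t)
point = np.array([1.0, 0.0, 1.0])  # homogeneous 2D point
print(old_to_new @ point)
```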
import views\r\n\r\nurlpatterns = [\r\n\r\n # Site Nav\r\n\r\n url(r'^$', views.post_list, name='index'),\r\n url(r'^blog/', views.post_list),\r\n url(r'^post/(?P\\d+)/$', views.post_detail, name='post_detail'),\r\n url(r'^post/new/$', views.post_new, name='post_new'),\r\n url(r'^post/(?P\\d+)/edit/$', views.post_edit, name='post_edit'),\r\n url(r'^drafts/$', views.post_draft_list, name='post_draft_list'),\r\n url(r'^categories/add/$', views.add_categories, name='add_categories'),\r\n url(r'^post/(?P\\d+)/publish/$', views.post_publish, name='post_publish'),\r\n url(r'^post/(?P\\d+)/remove/$', views.post_remove, name='post_remove'),\r\n url(r'^post/(?P\\d+)/comment/$', views.add_comment_to_post, name='add_comment_to_post'),\r\n url(r'^post/remove-all/$', views.post_remove_all, name='post_remove_all'),\r\n url(r'^post/category/(?P\\d+)/$', views.cat_post, name='cat_post'),\r\n url(r'^comment/(?P\\d+)/approve/$', views.comment_approve, name='comment_approve'),\r\n url(r'^comment/(?P\\d+)/approve/main/$', views.comment_approve_main, name='comment_approve_main'),\r\n url(r'^comment/(?P\\d+)/remove/$', views.comment_remove, name='comment_remove'),\r\n url(r'^comment/(?P\\d+)/remove/main/$', views.comment_remove_main, name='comment_remove_main'),\r\n url(r'^comment/drafts/$', views.comment_draft_list, name='comment_draft_list'),\r\n url(r'^user/profile/$', views.user_profile, name='user_profile'),\r\n url(r'^user/profile/(?P\\d+)/edit/$', views.profile_update, name='update_profile'),\r\n\r\n\r\n # User Auth\r\n\r\n url(r'^login/', 'django.contrib.auth.views.login',\r\n name='login',\r\n kwargs={'template_name': 'blog/login.html'}),\r\n\r\n url(r'^logout/', 'django.contrib.auth.views.logout',\r\n name='logout',\r\n kwargs={'template_name': 'blog/index.html'}),\r\n\r\n\r\n # User registration\r\n\r\n url(r'^register/', 'blog.views.register_user', name='register'),\r\n]\r\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"154079118","text":"import sys\nT,F=map(int,input().split())\n\ndef get(x):\n print(x)\n sys.stdout.flush()\n inp=str(input())\n assert inp!='N'\n return inp\ndef get_pos(x,y):\n return get(x*5+y+1)\n\naux=[23,5,1,0]\nfor cas in range(T):\n ans=\"\"\n candidate=range(119)\n letters='ABCDE'\n for word_pos in range(4):\n record={'A':[],'B':[],'C':[],'D':[],'E':[]}\n \n for i in candidate: # 119 23 5 1\n record[get_pos(i,word_pos)].append(i)\n \n for e in letters:\n if len(record[e]) == aux[word_pos]:\n letters=letters.replace(e,'')\n break\n ans+=e\n candidate=[]\n for i in record[e]:\n candidate.append(i)\n ans+=letters\n get(ans)","sub_path":"algo/codejam/2019/1c/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"341223920","text":"import pygame, sys\nfrom random import randint\n\nfrom players.gracz1 import gracz1\nfrom players.gracz2 import gracz2\nfrom players.bot import bot\n\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nGREEN = (129, 187, 129)\nGRAY = (225, 225, 225)\n\nWIDTH = 720\nHEIGHT = 488\n\nclass Game(object):\n\n def __init__(self):\n self.max_fps = 60.0\n pygame.init()\n font = pygame.font.Font('ostrich-regular.ttf', 32)\n\n #SCREEN\n self.screen = pygame.display.set_mode((WIDTH, HEIGHT))\n pygame.display.set_caption('The Foxyball Game')\n self.background = 
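The routes above depend on named capture groups such as `(?P<pk>\d+)`, which Django passes to the view as keyword arguments. The matching behaviour can be sanity-checked with plain `re` before wiring a pattern to a view (a sketch of the regex mechanics, not the Django resolver itself):

```python
import re

pattern = re.compile(r'^post/(?P<pk>\d+)/edit/$')

match = pattern.match('post/42/edit/')
if match:
    # Django would call the view as post_edit(request, pk='42').
    print(match.groupdict())  # {'pk': '42'}
```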
pygame.transform.scale(pygame.image.load('textures/background.png'), (WIDTH, HEIGHT))\n\n        #INITIAL POSITION OF PLAYER 1:\n        self.x_gracz1 = 0\n        self.y_gracz1 = 0\n\n        #INITIAL POSITION OF PLAYER 2:\n        self.x_gracz2 = 200\n        self.y_gracz2 = 0\n\n        #MAIN LOOP\n        self.clock = pygame.time.Clock()\n        self.czas = 0.0\n        while True:\n            for event in pygame.event.get():\n                if event.type == pygame.QUIT:\n                    pygame.quit()\n                    sys.exit()\n                elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:\n                    pygame.quit()\n                    sys.exit()\n            self.czas += self.clock.tick()/1000.0\n            \n            while self.czas > 1/self.max_fps:\n                self.czas -= 1/self.max_fps\n\n                #PLAYER MOVEMENT:\n                self.poruszanie_gracz1()\n                self.poruszanie_gracz2()\n\n            #DISPLAY CURRENT OBJECT POSITIONS:\n            text = font.render(\"PLAYER 1: x:%d y:%d    PLAYER 2: x:%d y:%d\" % (self.x_gracz1, self.y_gracz1, self.x_gracz2, self.y_gracz2), True, WHITE, BLACK)\n            textRect = text.get_rect()\n            textRect.center = (WIDTH/2, HEIGHT/2)\n\n            #DISPLAY OBJECTS:\n            self.screen.blit(self.background, (0, 0))\n            self.screen.blit(text, textRect)\n            self.rysuj()\n            pygame.display.flip()\n\n    def poruszanie_gracz1(self):\n        klawiatura = pygame.key.get_pressed()\n        if klawiatura[pygame.K_d]:\n            self.x_gracz1 += 10\n        elif klawiatura[pygame.K_a]:\n            self.x_gracz1 -= 10\n        elif klawiatura[pygame.K_w]:\n            self.y_gracz1 -= 10\n        elif klawiatura[pygame.K_s]:\n            self.y_gracz1 += 10\n\n    def poruszanie_gracz2(self):\n        klawiatura = pygame.key.get_pressed()\n        if klawiatura[pygame.K_l]:\n            self.x_gracz2 += 10\n        elif klawiatura[pygame.K_j]:\n            self.x_gracz2 -= 10\n        elif klawiatura[pygame.K_i]:\n            self.y_gracz2 -= 10\n        elif klawiatura[pygame.K_k]:\n            self.y_gracz2 += 10\n\n    def rysuj(self):\n        self.gracz1 = pygame.Rect(self.x_gracz1, self.y_gracz1, 50, 50)\n        pygame.draw.rect(self.screen, BLACK, self.gracz1)\n        self.gracz2 = pygame.Rect(self.x_gracz2, self.y_gracz2, 50, 50)\n        pygame.draw.rect(self.screen, WHITE, self.gracz2)\n\nGame()\n","sub_path":"the_foxyball_game/versions/1.0/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"593516135","text":"\"\"\"Nodes tests\"\"\"\nfrom .nodes import get_nodes_from_stellarbeat, get_nodes_by_public_key, \\\n    get_node_dependencies\n\ndef test_stellarbeat_nodes():\n    \"\"\"Test get_nodes_from_stellarbeat()\"\"\"\n    nodes = get_nodes_from_stellarbeat()\n    assert isinstance(nodes, list)\n    for node in nodes:\n        assert isinstance(node, dict)\n        assert 'publicKey' in node\n\ndef test_node_dependencies():\n    \"\"\"Test get_node_dependencies()\"\"\"\n    nodes = [\n        {\n            'publicKey': 'A',\n            'quorumSet': {'threshold': 2, 'validators': ['A', 'B'], 'innerQuorumSets': []}\n        },\n        {\n            'publicKey': 'B',\n            'quorumSet': {\n                'threshold': 2,\n                'validators': ['B'],\n                'innerQuorumSets':[\n                    {'threshold': 1, 'validators': ['A', 'C'], 'innerQuorumSets': []}\n                ]\n            }\n        },\n        {\n            'publicKey': 'C',\n            'quorumSet': {'threshold': 2, 'validators': ['A', 'C'], 'innerQuorumSets': []}\n        },\n        {\n            'publicKey': 'D',\n            'quorumSet': {'threshold': 2, 'validators': ['A', 'B', 'C', 'D'], 'innerQuorumSets': []}\n        },\n    ]\n    nodes_by_public_key = get_nodes_by_public_key(nodes)\n    dependencies_a = get_node_dependencies(nodes_by_public_key, 'A')\n    assert frozenset(dependencies_a) == frozenset(['A', 'B', 'C'])\n    dependencies_d = get_node_dependencies(nodes_by_public_key, 'D')\n    assert frozenset(dependencies_d) == frozenset(['A', 'B', 'C', 
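The inner `while self.czas > 1/self.max_fps` loop in the game above is the classic fixed-timestep accumulator: rendering runs every frame, while updates are consumed in fixed slices so game speed stays independent of the frame rate. A distilled, pygame-free sketch of the pattern:

```python
import time

MAX_FPS = 60.0
STEP = 1.0 / MAX_FPS

accumulator = 0.0
previous = time.monotonic()

for _ in range(5):  # a few frames instead of an endless loop
    now = time.monotonic()
    accumulator += now - previous
    previous = now

    # Consume the elapsed time in fixed-size simulation steps.
    while accumulator >= STEP:
        accumulator -= STEP
        # update_world(STEP) would go here
    # render() would go here
    time.sleep(0.01)
```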
'D'])\n","sub_path":"stellarobservatory/nodes_test.py","file_name":"nodes_test.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"47758061","text":"'''\r\nCreated on 14/07/2015\r\n\r\n@author: Mota\r\n'''\r\nimport json\r\nimport re\r\nimport string\r\nimport glob\r\nimport utils.nlp.tokenizer as tokenizer\r\nimport utils.nlp.tfidf as tfidf\r\nimport utils.nlp.stopwords as stopwords\r\n\r\nclass TextPreprocessing(object):\r\n '''\r\n classdocs\r\n \r\n Performs the following preprocessing tasks:\r\n - steming\r\n - stop word removal\r\n - remove punctuation\r\n - lower case\r\n These tasks can be enabled/disabled through the input_preprocessing part of the .yalm config file\r\n It also generates the whitelist.txt\r\n '''\r\n def __init__(self, input_preprocessing_configs):\r\n self.input_preprocessing_configs = input_preprocessing_configs\r\n self.domain = input_preprocessing_configs['domain']\r\n self.n = input_preprocessing_configs['n']\r\n self.customSWFile = input_preprocessing_configs['custom_stop_words']\r\n self.raw_text = self.domain + \"/data/rawtext/\"\r\n self.white_list = self.domain + '/data/whitelist.txt'\r\n \r\n if(self.input_preprocessing_configs['expand_contractions']):\r\n with open(input_preprocessing_configs['contractions_dic_file']) as contractions_dic_file:\r\n self.contractions_dic = json.load(contractions_dic_file)\r\n \r\n def stem_input(self, docStr):\r\n stemDoc = tokenizer.tokenize(docStr)\r\n return \" \".join(stemDoc)\r\n \r\n def remove_stop_words(self, docStr):\r\n no_sw_doc = stopwords.removeStopWords(docStr, self.input_preprocessing_configs['custom_stop_words'])\r\n return no_sw_doc\r\n \r\n def remove_punctuation(self, docStr):\r\n return docStr.encode('utf-8').translate(None, string.punctuation+'0123456789')\r\n \r\n def lower_case(self, docStr):\r\n return docStr.lower()\r\n \r\n def expand(self, docStr):\r\n contractions_re = re.compile('(%s)' % '|'.join(self.contractions_dic.keys()))\r\n def replace(match):\r\n return self.contractions_dic[match.group(0)]\r\n \r\n return contractions_re.sub(replace, docStr)\r\n \r\n '''\r\n Generated the list of words that can appear on a metro station (whitelist)\r\n '''\r\n def mkwhitelist(self, outPath):\r\n keywords = tfidf.getkeywords(self.raw_text, self.n, self.customSWFile)\r\n with open(outPath, 'w') as file:\r\n for keyword in keywords:\r\n file.write(\"{}\\n\".format(keyword))\r\n \r\n def run(self):\r\n for doc in glob.glob(self.raw_text + \"/*\"):\r\n doc = doc.replace(\"\\\\\", \"/\")\r\n doc_name = doc.split('/')[-1]\r\n with open (doc, \"r+\") as docFile, open(self.input_preprocessing_configs['domain'] + '/data/swtext/' + doc_name, \"w\") as docWithSW:\r\n docStr = docFile.read()\r\n docStr = filter(lambda x: x in string.printable, docStr)\r\n \r\n if(self.input_preprocessing_configs['expand_contractions']):\r\n docStr = self.expand(docStr)\r\n \r\n if(self.input_preprocessing_configs['lower_case']):\r\n docStr = self.lower_case(docStr)\r\n \r\n if(self.input_preprocessing_configs['remove_punctuation']):\r\n docStr = self.remove_punctuation(docStr)\r\n \r\n if(self.input_preprocessing_configs['steming']):\r\n tokenizer.stemTokens()\r\n docStr = self.stem_input(docStr)\r\n \r\n '''\r\n Major Hack: generating metro stations with RAKE requires the text with stop words,\r\n since the multi-word candidate generation is based on them.\r\n '''\r\n docWithSW.write(docStr)\r\n \r\n 
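The tests above pin down what `get_node_dependencies` must return (the transitive closure over validators, including nested quorum sets) without showing its body. A plausible reading, sketched as a graph traversal; the real implementation in `.nodes` may differ:

```python
def get_node_dependencies_sketch(nodes_by_public_key, public_key):
    """Collect every node reachable through quorum-set validators."""
    def validators_of(quorum_set):
        keys = list(quorum_set.get('validators', []))
        for inner in quorum_set.get('innerQuorumSets', []):
            keys.extend(validators_of(inner))
        return keys

    seen = set()
    frontier = [public_key]
    while frontier:
        key = frontier.pop()
        if key in seen:
            continue
        seen.add(key)
        frontier.extend(validators_of(nodes_by_public_key[key]['quorumSet']))
    return seen

demo = {'A': {'quorumSet': {'validators': ['B'], 'innerQuorumSets': []}},
        'B': {'quorumSet': {'validators': [], 'innerQuorumSets': []}}}
print(get_node_dependencies_sketch(demo, 'A'))  # {'A', 'B'}
```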
if(self.input_preprocessing_configs['remove_stopwords']):\r\n docStr = self.remove_stop_words(docStr)\r\n \r\n docFile.seek(0)\r\n docFile.write(docStr)\r\n docFile.truncate()\r\n \r\n #printing stem mapping\r\n tokenizer.writeStemMap(\"./resources/debug/stemMap.txt\")\r\n \r\n if(self.input_preprocessing_configs['gen_whitelist']):\r\n self.mkwhitelist(self.white_list)\r\n ","sub_path":"mm/inputhelpers/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":4301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"546737777","text":"import numpy as np\nfrom collections import OrderedDict\n\n\ndef read_header_database(fname):\n header = ''\n with open(fname, 'r') as f:\n for line in f:\n if line[0] == '#':\n header += line\n else:\n break\n return header\n\n\nclass VarRange:\n def __init__(self, rng):\n self.min = rng[0]\n self.max = rng[1]\n self.step = rng[2]\n self.lvls = rng[3]\n\n self._array = None\n\n @property\n def array(self):\n if self._array is None:\n if self.step > 0:\n self._array = np.arange(self.min, self.max, self.step)\n else:\n self._array = np.linspace(self.min, self.max, self.lvls)\n return self._array\n\n\ndef parse_range(rng):\n try:\n vmin, vmax, vstep = rng.strip().split(':')\n except:\n raise Exception('Cannot parse {}'.format(rng))\n else:\n vmin, vmax, vstep = float(vmin), float(vmax), complex(vstep)\n vlvls = int(vstep.imag)\n vstep = int(vstep.real)\n\n return [vmin, vmax, vstep, vlvls]\n\n\ndef parse_header_database(header):\n # composition range\n Trange = []\n crange = OrderedDict()\n\n for line in header.strip().split('\\n'):\n line = line.strip('# ')\n try:\n el, rng = line.split()\n rng = parse_range(rng)\n except:\n print('Cannot parse {}'.format(line))\n else:\n if el == 'T':\n Trange = VarRange(rng)\n else:\n crange[el] = VarRange(rng)\n\n return Trange, crange\n","sub_path":"tests_regression/new_sklearn_tests/parse_database.py","file_name":"parse_database.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"612168788","text":"\r\n\r\nfrom PIL import Image\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as mp\r\n\r\nband1=Image.open('band1.gif')\r\nband2=Image.open('band2.gif')\r\nband3=Image.open('band3.gif')\r\nband4=Image.open('band4.gif')\r\n\r\nband1=np.asarray(band1)\r\nband2=np.asarray(band2)\r\nband3=np.asarray(band3)\r\nband4=np.asarray(band4)\r\nTrain=np.zeros([150,5])\r\n\r\n\r\ndata= np.vstack((band1.flatten(),band2.flatten(), band3.flatten(),band4.flatten())).reshape((4,512*512))\r\ndata= data.astype(np.float64)\r\n\r\ndata=data.transpose()\r\n\r\nk=3\r\ncodebook = np.zeros((k,4))\r\nidx = [0,3,4]\r\ncodebook = data[idx,:]\r\n\r\n\r\nval=5\r\nD0=0\r\nk0 =0\r\ntemp=0\r\ntc = np.array(np.zeros(data.shape[0]))\r\nwhile val>0:\r\n for i in range(data.shape[0]):\r\n mindist = 999999\r\n tc[i] = 0\r\n for j in range(codebook.shape[0]):\r\n dist = np.linalg.norm(np.array(data[i,:]) - np.array(codebook[j,:]))\r\n if(mindist>dist):\r\n mindist = dist\r\n tc[i] = j\r\n for i in range(k):\r\n \r\n codebook[i,:] = np.mean(data[tc==i,:],axis=0)\r\n print(val)\r\n val=val-1\r\ntc = tc.reshape((512,512))\r\n\r\nmp.matshow(tc)\r\n","sub_path":"kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"494777830","text":"import 
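The assignment step in the k-means script above visits every pixel and every centroid in Python loops; NumPy broadcasting collapses the same step into a few array operations. A sketch with small random inputs in place of the four image bands:

```python
import numpy as np

data = np.random.rand(1000, 4)   # pixels x bands
codebook = data[[0, 3, 4], :]    # k = 3 initial centroids, as in the script

# Distance of every point to every centroid: an (n, k) matrix.
dists = np.linalg.norm(data[:, None, :] - codebook[None, :, :], axis=2)
labels = dists.argmin(axis=1)

# Update step: mean of each cluster's members.
codebook = np.array([data[labels == j].mean(axis=0) for j in range(3)])
print(labels.shape, codebook.shape)  # (1000,) (3, 4)
```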
math\nimport random\n\nclass TreeNode():\n\n # TODO: wow\n # def __init__(self, board, parent):\n def __init__(self, board, parent, cell):\n self.board = board\n # the depth of the game is 64 moves in total\n self.is_terminal = (self.board.round > 64)\n self.is_fully_expanded = self.is_terminal\n self.parent = parent\n # TODO: wow\n self.cell = cell\n self.visits = 0\n self.score = 0\n self.children = {}\n\nclass MCTS():\n \n # search for the best move in the current position\n def search(self, initial_state):\n\n # if initial_state.round == 1:\n # the_node = TreeNode(initial_state.move(15), None, 15)\n # print(15)\n # return the_node\n\n # if initial_state.round == 64:\n # the_node = TreeNode(initial_state.move(11), None, 11)\n # print(11)\n # return the_node\n\n self.root = TreeNode(initial_state, None, None)\n\n # TODO: modify iteration number (the larger, the better, but slower)\n for iteration in range(11):\n node = self.select(self.root) # select a node (selection phase)\n score = self.rollout(node.board) # score current node (simulation phase)\n self.backpropagate(node, score)\n \n return self.get_best_move(self.root, 0)\n\n\n # select most promising node\n def select(self, node):\n while not node.is_terminal:\n if node.is_fully_expanded:\n node = self.get_best_move(node, 2)\n else:\n node = self.expand(node)\n return node\n\n\n def expand(self, node):\n states = node.board.generate_states()\n # TODO: wow\n # for state in states:\n for cell, state in states:\n # make sure that the current state is not present in child nodes\n if str(state.game_board) not in node.children:\n # TODO: wow\n # new_node = TreeNode(state, node)\n new_node = TreeNode(state, node, cell)\n node.children[str(state.game_board)] = new_node\n # when node is fully expanded\n if len(states) == len(node.children):\n node.is_fully_expanded = True\n return new_node\n\n\n # simulate the game via random moves until reaching the end of the game\n def rollout(self, board):\n line_home_previous = 0\n line_away_previous = 0\n while board.round < 64:\n # board = random.choice(board.generate_states())\n r = random.randint(0, len(board.generate_states())-1)\n # TODO: wow\n # board = board.generate_states()[r]\n cell, board = board.generate_states()[r]\n # board.print_game_board()\n board.count_point()\n\n if board.line_home > line_home_previous:\n new_line_number = board.line_home - line_home_previous\n while new_line_number > 0:\n board.line_order.append(1)\n new_line_number -= 1\n line_home_previous = board.line_home\n elif board.line_away > line_away_previous:\n new_line_number = board.line_away - line_away_previous\n while new_line_number > 0:\n board.line_order.append(-1)\n new_line_number -= 1\n line_away_previous = board.line_away\n\n # print(board.line_home, board.line_away)\n # print(board.score_home, board.score_away)\n # print('==============================')\n # print(board.line_home, board.line_away)\n # print(board.score_home, board.score_away)\n # print(board.line_order)\n # TODO:\n if board.current_player == 1:\n return board.score_home - board.score_away\n elif board.current_player == -1:\n return board.score_away - board.score_home\n\n\n # backpropagate the number of visits and score up to the root node\n def backpropagate(self, node, score):\n # update nodes up to root node\n while node is not None:\n node.visits += 1\n node.score += score\n node = node.parent\n\n\n\n # select the best node based on UCB1 formula\n def get_best_move(self, node, exploration_constant):\n # TODO: 可能會有 bug,畢竟有正反方!\n best_score 
= float('-inf')\n        best_moves = []\n\n        for child_node in node.children.values():\n            # get move score using UCT (Upper Confidence Bounds to Trees) formula\n            # TODO: the line below is commented out for now; it should not be needed\n            # current_player = child_node.board.current_player\n            # TODO: this may be buggy, since there are two opposing sides!\n            # move_score = current_player * child_node.score / child_node.visits + \\\n            #              exploration_constant * math.sqrt(math.log(node.visits) / child_node.visits)\n            move_score = child_node.score / child_node.visits + \\\n                         exploration_constant * math.sqrt(math.log(node.visits) / child_node.visits)\n\n            # better move has been found\n            if move_score > best_score:\n                # print(child_node)\n                best_score = move_score\n                best_moves = [child_node]\n            # found as good move as already available\n            elif move_score == best_score:\n                best_moves.append(child_node)\n\n        # return one of the best moves randomly\n        # TODO: not sure it's really random or not\n        # the_node = random.choice(best_moves)\n        length = len(best_moves)\n        if length > 1:\n            r = random.randint(0, length-1)\n        else:\n            r = 0\n        the_node = best_moves[r]\n        print(the_node.cell)\n        return the_node","sub_path":"mcts.py","file_name":"mcts.py","file_ext":"py","file_size_in_byte":5138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"373200707","text":"import socket\nimport time\nimport select\n\n#IP = '172.30.18.90'\nIP = \"localhost\"\nPORT = 8210\n\nserver_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver_socket.bind((IP, PORT))\nserver_socket.listen(10)\nprint(\"Server Run!\")\n\nsocket_list = [server_socket]\nclients = {} ; clients_name = {}\ni = 0 ; j = 0 ; k = 0 ; host = [] ; gest = []\n\nwhile True:\n    read_socket, write_socket, exception_socket = select.select(\n        socket_list, [], socket_list)\n    for s in read_socket:\n        if s == server_socket:\n            client_socket, address = server_socket.accept()\n            if client_socket:\n                client_socket.send(bytes(\"welcome!\", 'utf-8'))\n                socket_list.append(client_socket)\n                user = address[0]\n                clients[client_socket] = user\n                print(\"Connection Established from {}\".format(address))\n                for client_sockets in clients:\n                    if client_sockets != client_socket:\n                        k = 1\n                        client_sockets.send(\n                            bytes(\"{} joined Group!\".format(address), 'utf-8'))\n\n        else:\n            print(clients_name)\n            if i == 0:\n                if j == 0:\n                    message = s.recv(1024)\n                    encode_message = message.decode('utf-8')\n                    host.append(message.decode('utf-8'))\n                    clients_name[s] = encode_message\n                    s.send(bytes(\"\\nPlease wait...\\n\", \"utf-8\"))\n                    j = 1\n\n                elif j == 1:\n                    message = s.recv(1024)\n                    encode_message = message.decode('utf-8')\n                    gest.append(message.decode('utf-8'))\n                    gest.append(host[0])\n                    host.pop()\n                    j = 2\n\n            if k == 1:\n                message = s.recv(1024)\n                encode_message = message.decode('utf-8')\n                if encode_message in gest:\n                    socket_list[1].send(bytes(\"\\nYour friend is online\\n\", \"utf-8\"))\n                    s.send(bytes(\"\\nYou are in private chat\\n\", \"utf-8\"))\n                    i = 2\n                else:\n                    socket_list.remove(s)\n                    del clients[s]\n\n            if i == 2:\n                message = s.recv(1024)\n                if not message:\n                    socket_list.remove(s)\n                    del clients[s]\n                    continue\n                for client_socket in clients:\n                    if client_socket != s:\n                        client_socket.send(message)\n    for s in exception_socket:\n        socket_list.remove(s)\n        del clients[s]\n    # print(socket_list)\n# server_socket.close()\n","sub_path":"chatRoom/privateServerChat.py","file_name":"privateServerChat.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
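The UCT statistic computed inline in `get_best_move` above can be isolated into a small helper, which makes the exploitation and exploration terms explicit (a sketch of the standard UCB1 form, matching the corrected formula above):

```python
import math

def uct_score(child_score, child_visits, parent_visits, c=2):
    """Average reward plus an exploration bonus that shrinks as the
    child is visited more often."""
    exploitation = child_score / child_visits
    exploration = c * math.sqrt(math.log(parent_visits) / child_visits)
    return exploitation + exploration

print(uct_score(child_score=5, child_visits=10, parent_visits=50))
```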
+{"seq_id":"130169811","text":"import math\n\n## task 1\nzmienna1 = 3.14\nzmienna2 = 3\nprint(zmienna1)\nprint(zmienna2)\n\n## task 2\na = int(input(\"Enter number a: \"))\nb = int(input(\"Enter number b: \"))\nsuma = a + b\nroznica = a - b\niloczyn = a * b\nif b == 0: iloraz = modulo = \"Division by zero\"\nelse: iloraz = a / b; modulo = a % b\npotega = a ** b\nprint(f'Addition: {suma}')\nprint(f'Subtraction: {roznica}')\nprint(f'Multiplication: {iloczyn}')\nprint(f'Division: {iloraz}')\nprint(f'Remainder of a divided by b: {modulo}')\nprint(f'Power: {potega}')\n\n## task 3\na += 1\nprint(a)\na -= 1\nprint(a)\na *= 2\nprint(a)\na /= 2\nprint(a)\na **= 2\nprint(a)\na %= 2\nprint(a)\n\n## task 4\nprint(math.e ** 10)\nprint((math.log(5+(math.sin(8)**2)))**1/6)\nprint(math.fabs(3.55))\nprint(math.fabs(4.80))\n\n## task 5\nimie = \"BARTOSZ\"\nnazwisko = \"BOCHOMULSKI\"\nimie = imie.capitalize()\nnazwisko = nazwisko.capitalize()\nprint(f\"{imie} {nazwisko}\")\n\n## task 6\nsong = \"la la la la la la la\"\nla = song.count(\"la\")\nprint(f\"Number of 'la' repetitions: {la}\")\n\n## task 7\nlancuch = \"lancuch znakow\"\nprint(f\"Second character: {lancuch[1]}\\nLast character: {lancuch[len(lancuch)-1]}\")\n\n## task 8\nprint(song.split())\n\n## task 9\nzmienna1 = \"napis\"\nzmienna2 = 3.14145234\nzmienna3 = hex(12)\nprint(zmienna1)\nprint(zmienna2)\nprint(zmienna3)\n\n## task 10\nfilmy = [\"Wladca Pierscieni\",\"Harry Potter\", \"Interstellar\", \"Incepcja\"]\nfilmy.sort()\nprint(\"List of movies: \", filmy)\n\n## task 11\nsinus = [0,math.sin(math.pi/6),math.sin(math.pi/4),math.sin(math.pi/3),1]\ncosinus = [1,math.cos(math.pi/6),math.cos(math.pi/4),math.cos(math.pi/3),0]\ntangens = [0,math.tan(math.pi/6),1,math.tan(math.pi/3),\"does not exist\"]\ncotangens = [\"does not exist\",1/(math.tan(math.pi/6)),1,1/(math.tan(math.pi/3)),0]\nprint(\"sin = \",sinus)\nprint(\"cos = \",cosinus)\nprint(\"tg = \",tangens)\nprint(\"ctg = \",cotangens)\n\n## task 12\nzdanie = \"Ala ma kota a kot ma Ale\"\nzdanie = zdanie.split()\nprint(zdanie)\n\n## task 13\nslownik = {\n    'ksywka1' : 'Rafal',\n    'ksywka2' : 'Kacper',\n    'ksywka3' : 'Michal',\n    'ksywka4' : 'Bartek'\n}\nprint(slownik['ksywka3'])\nprint(slownik['ksywka4'])\n\n## task 15\nrzymskie = {\n    'I' : 1,\n    'II' : 2,\n    'III' : 3,\n    'IV' : 4,\n    'V' : 5,\n    'VI' : 6,\n    'VII' : 7,\n    'VIII' : 8,\n    'IX' : 9,\n    'X' : 10\n}\nindex = list(rzymskie.keys())\nvalues = list(rzymskie.values())\nprint(f\"Index: {index[3]}\\nValue: {values[2]}\")\n\n## task 16\ndict_gry = {\n    '000001' : 'Gothic',\n    '000002' : 'Gothic II',\n    '000003' : 'Gothic III',\n    '000004' : 'Arcania'\n}\nprint(len(dict_gry))","sub_path":"zadania1.py","file_name":"zadania1.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"249011610","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Aug 19 11:58:42 2021\r\nscaling a vector using a scalar \r\n@author: Loulou\r\n\"\"\"\r\n# import NumPy and Matplotlib \r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n# Define vector \r\nv = np.array([1,1])\r\n\r\n# define scalar \r\na = 3\r\n\r\n# define vector av as vector v multiplied by a scalar a\r\nav = a * v\r\n\r\n# Plot vector v as blue arrow with red dot at origin(0,0) using Matplotlib\r\nax = plt.axes()\r\n\r\n# plot red dot at origin(0,0)\r\nax.plot(0,0,'or')\r\n\r\n# plot vector v as blue arrow starting at origin 0,0\r\nax.arrow(0,0, *v, color='blue', linewidth=2.5, head_width=0.30, head_length=0.35)\r\n\r\n# plot vector av as dotted vector of 
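The zero guard in task 2 above can also be written with exception handling, and `divmod` returns quotient and remainder in one call. A short sketch:

```python
a, b = 7, 0

try:
    quotient, remainder = divmod(a, b)
except ZeroDivisionError:
    quotient = remainder = "Division by zero"

print(quotient, remainder)
```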
cyan color\r\nax.arrow(0,0, *av, color='cyan', linestyle='dotted', linewidth=2.5, \r\n head_width=0.30, head_length=0.35)\r\n\r\n# format X-axis \r\n# Set limits for plot for X-axis\r\nax.set_xlim(-1,5)\r\n\r\n# set major ticks for x-axis\r\nmajor_xticks = np.arange(-1,5)\r\nax.set_xticks(major_xticks)\r\n\r\n# Set limits for plot for Y-axis\r\nax.set_ylim(-1,5)\r\n\r\n# set major ticks for y-axis\r\nmajor_yticks = np.arange(-1,5)\r\nax.set_yticks(major_yticks)\r\n\r\n# create gridlines for only major tick marks\r\nplt.grid(b=True, which='major')\r\n\r\n# Display final plot\r\nplt.show()\r\n\r\n","sub_path":"linearalgebra-vectors_lab/vectors_lab_mine_scalar_mul.py","file_name":"vectors_lab_mine_scalar_mul.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"454106643","text":"\"\"\"Default configurations for Ralph\"\"\"\n\nfrom enum import Enum\nfrom os import environ\nfrom pathlib import Path\n\nimport yaml\nfrom click import get_app_dir\n\nfrom ralph.exceptions import ConfigurationException\n\n\nclass DatabaseBackends(Enum):\n \"\"\"Enumerate active database backend modules.\n\n Adding an entry to this enum will make it available to the CLI.\n \"\"\"\n\n ES = \"ralph.backends.database.es.ESDatabase\"\n\n\nclass Parsers(Enum):\n \"\"\"Enumerate active parsers modules.\n\n Adding an entry to this enum will make it available to the CLI.\n \"\"\"\n\n GELF = \"ralph.parsers.GELFParser\"\n\n\nclass StorageBackends(Enum):\n \"\"\"Enumerate active storage backend modules.\n\n Adding an entry to this enum will make it available to the CLI.\n \"\"\"\n\n LDP = \"ralph.backends.storage.ldp.LDPStorage\"\n FS = \"ralph.backends.storage.fs.FSStorage\"\n SWIFT = \"ralph.backends.storage.swift.SwiftStorage\"\n\n\ndef load_config(config_file_path):\n \"\"\"Return a dictionary representing Ralph's configuration.\"\"\"\n\n try:\n with open(config_file_path) as config_file:\n return yaml.safe_load(config_file)\n except yaml.scanner.ScannerError as exc:\n raise ConfigurationException(\"Configuration could not be loaded\") from exc\n except FileNotFoundError:\n return None\n\n\ndef config(key, default_value):\n \"\"\"\n Get a value based on its key returning the first of (in order):\n 1. Environment\n 2. Config file\n 3. 
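An alternative to repeated `ax.arrow` calls in the script above is `plt.quiver`, which draws both vectors at once; `angles='xy'` with `scale=1` keeps the arrows in data coordinates instead of auto-scaling them. A sketch with the same `v` and `av`:

```python
import numpy as np
import matplotlib.pyplot as plt

v = np.array([1, 1])
av = 3 * v

ax = plt.axes()
ax.quiver([0, 0], [0, 0], [v[0], av[0]], [v[1], av[1]],
          color=['blue', 'cyan'], angles='xy', scale_units='xy', scale=1)
ax.set_xlim(-1, 5)
ax.set_ylim(-1, 5)
plt.grid(True)
plt.show()
```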
default_value\n \"\"\"\n\n value = environ.get(key, None)\n if value is not None:\n return value\n\n if CONFIG is not None and key in CONFIG:\n return CONFIG[key]\n\n return default_value\n\n\nDEFAULT_LOGGING_CONFIG = {\n \"version\": 1,\n \"propagate\": True,\n \"formatters\": {\n \"ralph\": {\"format\": \"%(asctime)-23s %(levelname)-8s %(name)-8s %(message)s\"},\n },\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"level\": \"INFO\",\n \"stream\": \"ext://sys.stderr\",\n \"formatter\": \"ralph\",\n },\n },\n \"loggers\": {\n \"ralph\": {\n \"handlers\": [\"console\"],\n \"level\": \"INFO\",\n },\n \"swiftclient\": {\n \"handlers\": [\"console\"],\n \"level\": \"ERROR\",\n },\n },\n}\n\nAPP_DIR = Path(environ.get(\"RALPH_APP_DIR\", get_app_dir(\"ralph\")))\nCONFIG_FILE = APP_DIR / \"config.yml\"\nCONFIG = load_config(CONFIG_FILE)\nENVVAR_PREFIX = \"RALPH\"\nDEFAULT_BACKEND_CHUNK_SIZE = config(\"RALPH_DEFAULT_BACKEND_CHUNK_SIZE\", 500)\nFS_STORAGE_DEFAULT_PATH = Path(\n config(\"RALPH_FS_STORAGE_DEFAULT_PATH\", APP_DIR / \"archives\")\n)\nHISTORY_FILE = Path(config(\"RALPH_HISTORY_FILE\", APP_DIR / \"history.json\"))\nLOGGING_CONFIG = config(\"RALPH_LOGGING\", DEFAULT_LOGGING_CONFIG)\nMODEL_PATH_SEPARATOR = \"__\"\nSENTRY_DSN = config(\"RALPH_SENTRY_DSN\", None)\nSWIFT_OS_AUTH_URL = config(\"RALPH_SWIFT_OS_AUTH_URL\", \"https://auth.cloud.ovh.net/\")\nSWIFT_OS_IDENTITY_API_VERSION = config(\"RALPH_SWIFT_OS_IDENTITY_API_VERSION\", \"3\")\nSWIFT_OS_PROJECT_DOMAIN_NAME = config(\"RALPH_SWIFT_OS_PROJECT_DOMAIN_NAME\", \"Default\")\nSWIFT_OS_USER_DOMAIN_NAME = config(\"RALPH_SWIFT_OS_USER_DOMAIN_NAME\", \"Default\")\nCONVERTER_EDX_XAPI_UUID_NAMESPACE = config(\n \"RALPH_CONVERTER_EDX_XAPI_UUID_NAMESPACE\", None\n)\nEXECUTION_ENVIRONMENT = config(\"RALPH_EXECUTION_ENVIRONMENT\", \"development\")\n","sub_path":"src/ralph/defaults.py","file_name":"defaults.py","file_ext":"py","file_size_in_byte":3349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"30085037","text":"import json\nimport os\n\n\nfrom datetime import datetime\n\nfrom google.cloud import storage\n\n\nstorage_client = storage.Client()\n\n# string event_id = 1;\n# google.protobuf.Timestamp event_time = 2;\n# google.protobuf.Timestamp process_time = 3;\n# string resource_id = 4; // identificador inmueble\n# string user_id = 5; // identificador del usuario\n# string country_code = 6; // ISO3166\n# google.protobuf.Duration duration = 7;\n#double item_price = 8;\n\ndef eventos(request):\n request_json = request.get_json(silent=True) \n\n\n if request_json and 'eventId' in request_json \\\n and 'eventTime' in request_json \\\n and 'processTime' in request_json \\\n and 'resourceId' in request_json \\\n and 'userId' in request_json \\\n and 'countryCode' in request_json \\\n and 'duration' in request_json:\n\n payloadAsString = json.dumps(request_json)\n\n eventId = request_json['eventId']\n eventTime = request_json['eventTime']\n processTime = request_json['processTime']\n resourceId = request_json['resourceId']\n userId = request_json['userId']\n countryCode = request_json['countryCode']\n duration = request_json['duration']\n\n bucket_name = os.getenv('BUCKET_NAME')\n bucket = storage_client.bucket(bucket_name)\n\n new_blob = bucket.blob(f'airbnb/event-'+str(eventId)+'.json')\n new_blob.upload_from_string(payloadAsString)\n\n return json.dumps({'externalId': 'airbnb/event-'+str(eventId)+'.json'})\n\n else:\n return 'Error 
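The backend enums above store dotted import paths, so a plausible way for the CLI to turn a configured name into a class is an `importlib` lookup (a sketch of the idea; the actual wiring in Ralph may differ):

```python
import importlib

def import_from_path(dotted_path):
    """Import 'pkg.module.Attr' and return the attribute."""
    module_path, _, attribute = dotted_path.rpartition(".")
    return getattr(importlib.import_module(module_path), attribute)

# e.g. import_from_path(DatabaseBackends.ES.value) would return ESDatabase,
# assuming the ralph package is importable; stdlib example for the demo:
print(import_from_path("collections.OrderedDict"))
```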
500'\n","sub_path":"eventos/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"636826074","text":"from models.models import *\n\nfrom utils import *\n\n\nclass CreateModules():\n \n def __init__(self, data_base: Database):\n self.db = data_base\n\n # create modules:\n # table files\n # data_access file\n\n\n def create_table_files(self):\n tables = self.db.tables\n\n for tbl_name in tables:\n tbl = tables[tbl_name]\n with open(tbl.get_file_name(), 'w') as f:\n f.write(self.get_table_code(tbl))\n \n def update_table_files(self):\n tables = self.db.tables\n\n for tbl_name in tables:\n tbl = tables[tbl_name]\n self.update_table_file(tbl)\n\n\n\n def get_table_code(self, tbl: Table):\n \n # Create Header\n s = ''\n s += 'B4J=true\\n'\n s += 'Group=Models\\n'\n s += 'ModulesStructureVersion=1\\n'\n s += 'Type=Class\\n'\n s += f'Version={self.db.b4j_version}\\n'\n s += '@EndOfDesignText@\\n'\n\n s += f\"'[Table: {tbl.name}]\\n\"\n s += f\"'[Database: {tbl.db_name}]\\n\"\n\n s += 'Sub Class_Globals\\n'\n\n s += \"\\t'\\n\"\n for field in tbl.columns:\n col = tbl.columns[field]\n # format: Public Id As Int '[PrimaryKey, AutoIncrement]\n s += \"\\t\\t{0}\\n\\n\".format(col.to_full_string())\n s += \"\\t'\\n\"\n\n s += \"\\t'\\n\"\n for rel in tbl.relationships:\n tbl1_cname = self.db.get_table_cname(rel.table1)\n tbl2_cname = self.db.get_table_cname(rel.table2)\n\n # s += \"\\t\\t'[{}]\\n\".format(rel.to_string())\n if rel.rel_type == '11':\n s += \"\\t\\tPrivate {}_ As {} '[{}]\\n\\n\".format(\n tbl2_cname, tbl2_cname, rel.to_string())\n elif rel.rel_type == '1N':\n s += \"\\t\\tPrivate {}_ As List '[{}]\\n\\n\".format(\n rel.table2, rel.to_string())\n elif rel.rel_type == 'N1':\n s += \"\\t\\tPrivate {}_ As {} '[{}]\\n\\n\".format(\n tbl2_cname, tbl2_cname, rel.to_string())\n \n s += \"\\t'\\n\"\n\n s += 'End Sub\\n'\n\n s += 'Public Sub Initialize\\n\\n'\n\n s += 'End Sub\\n'\n\n s += '#Region Relationships\\n'\n\n # Create Relationships subs\n for rel in tbl.relationships:\n tbl1_cname = self.db.get_table_cname(rel.table1)\n tbl2_cname = self.db.get_table_cname(rel.table2)\n\n s += \"'{}\\n\".format(rel.to_string())\n\n if rel.rel_type == '11':\n \n s += 'Public Sub get{0} As {1}\\n'.format(tbl2_cname, tbl2_cname)\n s += '\\tIf {0}.IsInitialized Then\\n'.format(tbl2_cname)\n s += '\\t\\tReturn {0}_\\n'.format(tbl2_cname)\n s += '\\tElse\\n'\n s += '\\t\\tDim da As {}DataAccess\\n'.format(self.db.name)\n s += '\\t\\tda.Initialize\\n'\n if rel.field1.upper() == 'ID':\n s += '\\t\\t{0}_ = da.{1}_Where(\"{2}Id = ?\", Array(Id))\\n'.format(tbl2_cname, rel.table2, tbl1_cname)\n else:\n s += '\\t\\t{0}_ = da.{1}_Where(\"Id = ?\", Array({2}Id))\\n'.format(tbl2_cname, rel.table2, tbl2_cname)\n s += '\\t\\tda.Dispose\\n'\n s += '\\t\\tReturn {0}_\\n'.format(tbl2_cname)\n s += '\\tEnd If\\n'\n s += 'End Sub\\n'\n\n elif rel.rel_type == '1N':\n \n s += 'Public Sub get{0} As List\\n'.format(rel.table2)\n s += '\\tIf {0}_.IsInitialized then\\n'.format(rel.table2)\n s += '\\t\\tReturn {0}_\\n'.format(rel.table2)\n s += '\\tElse\\n'\n s += '\\t\\tDim da As {0}DataAccess\\n'.format(self.db.name)\n s += '\\t\\tda.Initialize\\n'\n s += '\\t\\t{0}_ = da.{1}_Where2(\"{2}Id=?\", Array(Id))\\n'.format(rel.table2, rel.table2, tbl1_cname)\n s += '\\t\\tda.Dispose\\n'\n s += '\\t\\tReturn {0}_\\n'.format(rel.table2)\n s += '\\tEnd If\\n'\n s += 'End Sub\\n'\n\n elif rel.rel_type == 'N1':\n \n s 
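The function above requires all seven fields named in the protobuf-style comment block and writes the payload to `airbnb/event-<id>.json`. A sketch of a conforming payload and the HTTP call a client might make; the endpoint URL is hypothetical and depends on how the Cloud Function is deployed:

```python
import requests  # assumes the requests package is available

payload = {
    "eventId": "42",
    "eventTime": "2021-06-01T12:00:00Z",
    "processTime": "2021-06-01T12:00:05Z",
    "resourceId": "property-123",
    "userId": "user-9",
    "countryCode": "AR",  # ISO3166
    "duration": 37,
}

# Hypothetical HTTP trigger URL.
response = requests.post(
    "https://REGION-PROJECT.cloudfunctions.net/eventos", json=payload)
print(response.text)  # {"externalId": "airbnb/event-42.json"} on success
```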
+= 'Public Sub get{0} As {1}\\n'.format(tbl2_cname, tbl2_cname)\n s += '\\tIf {0}_.IsInitialized then\\n'.format(tbl2_cname)\n s += '\\t\\tReturn {0}_\\n'.format(tbl2_cname)\n s += '\\tElse\\n'\n s += '\\t\\tDim da As {0}DataAccess\\n'.format(self.db.name)\n s += '\\t\\tda.Initialize\\n'\n s += '\\t\\t{0}_ = da.{1}_Where(\"Id=?\", Array({2}Id))\\n'.format(tbl2_cname, rel.table2, tbl2_cname)\n s += '\\t\\tda.Dispose\\n'\n s += '\\t\\tReturn {0}_\\n'.format(tbl2_cname)\n s += '\\tEnd If\\n'\n s += 'End Sub\\n'\n\n s += '#End Region\\n\\n'\n\n return s\n\n\n\n\n def update_table_file(self, tbl: Table):\n \n f_obj = open(tbl.get_file_name())\n\n f_code = f_obj.readlines()\n # old code with all special characters\n f_old_code = list(f_code)\n\n # remove all special characters in file\n f_code = [code.strip() for code in f_code]\n\n tbl_code = self.get_table_code(tbl)\n\n # get index of #Region Subs\n try:\n subs_start = f_code.index('#Region Subs')\n if subs_start:\n tbl_code += '\\n'\n for i in range(subs_start, len(f_code)):\n tbl_code += f_old_code[i]\n if f_code[i] == '#End Region':\n break\n tbl_code += '\\n'\n except:\n pass\n finally:\n with open(tbl.get_file_name(), 'w') as f:\n f.write(tbl_code) \n \n\n def create_data_access_file(self):\n \n # HEADER of file\n s = ''\n s += 'B4J=true\\n'\n s += 'Group=Controllers\\n'\n s += 'ModulesStructureVersion=1\\n'\n s += 'Type=Class\\n'\n s += 'Version='+self.db.b4j_version+'\\n'\n s += '@EndOfDesignText@\\n'\n\n s += 'Sub Class_Globals\\n'\n s += '\\tPrivate fx as JFX\\n'\n s += '\\tPrivate SQL1 As SQL\\n'\n s += 'End Sub\\n\\n'\n\n # FUNCTION: Initialize\n s += 'Public Sub initialize\\n'\n s += '\\tSQL1.InitializeSQLite(File.DirApp, \"{}.db\", True)\\n'.format(\n self.db.name)\n s += 'End Sub\\n'\n\n # FUNCTION: Dispose\n s += 'Public Sub Dispose\\n'\n s += '\\tSQL1.Close\\n'\n s += 'End Sub\\n\\n'\n\n # FUNCTION: GetSQL\n s += 'Public Sub getSQL As SQL\\n'\n s += '\\tReturn SQL1\\n'\n s += 'End Sub\\n\\n'\n\n s += '#Region Subs\\n'\n s += '\\t\\'Write here the subs you want to save\\n'\n s += '#End Region\\n\\n'\n # TODO: save #Region subs \n\n # BODY of file\n # get code of all tables\n\n tables = self.db.tables\n for tbl_name in tables:\n s += self.get_data_access_code(tables[tbl_name])\n\n with open(self.db.get_data_access_file_name(), 'w') as f:\n f.write(s)\n\n\n def get_data_access_code(self, tbl: Table):\n\n # Starts with #Region table \n s = ''\n s += '\\n#Region Table <{0}>\\n'.format(tbl.name)\n\n # SUB: CreateTable\n s += '\\nPublic Sub {0}_CreateTable()'.format(tbl.name)\n s += '\\n\\tDim mData As Map'\n s += '\\n\\tmData.Initialize'\n columns = tbl.get_all_columns()\n for field in columns:\n col = columns[field]\n if col.foreign_key != ():\n t, f = col.foreign_key\n s += '\\n\\tmData.Put(\"{0}\", \"{1}{2}{3}{4}{5}\")'.format(col.field, col.get_sql_type(),\n ' PRIMARY KEY' if col.primary_key else '',\n ' ASC AUTOINCREMENT' if col.auto_increment else '',\n ' NOT NULL' if col.not_null else '',\n ' REFERENCES {} ({})'.format(t, f) if col.foreign_key != () else '')\n\n s += '\\n\\tDBUtils.CreateTable(SQL1, \"{0}\", mData, \"\")'.format(tbl.name)\n s += '\\nEnd Sub\\n'\n\n # SUB: Insert\n s += '\\nPublic Sub {0}_Insert(t As {1})'.format(tbl.name, tbl.class_name)\n s += '\\n\\tDim m As Map'\n s += '\\n\\tm.Initialize'\n for field in columns:\n if not columns[field].auto_increment:\n s += '\\n\\tm.Put(\"{0}\", t.{0})'.format(columns[field].field)\n s += '\\n\\tDBUtils.InsertMaps(SQL1, \"{0}\", Array As Object(m))'.format(tbl.name)\n s 
+= '\\nEnd Sub\\n'\n\n # SUB: Insert2\n s += '\\nPublic Sub {0}_Insert2({1})'.format(tbl.name,\n tbl.get_columns_csv())\n s += '\\n\\tDim m As Map'\n s += '\\n\\tm.Initialize'\n for field in columns:\n if not columns[field].auto_increment:\n s += '\\n\\tm.Put(\"{0}\", {0})'.format(columns[field].field)\n s += '\\n\\tDBUtils.InsertMaps(SQL1, \"{0}\", Array As Object(m))'.format(tbl.name)\n s += '\\nEnd Sub'\n s += '\\n'\n\n # SUB: Delete\n s += '\\nPublic Sub {0}_Delete(t As {1})'.format(tbl.name, tbl.class_name)\n s += '\\n\\tDim m As Map = CreateMap(\"Id\": t.Id)'\n s += '\\n\\tDBUtils.DeleteRecord(SQL1, \"{0}\", m)'.format(tbl.name)\n s += '\\nEnd Sub\\n'\n\n # SUB: Update\n s += '\\nPublic Sub {0}_Update(t As {1})'.format(tbl.name, tbl.class_name)\n s += '\\n\\tDim m As Map'\n s += '\\n\\tm.Initialize'\n for field in columns:\n if not columns[field].auto_increment:\n s += '\\n\\tm.Put(\"{0}\", t.{0})'.format(columns[field].field)\n s += '\\n\\tDim WhereFields As Map = CreateMap(\"Id\": t.Id)'\n s += '\\n\\tDBUtils.UpdateRecord2(SQL1, \"{0}\", m, WhereFields)'.format(tbl.name)\n s += '\\nEnd Sub\\n'\n\n # SUB: GetById\n s += '\\nPublic Sub {0}_GetById(Id As Int) As {1}'.format(tbl.name, tbl.class_name)\n s += '\\n\\tReturn {0}_Where(\"Id=?\", Array(Id))'.format(tbl.name)\n s += '\\nEnd Sub\\n'\n\n # SUB: ToList\n s += '\\nPublic Sub {0}_ToList As List'.format(tbl.name)\n s += '\\n\\tDim rs As ResultSet'\n s += '\\n\\tDim lstResult As List'\n s += '\\n\\tlstResult.Initialize'\n s += '\\n\\trs = SQL1.ExecQuery(\"SELECT * FROM {}\")'.format(tbl.name)\n s += '\\n\\tDo While rs.NextRow'\n s += '\\n\\t\\tDim t As {0}'.format(tbl.class_name)\n s += '\\n\\t\\tt.Initialize'\n for field in columns:\n s += '\\n\\t\\tt.{0} = rs.Get{1}(\"{0}\")'.format(field, columns[field].get_b4j_type())\n s += '\\n\\t\\tlstResult.Add(t)'\n s += '\\n\\tLoop'\n s += '\\n\\tReturn lstResult'\n s += '\\nEnd Sub\\n'\n\n # SUB: Where\n s += '\\nPublic Sub {0}_Where(WhereCondition As String, ArgList As List) As {1}'.format(tbl.name, tbl.class_name)\n s += '\\n\\tDim rs As ResultSet'\n s += '\\n\\trs = SQL1.ExecQuery2(\"SELECT * FROM {0} WHERE \" & WhereCondition, ArgList)'.format(tbl.name)\n s += '\\n\\tIf rs.NextRow = False Then Return Null'\n s += '\\n\\tDim t As {0}'.format(tbl.class_name)\n s += '\\n\\tt.Initialize'\n for field in columns:\n s += '\\n\\tt.{0} = rs.Get{1}(\"{0}\")'.format(field, columns[field].get_b4j_type())\n s += '\\n\\tReturn t'\n s += '\\nEnd Sub\\n'\n\n # SUB: Where2\n s += '\\nPublic Sub {0}_Where2(WhereCondition As String, ArgList As List) As List'.format(tbl.name)\n s += '\\n\\tDim rs As ResultSet'\n s += '\\n\\tDim lstResult As List'\n s += '\\n\\tlstResult.Initialize'\n s += '\\n\\trs = SQL1.ExecQuery2(\"SELECT * FROM {0} WHERE \" & WhereCondition, ArgList)'.format(tbl.name)\n s += '\\n\\tDo While rs.NextRow'\n s += '\\n\\t\\tDim t As {0}'.format(tbl.class_name)\n s += '\\n\\t\\tt.Initialize'\n for field in columns:\n s += '\\n\\t\\tt.{0} = rs.Get{1}(\"{0}\")'.format(field, columns[field].get_b4j_type())\n s += '\\n\\t\\tlstResult.Add(t)'\n s += '\\n\\tLoop'\n s += '\\n\\tReturn lstResult'\n s += '\\nEnd Sub\\n'\n\n # SUB: RowCount\n s += '\\nPublic Sub {0}_RowCount() As Int'.format(tbl.name)\n s += '\\n\\tReturn SQL1.ExecQuerySingleResult(\"SELECT Count(*) FROM {0}\")'.format(tbl.name)\n s += '\\nEnd Sub\\n'\n\n # Ends with #End Region\n s += '\\n#End Region'\n\n return 
s\n","sub_path":"sqlm/models/create_modules.py","file_name":"create_modules.py","file_ext":"py","file_size_in_byte":12487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"455844386","text":"#!/usr/bin/env python\n\n\"\"\" This module contains enumerations for Layers 1-4 Protocols. Layers 1 & 3 have addresses associated. This module also contains those typing's and reverse lookups from protocol to address type \"\"\"\n\n__author__ = \"Patrick Berry\"\n\n######################################\nfrom enum import Enum,IntEnum;\nfrom utilities import addReverseEnumLookup;\nfrom mac_address import MacAddress;\nfrom ipaddress import IPv4Address,IPv6Address;\nfrom typing import Union;\n######################################\n\n__all__ = ('Layer1Protocol','Layer2Protocol','Layer3Protocol','Layer4Protocol', \\\n 'lookupLayer1AddressType','Layer1Address', \\\n 'lookupLayer3AddressType','Layer3Address', \\\n);\n\n######################################\nclass Layer1Protocol(Enum):\n \"\"\" Enum representing Layer1 Protocols\n Typically ETHERNET\n \n Source: https://www.iana.org/assignments/arp-parameters/arp-parameters-2.csv \"\"\"\n \n #Aka ArpHardwareType\n RESERVED = 0;\n \n ##########\n Ethernet = 1;\n Ethernet_10Mb = Ethernet;\n ETHERNET = Ethernet;\n ##########\n \n Experimental_Ethernet_3Mb = 2;\n Amateur_Radio_AX_25 = 3;\n Proteon_ProNET_Token_Ring = 4;\n ##########\n Chaos = 5;\n CHAOS = Chaos;\n ##########\n IEEE_802_Networks= 6;\n ARCNET = 7;\n Hyperchannel = 8;\n Lanstar = 9;\n Autonet_Short_Address= 10;\n LocalTalk = 11;\n LocalNet_IBM_PCNet_or_SYTEK_LocalNET= 12;\n Ultra_link = 13;\n SMDS = 14;\n Frame_Relay = 15;\n Asynchronous_Transmission_Mode_ATM= (16, 19, 21);\n HDLC = 17; \n Fibre_Channel = 18;\n Serial_Line = 20;\n MIL_STD_188_220 = 22;\n Metricom = 23;\n IEEE_1394_1995 = 24;\n MAPOS = 25;\n Twinaxial = 26;\n EUI_64 = 27;\n HIPARP = 28;\n IP_and_ARP_over_ISO_7816_3 = 29;\n ARPSec = 30;\n IPsec_tunnel = 31;\n InfiniBand_TM = 32;\n TIA_102_Project_25_Common_Air_Interface_CAI = 33;\n Wiegand_Interface = 34;\n Pure_IP = 35;\n HW_EXP1 = 36;\n HFI = 37;\n HW_EXP2 = 256;\n AEthernet = 257;\n UNASSIGNED = (range(38+1, 255+1), range(258, 65534+1));\n RESERVED_1 = 65535;\n #aliases\n \n #Nonstandard Osi Model Testing\n OsiModel = 38;\n default = RESERVED;\n##\naddReverseEnumLookup(Layer1Protocol);\nLayer1Address = Union[MacAddress];\n_l1p_lut = {Layer1Protocol.ETHERNET :MacAddress, \\\n Layer1Protocol.ETHERNET.value:MacAddress, \\\n Layer1Protocol.OsiModel :MacAddress, \\\n Layer1Protocol.OsiModel.value:MacAddress, \\\n Layer1Protocol.CHAOS :MacAddress, #just for fun \\\n Layer1Protocol.CHAOS.value:MacAddress, #just for fun \\\n };\ndef lookupLayer1AddressType(value:Union[Layer1Protocol,int])->type:\n \"\"\" Translates an int or enum value to a Layer1Address type\n \n >>> expected = MacAddress\n >>> out = lookupLayer1AddressType(0x01);\n >>> (expected==out);\n True\n >>> out = lookupLayer1AddressType(Layer1Protocol.Ethernet);\n >>> (expected==out);\n True\n >>> expected = None;\n >>> out = lookupLayer1AddressType(235)\n >>> (expected==out);\n True\n \"\"\"\n try:\n out = _l1p_lut[value];\n except KeyError:\n out = None;\n ##\n return out;\n##\n\nclass Layer2Protocol(Enum): \n \"\"\" Enumeration that represents Layer2Protocols aka Ethertype\n Typically ARP, IPv4, IPv6\n Source: https://www.iana.org/assignments/ieee-802-numbers/ieee-802-numbers-1.csv \"\"\"\n \n #Aka EtherType\n IEEE802_3_Length_Field = 
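A sketch of how the generator above would be driven; the `Database` constructor and attributes are inferred from what `CreateModules` reads (`tables`, `name`, `b4j_version`, and the file-name helpers) and are not confirmed by the source, so this is illustrative rather than runnable outside the project:

```python
from models.models import Database  # assumed API from models.models

db = Database()    # hypothetical: load or build the schema here
db.name = "MyApp"  # used for the SQLite file and the DataAccess class name

generator = CreateModules(db)
generator.create_table_files()       # one B4J class module per table
generator.create_data_access_file()  # the <name>DataAccess module
# After schema changes, keep hand-written code in '#Region Subs' intact:
generator.update_table_files()
```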
(range(0,256+1),range(514,1023+1),range(1025,1500+1));\n Experimental = range(257, 511+1);\n XEROX_PUP_see_0A00 = 512;\n PUP_Addr_Trans_see_0A01 = 513\n Nixdorf = 1024;\n XEROX_NS_IDP = 1536;\n DLOG = (1632, 1633);\n ############\n IPv4 = 2048;\n Internet_Protocol_version_4_IPv4 = IPv4;\n ############\n X_75_Internet = 2049; \n NBS_Internet = 2050;\n ECMA_Internet = 2051; \n ############\n CHAOS = 2052;\n Chaosnet = CHAOS;\n ############\n X_25_Level_3 = 2053; \n ############\n ARP = 2054;\n Address_Resolution_Protocol_ARP = ARP;\n ############\n XNS_Compatability = 2055;\n Frame_Relay_ARP = 2056; \n #Conflict Symbolics_Private = (2076, range(33031, 33033+1));\n Xyplex = (range(2184, 2186+1), range(33207, 33209+1)); \n Ungermann_Bass_net_debugr = 2304;\n Xerox_IEEE802_3_PUP = 2560; \n PUP_Addr_Trans = 2561; \n Banyan_VINES = 2989; \n VINES_Loopback = 2990; \n VINES_Echo = 2991; \n Berkeley_Trailer_nego = 4096; \n Berkeley_Trailer_encap_IP = range(4097, 4111+1); \n Valid_Systems = 5632; \n TRILL = 8947; \n L2_IS_IS = 8948; \n PCS_Basic_Block_Protocol = 16962; \n BBN_Simnet = 21000; \n DEC_Unassigned_Exp = 24576; \n DEC_MOP_Dump_Load = 24577; \n DEC_MOP_Remote_Console = 24578; \n DEC_DECNET_Phase_IV_Route = 24579;\n DEC_LAT = 24580;\n DEC_Diagnostic_Protocol = 24581; \n DEC_Customer_Protocol = 24582;\n DEC_LAVC_SCA = 24583;\n #Conflict DEC_Unassigned = (range(24584, 24585+1),\\\n # range(32825, 32828+1),\\\n # 32830 ,\\\n # range(32832, 32834+1),\\\n # ); \n _3Com_Corporation = range(24592, 24596+1);\n Trans_Ether_Bridging = 25944;\n Raw_Frame_Relay = 25945;\n Ungermann_Bass_download = 28672; \n Ungermann_Bass_dia_loop = 28674; \n LRT = range(28704, 28713+1); \n Proteon = 28720;\n Cabletron = 28724;\n Cronus_VLN = 32771;\n Cronus_Direct = 32772; \n HP_Probe = 32773; \n Nestar = 32774; \n AT_T = (32776, 32838, 32839, 32873); \n Excelan = 32784; \n SGI_diagnostics = 32787;\n SGI_network_games = 32788; \n SGI_reserved = 32789; \n SGI_bounce_server = 32790; \n Apollo_Domain = 32793;\n Tymshare = 32814;\n Tigan_Inc = 32815;\n ############\n RARP = 32821;\n Reverse_Address_Resolution_Protocol_RARP = RARP;\n ############\n Aeonic_Systems = 32822;\n DEC_LANBridge = 32824;\n DEC_Ethernet_Encryption = 32829;\n DEC_LAN_Traffic_Monitor = 32831;\n Planning_Research_Corp = 32836;\n ExperData = 32841;\n Stanford_V_Kernel_exp = 32859;\n Stanford_V_Kernel_prod = 32860;\n Evans_Sutherland = 32861; Little_Machines = 32864;\n #Conflict Counterpoint_Computers = (32866, range(32897, 32899+1));\n Univ_of_Mass_Amherst = (32869, 32870);\n Veeco_Integrated_Auto = 32871; \n General_Dynamics = 32872;\n Autophon = 32874;\n ComDesign = 32876;\n Computgraphic_Corp = 32877;\n Landmark_Graphics_Corp = range(32878, 32887+1);\n Matra = 32890;\n Dansk_Data_Elektronik = 32891;\n Merit_Internodal = 32892;\n Vitalink_Communications = range(32893, 32895+1);\n Vitalink_TransLAN_III = 32896;\n Appletalk = 32923;\n Datability = (range(32924, 32926+1), range(32996, 33008+1));\n Spider_Systems_Ltd = 32927;\n Nixdorf_Computers = 32931;\n Siemens_Gammasonics_Inc = range(32932, 32947+1);\n DCA_Data_Exchange_Cluster = range(32960, 32963+1);\n Banyan_Systems = (32964, 32965);\n Pacer_Software = 32966; \n Applitek_Corporation = 32967;\n Intergraph_Corporation = range(32968, 32972+1);\n Harris_Corporation = range(32973, 32974+1);\n Taylor_Instrument = range(32975, 32978+1);\n Rosemount_Corporation = range(32979, 32980+1);\n IBM_SNA_Service_on_Ether = 32981;\n Varian_Associates = 32989;\n Integrated_Solutions_TRFS = range(32990, 32991+1);\n 
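    # NOTE: tuple/range values bundle an entry's multiple IANA assignments; range stops use +1 so the listed upper bound stays inclusive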
Allen_Bradley = range(32992, 32995);\n Retix = 33010;\n AppleTalk_AARP_Kinetics = 33011;\n Kinetics = range(33012, 33013);\n Apollo_Computer = 33015; \n #Conflict Wellfleet_Communications = (33023, range(33025, 33027+1));\n #Conflict Customer_VLAN_Tag_Type_C_Tag_formerly_called_the_Q_Tag_initially_Wellfleet = 33024;\n Hayes_Microcomputers = 33072;\n VG_Laboratory_Systems = 33073;\n Bridge_Communications = range(33074, 33078+1);\n Novell_Inc = range(33079, 33080+1);\n KTI = range(33081, 33085+1);\n Logicraft = 33096;\n Network_Computing_Devices = 33097;\n Alpha_Micro = 33098;\n SNMP = 33100;\n BIIN = (33101, 33102);\n Technically_Elite_Concept = 33103;\n Rational_Corp = 33104;\n Qualcomm = (range(33105, 33107+1), range(33178, 33187+1));\n Computer_Protocol_Pty_Ltd = range(33116, 33118+1);\n Charles_River_Data_System = (range(33124, 33126+1), range(33379, 33386+1));\n XTP = 33149;\n SGI_Time_Warner_prop = 33150;\n HIPPI_FP_encapsulation = 33152;\n STP_HIPPI_ST = 33153;\n Reserved_for_HIPPI_6400 = (33154, 33155);\n Silicon_Graphics_prop = range(33156, 33164+1);\n Motorola_Computer = 33165;\n ARAI_Bunkichi = 33188;\n RAD_Network_Devices = range(33189, 33198+1);\n Apricot_Computers = range(33228, 33237+1);\n Artisoft = range(33238, 33245+1);\n Polygon = range(33254, 33263+1);\n Comsat_Labs = range(33264, 33266+1);\n SAIC = range(33267, 33269+1);\n VG_Analytical = range(33270, 33272+1); \n Quantum_Software = range(33283, 33285+1);\n Ascom_Banking_Systems = range(33313, 33314+1);\n Advanced_Encryption_Syste = range(33342, 33344+1);\n Athena_Programming = range(33407, 33410+1);\n Inst_Ind_Info_Tech = range(33434, 33435+1);\n Taurus_Controls = range(33436, 33451+1);\n Walker_Richer_Quinn = range(33452, 34451+1);\n Idea_Courier = range(34452, 34461+1);\n Computer_Network_Tech = range(34462, 34465+1);\n Gateway_Communications = range(34467, 34476+1); \n SECTRA = 34523; \n Delta_Controls = 34526; \n ############\n IPv6 = 34525; \n Internet_Protocol_version_6_IPv6 = IPv6;\n ############\n ATOMIC = 34527; \n Landis_Gyr_Powers = range(34528, 34543+1); \n Motorola = range(34560, 34576+1); \n TCP_IP_Compression = 34667; \n IP_Autonomous_Systems = 34668; \n Secure_Data = 34669; \n IEEE_Std_802_3_Ethernet_Passive_Optical_Network_EPON = 34824;\n Point_to_Point_Protocol_PPP = 34827;\n General_Switch_Management_Protocol_GSMP = 34828; \n MPLS = 34887;\n MPLS_with_upstream_assigned_label = 34888;\n Multicast_Channel_Allocation_Protocol_MCAP = 34913; \n PPP_over_Ethernet_PPPoE_Discovery_Stage = 34915;\n PPP_over_Ethernet_PPPoE_Session_Stage = 34916; \n IEEE_Std_802_1X_Port_based_network_access_control = 34958; \n IEEE_Std_802_1Q_Service_VLAN_tag_identifier_S_Tag = 34984; \n Invisible_Software = range(35478, 35479+1); \n IEEE_Std_802_Local_Experimental_Ethertype = (34997, 34998); \n IEEE_Std_802_OUI_Extended_Ethertype = 34999;\n IEEE_Std_802_11_Pre_Authentication_802_11i = 35015; \n IEEE_Std_802_1AB_Link_Layer_Discovery_Protocol_LLDP = 35020; \n IEEE_Std_802_1AE_Media_Access_Control_Security = 35045; \n Provider_Backbone_Bridging_Instance_tag = 35047; \n IEEE_Std_802_1Q_Multiple_VLAN_Registration_Protocol_MVRP = 35061; \n IEEE_Std_802_1Q_Multiple_Multicast_Registration_Protocol_MMRP = 35062;\n IEEE_Std_802_11_Fast_Roaming_Remote_Request_802_11r = 35085; \n IEEE_Std_802_21_Media_Independent_Handover_Protocol = 35095; \n IEEE_Std_802_1Qbe_Multiple_I_SID_Registration_Protocol = 35113;\n TRILL_Fine_Grained_Labeling_FGL = 35131; \n IEEE_Std_802_1Qbg_ECP_Protocol_also_used_in_802_1BR = 35136; \n TRILL_RBridge_Channel = 
35142;\n GeoNetworking_as_defined_in_ETSI_EN_302_636_4_1 = 35143;\n NSH_Network_Service_Header = 35151; \n Loopback = 36864;\n _3Com_Bridge_XNS_Sys_Mgmt = 36865;\n _3Com_Bridge_TCP_IP_Sys = 36866;\n _3Com_Bridge_loop_detect = 36867;\n Multi_Topology = 39458; \n LoWPAN_encapsulation = 41197; \n GRE_Encapsulated_Control_Messages = 47082;\n BBN_VITAL_LanBridge_cache = 65280;\n ISC_Bunker_Ramo = range(65280+1, 65295+1);\n RESERVED = 65535;\n \n #aliases\n default = RESERVED;\n##\naddReverseEnumLookup(Layer2Protocol);\nEtherType=Layer2Protocol;\n\nclass Layer3Protocol(IntEnum):\n \"\"\" Enumeration that represents Layer3Protocols \"\"\"\n IPv4 = Layer2Protocol.IPv4.value;\n IPv6 = Layer2Protocol.IPv6.value;\n default=IPv4;\n##\naddReverseEnumLookup(Layer3Protocol);\nLayer3Address = Union[IPv4Address,IPv6Address];\n_l3p_lut = {Layer3Protocol.IPv4 :IPv4Address, \\\n Layer3Protocol.IPv4.value:IPv4Address, \\\n Layer3Protocol.IPv6 :IPv6Address, \\\n Layer3Protocol.IPv6.value:IPv6Address, \\\n };\ndef lookupLayer3AddressType(value:Union[Layer3Protocol,int])->type:\n \"\"\" Translates an int or enum value to a Layer3Address type\n \n >>> expected = IPv4Address;\n >>> out = lookupLayer3AddressType(0x800);\n >>> (expected==out);\n True\n >>> out = lookupLayer3AddressType(Layer3Protocol.IPv4);\n >>> (expected==out);\n True\n >>> expected = None;\n >>> out = lookupLayer1AddressType(235)\n >>> (expected==out);\n True\n \"\"\"\n try:\n out = _l3p_lut[value];\n except KeyError:\n out = None;\n ##\n return out;\n##\n\n\nclass Layer4Protocol(Enum):\n \"\"\" Enumeration that represents Layer4 protocols \n Typically UDP or TCP\n Source: https://www.iana.org/assignments/protocol-numbers/protocol-numbers-1.csv \"\"\"\n #Date: 1551245349.7453902\n HOPOPT=0; #IPv6 Hop-by-Hop Option [RFC8200]\n ICMP =1; #Internet Control Message [RFC792]\n IGMP =2; #Internet Group Management [RFC1112]\n GGP =3; #Gateway-to-Gateway [RFC823]\n IPv4 =4; #IPv4 encapsulation [RFC2003]\n ST =5; #Stream [RFC1190][RFC1819]\n TCP =6; #Transmission Control [RFC793]\n CBT =7; #CBT [Tony_Ballardie]\n EGP =8; #Exterior Gateway Protocol [RFC888][David_Mills]\n IGP =9; #any private interior gateway (used by Cisco for their IGRP) [Internet_Assigned_Numbers_Authority]\n BBN_RCC_MON=10; #BBN RCC Monitoring [Steve_Chipman]\n NVP_II=11; #Network Voice Protocol [RFC741][Steve_Casner]\n PUP=12; #PUP [Boggs, D., J. Shoch, E. Taft, and R. Metcalfe, \n # \"PUP: An Internetwork Architecture\", XEROX Palo Alto Research Center,\n # CSL-79-10, July 1979; also in IEEE Transactions on\n # Communication, Volume COM-28, Number 4, April 1980.][[XEROX]]\n ARGUS=13; #ARGUS (deprecated) [Robert_W_Scheifler]\n EMCON=14; #EMCON []\n XNET=15; #Cross Net Debugger [Haverty, J., \"XNET Formats for Internet Protocol Version 4\",\n # IEN 158, October 1980.][Jack_Haverty]\n CHAOS=16; #Chaos [J_Noel_Chiappa]\n UDP=17; #User Datagram [RFC768][Jon_Postel]\n MUX=18; #Multiplexing [Cohen, D. and J. Postel, \"Multiplexing Protocol\", IEN 90,\n # USC/Information Sciences Institute, May 1979.][Jon_Postel]\n DCN_MEAS=19; #DCN Measurement Subsystems [David_Mills]\n HMP=20; #Host Monitoring [RFC869][Bob_Hinden]\n PRM=21; #Packet Radio Measurement [Zaw_Sing_Su]\n XNS_IDP=22; #XEROX NS IDP [\"The Ethernet, A Local Area Network: Data Link Layer and\n # Physical Layer Specification\", AA-K759B-TK, Digital Equipment Corporation, \n # Maynard, MA. 
Also as: \"The Ethernet - A Local Area Network\", Version 1.0, Digital\n # Equipment Corporation, Intel Corporation, Xerox Corporation, September 1980. \n # And: \"The Ethernet, A Local Area Network: Data Link Layer and Physical Layer\n # Specifications\", Digital, Intel and Xerox, November 1982. And: XEROX, \n # \"The Ethernet, A Local Area Network: Data Link Layer and Physical Layer Specification\",\n # X3T51/80-50, Xerox Corporation, Stamford, CT., October 1980.][[XEROX]]\n TRUNK_1=23; #Trunk-1 [Barry_Boehm]\n TRUNK_2=24; #Trunk-2 [Barry_Boehm]\n LEAF_1=25; #Leaf-1 [Barry_Boehm]\n LEAF_2=26; #Leaf-2 [Barry_Boehm]\n RDP=27; #Reliable Data Protocol [RFC908][Bob_Hinden]\n IRTP=28; #Internet Reliable Transaction [RFC938][Trudy_Miller]\n ISO_TP4=29; #ISO Transport Protocol Class 4 [RFC905][]\n NETBLT=30; #Bulk Data Transfer Protocol [RFC969][David_Clark]\n MFE_NSP=31; #MFE Network Services Protocol\n #[Shuttleworth, B., \"A Documentary of MFENet, a National Computer Network\", UCRL-52317, Lawrence Livermore Labs, Livermore, California, June 1977.][Barry_Howard]\n MERIT_INP=32; #MERIT Internodal Protocol [Hans_Werner_Braun]\n DCCP=33; #Datagram Congestion Control Protocol [RFC4340]\n _3PC=34; #Third Party Connect Protocol [Stuart_A_Friedberg]\n IDPR=35; #Inter-Domain Policy Routing Protocol [Martha_Steenstrup]\n XTP=36; #XTP [Greg_Chesson]\n DDP=37; #Datagram Delivery Protocol [Wesley_Craig]\n IDPR_CMTP=38; #IDPR Control Message Transport Proto [Martha_Steenstrup]\n TPpp=39; #TP++ Transport Protocol [Dirk_Fromhein]\n IL=40; #IL Transport Protocol [Dave_Presotto]\n IPv6=41; #IPv6 encapsulation [RFC2473]\n SDRP=42; #Source Demand Routing Protocol [Deborah_Estrin]\n IPv6_Route=43; #Routing Header for IPv6 [Steve_Deering]\n IPv6_Frag=44; #Fragment Header for IPv6 [Steve_Deering]\n IDRP=45; #Inter-Domain Routing Protocol [Sue_Hares]\n RSVP=46; #Reservation Protocol [RFC2205][RFC3209][Bob_Braden]\n GRE=47; #Generic Routing Encapsulation [RFC2784][Tony_Li]\n DSR=48; #Dynamic Source Routing Protocol [RFC4728]\n BNA=49; #BNA [Gary Salamon]\n ESP=50; #Encap Security Payload [RFC4303]\n AH=51; #Authentication Header [RFC4302]\n I_NLSP=52; #Integrated Net Layer Security TUBA [K_Robert_Glenn]\n SWIPE=53; #SWIPE (deprecated) IP with Encryption [John_Ioannidis]\n NARP=54; #NBMA Address Resolution Protocol [RFC1735]\n MOBILE=55; #IP Mobility [Charlie_Perkins]\n TLSP=56; #Transport Layer Security Protocol using Kryptonet key management [Christer_Oberg]\n SKIP=57; #SKIP [Tom_Markson]\n IPv6_ICMP=58; #ICMP for IPv6 [RFC8200]\n IPv6_NoNxt=59; #No Next Header for IPv6 [RFC8200]\n IPv6_Opts=60; #Destination Options for IPv6 [RFC8200]\n Any_Host_Internal_Protocol=61; #any host internal protocol [Internet_Assigned_Numbers_Authority]\n CFTP=62; #CFTP [Forsdick, H., \"CFTP\", Network Message, Bolt Beranek and Newman, January 1982.][Harry_Forsdick]\n Any_Local_Network=63; #any local network [Internet_Assigned_Numbers_Authority]\n SAT_EXPAK=64; #SATNET and Backroom EXPAK [Steven_Blumenthal]\n KRYPTOLAN=65; #Kryptolan [Paul Liu]\n RVD=66; #MIT Remote Virtual Disk Protocol [Michael_Greenwald]\n IPPC=67; #Internet Pluribus Packet Core [Steven_Blumenthal]\n Any_Distributed_File_System=68; #any distributed file system [Internet_Assigned_Numbers_Authority]\n SAT_MON=69; #SATNET Monitoring [Steven_Blumenthal]\n VISA=70; #VISA Protocol [Gene_Tsudik]\n IPCV=71; #Internet Packet Core Utility [Steven_Blumenthal]\n CPNX=72; #Computer Protocol Network Executive [David Mittnacht]\n CPHB=73; #Computer Protocol Heart Beat [David 
Mittnacht]\n WSN=74; #Wang Span Network [Victor Dafoulas]\n PVP=75; #Packet Video Protocol [Steve_Casner]\n BR_SAT_MON=76; #Backroom SATNET Monitoring [Steven_Blumenthal]\n SUN_ND=77; #SUN ND PROTOCOL-Temporary [William_Melohn]\n WB_MON=78; #WIDEBAND Monitoring [Steven_Blumenthal]\n WB_EXPAK=79; #WIDEBAND EXPAK [Steven_Blumenthal]\n ISO_IP=80; #ISO Internet Protocol [Marshall_T_Rose]\n VMTP=81; #VMTP [Dave_Cheriton]\n SECURE_VMTP=82; #SECURE-VMTP [Dave_Cheriton]\n VINES=83; #VINES [Brian Horn]\n TTP=84; #Transaction Transport Protocol [Jim_Stevens]\n IPTM=84; #Internet Protocol Traffic Manager [Jim_Stevens]\n NSFNET_IGP=85; #NSFNET-IGP [Hans_Werner_Braun]\n DGP=86; #Dissimilar Gateway Protocol [M/A-COM Government Systems, \n # \"Dissimilar Gateway Protocol Specification, Draft Version\",\n # Contract no. CS901145, November 16, 1987.][Mike_Little]\n TCF=87; #TCF [Guillermo_A_Loyola]\n EIGRP=88; #EIGRP [RFC7868]\n OSPFIGP=89; #OSPFIGP [RFC1583][RFC2328][RFC5340][John_Moy]\n Sprite_RPC=90; #Sprite RPC Protocol [Welch, B., \"The Sprite Remote Procedure Call System\",\n # Technical Report, UCB/Computer Science Dept., 86/302,\n #University of California at Berkeley, June 1986.][Bruce Willins]\n LARP=91; #Locus Address Resolution Protocol [Brian Horn]\n MTP=92; #Multicast Transport Protocol [Susie_Armstrong]\n AX_25=93; #AX.25 Frames [Brian_Kantor]\n IPIP=94; #IP-within-IP Encapsulation Protocol [John_Ioannidis]\n MICP=95; #Mobile Internetworking Control Pro. (deprecated) [John_Ioannidis]\n SCC_SP=96; #Semaphore Communications Sec. Pro. [Howard_Hart]\n ETHERIP=97; #Ethernet-within-IP Encapsulation [RFC3378]\n ENCAP=98; #Encapsulation Header [RFC1241][Robert_Woodburn]\n Any_Private_Encryption_Scheme=99; #any private encryption scheme [Internet_Assigned_Numbers_Authority]\n GMTP=100; #GMTP [[RXB5]]\n IFMP=101; #Ipsilon Flow Management Protocol [Bob_Hinden][November 1995, 1997.]\n PNNI=102; #PNNI over IP [Ross_Callon]\n PIM=103; #Protocol Independent Multicast [RFC7761][Dino_Farinacci]\n ARIS=104; #ARIS [Nancy_Feldman]\n SCPS=105; #SCPS [Robert_Durst]\n QNX=106; #QNX [Michael_Hunter]\n A_N=107; #Active Networks [Bob_Braden]\n IPComp=108; #IP Payload Compression Protocol [RFC2393]\n SNP=109; #Sitara Networks Protocol [Manickam_R_Sridhar]\n Compaq_Peer=110; #Compaq Peer Protocol [Victor_Volpe]\n IPX_in_IP=111; #IPX in IP [CJ_Lee]\n VRRP=112; #Virtual Router Redundancy Protocol [RFC5798]\n PGM=113; #PGM Reliable Transport Protocol [Tony_Speakman]\n Any_0_Hop_Protocol=114; #any 0-hop protocol [Internet_Assigned_Numbers_Authority]\n L2TP=115; #Layer Two Tunneling Protocol [RFC3931][Bernard_Aboba]\n DDX=116; #D-II Data Exchange (DDX) [John_Worley]\n IATP=117; #Interactive Agent Transfer Protocol [John_Murphy]\n STP=118; #Schedule Transfer Protocol [Jean_Michel_Pittet]\n SRP=119; #SpectraLink Radio Protocol [Mark_Hamilton]\n UTI=120; #UTI [Peter_Lothberg]\n SMP=121; #Simple Message Protocol [Leif_Ekblad]\n SM=122; #Simple Multicast Protocol (deprecated) [Jon_Crowcroft][draft-perlman-simple-multicast]\n PTP=123; #Performance Transparency Protocol [Michael_Welzl]\n ISIS_over_IPv4=124; # [Tony_Przygienda]\n FIRE=125; # [Criag_Partridge]\n CRTP=126; #Combat Radio Transport Protocol [Robert_Sautter]\n CRUDP=127; #Combat Radio User Datagram [Robert_Sautter]\n SSCOPMCE=128; # [Kurt_Waber]\n IPLT=129; # [[Hollbach]]\n SPS=130; #Secure Packet Shield [Bill_McIntosh]\n PIPE=131; #Private IP Encapsulation within IP [Bernhard_Petri]\n SCTP=132; #Stream Control Transmission Protocol [Randall_R_Stewart]\n FC=133; #Fibre 
Channel [Murali_Rajagopal][RFC6172]\n RSVP_E2E_IGNORE=134; # [RFC3175]\n Mobility_Header=135; # [RFC6275]\n UDPLite=136; # [RFC3828]\n MPLS_in_IP=137; # [RFC4023]\n manet=138; #MANET Protocols [RFC5498]\n HIP=139; #Host Identity Protocol [RFC7401]\n Shim6=140; #Shim6 Protocol [RFC5533]\n WESP=141; #Wrapped Encapsulating Security Payload [RFC5840]\n ROHC=142; #Robust Header Compression [RFC5858]\n Unassigned=range(143,252+1); #Unassigned [Internet_Assigned_Numbers_Authority]\n Experimentation=(253,254); #Use for experimentation and testing [RFC3692]\n Reserved=255; # [Internet_Assigned_Numbers_Authority]\n##\naddReverseEnumLookup(Layer4Protocol);\n\n\n\n####################\ndef _test(verbose=False):\n \"\"\" Helper function for unittesting this module \"\"\"\n from utilities import wrapIfScalar;\n \n tests = [];\n enums_to_check = [Layer1Protocol,Layer2Protocol,Layer3Protocol,Layer4Protocol];\n for E in enums_to_check:\n d = {};\n for k in E:\n try:\n d[k]=k.value;\n tests.append(True);\n except TypeError:\n if verbose:\n print('Hash Fail: ',k,k.value);\n ##\n tests.append(False);\n ##\n for V in wrapIfScalar(k.value):\n for v in wrapIfScalar(V):\n test = (E.valueLookup(v)==k);\n if not test and verbose:\n print('Fail: ',k,E.valueLookup(v),v);\n ##\n tests.append(test);\n ##\n ##\n ##\n ##\n passed = sum(tests);\n total =len(tests);\n return {'failed':total-passed,'total':total};\n##\n \ndef _unittest(verbose=False):\n \"\"\" Unittest for this module \"\"\"\n import lib.typeguard as typeguard;\n import doctest;\n \n results = [];\n if verbose:\n print(f'Unittest {__name__}');\n ##\n extraglobs = {};\n try:\n typeguard.groups[__name__]=True;\n \n x = doctest.testmod(extraglobs=extraglobs,optionflags=doctest.IGNORE_EXCEPTION_DETAIL);\n if verbose:\n print(x);\n ##\n results.append({'failed':x.failed,'total':x.attempted});\n results.append(_test(verbose));\n except Exception as e:\n if verbose:\n raise;\n ##\n ##\n totals = {k:sum(i[k] for i in results) for k in ('failed','total',)};\n return totals;\n##\n\n\nif __name__==\"__main__\":\n print(_unittest(verbose=True));\n##\n","sub_path":"protocol_enums.py","file_name":"protocol_enums.py","file_ext":"py","file_size_in_byte":23462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"589203907","text":"import yaml\nimport pathlib\nimport os\nfrom collections import namedtuple\nimport argparse\nimport shutil\n\nPath = namedtuple('Path', ['type','path'])\nsign_dir = 'd'\nsign_file = 'f'\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-f', '--force', action='store_true', help='Overwrite the existing objects')\nparser.add_argument('-e', '--export', action='store_true', help=\"Generate .yml\")\nparser.add_argument('-r', '--reorg', action='store_true', help=\"\")\narg = parser.parse_args()\n\n\ndef loadOrganization():\n # Load organization.yml\n file_dir = './organization.yml'\n with open(file_dir) as f:\n paths_yml = yaml.load(f, Loader=yaml.SafeLoader)\n return paths_yml\n\n\ndef isDirectory(d):\n if isinstance(d,dict):\n return True\n return False\n\n\ndef getDirName(d):\n return list(d.keys())[0]\n\n\ndef getDirContents(d):\n return d[getDirName(d)]\n\n\ndef getPaths(paths_yml):\n path = ['.']\n paths = []\n def recursiveMkdir(paths_yml,path):\n for d in paths_yml:\n if isDirectory(d):\n path.append(getDirName(d))\n paths.append(Path(sign_dir, '/'.join(path)))\n recursiveMkdir(getDirContents(d),path)\n elif d is not None:\n paths.append(Path(sign_file, '/'.join(path)+'/'+d))\n 
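        # note: each recursive call pops exactly once on exit, removing the directory name its caller appended before recursing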
path.pop()\n    recursiveMkdir(paths_yml,path)\n    return paths\n\n\ndef checkExistence(paths):\n    paths_exist = [p.path for p in paths if os.path.exists(p.path)]\n    if paths_exist:\n        print('Error: The object(s) already exist(s). If you overwrite, please specify `--force` option.')\n        for p in paths_exist:\n            print(p)\n        return True\n    return False\n\n\ndef deleteObjects(paths):\n    paths_exist = [p.path for p in paths if os.path.exists(p.path)]\n    for p in paths_exist:\n        if os.path.isdir(p):\n            shutil.rmtree(p)\n        else:\n            os.remove(p)  # rmtree only works on directories; plain files need os.remove\n\n\ndef makeObject(p):\n    if p.type==sign_dir:\n        os.makedirs(p.path)\n    else:\n        pathlib.Path(p.path).touch()\n\n\ndef main():\n    paths_yml = loadOrganization()\n    paths = getPaths(paths_yml)\n\n    if arg.force:\n        deleteObjects(paths)\n    elif checkExistence(paths):\n        return\n\n    for p in paths:\n        print('', p.path)\n        makeObject(p)\n    print('%d directories/files are successfully created!'%len(paths))\n\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"mkdr.py","file_name":"mkdr.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"239576320","text":"# Sum of even numbers after queries\nclass Solution(object):\n    def sumEvenAfterQueries(self, A, queries):\n        S = sum(x for x in A if x % 2 == 0)\n        ans = []\n\n        for x, k in queries:\n            if A[k] % 2 == 0:\n                S -= A[k]\n            A[k] += x\n            if A[k] % 2 == 0:\n                S += A[k]\n            ans.append(S)\n\n        return ans\n\n\nprint(Solution().sumEvenAfterQueries(\n    [1, 2, 3, 4], [[1, 0], [-3, 1], [-4, 0], [2, 3]]))\n","sub_path":"algorithms/901-/985.sum-of-even-numbers-after-queries.py","file_name":"985.sum-of-even-numbers-after-queries.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
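# A quick worked trace of the even-sum bookkeeping above, using the numbers from its own test call (illustrative only):
# A = [1, 2, 3, 4]            -> S = 2 + 4 = 6
# query (1, 0):  A[0] = 1 is odd, nothing subtracted; A[0] becomes 2 (even)  -> S = 6 + 2 = 8
# query (-3, 1): A[1] = 2 is even, S = 8 - 2 = 6;     A[1] becomes -1 (odd)  -> S stays 6
# query (-4, 0): A[0] = 2 is even, S = 6 - 2 = 4;     A[0] becomes -2 (even) -> S = 4 + (-2) = 2
# query (2, 3):  A[3] = 4 is even, S = 2 - 4 = -2;    A[3] becomes 6 (even)  -> S = -2 + 6 = 4
# answer: [8, 6, 2, 4]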
{"seq_id":"49537128","text":"#! /usr/bin/python3\n# create html table from csv\n# Author(s): Chris Trombley \n# Version 2 - added css class to all columns except header\n# https://github.com/ctroms/snippets/blob/master/csvtotable.py\n# https://github.com/iamliamc/CSVtoHTML/blob/master/script4.py\n# http://www.ctroms.com/blog/code/python/2011/04/20/csv-to-html-table-with-python/\n# http://stackoverflow.com/questions/4521426/delete-blank-rows-from-csv\n# http://stackoverflow.com/questions/191359/how-to-convert-a-file-to-utf-8-in-python\nimport time\nimport re\nimport sys\nimport csv\nimport io\nfrom chardet.universaldetector import UniversalDetector\n\nclass bcolors:\n    HEADER = '\\033[95m'\n    OKBLUE = '\\033[94m'\n    OKGREEN = '\\033[92m'\n    WARNING = '\\033[93m'\n    FAIL = '\\033[91m'\n    ENDC = '\\033[0m'\n    BOLD = '\\033[1m'\n    UNDERLINE = '\\033[4m'\n\ntargetFormat = 'utf-8'\ndetector = UniversalDetector()\nfileName = sys.argv[1] \nOriginalName = sys.argv[1]\n\ndef get_encoding_type(fileName):\n    detector.reset()\n    for line in open(fileName, 'rb'):\n        detector.feed(line)\n        if detector.done: break\n    detector.close()\n    return detector.result['encoding']\n\ndef convertFileWithDetection(fileName):\n    print(\"Converting '\" + fileName + \"'...\")\n    format=get_encoding_type(fileName)\n    try:\n        with io.open(fileName, 'rU', encoding=format) as sourceFile:\n            writeConversion(sourceFile)\n        print('Done.')\n        return\n    except UnicodeDecodeError:\n        pass\n    print(\"Error: failed to convert '\" + fileName + \"'.\")\n\ndef writeConversion(file):\n    global OriginalName\n    OriginalName,Ext = OriginalName.split(\".\",1)\n    OriginalName = OriginalName + \"_utf8.\" + Ext\n    with io.open(OriginalName, 'w', encoding=targetFormat) as targetFile:\n        for line in file:\n            targetFile.write(line)\n\n\ndef subToHebrew(cPlace, oldArg, subArg):\n    global header_dynamic\n    global c\n    if re.match(\"(.*)\"+oldArg+\"(.*)\", str(column)):\n        if cPlace != \"q\":# if there are more then 1 colunm with the same header\n            c[cPlace] += 1\n            if c[cPlace] == 1:\n                header_dynamic += '    <th>'+subArg+'</th>\\n'\n        else:\n            header_dynamic += '    <th>'+subArg+'</th>\\n'\n    \ndef dnx(findStr, subStr):\n    htmlfile.write('  <tr>\\n')\n    for column in row:\n        column = re.sub(findStr, subStr+round, str(column))\n        htmlfile.write('    <td>' + column + '</td>\\n')\n    htmlfile.write('  </tr>\\n')\n\n#if len(sys.argv) < 3:\n\nOriginalCodecs = get_encoding_type(fileName)\nprint(\"\\nfile codecs is \"+OriginalCodecs)\nif OriginalCodecs != \"utf-8\" and OriginalCodecs != \"UTF-8-SIG\":\n    convertFileWithDetection(fileName)\n    print (bcolors.FAIL + \"file not utf-8, converted to new file: \" + OriginalName + \" ,trying to create the HTML file...\\n\" + bcolors.ENDC)\n#    exit(0)\n\n# Create the HTML file for output\nfilename = OriginalName.split(\".\",1)\nfilename = '{0}_ConvertedHTML_{1}.txt'.format(filename[0],time.strftime(\"%Y%m%d_%H%M\"))\n    #exit(1)\n#else:\n#    filename = sys.argv[1]\n#    filename = filename.split(\".\",1)\n#    filename = '{0}.txt'.format(filename[0])\n    \nhtmlfile = open(filename,\"w\")\n\nheader_fixed = '\\n\\\n  <tr>\\n\\\n    <th>מקום</th>\\n\\\n    <th>מספר</th>\\n\\\n    <th>נהג</th>\\n\\\n    <th>נווט</th>\\n\\\n    <th>מקצה 3</th>\\n\\\n    <th>מקצה 4</th>\\n\\\n    <th>מקצה 5</th>\\n\\\n    <th>זמן</th>\\n\\\n    <th>עונשין</th>\\n\\\n    <th>פער</th>\\n\\\n  </tr>\\n\\\n'\n\n# build the dynamic header\nreaderheader = csv.reader(open(OriginalName), delimiter='\\t') # for the header\nheader_dynamic = '\\n  <tr>\\n'\nRunNum = 20\nc= []\nfor y in range(1, 130): # a list for run numbers (from 1 to RunNum) and to check duplicated in the header (101-130 )\n    c = c + [0]\n#if str(sys.argv[2]) == \"a\":\nif len(sys.argv) < 3:\n    round = \"הקפה\"\nelse:\n    round = \"מקצה\"\n    \nfor row in readerheader:\n#    print('unsorted:'+str(c))\n    c.sort(reverse=True)\n#    print('sorted:'+str(c))\n    if c[1] > 0:# stop checking rows after we processed the real header row \n        break\n    for column in row:\n        subToHebrew(101,\"(P|p)os.\",\"מקום\")\n        subToHebrew(102, \"(R|r)nk\", \"מקום\")\n        if re.match(\"(.*)(R|r)anking(.*)\", str(row)):\n            pass\n        else:\n            subToHebrew(103, \"(R|r)ank\", \"מקום\")\n        subToHebrew(104, \"(N|n)um\", \"מספר\")\n        subToHebrew(105, \"(N|n)o.\", \"מספר\")\n        subToHebrew(106, \"(B|b)ib\", \"מספר\")\n        if re.match(\"(.*)(N|n)ame(.*)\", str(column)) or re.match(\"(.*)(D|d)river(.*)\", str(column)):\n            if re.match(\"(.*)(D|d)river's (L|l)ast (N|n)ame(.*)\", str(column)):\n                c[121] += 1\n                if c[121] == 1:\n                    header_dynamic += '    <th>שם</th>\\n'\n            elif re.match(\"(.*)(L|l)ast (N|n)ame(.*)\", str(column)):\n                c[107] += 1\n                if c[107] == 1:\n                    header_dynamic += '    <th>נהג</th>\\n'\n            elif re.match(\"(.*)(F|f)irst (N|n)ame(.*)\", str(column)):\n                c[108] += 1\n                if c[108] == 1:\n                    header_dynamic += '    <th>נווט</th>\\n'\n            elif re.match(\"(.*)(D|d)river(.*)\", str(column)):\n                c[110] += 1\n                if c[110] == 1:\n                    header_dynamic += '    <th>שם</th>\\n'\n            else:\n                c[109] += 1\n                if c[109] == 1:\n                    header_dynamic += '    <th>שם</th>\\n'\n#        subToHebrew(110, \"(D|d)river\", \"שם\")\n        for i in range(1, RunNum):\n            subToHebrew(i, \"(R|r)un \"+str(i), round+' '+str(i))\n#        subToHebrew(111, \"(L|l)aps\", \"הקפות\")\n        subToHebrew(112, \"(T|t)ime\", \"זמן\")\n        subToHebrew(113, \"(G|g)ap\", \"פער\")\n        subToHebrew(114, \"(D|d)iff. with leader\", \"פער\")\n#        subToHebrew(115, \"(B|b).(L|l)ap\", \"הקפה מהירה\")\n#        subToHebrew(116, \"(B|b)est (L|l)ap\", \"הקפה מהירה\")\n        if re.match(\"(.*)(L|l)ap(.*)\", str(column)):\n            if re.match(\"(.*)(B|b).(L|l)ap(.*)\", str(column)):\n                c[116] += 1\n                if c[116] == 1:\n                    header_dynamic += '    <th>הקפה מהירה</th>\\n'\n            elif re.match(\"(.*)(B|b)est (L|l)ap(.*)\", str(column)):\n                c[117] += 1\n                if c[117] == 1:\n                    header_dynamic += '    <th>הקפה מהירה</th>\\n'\n            elif re.match(\"(.*)(L|l)aps(.*)\", str(column)):\n                c[111] += 1\n                if c[111] == 1:\n                    header_dynamic += '    <th>הקפות</th>\\n'\n            else:\n                c[119] += 1\n                if c[119] == 1:\n                    header_dynamic += '    <th>הקפה</th>\\n'\n        subToHebrew(\"q\", \"(P|p)enalty\", \"עונשין\")\n        subToHebrew(117, \"(S|s)eq\", \"סידורי\")\n        subToHebrew(118, \"(H|h)our\", \"זמן כולל\")\n        subToHebrew(120, \"(P|p)oints\", \"נקודות\")\n        subToHebrew(122, \"(C|c)ategory\", \"קטגוריה\")\n\n#    print('column:'+str(c))\nheader_dynamic += '  </tr>\\n'\n\n# set which header to use\nheader = header_dynamic\n#header = header_fixed\n\n# start building the html file\n# Open the CSV file for reading\nreader = csv.reader(open(OriginalName), delimiter='\\t')\n\n# print header to shell to check if correct\nprint (bcolors.HEADER + \"\\nThis is the header we'll use:\" + bcolors.ENDC)\nprint(header)\n\n# initialize rownum variable\nrownum = 0\n\n# write <table> tag\nhtmlfile.write('<table>\\n')\n\n# generate table contents\nfor row in reader: # Read a single row from the CSV file\n    headerItems = ['Rank', 'Rnk', 'Bib.', 'Event Ranking', 'Gap', 'Num', 'Pos.', 'Name', 'All mountain']\n    if not any(item in row for item in headerItems): # delete english header row\n\n        # write header row. assumes first row in csv contains header (its wrong but we do not use it)\n        if reader.line_num == 1:\n        # if rownum == 0:\n            htmlfile.write('  <tr>\\n')\n            for column in row:\n                column = re.sub('(l|L)aps', 'הקפות', str(column))#for lap by lap\n                column = re.sub('(l|L)ap', 'הקפה', str(column))#for lap by lap\n                htmlfile.write('    <th>' + column + '</th>\\n')\n            htmlfile.write('  </tr>\\n')\n\n        #write all other rows\n        else:\n            if any(row):#check if row not empty so not to get empty td.\n                if re.match(\"(.*)DNS(.*)\", str(row)):\n                    dnx('DNS - Did not start - Run', 'DNS - לא התחיל - ')\n                elif re.match(\"(.*)DNF(.*)\", str(row)):\n                    dnx('DNF - Do not finish - Run', 'DNF - לא סיים - ')\n                elif re.match(\"(.*)DISQ(.*)\", str(row)):\n                    dnx('DISQ - Disqualified - Run', 'DSQ - נפסל - ')\n                elif re.match(\"(.*)DSQ(.*)\", str(row)):\n                    dnx('DSQ - Disqualified - Run', 'DSQ - נפסל - ')\n                elif re.match(\"(.*)(b|B)est lap(.*)\", str(row)):\n                    htmlfile.write('  <tr>\\n')\n                    for column in row:\n                        column = re.sub('(b|B)est lap', 'הקפה מהירה', str(column))\n                        htmlfile.write('    <td>' + column + '</td>\\n')\n                    htmlfile.write('  </tr>\\n')\n                elif len(row) == 1:\n                    htmlfile.write('  <tr>\\n')\n                    for column in row:\n                        htmlfile.write('    <td>' + column + '</td>\\n')\n                    htmlfile.write('  </tr>\\n')\n                    htmlfile.write(header)\n                else:\n                    htmlfile.write('  <tr>\\n')\n                    for column in row:\n                        column = re.sub('(l|L)aps', 'הקפות', str(column))\n                        column = re.sub('(l|L)ap', 'הקפה', str(column))\n                        column = re.sub('1h', '01:', str(column))\n                        column = re.sub('2h', '02:', str(column))\n                        column = re.sub('3h', '03:', str(column))\n                        column = re.sub('4h', '04:', str(column))\n                        htmlfile.write('    <td>' + column + '</td>\\n')\n                htmlfile.write('  </tr>\\n')\n\n    #increment row count\n    rownum += 1\n\n# write </table> tag\nhtmlfile.write('</table>\\n')\n# close the new created file\nhtmlfile.close()\n# print results to shell\nprint (bcolors.OKBLUE + \"Created \" + str(rownum) + \" row table.\")\nprint (bcolors.OKGREEN + \"\\ndone, see converted file:\",filename,\"\\n\" + bcolors.ENDC)\nexit(0)\n","sub_path":"manual/csvTohtmlold20170717.py","file_name":"csvTohtmlold20170717.py","file_ext":"py","file_size_in_byte":11346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"606639113","text":"import sys\nimport json\nimport UserDict\nimport csv\n\nclass Node(object):\n    def __init__(self, nid, parent, name):\n        self.nid = nid\n        self.parent = parent\n        self.children = []\n        self.name = name\n\nclass NodeDict(UserDict.UserDict):\n    def addNodes(self, nodes):\n        \"\"\" Add every node as a child to its parent by doing two passes.\"\"\"\n        for i in (1, 2):\n            for node in nodes:\n                self.data[node.nid] = node\n                if node.parent in self.data.keys():\n                    if node.parent != \"none\" and node not in self.data[node.parent].children:\n                        self.data[node.parent].children.append(node)\n\nclass NodeJSONEncoder(json.JSONEncoder):\n    def default(self, node):\n        if type(node) == Node:\n            return {\"nid\":node.nid, \"name\":node.name, \"children\":node.children}\n        raise TypeError(\"{} is not an instance of Node\".format(node))\n\nif __name__ == \"__main__\":\n    nodes = []\n\n    with open('data_trial.csv', 'r') as f:\n        for row in f.readlines()[1:]:\n            nid, parent, name = row.split()\n            nodes.append(Node(nid, parent, name))\n\n    nodeDict = NodeDict()\n    nodeDict.addNodes(nodes)\n\n    rootNodes = [node for nid, node in nodeDict.items()\n                 if node.parent == \"none\"]\n    for rootNode in rootNodes:\n        print (NodeJSONEncoder().encode(rootNode))\n","sub_path":"5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"540786930","text":"\n\nfrom xai.brain.wordbase.verbs._lengthen import _LENGTHEN\n\n#class header\nclass _LENGTHENING(_LENGTHEN, ):\n\tdef __init__(self,): \n\t\t_LENGTHEN.__init__(self)\n\t\tself.name = \"LENGTHENING\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"lengthen\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_lengthening.py","file_name":"_lengthening.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"238358878","text":"# -*- coding: utf-8 -*-\nfrom odoo import models, fields, api, tools\nfrom odoo.tools.translate import _\nfrom odoo.exceptions import Warning\nimport datetime\n\nclass simpanan(models.Model):\n    _name = 'ksp.simpanan'\n\n    name = fields.Char(string='No. 
Transaksi', copy=False, index=True, default=lambda self: _('New'))\n    partner_id = fields.Many2one('res.partner','Nasabah')\n    simpanan_ids = fields.One2many('ksp.simpanan.detail', 'simpanan_id', string='Simpanan')\n    amount_total = fields.Float(compute='_compute_total', string='Total', store=True)\n    wajib_id = fields.Float(compute='_compute_wajib', string='Simpanan Wajib', store=True)\n    sukarela_id = fields.Float(compute='_compute_sukarela', string='Simpanan Sukarela', store=True)\n    pokok_id = fields.Float(compute='_compute_pokok', string='Simpanan Pokok', default=25000, store=True)\n    tanggal = fields.Date(string='Tanggal Pembuatan', default=fields.Date.today)  # pass the callable, so the default is evaluated per record instead of once at module load\n    pekerjaan = fields.Selection([(\"it\",\"IT\"),(\"dr\",\"Delivery\")], string='Pekerjaan')\n    \n    @api.depends('simpanan_ids')\n    def _compute_total(self):\n        for doc in self:\n            amount_total = sum(doc.simpanan_ids.mapped('total'))\n            doc.amount_total = amount_total\n    \n    @api.depends('simpanan_ids')\n    def _compute_wajib(self):\n        for doc in self:\n            amount_total = sum(doc.simpanan_ids.mapped('simpanan_wajib'))\n            doc.wajib_id = amount_total\n\n    @api.depends('simpanan_ids')\n    def _compute_sukarela(self):\n        for doc in self:\n            amount_total = sum(doc.simpanan_ids.mapped('simpanan_sukarela'))\n            doc.sukarela_id = amount_total\n\n    @api.depends('simpanan_ids')\n    def _compute_pokok(self):\n        for doc in self:\n            amount_total = sum(doc.simpanan_ids.mapped('simpanan_pokok'))\n            doc.pokok_id = amount_total\n    \n\nclass simpananDetail(models.Model):\n    _name = 'ksp.simpanan.detail'\n    simpanan_id = fields.Many2one('ksp.simpanan', string='Simpanan')\n    tanggal_simpan = fields.Date(string='Tanggal', default=fields.Date.today)  # callable default, same reason as above\n    simpanan_wajib = fields.Float(string='Wajib')\n    simpanan_sukarela = fields.Float(string='Sukarela')\n    simpanan_pokok = fields.Float(string='Pokok')\n    total = fields.Float(string='Total', compute='_get_total', store=True)\n\n    @api.multi\n    @api.depends('simpanan_wajib','simpanan_sukarela','simpanan_pokok')\n    def _get_total(self):\n        for doc in self:\n            doc.total = doc.simpanan_wajib + doc.simpanan_sukarela + doc.simpanan_pokok\n\nclass SimpananReport(models.Model):\n    _name = \"simpanan.report\"\n    _auto = False\n\n\n    partner_id = fields.Many2one('res.partner', string='Nasabah')\n    wajib_id = fields.Float(string='Simpanan Wajib')\n    sukarela_id = fields.Float(string='Simpanan Sukarela')\n    pokok_id = fields.Float(string='Simpanan Pokok')\n    amount_total = fields.Float(string='Total')\n    tanggal = fields.Date(string='Tanggal Pembuatan')\n\n    @api.model_cr\n    def init(self):\n        tools.drop_view_if_exists(self._cr, 'simpanan_report')\n        self._cr.execute(\"\"\"\n            create or replace view simpanan_report as (\n                SELECT\n                    min(ol.id) as id,\n                    ol.partner_id as partner_id,\n                    ol.wajib_id as wajib_id,\n                    ol.sukarela_id as sukarela_id,\n                    ol.pokok_id as pokok_id,\n                    ol.amount_total as amount_total,\n                    ol.tanggal as tanggal\n                FROM ksp_simpanan ol\n                GROUP BY\n                    ol.partner_id,\n                    ol.wajib_id,\n                    ol.sukarela_id,\n                    ol.pokok_id,\n                    ol.amount_total,\n                    ol.tanggal\n            )\"\"\")","sub_path":"models/simpanan.py","file_name":"simpanan.py","file_ext":"py","file_size_in_byte":3760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
{"seq_id":"90301483","text":"\"\"\"Workbook operations by Jian Li\"\"\"\n\nfrom adal import AuthenticationContext\nimport requests\n\nimport config\nimport haichao_util\n\nlogger = haichao_util.logger\nDEBUG = 0\n\ndef device_flow_session():\n    ctx = AuthenticationContext(config.AUTHORITY_URL, api_version=1.0)\n    token_r = 
ctx.acquire_token_with_username_password(\n config.RESOURCE,\n config.USERNAME,\n config.PASSWORD,\n config.CLIENT_ID)\n\n if not token_r.get('accessToken', None):\n return None\n session = requests.Session()\n _auth = '{} {}'.format(token_r.get('tokenType'), token_r.get('accessToken'))\n session.headers.update({'Authorization': _auth,\n 'Content-type': 'application/json',\n 'persistChanges': 'true'})\n return session\n\n\n#SESSION = device_flow_session()\n\n\nclass Session():\n URL_PREFIX = '{}/{}/'.format(config.RESOURCE, config.API_VERSION)\n PATH = 'me/drive/root:'\n\n def session(self):\n self.sess = device_flow_session()\n return self.sess\n\n def get_drive_path(self, file_path):\n return '{}{}'.format(self.PATH, file_path)\n\n def get_drive_children(self, file_path):\n return '{}:/children'.format(self.get_drive_path(file_path))\n\n def get_drive_content(self, file_path):\n return '{}:/content'.format(self.get_drive_path(file_path))\n\n def upload_file(self, remote_file, local_file):\n with open(local_file, 'rb') as data:\n resp = self.put(_path=self.get_drive_content(remote_file),\n data=data)\n return haichao_util.get_short_vmware_url(resp.get('webUrl'))\n\n def create_folder(self, path, name):\n self.post(_path=self.get_drive_children(path),\n data={\"name\": name,\n 'folder': {}})\n\n\nclass Excel(Session):\n '''\n Class 'Excel' is child of class 'Session'. Workbook is another name of Excel in Cloud\n This class is to deal with all operations on a workbook/worksheet/cell/table/image\n We can access the workbook file via below 2 ways, by name or by id. Please refer to\n https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/excel\n URL_PREFIX + 'me/drive/root:/CC.xlsx:/workbook/'\n URL_PREFIX + \"me/drive/items/01BRT2R2MO7WTQTLS4ANBIGUCCYCJECHOP/workbook/'\n I think that the 'name' one is much better only if we do not use special char in\n file name. 
And it is not easy to remember an ID.\n '''\n\n def __init__(self, o3file):\n self.o3handler = self.session()\n self.root = '%s/%s/%s:/workbook' %(self.URL_PREFIX, self.PATH, o3file)\n logger.debug('GET {}'.format(self.root))\n resp = self.o3handler.get(self.root).json()\n if DEBUG: useless = logger.debug('{}'.format(resp))\n self.ws = self.root + '/worksheets'\n\n def get_worksheets(self):\n '''\n Get list of worksheets in filename\n get_worksheets()\n ['Summary', 'IPSec', 'MultiLAG_P0', 'NetIOCv3', 'NetFlow', 'DynInfraRP', 'NIOCEnhance']\n '''\n logger.debug('GET {}'.format(self.ws))\n resp = self.o3handler.get(self.ws).json()\n if DEBUG: useless = logger.debug('{}'.format(resp))\n return [x['name'] for x in resp['value']]\n\n def add_worksheet(self, ws_name):\n '''\n Add a new worksheet into file \n add_worksheet(ws_name=\"sh_3\")\n ''\n '''\n state = \"Both id and name of workbook are successful\"\n data = {\"name\": ws_name}\n logger.debug('POST {} with {}'.format(self.ws, data))\n resp = self.o3handler.post(self.ws, json=data)\n if DEBUG: useless = logger.debug('{}'.format(resp))\n return resp\n\n def del_worksheet(self, ws_name):\n state = \"TODO: not done yet\"\n url = '%s/worksheets/%s' % (self.root, ws_name)\n logger.debug('DELETE {}'.format(url))\n resp = self.o3handler.delete(url)\n if DEBUG: useless = logger.debug('{}'.format(resp))\n return resp\n\n def get_cell(self, ws_name, row, column):\n '''\n Add a new worksheet into file \n get_cell(ws_name=\"Summary\", row=3, column=1)\n 'esxallcov-vmk-hostd'\n '''\n state = \"Result code is 200 but no value returned.\"\n state2 = \"Suffix .json() must be added to get() method so as to have value!\"\n url_template = 'me/drive/root:/CC.xlsx:/workbook/worksheets/NetFlow/Cell(row=3, column=1)'\n\n url = self.ws + '/%s/Cell(row=%d,column=%d)' %(ws_name, row, column)\n logger.debug('GET {}'.format(url))\n resp = self.o3handler.get(url).json()\n if DEBUG: useless = logger.debug('{}'.format(resp))\n resp = resp['text'][0][0]\n return resp\n\n def get_range(self, ws_name, block):\n '''\n Get content of a range of specified worksheet in file \n return a 2-D array of list\n get_range(ws_name=\"NetIOCv3\", block=\"A4:B9\")\n url_template = \"me/drive/root:/CC.xlsx:/workbook/worksheets('NetFlow')/range(address='A1:B4')\"\n '''\n url = self.ws + \"/%s/range(address='%s')\" %(ws_name, block)\n logger.debug('GET {}'.format(url))\n resp = self.o3handler.get(url).json()\n if DEBUG: useless = logger.debug('{}'.format(resp))\n resp = resp['text']\n return resp\n\n def get_usedrange(self, ws_name):\n '''\n Get all used cells in specified worksheet, hardly used\n get_usedrange(ws_name=\"DynInfraRP\")\n url_template = \"me/drive/root:/CC.xlsx:/workbook/worksheets('NetFlow')/UsedRange(valuesOnly=true)\"\n '''\n url = self.ws + \"/%s/UsedRange(valuesOnly=true)\" % ws_name\n logger.debug('GET {}'.format(url))\n resp = self.o3handler.get(url).json()\n if DEBUG: useless = logger.debug('{}'.format(resp))\n resp = resp['text']\n return resp\n\n def get_usedsize(self, ws_name):\n '''\n return a tuple (width, height)\n '''\n data = self.get_usedrange(ws_name)\n width = len(data[0])\n height = len(data)\n return (width, height)\n\n def update_range(self, ws_name, block, data):\n '''\n Update cells in a range of specified worksheet\n data_template = [[\"A\", \"B\"], [\"C\", \"D\"] ]\n update_range(ws_name=\"NetIOCv3\", block=\"A7:B8\", data=data_template)\n ''\n state = \"Update range/cell must use HTTP patch method\"\n url_template = 
\"me/drive/root:/CC.xlsx:/workbook/worksheets('NetFlow')/range(address='F6:G7')\"\n '''\n url = self.ws + \"/%s/range(address='%s')\" %(ws_name, block)\n logger.debug('PATCH {} with {}'.format(url, data))\n resp = self.o3handler.patch(url, json={\"values\": data})\n if DEBUG: useless = logger.debug('{}'.format(resp))\n return resp.json()\n\n def set_column_width(self, ws_name, block, width):\n '''\n Set width for a block area of specified worksheet\n set_column_width(ws_name=\"NetIOCv3\", block=\"A1:B8\", width=20)\n Refer to: https://docs.microsoft.com/en-us/graph/api/rangeformat-update?view=graph-rest-1.0\n Use HTTP patch method\n url_template = \"me/drive/root:/CC.xlsx:/workbook/worksheets('NetFlow')/range(address='F6:G7')\"\n '''\n url = self.ws + \"/%s/range(address='%s')/format\" %(ws_name, block)\n logger.debug('PATCH {} with {}'.format(url, width))\n resp = self.o3handler.patch(url, json={\"columnWidth\": width})\n if DEBUG: useless = logger.debug('{}'.format(resp))\n return resp.json()\n\nif __name__ == '__main__':\n s = Excel('vSphere/cat_result/v2019_Jian_Q2_main.xlsx')\n #print(s.get_usedsize('Summary'))\n resp = s.set_column_width('NIOCEnhance_beta', 'A1:B8', 40)\n print(resp.json())\n #import doctest\n #doctest.testmod(optionflags=doctest.ELLIPSIS)\n","sub_path":"ms_excel.py","file_name":"ms_excel.py","file_ext":"py","file_size_in_byte":7831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"85059570","text":"import json\nimport sys\nimport glob\nfrom collections import OrderedDict\nfrom User import User\nfrom utility import *\n\nitems = None\nrequests = []\ndomainsData = None\nthirdPartSites = dict()\nthirdPartSites_ios = dict()\nthirdPartSites_android = dict()\nattributes = None\nadblockList = None\nthirdPartEnabled = True\n\nwith open(\"data/domains/domains.json\") as domanisFile:\n domainsData = json.load(domanisFile)\n\nwith open('data/domains/easylist_solo_domini.txt') as file:\n raw_data = file.readlines()\n adblockList = raw_data\n\n\nif len(sys.argv) > 1:\n print(sys.argv[1])\n oS = sys.argv[1]\n if oS.lower() == \"ios\":\n osName = [\"ios\"]\n dataFile = dataFile + \"iOS.json\"\n elif oS.lower() == \"android\":\n osName = [\"andorid\"]\n dataFile = dataFile+\"Android.json\"\n else:\n Exception(\"Sistema Operativo non valido! 
(ios | android)\")\nelse:\n osName = [\"android\", \"ios\"]\n dataFileNames = [\"data/PII/data_Android.json\", \"data/PII/data_iOS.json\"]\n\nwith open(\"data/domains/third_part_domains.json\", 'w') as outJsonDomains:\n for oS in osName:\n user = None\n if oS == osName[0]:\n user = User(dataFileNames[0])\n else:\n user = User(dataFileNames[1])\n attributes = list(user.__dict__.keys())\n\n for testFile in glob.glob(\"data/testFiles_\" + oS + \"/*.har\"):\n nomeFile = testFile.split('/')[2].split('.')[0]\n nomeApp = nomeFile.split('_')[0].lower()\n domainsList = domainsData[nomeApp]\n with open(testFile) as file:\n httpData = json.load(file)\n traffic = httpData[\"log\"][\"entries\"]\n count = 0\n for index, entry in enumerate(traffic):\n for item in entry:\n raw = str(entry[item])\n if (item == \"request\"):\n if \"url\" in entry[item]:\n requestUrl = entry[item][\"url\"]\n splittedUrl = requestUrl.split(\n '://')[1].split('/')[0].split('.')\n domain = splittedUrl[len(\n splittedUrl)-2] + \".\" + splittedUrl[len(splittedUrl)-1]\n\n if (thirdPartEnabled):\n if (domain not in domainsList) and (domain+\"\\n\" in adblockList):\n\n if oS == osName[0]:\n addDomain(thirdPartSites_android,\n entry, nomeApp, domain, user, attributes)\n elif oS == osName[1]:\n addDomain(thirdPartSites_ios,\n entry, nomeApp, domain, user, attributes)\n\n addDomain(thirdPartSites, entry,\n nomeApp, domain, user, attributes)\n else:\n if (domain not in domainsList) and (domain+\"\\n\" in adblockList):\n\n if oS == osName[0]:\n addDomain(thirdPartSites_android,\n entry, nomeApp, domain, user, attributes)\n elif oS == osName[1]:\n addDomain(thirdPartSites_ios,\n entry, nomeApp, domain, user, attributes)\n\n addDomain(thirdPartSites, entry,\n nomeApp, domain, user, attributes)\n\n # else:\n # #generare file per siti non di terze parti\n # print(\"non faccio nulla\")\n\n\n# costruisco chart 1\nbuildChart1(thirdPartSites,\" malicious\")\n\n# costruisco chart2\nbuildChart2(thirdPartSites_ios, thirdPartSites_android,\" malicious\")\n\n# costruisco chart3\nbuildChart3(thirdPartSites, attributes,\" malicious\")\n\n# costruisco chart4\nbuildChart4(thirdPartSites_android, thirdPartSites_ios, attributes,\" malicious\")\n\n# costruisco chart5\nbuildChart5(thirdPartSites, \"data/category.json\", attributes,\" malicious\")\n\nwith open(\"output.json\", 'w') as outFile:\n json.dump(thirdPartSites, outFile)\n","sub_path":"src/thirdPart_malicious.py","file_name":"thirdPart_malicious.py","file_ext":"py","file_size_in_byte":4503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"} +{"seq_id":"278710252","text":"from django.template import RequestContext, loader\nfrom django.core.context_processors import csrf\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.shortcuts import render_to_response, render\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect, HttpRequest\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.views import logout\nfrom django.contrib.auth.models import User\nfrom django.template import Context, Template\n\n# Create your views here.\n@csrf_exempt \ndef search(request):\n return render_to_response('ds/search.html')\n\n@csrf_exempt \ndef bsearch(request):\n if request.POST:\n try:\n num1 = request.POST.get('num1')\n num2 = request.POST.get('num2')\n num3 = request.POST.get('num3')\n num4 = request.POST.get('num4')\n num5 = request.POST.get('num5')\n num6 = request.POST.get('num6')\n nums = 
request.POST.get('nums')\n            return render_to_response('ds/bsearch.html', {'num1' : num1, 'num2' : num2, 'num3' : num3, 'num4': num4, 'num5': num5, 'num6': num6, 'nums' : nums})\n        except:\n            return HttpResponse(\"Error\")\n    return HttpResponse(\"No post\")\n\ndef bsearch2(request, num1, num2, num3, num4, num5, num6, nums):\n    # URL parameters arrive as strings, so convert to int and sort before a binary search\n    numlist = sorted(int(n) for n in (num1, num2, num3, num4, num5, num6))\n    target = int(nums)\n    low = 0\n    high = len(numlist) - 1\n    while low <= high:\n        mid = (low + high) // 2\n        if numlist[mid] == target:\n            # report the index at which the value was found\n            return HttpResponse(mid)\n        elif numlist[mid] < target:\n            low = mid + 1\n        else:\n            high = mid - 1\n    return HttpResponse(\"Not found\")\n","sub_path":"fmanagement/ds/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"22"}
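For completeness, a minimal sketch of how the corrected bsearch2 view could be routed; the URL pattern and module layout are assumptions, not part of the scraped file (old-style django.conf.urls to match the render_to_response-era Django used above):

from django.conf.urls import url
from ds import views

urlpatterns = [
    # seven numeric segments: six list values followed by the search target
    url(r'^bsearch2/(?P<num1>\d+)/(?P<num2>\d+)/(?P<num3>\d+)/(?P<num4>\d+)/(?P<num5>\d+)/(?P<num6>\d+)/(?P<nums>\d+)/$',
        views.bsearch2,
        name='bsearch2'),
]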